author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-11-02 09:54:54 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 13:31:43 -0500
commit		aea1b35e29e658d42d7ba2237f3aa7f93e18509d (patch)
tree		0221b1c12b5c3e3ed7f2bb2ffc957b09891bcb51 /kernel/rcutree.c
parent		0989cb46783188ea7346ba6490be0046b9b7a725 (diff)
rcu: Allow dyntick-idle mode for CPUs with callbacks
Currently, RCU does not permit a CPU to enter dyntick-idle mode if that CPU has any RCU callbacks queued. This means that workloads for which each CPU wakes up and does some RCU updates every few ticks will never enter dyntick-idle mode, which can result in significant unnecessary power consumption. This patch therefore permits a given CPU to enter dyntick-idle mode even if it has callbacks queued, but only if that same CPU has completed all current work for the RCU core. rcu_pending() is used to determine whether a given CPU has completed all current work for the RCU core.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
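For illustration only, the following is a minimal user-space sketch of the policy this patch introduces, not kernel code: a CPU may enter dyntick-idle mode even with callbacks queued, as long as no RCU-core work remains for it. The struct cpu_rcu_state, can_enter_dyntick_idle(), and the two boolean fields are hypothetical stand-ins for rcu_cpu_has_callbacks() and rcu_pending(); the real decision is made via rcu_prepare_for_idle() on the idle-entry path.

/*
 * Sketch of the dyntick-idle entry policy (assumed helper names,
 * not the actual kernel implementation).
 */
#include <stdbool.h>
#include <stdio.h>

struct cpu_rcu_state {
	bool has_callbacks;	/* stand-in for rcu_cpu_has_callbacks(cpu) */
	bool core_work_pending;	/* stand-in for rcu_pending(cpu) */
};

static bool can_enter_dyntick_idle(const struct cpu_rcu_state *s)
{
	/* No callbacks queued: dyntick-idle entry was always permitted. */
	if (!s->has_callbacks)
		return true;
	/* Callbacks queued: permitted only once RCU-core work is done. */
	return !s->core_work_pending;
}

int main(void)
{
	struct cpu_rcu_state busy  = { .has_callbacks = true, .core_work_pending = true };
	struct cpu_rcu_state quiet = { .has_callbacks = true, .core_work_pending = false };

	printf("busy CPU may idle:  %d\n", can_enter_dyntick_idle(&busy));	/* 0 */
	printf("quiet CPU may idle: %d\n", can_enter_dyntick_idle(&quiet));	/* 1 */
	return 0;
}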
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 9888a0ad2d4e..b1711c48a7ec 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -365,6 +365,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
 			  current->pid, current->comm,
 			  idle->pid, idle->comm); /* must be idle task! */
 	}
+	rcu_prepare_for_idle(smp_processor_id());
 	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
 	smp_mb__before_atomic_inc();  /* See above. */
 	atomic_inc(&rdtp->dynticks);
@@ -1085,6 +1086,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
 	 * callbacks are waiting on the grace period that just now
 	 * completed.
 	 */
+	rcu_schedule_wake_gp_end();
 	if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) {
 		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
 
@@ -1670,6 +1672,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
 			       &__get_cpu_var(rcu_sched_data));
 	__rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
 	rcu_preempt_process_callbacks();
+	rcu_wake_cpus_for_gp_end();
 	trace_rcu_utilization("End RCU core");
 }
 
@@ -1923,7 +1926,7 @@ static int rcu_pending(int cpu)
  * by the current CPU, even if none need be done immediately, returning
  * 1 if so.
  */
-static int rcu_needs_cpu_quick_check(int cpu)
+static int rcu_cpu_has_callbacks(int cpu)
 {
 	/* RCU callbacks either ready or pending? */
 	return per_cpu(rcu_sched_data, cpu).nxtlist ||