author    Paul E. McKenney <paul.mckenney@linaro.org>  2012-12-28 14:30:36 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2013-03-26 11:04:51 -0400
commit    c0f4dfd4f90f1667d234d21f15153ea09a2eaa66 (patch)
tree      e06fa6eef015a373849855249752ec525ca8ad4b /kernel/rcutree.c
parent    b11cc5760a9c48c870ad286e8a6d8fdb998fa58d (diff)
rcu: Make RCU_FAST_NO_HZ take advantage of numbered callbacks
Because RCU callbacks are now associated with the number of the grace period that they must wait for, CPUs can now take advance callbacks corresponding to grace periods that ended while a given CPU was in dyntick-idle mode. This eliminates the need to try forcing the RCU state machine while entering idle, thus reducing the CPU intensiveness of RCU_FAST_NO_HZ, which should increase its energy efficiency.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
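The heart of the change is the reworked rcu_cpu_has_callbacks() in the first hunk below: it now walks every RCU flavor and reports both whether the CPU has any callbacks queued and, through the new all_lazy out-parameter, whether all of them are lazy. The following is a minimal user-space sketch of just that aggregation, assuming a simplified fake_rcu_data stand-in for the per-CPU data and a qlen-based "has callbacks" test where the kernel checks the ->nxtlist pointer; cpu_has_callbacks() and the main() driver are purely illustrative, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for one RCU flavor's per-CPU callback queue (illustrative only). */
struct fake_rcu_data {
        int qlen;       /* total callbacks queued */
        int qlen_lazy;  /* subset of those that are lazy (e.g. kfree_rcu()-style) */
};

/*
 * Model of the patched rcu_cpu_has_callbacks(): return true if any flavor
 * has callbacks queued for this CPU and, if all_lazy is non-NULL, report
 * whether every queued callback is lazy.  With nothing queued at all, the
 * "all lazy" answer stays true, matching the comment in the patch.
 */
static bool cpu_has_callbacks(const struct fake_rcu_data *flavors,
                              int nflavors, bool *all_lazy)
{
        bool al = true;
        bool hc = false;

        for (int i = 0; i < nflavors; i++) {
                if (flavors[i].qlen != flavors[i].qlen_lazy)
                        al = false;
                if (flavors[i].qlen)    /* the kernel checks rdp->nxtlist instead */
                        hc = true;
        }
        if (all_lazy)
                *all_lazy = al;
        return hc;
}

int main(void)
{
        struct fake_rcu_data flavors[2] = { { .qlen = 3, .qlen_lazy = 3 },
                                            { .qlen = 2, .qlen_lazy = 2 } };
        bool all_lazy;
        bool hc = cpu_has_callbacks(flavors, 2, &all_lazy);

        printf("has callbacks: %d, all lazy: %d\n", hc, all_lazy);
        return 0;
}

Built with a C99 compiler, the driver prints "has callbacks: 1, all lazy: 1" for two flavors whose queues hold only lazy callbacks.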
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  28
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 2015bce749f9..7b1d7769872a 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2640,19 +2640,27 @@ static int rcu_pending(int cpu)
 }
 
 /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.
+ * Return true if the specified CPU has any callback.  If all_lazy is
+ * non-NULL, store an indication of whether all callbacks are lazy.
+ * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static int rcu_cpu_has_callbacks(int cpu)
+static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
 {
+        bool al = true;
+        bool hc = false;
+        struct rcu_data *rdp;
         struct rcu_state *rsp;
 
-        /* RCU callbacks either ready or pending? */
-        for_each_rcu_flavor(rsp)
-                if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
-                        return 1;
-        return 0;
+        for_each_rcu_flavor(rsp) {
+                rdp = per_cpu_ptr(rsp->rda, cpu);
+                if (rdp->qlen != rdp->qlen_lazy)
+                        al = false;
+                if (rdp->nxtlist)
+                        hc = true;
+        }
+        if (all_lazy)
+                *all_lazy = al;
+        return hc;
 }
 
 /*
@@ -2871,7 +2879,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
         rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
         atomic_set(&rdp->dynticks->dynticks,
                    (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-        rcu_prepare_for_idle_init(cpu);
         raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
 
         /* Add CPU to rcu_node bitmasks. */
@@ -2945,7 +2952,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
                  */
                 for_each_rcu_flavor(rsp)
                         rcu_cleanup_dying_cpu(rsp);
-                rcu_cleanup_after_idle(cpu);
                 break;
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
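
The all_lazy out-parameter introduced in the first hunk exists so that the idle-entry side of RCU_FAST_NO_HZ (which lives outside kernel/rcutree.c and is not shown in this diff) can decide how far to defer its next wakeup. The sketch below is a hypothetical, user-space illustration of that decision only; rcu_idle_horizon(), HZ_ASSUMED, and the horizon values are invented for illustration and are not the kernel's names or tunables.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical wakeup horizons, in jiffies; the real RCU_FAST_NO_HZ
 * tunables are not part of this file's diff. */
#define HZ_ASSUMED      250
#define SHORT_HORIZON   4                /* non-lazy callbacks: wake soon */
#define LAZY_HORIZON    (6 * HZ_ASSUMED) /* lazy-only callbacks: wake much later */

/*
 * Sketch of the decision an idle-entry path can make once it knows both
 * "any callbacks?" and "all of them lazy?": no callbacks means RCU imposes
 * no wakeup at all, lazy-only callbacks tolerate a long horizon, and
 * non-lazy callbacks want a grace period reasonably soon.
 */
static unsigned long rcu_idle_horizon(bool has_cbs, bool all_lazy)
{
        if (!has_cbs)
                return ~0UL;            /* effectively no RCU-driven wakeup */
        if (all_lazy)
                return LAZY_HORIZON;
        return SHORT_HORIZON;
}

int main(void)
{
        printf("%lu %lu %lu\n",
               rcu_idle_horizon(false, true),
               rcu_idle_horizon(true, true),
               rcu_idle_horizon(true, false));
        return 0;
}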