author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2010-02-22 20:04:59 -0500
committer Ingo Molnar <mingo@elte.hu>                      2010-02-25 04:34:55 -0500
commit    8bd93a2c5d4cab2ae17d06350daa7dbf546a4634
tree      3facbdbfbcc1b169fad20f456b0a2521adadfb25
parent    998f2ac3fea93bfa8b55c279fff68f7c5b9ab93d
rcu: Accelerate grace period if last non-dynticked CPU
Currently, rcu_needs_cpu() simply checks whether the current CPU has an outstanding RCU callback, which means that the last CPU to go into dyntick-idle mode might wait a few ticks for the relevant grace periods to complete. However, if all the other CPUs are in dyntick-idle mode, and if this CPU is in a quiescent state (which it is for RCU-bh and RCU-sched any time that we are considering going into dyntick-idle mode), then the grace period is instantly complete.

This patch therefore repeatedly invokes the RCU grace-period machinery in order to force any needed grace periods to complete quickly. It does so a limited number of times in order to prevent starvation by an RCU callback function that might pass itself to call_rcu(). However, if any CPU other than the current one is not in dyntick-idle mode, fall back to simply checking whether this CPU has outstanding callbacks (with a fix for the bug noted by Lai Jiangshan). Also take advantage of the final grace-period forcing pass, an opportunity noted by Steve Rostedt, and apply the simplified #ifdef condition suggested by Frederic Weisbecker.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-15-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
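For context (not part of the patch): rcu_needs_cpu() is consulted on the dyntick-idle entry path to decide whether the periodic tick may be stopped. The standalone userspace sketch below illustrates only that decision shape; every function body here is an illustrative stub, not kernel code.

#include <stdio.h>

/* Illustrative stub; the real rcu_needs_cpu() is the kernel function in this patch. */
static int rcu_needs_cpu(int cpu)
{
	return cpu == 0;	/* pretend CPU 0 still has pending RCU work */
}

static void stop_tick(int cpu)
{
	printf("CPU %d: no RCU work pending, stopping tick (dyntick-idle)\n", cpu);
}

static void keep_tick(int cpu)
{
	printf("CPU %d: RCU still needs this CPU, keeping the tick\n", cpu);
}

/* Idle-entry decision: only stop the periodic tick once RCU no longer needs this CPU. */
static void enter_idle(int cpu)
{
	if (rcu_needs_cpu(cpu))
		keep_tick(cpu);
	else
		stop_tick(cpu);
}

int main(void)
{
	enter_idle(0);
	enter_idle(1);
	return 0;
}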
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  69
1 file changed, 69 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index e77cdf321e13..a82566696b0b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -906,3 +906,72 @@ static void __init __rcu_init_preempt(void)
 }
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ *
+ * Because we have preemptible RCU, just check whether this CPU needs
+ * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
+ * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ */
+int rcu_needs_cpu(int cpu)
+{
+	return rcu_needs_cpu_quick_check(cpu);
+}
+
+#else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
+
+#define RCU_NEEDS_CPU_FLUSHES 5
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so.  This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ *
+ * Because we are not supporting preemptible RCU, attempt to accelerate
+ * any current grace periods so that RCU no longer needs this CPU, but
+ * only if all other CPUs are already in dynticks-idle mode.  This will
+ * allow the CPU cores to be powered down immediately, as opposed to after
+ * waiting many milliseconds for grace periods to elapse.
+ */
+int rcu_needs_cpu(int cpu)
+{
+	int c = 1;
+	int i;
+	int thatcpu;
+
+	/* Don't bother unless we are the last non-dyntick-idle CPU. */
+	for_each_cpu_not(thatcpu, nohz_cpu_mask)
+		if (thatcpu != cpu)
+			return rcu_needs_cpu_quick_check(cpu);
+
+	/* Try to push remaining RCU-sched and RCU-bh callbacks through. */
+	for (i = 0; i < RCU_NEEDS_CPU_FLUSHES && c; i++) {
+		c = 0;
+		if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+			rcu_sched_qs(cpu);
+			force_quiescent_state(&rcu_sched_state, 0);
+			__rcu_process_callbacks(&rcu_sched_state,
+						&per_cpu(rcu_sched_data, cpu));
+			c = !!per_cpu(rcu_sched_data, cpu).nxtlist;
+		}
+		if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+			rcu_bh_qs(cpu);
+			force_quiescent_state(&rcu_bh_state, 0);
+			__rcu_process_callbacks(&rcu_bh_state,
+						&per_cpu(rcu_bh_data, cpu));
+			c = !!per_cpu(rcu_bh_data, cpu).nxtlist;
+		}
+	}
+
+	/* If RCU callbacks are still pending, RCU still needs this CPU. */
+	return c;
+}
+
+#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
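The core of the CONFIG_RCU_FAST_NO_HZ path above is a bounded retry loop: push pending callbacks through at most RCU_NEEDS_CPU_FLUSHES times, so that a callback which re-posts itself via call_rcu() cannot keep the CPU out of dyntick-idle forever. The following minimal userspace sketch shows only that loop-bounding pattern; the queue variable and helper names are illustrative, not kernel APIs.

#include <stdio.h>

#define MAX_FLUSHES 5	/* mirrors RCU_NEEDS_CPU_FLUSHES: bound the passes to avoid starvation */

/* Illustrative stand-in for a per-CPU callback list; not a kernel data structure. */
static int pending_callbacks = 3;

/* Process one batch of callbacks.  A real callback may re-queue itself,
 * which is exactly why the caller bounds the number of passes. */
static void flush_one_pass(void)
{
	if (pending_callbacks > 0)
		pending_callbacks--;
}

/* Return 1 if callbacks remain after a bounded number of flush passes,
 * i.e. the CPU cannot yet be allowed to go fully idle. */
static int needs_cpu(void)
{
	int c = 1;
	int i;

	for (i = 0; i < MAX_FLUSHES && c; i++) {
		flush_one_pass();
		c = (pending_callbacks != 0);
	}
	return c;
}

int main(void)
{
	printf("needs_cpu() = %d, callbacks left = %d\n", needs_cpu(), pending_callbacks);
	return 0;
}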