path: root/kernel/rcutree_plugin.h
author    Paul E. McKenney <paul.mckenney@linaro.org>  2011-11-22 20:46:19 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-12-11 13:32:01 -0500
commit    84ad00cb61f1cb21f0b63bc6f7dc254399eb3830 (patch)
tree      7f275af0caaead54a5e8847afa308f5e2b33997a /kernel/rcutree_plugin.h
parent    3084f2f80cc8a1fd66233722d88beac0fe85e26f (diff)
rcu: Avoid needlessly IPIing CPUs at GP end
If a CPU enters dyntick-idle mode with callbacks pending, it will need
an IPI at the end of the grace period.  However, if it exits
dyntick-idle mode before the grace period ends, it will be needlessly
IPIed at the end of the grace period.

Therefore, this commit clears the per-CPU rcu_awake_at_gp_end flag when
a CPU determines that it does not need it.  This in turn requires
disabling interrupts across much of rcu_prepare_for_idle() in order to
avoid having nested interrupts clearing this state out from under us.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
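As an aside, the interrupt-disabling pattern the log message describes
can be sketched in ordinary userspace C.  The sketch below is purely
illustrative, not kernel code: SIGALRM stands in for a nested
interrupt, sigprocmask() for local_irq_save()/local_irq_restore(), and
a plain global for the per-CPU rcu_awake_at_gp_end flag.  Every name in
it is invented for the example.

/*
 * Userspace analogue of the pattern this commit adds to
 * rcu_prepare_for_idle(): test and clear a flag with the asynchronous
 * handler masked, so the handler cannot modify the flag between the
 * test and the store.
 */
#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t awake_at_gp_end = 1;  /* models the per-CPU flag */

static void fake_irq(int sig)
{
	(void)sig;
	awake_at_gp_end = 1;	/* a nested "interrupt" re-arming the flag */
}

static void prepare_for_idle(void)
{
	sigset_t blocked, saved;

	/* Analogue of local_irq_save(flags). */
	sigemptyset(&blocked);
	sigaddset(&blocked, SIGALRM);
	sigprocmask(SIG_BLOCK, &blocked, &saved);

	/* Simulate an interrupt arriving mid-update: it stays pending. */
	raise(SIGALRM);

	/*
	 * The flag is tested and cleared while the "interrupt" is masked,
	 * so the handler cannot touch it between our test and our store,
	 * which is the race the commit message describes.
	 */
	if (awake_at_gp_end) {
		awake_at_gp_end = 0;
		puts("flag cleared with the handler held off");
	}

	/* Analogue of local_irq_restore(flags): the handler runs only now. */
	sigprocmask(SIG_SETMASK, &saved, NULL);
}

int main(void)
{
	signal(SIGALRM, fake_irq);
	prepare_for_idle();
	printf("flag after restore: %d\n", (int)awake_at_gp_end);
	return 0;
}

Compiled with any C compiler, it prints that the clear happened
unmolested and that the deferred "interrupt" re-set the flag only after
the mask was dropped: the race-free ordering the commit is after.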
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 45790bfb6e8c..c4daf1e19e01 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2027,6 +2027,9 @@ int rcu_needs_cpu(int cpu)
 static void rcu_prepare_for_idle(int cpu)
 {
 	int c = 0;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	/*
 	 * If there are no callbacks on this CPU or if RCU has no further
@@ -2036,14 +2039,17 @@ static void rcu_prepare_for_idle(int cpu)
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
+		per_cpu(rcu_awake_at_gp_end, cpu) = 0;
+		local_irq_restore(flags);
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
 	if (!rcu_pending(cpu)) {
-		trace_rcu_prep_idle("Dyntick with callbacks");
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
 		per_cpu(rcu_awake_at_gp_end, cpu) = 1;
+		local_irq_restore(flags);
+		trace_rcu_prep_idle("Dyntick with callbacks");
 		return; /* Nothing to do immediately. */
 	}
 
@@ -2052,6 +2058,7 @@ static void rcu_prepare_for_idle(int cpu)
 	 * refrained from disabling the scheduling-clock tick.
 	 */
 	if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+		local_irq_restore(flags);
 		trace_rcu_prep_idle("In holdoff");
 		return;
 	}
@@ -2060,9 +2067,11 @@ static void rcu_prepare_for_idle(int cpu)
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* First time through, initialize the counter. */
 		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+		per_cpu(rcu_awake_at_gp_end, cpu) = 0;
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+		local_irq_restore(flags);
 		trace_rcu_prep_idle("Begin holdoff");
 		invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
 		return;
@@ -2095,10 +2104,13 @@ static void rcu_prepare_for_idle(int cpu)
 	 * So try forcing the callbacks through the grace period.
 	 */
 	if (c) {
+		local_irq_restore(flags);
 		trace_rcu_prep_idle("More callbacks");
 		invoke_rcu_core();
-	} else
+	} else {
+		local_irq_restore(flags);
 		trace_rcu_prep_idle("Callbacks drained");
+	}
 }
 
 /*