author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-11-30 18:41:14 -0500
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-12-11 13:32:10 -0500
commit     f23f7fa1c8effca19b52b98fc71016109d21db59 (patch)
tree       e669b1b0cd96820949dd9deee1e5113fd667fb36 /kernel/rcutree_plugin.h
parent     dff1672d9199fffddb58fa7970ccf59005fc35f3 (diff)
rcu: Adaptive dyntick-idle preparation
If there are other CPUs active at a given point in time, then there is a
limit to what a given CPU can do to advance the current RCU grace period.
Beyond this limit, attempting to force the RCU grace period forward will
do nothing but waste energy by burning CPU cycles.
Therefore, this commit takes an adaptive approach to RCU_FAST_NO_HZ
preparations for idle. It pushes the RCU core state machine through
two cycles unconditionally, and then through up to three additional
cycles, but only as long as the RCU core has work that this CPU can
do immediately. The rcu_pending() function is used to check whether
the RCU core has such work; the sketch below distills this sequencing.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
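
The sequencing described above reduces to a small countdown. What follows
is a minimal, self-contained sketch of that counter logic, not the patch
itself: may_enter_dyntick_idle, drain_count, and rcu_has_work are
hypothetical stand-ins for the per-CPU code shown in the diff below,
which additionally runs with interrupts disabled and arms an hrtimer
when callbacks remain queued.

/*
 * Illustrative sketch only (hypothetical names).  drain_count stands in
 * for the per-CPU rcu_dyntick_drain counter and rcu_has_work for
 * rcu_pending(cpu).  Returns 1 when the CPU may stop pushing the RCU
 * core state machine and enter dyntick-idle.
 */
#define IDLE_FLUSHES 5          /* Mirrors RCU_IDLE_FLUSHES. */
#define IDLE_OPT_FLUSHES 3      /* Mirrors RCU_IDLE_OPT_FLUSHES. */

static int drain_count;

static int may_enter_dyntick_idle(int rcu_has_work)
{
        if (drain_count <= 0) {
                /* First pass: arm the countdown. */
                drain_count = IDLE_FLUSHES;
        } else if (drain_count <= IDLE_OPT_FLUSHES && !rcu_has_work) {
                /* Optional passes: bail out early once RCU needs nothing. */
                drain_count = 0;
                return 1;
        } else if (--drain_count <= 0) {
                /* All passes exhausted: give up until the holdoff expires. */
                return 1;
        }
        return 0;       /* Keep pushing the RCU core state machine. */
}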
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--    kernel/rcutree_plugin.h | 54
1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index adb6e666c6f4..8cd9efe7e81f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1994,8 +1994,40 @@ static void rcu_prepare_for_idle(int cpu)
 
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#define RCU_NEEDS_CPU_FLUSHES 5 /* Allow for callback self-repost. */
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode.  This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following three preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ *      to satisfy RCU.  Beyond this point, it is better to incur a periodic
+ *      scheduling-clock interrupt than to loop through the state machine
+ *      at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ *      optional if RCU does not need anything immediately from this
+ *      CPU, even if this CPU still has RCU callbacks queued.  The first
+ *      times through the state machine are mandatory: we need to give
+ *      the state machine a chance to communicate a quiescent state
+ *      to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ *      to sleep in dyntick-idle mode with RCU callbacks pending.  This
+ *      is sized to be roughly one RCU grace period.  Those energy-efficiency
+ *      benchmarkers who might otherwise be tempted to set this to a large
+ *      number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ *      system.  And if you are -that- concerned about energy efficiency,
+ *      just power the system down and be done with it!
+ *
+ * The values below work well in practice.  If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5      /* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3  /* Optional dyntick-idle tries. */
 #define RCU_IDLE_GP_DELAY 6     /* Roughly one grace period. */
+
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
 static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
@@ -2110,17 +2142,17 @@ static void rcu_prepare_for_idle(int cpu)
         /* Check and update the rcu_dyntick_drain sequencing. */
         if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                 /* First time through, initialize the counter. */
-                per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
-        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
+                per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+        } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+                   !rcu_pending(cpu)) {
                 /* Can we go dyntick-idle despite still having callbacks? */
-                if (!rcu_pending(cpu)) {
-                        trace_rcu_prep_idle("Dyntick with callbacks");
-                        per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
-                                      rcu_idle_gp_wait, HRTIMER_MODE_REL);
-                        return; /* Nothing more to do immediately. */
-                }
-
+                trace_rcu_prep_idle("Dyntick with callbacks");
+                per_cpu(rcu_dyntick_drain, cpu) = 0;
+                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+                hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                              rcu_idle_gp_wait, HRTIMER_MODE_REL);
+                return; /* Nothing more to do immediately. */
+        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                 /* We have hit the limit, so time to give up. */
                 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                 local_irq_restore(flags);
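
The hrtimer armed in the middle branch above fires after roughly
RCU_IDLE_GP_DELAY jiffies, waking the CPU in case its queued callbacks
need a completed grace period. The rcu_idle_gp_wait timeout is a
pre-computed ktime_t whose initialization lies outside the hunks shown
here; a hedged sketch of how such a relative timeout could be derived
from the jiffies-based constant is:

/*
 * Sketch under the assumption that the timeout is computed once during
 * setup; the actual initialization is not part of this diff.
 */
unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);  /* jiffies -> us */
ktime_t rcu_idle_gp_wait = ns_to_ktime((u64)upj * 1000); /* us -> ns */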