author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-11-22 23:43:02 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 13:32:02 -0500
commit		f535a607c13c7b674e0788ca5765779aa74a01c3 (patch)
tree		433a07d16ff11d9f67e7991831cebf3cadfd9939 /kernel/rcutree_plugin.h
parent		84ad00cb61f1cb21f0b63bc6f7dc254399eb3830 (diff)
rcu: Eliminate RCU_FAST_NO_HZ grace-period hang
With the new implementation of RCU_FAST_NO_HZ, it was possible to hang
RCU grace periods as follows:
o CPU 0 attempts to go idle, cycles several times through the
rcu_prepare_for_idle() loop, then goes dyntick-idle when
RCU needs nothing more from it, while still having at least
one RCU callback pending.
o CPU 1 goes idle with no callbacks.
Both CPUs can then stay in dyntick-idle mode indefinitely, preventing
the RCU grace period from ever completing, possibly hanging the system.
This commit therefore prevents CPUs that have RCU callbacks from entering
dyntick-idle mode. This approach also eliminates the need for the
end-of-grace-period IPIs used previously.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	78
1 file changed, 2 insertions, 76 deletions
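In outline, the patch keys dyntick-idle entry solely on rcu_cpu_has_callbacks(): the rcu_pending() early exit that let a CPU sleep with callbacks still queued is gone. Below is a condensed sketch of the resulting entry check, simplified from the full function in the diff that follows (the drain/holdoff state machine is elided):

/*
 * Condensed sketch only; the actual function in the diff below
 * additionally runs the drain/holdoff state machine.
 */
static void rcu_prepare_for_idle_sketch(int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!rcu_cpu_has_callbacks(cpu)) {
		/* No callbacks at all: dyntick-idle entry is safe. */
		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
		per_cpu(rcu_dyntick_drain, cpu) = 0;
		local_irq_restore(flags);
		trace_rcu_prep_idle("No callbacks");
		return;
	}
	/*
	 * Callbacks remain queued: stay out of dyntick-idle and keep
	 * attempting to drain them, rather than sleeping and relying
	 * on an end-of-grace-period IPI that might never arrive.
	 */
	local_irq_restore(flags);
}

A CPU with queued callbacks therefore stays out of dyntick-idle until those callbacks are gone, which is what closes the two-CPU hang described above.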
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c4daf1e19e01..3d84dbc113d6 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1964,28 +1964,11 @@ static void rcu_prepare_for_idle(int cpu)
 {
 }
 
-/*
- * CPUs are never putting themselves to sleep with callbacks pending,
- * so there is no need to awaken them.
- */
-static void rcu_wake_cpus_for_gp_end(void)
-{
-}
-
-/*
- * CPUs are never putting themselves to sleep with callbacks pending,
- * so there is no need to schedule the act of awakening them.
- */
-static void rcu_schedule_wake_gp_end(void)
-{
-}
-
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
-static DEFINE_PER_CPU(bool, rcu_awake_at_gp_end);
 
 /*
  * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
@@ -2032,26 +2015,16 @@ static void rcu_prepare_for_idle(int cpu)
 	local_irq_save(flags);
 
 	/*
-	 * If there are no callbacks on this CPU or if RCU has no further
-	 * need for this CPU at the moment, enter dyntick-idle mode.
-	 * Also reset state so as to not prejudice later attempts.
+	 * If there are no callbacks on this CPU, enter dyntick-idle mode.
+	 * Also reset state to avoid prejudicing later attempts.
 	 */
 	if (!rcu_cpu_has_callbacks(cpu)) {
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
 		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_awake_at_gp_end, cpu) = 0;
 		local_irq_restore(flags);
 		trace_rcu_prep_idle("No callbacks");
 		return;
 	}
-	if (!rcu_pending(cpu)) {
-		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-		per_cpu(rcu_dyntick_drain, cpu) = 0;
-		per_cpu(rcu_awake_at_gp_end, cpu) = 1;
-		local_irq_restore(flags);
-		trace_rcu_prep_idle("Dyntick with callbacks");
-		return; /* Nothing to do immediately. */
-	}
 
 	/*
 	 * If in holdoff mode, just return.  We will presumably have
@@ -2067,7 +2040,6 @@ static void rcu_prepare_for_idle(int cpu)
 	if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* First time through, initialize the counter. */
 		per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
-		per_cpu(rcu_awake_at_gp_end, cpu) = 0;
 	} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
 		/* We have hit the limit, so time to give up. */
 		per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
@@ -2113,50 +2085,4 @@ static void rcu_prepare_for_idle(int cpu)
 	}
 }
 
-/*
- * Wake up a CPU by invoking the RCU core.  Intended for use by
- * rcu_wake_cpus_for_gp_end(), which passes this function to
- * smp_call_function_single().
- */
-static void rcu_wake_cpu(void *unused)
-{
-	trace_rcu_prep_idle("CPU awakened at GP end");
-	invoke_rcu_core();
-}
-
-/*
- * If an RCU grace period ended recently, scan the rcu_awake_at_gp_end
- * per-CPU variables, and wake up any CPUs that requested a wakeup.
- */
-static void rcu_wake_cpus_for_gp_end(void)
-{
-	int cpu;
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	if (!rdtp->wake_gp_end)
-		return;
-	rdtp->wake_gp_end = 0;
-	for_each_online_cpu(cpu) {
-		if (per_cpu(rcu_awake_at_gp_end, cpu)) {
-			per_cpu(rcu_awake_at_gp_end, cpu) = 0;
-			smp_call_function_single(cpu, rcu_wake_cpu, NULL, 0);
-		}
-	}
-}
-
-/*
- * A grace period has just ended, and so we will need to awaken CPUs
- * that now have work to do.  But we cannot send IPIs with interrupts
- * disabled, so just set a flag so that this will happen upon exit
- * from RCU core processing.
- */
-static void rcu_schedule_wake_gp_end(void)
-{
-	struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
-
-	rdtp->wake_gp_end = 1;
-}
-
-/* @@@ need tracing as well. */
-
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
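The rcu_cpu_has_callbacks() predicate that now solely gates dyntick-idle entry is defined elsewhere in rcutree_plugin.h and is not part of this diff. As a rough sketch of what such a predicate checks: the names rcu_sched_data, rcu_bh_data, ->nxtlist, and rcu_preempt_needs_cpu() below are assumptions drawn from this era's rcutree code, not from this patch.

/*
 * Rough sketch (assumed layout, not from this patch): does this CPU
 * have callbacks queued for any flavor of RCU?  In this era's rcutree,
 * ->nxtlist heads the per-CPU callback list, and preemptible RCU is
 * consulted through its own helper.
 */
static bool rcu_cpu_has_callbacks_sketch(int cpu)
{
	return per_cpu(rcu_sched_data, cpu).nxtlist ||
	       per_cpu(rcu_bh_data, cpu).nxtlist ||
	       rcu_preempt_needs_cpu(cpu);
}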