author		Paul E. McKenney <paul.mckenney@linaro.org>	2012-06-11 20:39:43 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-23 10:41:53 -0400
commit		b626c1b689364859ccd2e86d5e043aeadfeb2cd4 (patch)
tree		6a9deb7bd94a1bcf30b60ca0eb8c1d5f3a2b9172 /kernel/rcutree_plugin.h
parent		bfa00b4c4028f39357d16279ff0fddf550241593 (diff)
rcu: Provide OOM handler to motivate lazy RCU callbacks
In kernels built with CONFIG_RCU_FAST_NO_HZ=y, CPUs can accumulate a
large number of lazy callbacks, which, as the name implies, will be slow
to be invoked. This can be a problem on small-memory systems, where the
default 6-second sleep for CPUs having only lazy RCU callbacks could well
be fatal. This commit therefore installs an OOM handler that ensures that
every CPU with lazy callbacks has at least one non-lazy callback, in turn
ensuring timely advancement for these callbacks.
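For context, a minimal, hypothetical illustration of the lazy/non-lazy distinction (not part of this commit; struct foo, foo_cb(), and example_free() are invented names): callbacks queued with kfree_rcu() do nothing but free memory and are counted as lazy, while callbacks queued with call_rcu() are non-lazy and therefore prod RCU into more timely processing, which is the property the OOM handler below relies on.

#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {				/* hypothetical structure freed via RCU */
	struct rcu_head rcu;
	int data;
};

static void foo_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct foo, rcu));
}

static void example_free(struct foo *p, bool lazy)
{
	if (lazy)
		kfree_rcu(p, rcu);		/* lazy: only frees memory, may be deferred on an idle CPU */
	else
		call_rcu(&p->rcu, foo_cb);	/* non-lazy: invoked in a more timely fashion */
}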
Updated to fix a bug that disabled OOM killing, noted by Lai Jiangshan.
Updated to push the for_each_rcu_flavor() loop into rcu_oom_notify_cpu(),
thus reducing the number of IPIs, as suggested by Steven Rostedt. Also
updated to make the for_each_online_cpu() loop preemptible. (Later, it might
be good to use smp_call_function(), as suggested by Peter Zijlstra.)
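A rough, hypothetical sketch of that smp_call_function() variant (not part of this commit; the rcu_oom_notify_all() name is invented) could replace the get_online_cpus()/for_each_online_cpu() loop in rcu_oom_notify() below with a single broadcast, reusing the rcu_oom_notify_cpu() handler added by this patch:

static void rcu_oom_notify_all(void)
{
	preempt_disable();
	rcu_oom_notify_cpu(NULL);	/* The broadcast below skips the local CPU. */
	preempt_enable();
	smp_call_function(rcu_oom_notify_cpu, NULL, 1);	/* IPI all other online CPUs and wait. */
}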
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Sasha Levin <levinsasha928@gmail.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	83
1 file changed, 83 insertions, 0 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 7f3244c0df01..587963689328 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -25,6 +25,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/oom.h>
 
 #define RCU_KTHREAD_PRIO 1
 
@@ -2112,6 +2113,88 @@ static void rcu_idle_count_callbacks_posted(void)
 	__this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
 }
 
+/*
+ * Data for flushing lazy RCU callbacks at OOM time.
+ */
+static atomic_t oom_callback_count;
+static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq);
+
+/*
+ * RCU OOM callback -- decrement the outstanding count and deliver the
+ * wake-up if we are the last one.
+ */
+static void rcu_oom_callback(struct rcu_head *rhp)
+{
+	if (atomic_dec_and_test(&oom_callback_count))
+		wake_up(&oom_callback_wq);
+}
+
+/*
+ * Post an rcu_oom_notify callback on the current CPU if it has at
+ * least one lazy callback.  This will unnecessarily post callbacks
+ * to CPUs that already have a non-lazy callback at the end of their
+ * callback list, but this is an infrequent operation, so accept some
+ * extra overhead to keep things simple.
+ */
+static void rcu_oom_notify_cpu(void *unused)
+{
+	struct rcu_state *rsp;
+	struct rcu_data *rdp;
+
+	for_each_rcu_flavor(rsp) {
+		rdp = __this_cpu_ptr(rsp->rda);
+		if (rdp->qlen_lazy != 0) {
+			atomic_inc(&oom_callback_count);
+			rsp->call(&rdp->oom_head, rcu_oom_callback);
+		}
+	}
+}
+
+/*
+ * If low on memory, ensure that each CPU has a non-lazy callback.
+ * This will wake up CPUs that have only lazy callbacks, in turn
+ * ensuring that they free up the corresponding memory in a timely manner.
+ * Because an uncertain amount of memory will be freed in some uncertain
+ * timeframe, we do not claim to have freed anything.
+ */
+static int rcu_oom_notify(struct notifier_block *self,
+			  unsigned long notused, void *nfreed)
+{
+	int cpu;
+
+	/* Wait for callbacks from earlier instance to complete. */
+	wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0);
+
+	/*
+	 * Prevent premature wakeup: ensure that all increments happen
+	 * before there is a chance of the counter reaching zero.
+	 */
+	atomic_set(&oom_callback_count, 1);
+
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1);
+		cond_resched();
+	}
+	put_online_cpus();
+
+	/* Unconditionally decrement: no need to wake ourselves up. */
+	atomic_dec(&oom_callback_count);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block rcu_oom_nb = {
+	.notifier_call = rcu_oom_notify
+};
+
+static int __init rcu_register_oom_notifier(void)
+{
+	register_oom_notifier(&rcu_oom_nb);
+	return 0;
+}
+early_initcall(rcu_register_oom_notifier);
+
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #ifdef CONFIG_RCU_CPU_STALL_INFO