author     Paul E. McKenney <paul.mckenney@linaro.org>    2010-09-09 16:40:39 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-11-17 18:45:09 -0500
commit     b2c0710c464ede15e1fc52fb1e7ee9ba54cea186
tree       7524518fec8a02e53c3fab558b40a5e94f0bb5ec /kernel/rcutiny.c
parent     8e8be45e8e55daa381028aec339829929ddb53a5
rcu: move TINY_RCU from softirq to kthread
If RCU priority boosting is to be meaningful, callback invocation must
be boosted in addition to preempted RCU readers. Otherwise, in the
presence of CPU-bound real-time threads, the grace period ends, but the
callbacks don't get invoked. If the callbacks don't get invoked, the
associated memory doesn't get freed, so the system is still subject to OOM.
But it is not reasonable to priority-boost RCU_SOFTIRQ, so this commit
moves the callback invocations to a kthread, which can be boosted easily.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
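Because callback invocation now runs in task context rather than in RCU_SOFTIRQ,
the invoking thread has a scheduling policy and priority that can be raised. As a
rough sketch of what such boosting could look like (illustrative only, not part of
this commit; the helper name and the priority value are placeholders), the stock
sched_setscheduler_nocheck() interface would suffice:

    #include <linux/kthread.h>
    #include <linux/sched.h>

    /*
     * Hypothetical helper: give the callback kthread a real-time policy so
     * that it is not starved by CPU-bound SCHED_OTHER load.
     */
    static void rcu_boost_cbs_task(struct task_struct *t, int prio)
    {
            struct sched_param sp = { .sched_priority = prio };

            sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
    }

For example, calling rcu_boost_cbs_task(rcu_cbs_task, 1) after the kthread is
spawned would let it preempt ordinary SCHED_OTHER tasks, which is exactly what
raise_softirq() could not provide.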
Diffstat (limited to 'kernel/rcutiny.c')
 -rw-r--r--  kernel/rcutiny.c | 71
 1 file changed, 58 insertions, 13 deletions
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index d806735342ac..86eef29cdfb2 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -59,8 +59,15 @@ int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
+/* Controls for rcu_cbs() kthread, replacing RCU_SOFTIRQ used previously. */
+static struct task_struct *rcu_cbs_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_cbs_wq);
+static unsigned long have_rcu_cbs;
+static void invoke_rcu_cbs(void);
+
 /* Forward declarations for rcutiny_plugin.h. */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static int rcu_cbs(void *arg);
 static void __call_rcu(struct rcu_head *head,
                        void (*func)(struct rcu_head *rcu),
                        struct rcu_ctrlblk *rcp);
@@ -123,7 +130,7 @@ void rcu_sched_qs(int cpu)
 {
         if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
             rcu_qsctr_help(&rcu_bh_ctrlblk))
-                raise_softirq(RCU_SOFTIRQ);
+                invoke_rcu_cbs();
 }
 
 /*
@@ -132,7 +139,7 @@ void rcu_sched_qs(int cpu)
 void rcu_bh_qs(int cpu)
 {
         if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-                raise_softirq(RCU_SOFTIRQ);
+                invoke_rcu_cbs();
 }
 
 /*
@@ -152,10 +159,10 @@ void rcu_check_callbacks(int cpu, int user)
 }
 
 /*
- * Helper function for rcu_process_callbacks() that operates on the
- * specified rcu_ctrlkblk structure.
+ * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
+ * whose grace period has elapsed.
  */
-static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
         struct rcu_head *next, *list;
         unsigned long flags;
@@ -180,19 +187,52 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
+                local_bh_disable();
                 list->func(list);
+                local_bh_enable();
                 list = next;
         }
 }
 
 /*
- * Invoke any callbacks whose grace period has completed.
+ * This kthread invokes RCU callbacks whose grace periods have
+ * elapsed.  It is awakened as needed, and takes the place of the
+ * RCU_SOFTIRQ that was used previously for this purpose.
+ * This is a kthread, but it is never stopped, at least not until
+ * the system goes down.
+ */
+static int rcu_cbs(void *arg)
+{
+        unsigned long work;
+        unsigned long flags;
+
+        for (;;) {
+                wait_event(rcu_cbs_wq, have_rcu_cbs != 0);
+                local_irq_save(flags);
+                work = have_rcu_cbs;
+                have_rcu_cbs = 0;
+                local_irq_restore(flags);
+                if (work) {
+                        rcu_process_callbacks(&rcu_sched_ctrlblk);
+                        rcu_process_callbacks(&rcu_bh_ctrlblk);
+                        rcu_preempt_process_callbacks();
+                }
+        }
+
+        return 0;  /* Not reached, but needed to shut gcc up. */
+}
+
+/*
+ * Wake up rcu_cbs() to process callbacks now eligible for invocation.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void invoke_rcu_cbs(void)
 {
-        __rcu_process_callbacks(&rcu_sched_ctrlblk);
-        __rcu_process_callbacks(&rcu_bh_ctrlblk);
-        rcu_preempt_process_callbacks();
+        unsigned long flags;
+
+        local_irq_save(flags);
+        have_rcu_cbs = 1;
+        wake_up(&rcu_cbs_wq);
+        local_irq_restore(flags);
 }
 
 /*
@@ -282,7 +322,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-void __init rcu_init(void)
+/*
+ * Spawn the kthread that invokes RCU callbacks.
+ */
+static int __init rcu_spawn_kthreads(void)
 {
-        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+        rcu_cbs_task = kthread_run(rcu_cbs, NULL, "rcu_cbs");
+        return 0;
 }
+early_initcall(rcu_spawn_kthreads);
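The handoff this patch introduces (set a flag with interrupts disabled, wake a
waitqueue, and let a long-lived kthread drain the deferred work) is a generic
pattern. Below is a minimal, self-contained sketch of the same flag-plus-waitqueue
handoff in module form; it is illustrative only, every demo_* identifier is
invented, and none of it appears in the commit above:

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/kthread.h>
    #include <linux/module.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static unsigned long demo_have_work;
    static struct task_struct *demo_task;

    /* Consumer: sleep until the flag is set, then clear it and do the work. */
    static int demo_kthread(void *arg)
    {
            unsigned long flags;
            unsigned long work;

            while (!kthread_should_stop()) {
                    wait_event(demo_wq,
                               demo_have_work != 0 || kthread_should_stop());
                    local_irq_save(flags);
                    work = demo_have_work;
                    demo_have_work = 0;
                    local_irq_restore(flags);
                    if (work)
                            pr_info("demo: processing deferred work\n");
            }
            return 0;
    }

    /* Producer: safe to call from interrupt context, like invoke_rcu_cbs(). */
    static void demo_kick(void)
    {
            unsigned long flags;

            local_irq_save(flags);
            demo_have_work = 1;
            wake_up(&demo_wq);
            local_irq_restore(flags);
    }

    static int __init demo_init(void)
    {
            demo_task = kthread_run(demo_kthread, NULL, "demo_kthread");
            if (IS_ERR(demo_task))
                    return PTR_ERR(demo_task);
            demo_kick();
            return 0;
    }

    static void __exit demo_exit(void)
    {
            kthread_stop(demo_task);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Unlike rcu_cbs(), which deliberately never exits, this sketch gives the kthread an
exit path via kthread_should_stop() so the module can be unloaded cleanly.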