Diffstat (limited to 'kernel/rcutiny.c')
-rw-r--r--  kernel/rcutiny.c  117
1 file changed, 24 insertions(+), 93 deletions(-)
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 7bbac7d0f5ab..da775c87f27f 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -37,16 +37,17 @@
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
+#ifdef CONFIG_RCU_TRACE
+#include <trace/events/rcu.h>
+#endif /* #ifdef CONFIG_RCU_TRACE */
+
+#include "rcu.h"
 
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
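
Note: this hunk replaces TINY_RCU's dedicated callback kthread with the
RCU_SOFTIRQ handler declared above. The wiring of that softirq is outside
this hunk; the sketch below shows how the pieces presumably fit together,
using the standard open_softirq()/raise_softirq() API. The placement in
rcu_init() is an assumption, since a diff limited to this file's changed
hunks does not show the init path.

	/*
	 * Sketch, not part of this diff: invoke_rcu_callbacks()
	 * presumably raises RCU_SOFTIRQ so that rcu_process_callbacks()
	 * runs soon, and the handler is registered once at boot.
	 */
	static void invoke_rcu_callbacks(void)
	{
		raise_softirq(RCU_SOFTIRQ);
	}

	void __init rcu_init(void)
	{
		open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	}
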
@@ -96,16 +97,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-}
-
-/*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
@@ -117,7 +108,7 @@ void rcu_sched_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
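
The "+" in the condition above is deliberate, per the comment carried in
the previous hunk: both rcu_qsctr_help() calls must execute for their side
effects on the callback lists, so short-circuit evaluation has to be
defeated. For illustration only, the || form would misbehave like this:

	/* Illustration: with ||, a quiescent rcu_sched hides rcu_bh. */
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) ||  /* nonzero here... */
	    rcu_qsctr_help(&rcu_bh_ctrlblk))       /* ...so this is skipped */
		invoke_rcu_callbacks();
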
@@ -130,7 +121,7 @@ void rcu_bh_qs(int cpu)
 
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
 
@@ -154,18 +145,23 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
+	char *rn = NULL;
 	struct rcu_head *next, *list;
 	unsigned long flags;
 	RCU_TRACE(int cb_count = 0);
 
 	/* If no RCU callbacks ready to invoke, just return. */
-	if (&rcp->rcucblist == rcp->donetail)
+	if (&rcp->rcucblist == rcp->donetail) {
+		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
 		return;
+	}
 
 	/* Move the ready-to-invoke callbacks to a local list. */
 	local_irq_save(flags);
+	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
 	list = rcp->rcucblist;
 	rcp->rcucblist = *rcp->donetail;
 	*rcp->donetail = NULL;
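
The trace_rcu_batch_start()/trace_rcu_batch_end() calls added here are
wrapped in RCU_TRACE() so they vanish on !CONFIG_RCU_TRACE builds. The
macro comes from the newly included "rcu.h"; its definition follows the
usual compile-out pattern, shown here as a sketch (see "rcu.h" for the
authoritative version):

	#ifdef CONFIG_RCU_TRACE
	#define RCU_TRACE(stmt)	stmt
	#else /* #ifdef CONFIG_RCU_TRACE */
	#define RCU_TRACE(stmt)
	#endif /* #else #ifdef CONFIG_RCU_TRACE */
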
@@ -176,49 +172,26 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	local_irq_restore(flags);
 
 	/* Invoke the callbacks on the local list. */
+	RCU_TRACE(rn = rcp->name);
 	while (list) {
 		next = list->next;
 		prefetch(next);
 		debug_rcu_head_unqueue(list);
 		local_bh_disable();
-		__rcu_reclaim(list);
+		__rcu_reclaim(rn, list);
 		local_bh_enable();
 		list = next;
 		RCU_TRACE(cb_count++);
 	}
 	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
+	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed.  It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-	unsigned long work;
-	unsigned long morework;
-	unsigned long flags;
-
-	for (;;) {
-		wait_event_interruptible(rcu_kthread_wq,
-					 have_rcu_kthread_work != 0);
-		morework = rcu_boost();
-		local_irq_save(flags);
-		work = have_rcu_kthread_work;
-		have_rcu_kthread_work = morework;
-		local_irq_restore(flags);
-		if (work) {
-			rcu_process_callbacks(&rcu_sched_ctrlblk);
-			rcu_process_callbacks(&rcu_bh_ctrlblk);
-			rcu_preempt_process_callbacks();
-		}
-		schedule_timeout_interruptible(1); /* Leave CPU for others. */
-	}
-
-	return 0; /* Not reached, but needed to shut gcc up. */
+	__rcu_process_callbacks(&rcu_sched_ctrlblk);
+	__rcu_process_callbacks(&rcu_bh_ctrlblk);
+	rcu_preempt_process_callbacks();
 }
 
 /*
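
With the kthread gone, callbacks are invoked directly from RCU_SOFTIRQ
context, and the old loop's rcu_boost() hook and one-jiffy sleep disappear
from this path. The new two-argument __rcu_reclaim() lives in "rcu.h"; the
sketch below paraphrases what it presumably does with the list name rn,
which exists only so the reclaim path can tag its tracepoints per flavor:

	/*
	 * Paraphrased sketch of the "rcu.h" helper; tracepoint calls and
	 * debug hooks omitted.  __is_kfree_rcu_offset() distinguishes
	 * kfree_rcu() callbacks, whose "function" is really an offset
	 * back to the enclosing object.
	 */
	static inline void __rcu_reclaim(char *rn, struct rcu_head *head)
	{
		unsigned long offset = (unsigned long)head->func;

		if (__is_kfree_rcu_offset(offset))
			kfree((void *)head - offset);
		else
			head->func(head);
	}
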
@@ -280,45 +253,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-void rcu_barrier_bh(void)
-{
-	struct rcu_synchronize rcu;
-
-	init_rcu_head_on_stack(&rcu.head);
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_bh(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-	destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-void rcu_barrier_sched(void)
-{
-	struct rcu_synchronize rcu;
-
-	init_rcu_head_on_stack(&rcu.head);
-	init_completion(&rcu.completion);
-	/* Will wake me after RCU finished. */
-	call_rcu_sched(&rcu.head, wakeme_after_rcu);
-	/* Wait for it. */
-	wait_for_completion(&rcu.completion);
-	destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-	struct sched_param sp;
-
-	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-	sp.sched_priority = RCU_BOOST_PRIO;
-	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-	return 0;
-}
-early_initcall(rcu_spawn_kthreads);
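
The rcu_barrier_bh()/rcu_barrier_sched() bodies removed here are
flavor-independent: each just queues a callback that fires a completion
and waits for it. They presumably reappear in shared code as part of the
same series; a diff limited to kernel/rcutiny.c cannot show where. Their
generic shape, under that assumption (the name wait_rcu_gp is
illustrative), would be:

	/*
	 * Generic form of the removed barriers: pass call_rcu_bh or
	 * call_rcu_sched as crf.  rcu_synchronize and wakeme_after_rcu
	 * are the existing helpers the removed code already used.
	 */
	static void wait_rcu_gp(void (*crf)(struct rcu_head *head,
					    void (*func)(struct rcu_head *head)))
	{
		struct rcu_synchronize rcu;

		init_rcu_head_on_stack(&rcu.head);
		init_completion(&rcu.completion);
		crf(&rcu.head, wakeme_after_rcu);  /* wake us after a GP */
		wait_for_completion(&rcu.completion);
		destroy_rcu_head_on_stack(&rcu.head);
	}

The rcu_spawn_kthreads() initcall, by contrast, is simply dead once
callbacks run from RCU_SOFTIRQ, so it is deleted rather than moved.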