Diffstat (limited to 'kernel/rcutiny.c')

 kernel/rcutiny.c | 120
 1 file changed, 25 insertions(+), 95 deletions(-)

diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 7bbac7d0f5ab..636af6d9c6e5 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -22,13 +22,12 @@
  * For detailed explanation of Read-Copy Update mechanism see -
  * Documentation/RCU
  */
-#include <linux/moduleparam.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
 #include <linux/rcupdate.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/mutex.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -37,16 +36,17 @@
 #include <linux/cpu.h>
 #include <linux/prefetch.h>
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
+#ifdef CONFIG_RCU_TRACE
+#include <trace/events/rcu.h>
+#endif /* #else #ifdef CONFIG_RCU_TRACE */
+
+#include "rcu.h"
 
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
                        void (*func)(struct rcu_head *rcu),
                        struct rcu_ctrlblk *rcp);
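The kthread control variables give way to a conditional include of the RCU tracepoint definitions plus the shared "rcu.h" header, and the forward declarations now describe a softirq handler rather than a kthread. The RCU_TRACE() wrapper used throughout this file comes from that shared header; it compiles its statement argument away entirely on CONFIG_RCU_TRACE=n builds, essentially:

    #ifdef CONFIG_RCU_TRACE
    #define RCU_TRACE(stmt) stmt
    #else
    #define RCU_TRACE(stmt)
    #endif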
@@ -96,16 +96,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 }
 
 /*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-        have_rcu_kthread_work = 1;
-        wake_up(&rcu_kthread_wq);
-}
-
-/*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
  * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
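invoke_rcu_kthread() and its wait-queue plumbing are gone. The replacement, invoke_rcu_callbacks(), is declared above but never defined in this file, so its body lives outside this diff (presumably in rcutiny_plugin.h, which would also carry the RCU_BOOST variant). A plausible RCU_BOOST=n definition is nothing more than a softirq kick:

    /* Sketch only: assumed shape of the out-of-file replacement. */
    static void invoke_rcu_callbacks(void)
    {
            raise_softirq(RCU_SOFTIRQ);
    }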
@@ -117,7 +107,7 @@ void rcu_sched_qs(int cpu)
         local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
             rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_kthread();
+                invoke_rcu_callbacks();
         local_irq_restore(flags);
 }
 
@@ -130,7 +120,7 @@ void rcu_bh_qs(int cpu)
 
         local_irq_save(flags);
         if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-                invoke_rcu_kthread();
+                invoke_rcu_callbacks();
         local_irq_restore(flags);
 }
 
@@ -154,18 +144,23 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
+        char *rn = NULL;
         struct rcu_head *next, *list;
         unsigned long flags;
         RCU_TRACE(int cb_count = 0);
 
         /* If no RCU callbacks ready to invoke, just return. */
-        if (&rcp->rcucblist == rcp->donetail)
+        if (&rcp->rcucblist == rcp->donetail) {
+                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
+                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0));
                 return;
+        }
 
         /* Move the ready-to-invoke callbacks to a local list. */
         local_irq_save(flags);
+        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
         list = rcp->rcucblist;
         rcp->rcucblist = *rcp->donetail;
         *rcp->donetail = NULL;
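The new rn local exists so that __rcu_reclaim() can be handed a name to emit in trace events while remaining NULL on non-trace builds; it is consumed in the next hunk. The two-argument __rcu_reclaim() is defined in the shared rcu.h, not here; a simplified sketch of its shape (the in-tree version also special-cases kfree_rcu()-style callbacks):

    /* Simplified sketch, not the verbatim helper: "rn" is used only for
     * tracing, so passing NULL is harmless when CONFIG_RCU_TRACE=n. */
    static inline void __rcu_reclaim(char *rn, struct rcu_head *head)
    {
            RCU_TRACE(trace_rcu_invoke_callback(rn, head));
            head->func(head);       /* invoke the callback after its grace period */
    }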
@@ -176,49 +171,26 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
         local_irq_restore(flags);
 
         /* Invoke the callbacks on the local list. */
+        RCU_TRACE(rn = rcp->name);
         while (list) {
                 next = list->next;
                 prefetch(next);
                 debug_rcu_head_unqueue(list);
                 local_bh_disable();
-                __rcu_reclaim(list);
+                __rcu_reclaim(rn, list);
                 local_bh_enable();
                 list = next;
                 RCU_TRACE(cb_count++);
         }
         RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
+        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed.  It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-        unsigned long work;
-        unsigned long morework;
-        unsigned long flags;
-
-        for (;;) {
-                wait_event_interruptible(rcu_kthread_wq,
-                                         have_rcu_kthread_work != 0);
-                morework = rcu_boost();
-                local_irq_save(flags);
-                work = have_rcu_kthread_work;
-                have_rcu_kthread_work = morework;
-                local_irq_restore(flags);
-                if (work) {
-                        rcu_process_callbacks(&rcu_sched_ctrlblk);
-                        rcu_process_callbacks(&rcu_bh_ctrlblk);
-                        rcu_preempt_process_callbacks();
-                }
-                schedule_timeout_interruptible(1);  /* Leave CPU for others. */
-        }
-
-        return 0;  /* Not reached, but needed to shut gcc up. */
+        __rcu_process_callbacks(&rcu_sched_ctrlblk);
+        __rcu_process_callbacks(&rcu_bh_ctrlblk);
+        rcu_preempt_process_callbacks();
 }
 
 /*
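The kthread loop collapses into a conventional softirq handler: no wait queue, no boost handshake, no schedule_timeout_interruptible() yield, since the softirq machinery now provides the scheduling. One thing this hunk does not show is the registration of the new handler, which would take the standard form below (the exact call site is outside this diff):

    /* Assumed registration, typically performed once at boot: */
    open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);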
@@ -280,45 +252,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
         __call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-void rcu_barrier_bh(void)
-{
-        struct rcu_synchronize rcu;
-
-        init_rcu_head_on_stack(&rcu.head);
-        init_completion(&rcu.completion);
-        /* Will wake me after RCU finished. */
-        call_rcu_bh(&rcu.head, wakeme_after_rcu);
-        /* Wait for it. */
-        wait_for_completion(&rcu.completion);
-        destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_bh);
-
-void rcu_barrier_sched(void)
-{
-        struct rcu_synchronize rcu;
-
-        init_rcu_head_on_stack(&rcu.head);
-        init_completion(&rcu.completion);
-        /* Will wake me after RCU finished. */
-        call_rcu_sched(&rcu.head, wakeme_after_rcu);
-        /* Wait for it. */
-        wait_for_completion(&rcu.completion);
-        destroy_rcu_head_on_stack(&rcu.head);
-}
-EXPORT_SYMBOL_GPL(rcu_barrier_sched);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-        struct sched_param sp;
-
-        rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-        sp.sched_priority = RCU_BOOST_PRIO;
-        sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-        return 0;
-}
-early_initcall(rcu_spawn_kthreads);
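The rcu_barrier_bh()/rcu_barrier_sched() bodies and the kthread spawner leave this file. Since both barrier functions are EXPORT_SYMBOL_GPL'd, their definitions must reappear elsewhere (not shown in this diff) rather than disappear; the pattern they implement is the usual completion handshake, queueing a callback that wakes the sleeper once a grace period has elapsed:

    /* The pattern the removed helpers used, shown here for reference: */
    struct rcu_synchronize rcu;

    init_rcu_head_on_stack(&rcu.head);
    init_completion(&rcu.completion);
    call_rcu_bh(&rcu.head, wakeme_after_rcu);  /* wakes us after a grace period */
    wait_for_completion(&rcu.completion);
    destroy_rcu_head_on_stack(&rcu.head);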