author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /kernel/rcupdate.c
tag        v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'kernel/rcupdate.c')
 -rw-r--r--  kernel/rcupdate.c  470
 1 files changed, 470 insertions, 0 deletions

diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
new file mode 100644
index 000000000000..d00eded75d71
--- /dev/null
+++ b/kernel/rcupdate.c
@@ -0,0 +1,470 @@
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>

/* Definition for rcupdate control block. */
struct rcu_ctrlblk rcu_ctrlblk =
	{ .cur = -300, .completed = -300 };
struct rcu_ctrlblk rcu_bh_ctrlblk =
	{ .cur = -300, .completed = -300 };

/* Bookkeeping of the progress of the grace period */
struct rcu_state {
	spinlock_t	lock;	/* Guard this struct and writes to rcu_ctrlblk */
	cpumask_t	cpumask; /* CPUs that need to switch in order */
				 /* for current batch to proceed.     */
};

static struct rcu_state rcu_state ____cacheline_maxaligned_in_smp =
	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };
static struct rcu_state rcu_bh_state ____cacheline_maxaligned_in_smp =
	  {.lock = SPIN_LOCK_UNLOCKED, .cpumask = CPU_MASK_NONE };

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
static int maxbatch = 10;

/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	local_irq_restore(flags);
}
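
/*
 * Illustrative sketch: a typical way a caller might use call_rcu(). The
 * element type, list head and callback names (struct foo, foo_list,
 * foo_free_rcu) are made up for illustration; the point is the pattern of
 * unlinking an element and letting call_rcu() defer its freeing until all
 * pre-existing readers have finished.
 *
 *	struct foo {
 *		struct list_head list;
 *		int key;
 *		struct rcu_head rcu;	// embedded so call_rcu() can queue it
 *	};
 *
 *	static void foo_free_rcu(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	Writer (holding some update-side lock):
 *		list_del_rcu(&fp->list);
 *		call_rcu(&fp->rcu, foo_free_rcu);
 *
 *	Reader:
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(fp, &foo_list, list)
 *			...use fp, but do not block...
 *		rcu_read_unlock();
 */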

/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_bh_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	local_irq_restore(flags);
}
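
/*
 * Illustrative sketch: call_rcu_bh() pairs with rcu_read_lock_bh() /
 * rcu_read_unlock_bh() readers, e.g. in softirq-heavy networking paths.
 * The names bar_ptr, struct bar and bar_free_rcu are made up for
 * illustration; rcu_dereference() and rcu_assign_pointer() are assumed to
 * be available from <linux/rcupdate.h>.
 *
 *	Reader:
 *		rcu_read_lock_bh();
 *		p = rcu_dereference(bar_ptr);
 *		if (p)
 *			...use p, but do not block...
 *		rcu_read_unlock_bh();
 *
 *	Writer:
 *		old = bar_ptr;
 *		rcu_assign_pointer(bar_ptr, new);
 *		if (old)
 *			call_rcu_bh(&old->rcu, bar_free_rcu);
 */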

/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = rdp->donelist = list->next;
		list->func(list);
		list = next;
		if (++count >= maxbatch)
			break;
	}
	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}

/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_state.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_state.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
 *   period (if necessary).
 */
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_state.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp, struct rcu_state *rsp,
				int next_pending)
{
	if (next_pending)
		rcp->next_pending = 1;

	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		/* Can't change, since spin lock held. */
		cpus_andnot(rsp->cpumask, cpu_online_map, nohz_cpu_mask);

		rcp->next_pending = 0;
		/* next_pending == 0 must be visible in __rcu_process_callbacks()
		 * before it can see new value of cur.
		 */
		smp_wmb();
		rcp->cur++;
	}
}

/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp, struct rcu_state *rsp)
{
	cpu_clear(cpu, rsp->cpumask);
	if (cpus_empty(rsp->cpumask)) {
		/* batch completed! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp, rsp, 0);
	}
}

/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
			struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cacheline thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If not, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rsp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp, rsp);

	spin_unlock(&rsp->lock);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Warning! Helper for rcu_offline_cpu; do not use elsewhere without reviewing
 * the locking requirements: the list it pulls from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
	struct rcu_ctrlblk *rcp, struct rcu_state *rsp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rsp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp, rsp);
	spin_unlock_bh(&rsp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk, &rcu_state,
					&per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk, &rcu_bh_state,
					&per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
}

#else

static void rcu_offline_cpu(int cpu)
{
}

#endif

/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
			struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		*rdp->donetail = rdp->curlist;
		rdp->donetail = rdp->curtail;
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	local_irq_disable();
	if (rdp->nxtlist && !rdp->curlist) {
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * the rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rsp->lock);
			rcu_start_batch(rcp, rsp, 1);
			spin_unlock(&rsp->lock);
		}
	} else {
		local_irq_enable();
	}
	rcu_check_quiescent_state(rcp, rsp, rdp);
	if (rdp->donelist)
		rcu_do_batch(rdp);
}

static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &rcu_state,
				&__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &rcu_bh_state,
				&__get_cpu_var(rcu_bh_data));
}

void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}

static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
}

static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}

static int __devinit rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch (action) {
	case CPU_UP_PREPARE:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};

/*
 * Initializes rcu mechanism. Assumed to be called early.
 * That is before local timer (SMP) or jiffie timer (uniproc) is setup.
 * Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}

/**
 * synchronize_kernel - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void synchronize_kernel(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}
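
/*
 * Illustrative sketch: synchronize_kernel() is the blocking counterpart of
 * the call_rcu() pattern above. The updater unpublishes an element, waits
 * for a full grace period, then frees it directly (names hypothetical):
 *
 *	list_del_rcu(&fp->list);
 *	synchronize_kernel();	// all pre-existing readers have finished
 *	kfree(fp);
 */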

module_param(maxbatch, int, 0);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_kernel);