author:    Robert Richter <robert.richter@amd.com>  2010-10-25 10:28:14 -0400
committer: Robert Richter <robert.richter@amd.com>  2010-10-25 10:29:12 -0400
commit:    dbd1e66e04558a582e673bc4a9cd933ce0228d93
tree:      85f3633276282cde0a3ac558d988704eaa3e68af  /kernel/rcutiny_plugin.h
parent:    328b8f1ba50b708a1b3c0acd7c41ee1b356822f6
parent:    4a60cfa9457749f7987fd4f3c956dbba5a281129
Merge commit 'linux-2.6/master' (early part) into oprofile/core
This branch depends on these apic patches:

  apic, x86: Use BIOS settings for IBS and MCE threshold interrupt LVT offsets
  apic, x86: Check if EILVT APIC registers are available (AMD only)

Signed-off-by: Robert Richter <robert.richter@amd.com>
Diffstat (limited to 'kernel/rcutiny_plugin.h')

 kernel/rcutiny_plugin.h | 582
 1 file changed, 579 insertions(+), 3 deletions(-)
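The bulk of what this merge brings into kernel/rcutiny_plugin.h is Paul E. McKenney's TINY_PREEMPT_RCU implementation. Its callback bookkeeping rests on a single singly-linked list that three tail pointers (->donetail, ->curtail, ->nexttail) carve into "done", "current grace period", and "next grace period" segments, so completing a grace period costs only a couple of pointer assignments. Before reading the diff, here is a minimal userspace model of that technique under simplifying assumptions (single thread, no irq masking); cb, enqueue, gp_end, and process_done are illustrative names, not symbols from the patch:

#include <stdio.h>
#include <stdlib.h>

/* A callback, standing in for struct rcu_head. */
struct cb {
	struct cb *next;
	int id;
};

/* One list, segmented by tail pointers as in rcu_preempt_ctrlblk. */
static struct cb *cblist;		/* head of the whole callback list */
static struct cb **donetail = &cblist;	/* end of segment ready to invoke */
static struct cb **curtail = &cblist;	/* end of segment waiting on current GP */
static struct cb **nexttail = &cblist;	/* end of the list (next GP's segment) */

/* call_rcu() model: append at the tail of the NEXT segment. */
static void enqueue(struct cb *c)
{
	c->next = NULL;
	*nexttail = c;
	nexttail = &c->next;
}

/*
 * Model of the "Advance callbacks" step in rcu_preempt_cpu_qs():
 * ending a grace period is nothing but tail-pointer assignments.
 */
static void gp_end(int readers_still_blocked)
{
	donetail = curtail;		/* current segment is now done */
	curtail = nexttail;		/* next segment enters the new GP */
	if (!readers_still_blocked)
		donetail = nexttail;	/* no blocked readers: new GP is done too */
}

/* RCU_SOFTIRQ model: detach the done segment and invoke it. */
static void process_done(void)
{
	struct cb *list = cblist, *next;

	if (donetail == &cblist)
		return;			/* nothing ready to invoke */
	cblist = *donetail;		/* remaining callbacks become the head */
	*donetail = NULL;		/* terminate the detached done segment */
	if (curtail == donetail)
		curtail = &cblist;
	if (nexttail == donetail)
		nexttail = &cblist;
	donetail = &cblist;

	for (; list != NULL; list = next) {
		next = list->next;
		printf("invoked callback %d\n", list->id);
		free(list);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct cb *c = malloc(sizeof(*c));
		c->id = i;
		enqueue(c);
	}
	gp_end(0);	/* a grace period ends with no blocked readers */
	process_done();	/* invokes callbacks 0, 1, 2 */
	return 0;
}

Compare this with the "Advance callbacks" step in rcu_preempt_cpu_qs() and the tail-pointer fixups in rcu_preempt_remove_callbacks() below: ending a grace period never walks or copies the list.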
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index d223a92bc742..6ceca4f745ff 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -1,7 +1,7 @@
 /*
- * Read-Copy Update mechanism for mutual exclusion (tree-based version)
+ * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
  * Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -17,11 +17,587 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
- * Copyright IBM Corporation, 2009
+ * Copyright (c) 2010 Linaro
  *
  * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
+#ifdef CONFIG_TINY_PREEMPT_RCU
+
+#include <linux/delay.h>
+
+/* Global control variables for preemptible RCU. */
+struct rcu_preempt_ctrlblk {
+	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
+	struct rcu_head **nexttail;
+				/* Tasks blocked in a preemptible RCU */
+				/*  read-side critical section while a */
+				/*  preemptible-RCU grace period is in */
+				/*  progress must wait for a later grace */
+				/*  period.  This pointer points to the */
+				/*  ->next pointer of the last task that */
+				/*  must wait for a later grace period, or */
+				/*  to &->rcb.rcucblist if there is no */
+				/*  such task. */
+	struct list_head blkd_tasks;
+				/* Tasks blocked in RCU read-side critical */
+				/*  section.  Tasks are placed at the head */
+				/*  of this list and age towards the tail. */
+	struct list_head *gp_tasks;
+				/* Pointer to the first task blocking the */
+				/*  current grace period, or NULL if there */
+				/*  is no such task. */
+	struct list_head *exp_tasks;
+				/* Pointer to the first task blocking the */
+				/*  current expedited grace period, or NULL */
+				/*  if there is no such task.  If there */
+				/*  is no current expedited grace period, */
+				/*  then there cannot be any such task. */
+	u8 gpnum;		/* Current grace period. */
+	u8 gpcpu;		/* Last grace period blocked by the CPU. */
+	u8 completed;		/* Last grace period completed. */
+				/* If all three are equal, RCU is idle. */
+};
+
+static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
+	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
+	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
+};
+
+static int rcu_preempted_readers_exp(void);
+static void rcu_report_exp_done(void);
+
+/*
+ * Return true if the CPU has not yet responded to the current grace period.
+ */
+static int rcu_cpu_blocking_cur_gp(void)
+{
+	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Check for a running RCU reader.  Because there is only one CPU,
+ * there can be but one running RCU reader at a time.  ;-)
+ */
+static int rcu_preempt_running_reader(void)
+{
+	return current->rcu_read_lock_nesting;
+}
+
+/*
+ * Check for preempted RCU readers blocking any grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_any(void)
+{
+	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
+}
+
+/*
+ * Check for preempted RCU readers blocking the current grace period.
+ * If the caller needs a reliable answer, it must disable hard irqs.
+ */
+static int rcu_preempt_blocked_readers_cgp(void)
+{
+	return rcu_preempt_ctrlblk.gp_tasks != NULL;
+}
+
+/*
+ * Return true if another preemptible-RCU grace period is needed.
+ */
+static int rcu_preempt_needs_another_gp(void)
+{
+	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
+}
+
+/*
+ * Return true if a preemptible-RCU grace period is in progress.
+ * The caller must disable hardirqs.
+ */
+static int rcu_preempt_gp_in_progress(void)
+{
+	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
+}
+
+/*
+ * Record a preemptible-RCU quiescent state for the specified CPU.  Note
+ * that this just means that the task currently running on the CPU is
+ * in a quiescent state.  There might be any number of tasks blocked
+ * while in an RCU read-side critical section.
+ *
+ * Unlike the other rcu_*_qs() functions, callers to this function
+ * must disable irqs in order to protect the assignment to
+ * ->rcu_read_unlock_special.
+ *
+ * Because this is a single-CPU implementation, the only way a grace
+ * period can end is if the CPU is in a quiescent state.  The reason is
+ * that a blocked preemptible-RCU reader can exit its critical section
+ * only if the CPU is running it at the time.  Therefore, when the
+ * last task blocking the current grace period exits its RCU read-side
+ * critical section, neither the CPU nor blocked tasks will be stopping
+ * the current grace period.  (In contrast, SMP implementations
+ * might have CPUs running in RCU read-side critical sections that
+ * block later grace periods -- but this is not possible given only
+ * one CPU.)
+ */
+static void rcu_preempt_cpu_qs(void)
+{
+	/* Record both CPU and task as having responded to current GP. */
+	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
+	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+
+	/*
+	 * If there is no GP, or if blocked readers are still blocking GP,
+	 * then there is nothing more to do.
+	 */
+	if (!rcu_preempt_gp_in_progress() || rcu_preempt_blocked_readers_cgp())
+		return;
+
+	/* Advance callbacks. */
+	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
+	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
+	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;
+
+	/* If there are no blocked readers, next GP is done instantly. */
+	if (!rcu_preempt_blocked_readers_any())
+		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;
+
+	/* If there are done callbacks, make RCU_SOFTIRQ process them. */
+	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
+		raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
+ */
+static void rcu_preempt_start_gp(void)
+{
+	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {
+
+		/* Official start of GP. */
+		rcu_preempt_ctrlblk.gpnum++;
+
+		/* Any blocked RCU readers block new GP. */
+		if (rcu_preempt_blocked_readers_any())
+			rcu_preempt_ctrlblk.gp_tasks =
+				rcu_preempt_ctrlblk.blkd_tasks.next;
+
+		/* If there is no running reader, CPU is done with GP. */
+		if (!rcu_preempt_running_reader())
+			rcu_preempt_cpu_qs();
+	}
+}
+
+/*
+ * We have entered the scheduler, and the current task might soon be
+ * context-switched away from.  If this task is in an RCU read-side
+ * critical section, we will no longer be able to rely on the CPU to
+ * record that fact, so we enqueue the task on the blkd_tasks list.
+ * If the task started after the current grace period began, as recorded
+ * by ->gpcpu, we enqueue at the beginning of the list.  Otherwise
+ * before the element referenced by ->gp_tasks (or at the tail if
+ * ->gp_tasks is NULL) and point ->gp_tasks at the newly added element.
+ * The task will dequeue itself when it exits the outermost enclosing
+ * RCU read-side critical section.  Therefore, the current grace period
+ * cannot be permitted to complete until the ->gp_tasks pointer becomes
+ * NULL.
+ *
+ * Caller must disable preemption.
+ */
+void rcu_preempt_note_context_switch(void)
+{
+	struct task_struct *t = current;
+	unsigned long flags;
+
+	local_irq_save(flags); /* must exclude scheduler_tick(). */
+	if (rcu_preempt_running_reader() &&
+	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
+
+		/* Possibly blocking in an RCU read-side critical section. */
+		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
+
+		/*
+		 * If this CPU has already checked in, then this task
+		 * will hold up the next grace period rather than the
+		 * current grace period.  Queue the task accordingly.
+		 * If the task is queued for the current grace period
+		 * (i.e., this CPU has not yet passed through a quiescent
+		 * state for the current grace period), then as long
+		 * as that task remains queued, the current grace period
+		 * cannot end.
+		 */
+		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
+		if (rcu_cpu_blocking_cur_gp())
+			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
+	}
+
+	/*
+	 * Either we were not in an RCU read-side critical section to
+	 * begin with, or we have now recorded that critical section
+	 * globally.  Either way, we can now note a quiescent state
+	 * for this CPU.  Again, if we were in an RCU read-side critical
+	 * section, and if that critical section was blocking the current
+	 * grace period, then the fact that the task has been enqueued
+	 * means that the current grace period continues to be blocked.
+	 */
+	rcu_preempt_cpu_qs();
+	local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+	current->rcu_read_lock_nesting++;
+	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Handle special cases during rcu_read_unlock(), such as needing to
+ * notify RCU core processing or task having blocked during the RCU
+ * read-side critical section.
+ */
+static void rcu_read_unlock_special(struct task_struct *t)
+{
+	int empty;
+	int empty_exp;
+	unsigned long flags;
+	struct list_head *np;
+	int special;
+
+	/*
+	 * NMI handlers cannot block and cannot safely manipulate state.
+	 * They therefore cannot possibly be special, so just leave.
+	 */
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+
+	/*
+	 * If RCU core is waiting for this CPU to exit its critical section,
+	 * let it know that we have done so.
+	 */
+	special = t->rcu_read_unlock_special;
+	if (special & RCU_READ_UNLOCK_NEED_QS)
+		rcu_preempt_cpu_qs();
+
+	/* Hardware IRQ handlers cannot block. */
+	if (in_irq()) {
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* Clean up if blocked during RCU read-side critical section. */
+	if (special & RCU_READ_UNLOCK_BLOCKED) {
+		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;
+
+		/*
+		 * Remove this task from the ->blkd_tasks list and adjust
+		 * any pointers that might have been referencing it.
+		 */
+		empty = !rcu_preempt_blocked_readers_cgp();
+		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
+		np = t->rcu_node_entry.next;
+		if (np == &rcu_preempt_ctrlblk.blkd_tasks)
+			np = NULL;
+		list_del(&t->rcu_node_entry);
+		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
+			rcu_preempt_ctrlblk.gp_tasks = np;
+		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
+			rcu_preempt_ctrlblk.exp_tasks = np;
+		INIT_LIST_HEAD(&t->rcu_node_entry);
+
+		/*
+		 * If this was the last task on the current list, and if
+		 * we aren't waiting on the CPU, report the quiescent state
+		 * and start a new grace period if needed.
+		 */
+		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
+			rcu_preempt_cpu_qs();
+			rcu_preempt_start_gp();
+		}
+
+		/*
+		 * If this was the last task on the expedited lists,
+		 * then we need to wake up the waiting task.
+		 */
+		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
+			rcu_report_exp_done();
+	}
+	local_irq_restore(flags);
+}
+
+/*
+ * Tiny-preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+	struct task_struct *t = current;
+
+	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
+	--t->rcu_read_lock_nesting;
+	barrier();  /* decrement before load of ->rcu_read_unlock_special */
+	if (t->rcu_read_lock_nesting == 0 &&
+	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+		rcu_read_unlock_special(t);
+#ifdef CONFIG_PROVE_LOCKING
+	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
+ * Check for a quiescent state from the current CPU.  When a task blocks,
+ * the task is recorded in the rcu_preempt_ctrlblk structure, which is
+ * checked elsewhere.  This is called from the scheduling-clock interrupt.
+ *
+ * Caller must disable hard irqs.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+	struct task_struct *t = current;
+
+	if (rcu_preempt_gp_in_progress() &&
+	    (!rcu_preempt_running_reader() ||
+	     !rcu_cpu_blocking_cur_gp()))
+		rcu_preempt_cpu_qs();
+	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
+	    rcu_preempt_ctrlblk.rcb.donetail)
+		raise_softirq(RCU_SOFTIRQ);
+	if (rcu_preempt_gp_in_progress() &&
+	    rcu_cpu_blocking_cur_gp() &&
+	    rcu_preempt_running_reader())
+		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
+}
+
+/*
+ * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
+ * update, so this is invoked from __rcu_process_callbacks() to
+ * handle that case.  Of course, it is invoked for all flavors of
+ * RCU, but RCU callbacks can appear only on one of the lists, and
+ * neither ->nexttail nor ->donetail can possibly be NULL, so there
+ * is no need for an explicit check.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
+		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
+}
+
+/*
+ * Process callbacks for preemptible RCU.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+}
+
+/*
+ * Queue a preemptible-RCU callback for invocation after a grace period.
+ */
+void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
+{
+	unsigned long flags;
+
+	debug_rcu_head_queue(head);
+	head->func = func;
+	head->next = NULL;
+
+	local_irq_save(flags);
+	*rcu_preempt_ctrlblk.nexttail = head;
+	rcu_preempt_ctrlblk.nexttail = &head->next;
+	rcu_preempt_start_gp();  /* checks to see if GP needed. */
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+void rcu_barrier(void)
+{
+	struct rcu_synchronize rcu;
+
+	init_rcu_head_on_stack(&rcu.head);
+	init_completion(&rcu.completion);
+	/* Will wake me after RCU finished. */
+	call_rcu(&rcu.head, wakeme_after_rcu);
+	/* Wait for it. */
+	wait_for_completion(&rcu.completion);
+	destroy_rcu_head_on_stack(&rcu.head);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
+ * synchronize_rcu - wait until a grace period has elapsed.
+ *
+ * Control will return to the caller some time after a full grace
+ * period has elapsed, in other words after all currently executing RCU
+ * read-side critical sections have completed.  RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void synchronize_rcu(void)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	if (!rcu_scheduler_active)
+		return;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
+	WARN_ON_ONCE(rcu_preempt_running_reader());
+	if (!rcu_preempt_blocked_readers_any())
+		return;
+
+	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
+	rcu_barrier();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
+static unsigned long sync_rcu_preempt_exp_count;
+static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
+
+/*
+ * Return non-zero if there are any tasks in RCU read-side critical
+ * sections blocking the current preemptible-RCU expedited grace period.
+ * If there is no preemptible-RCU expedited grace period currently in
+ * progress, returns zero unconditionally.
+ */
+static int rcu_preempted_readers_exp(void)
+{
+	return rcu_preempt_ctrlblk.exp_tasks != NULL;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.
+ */
+static void rcu_report_exp_done(void)
+{
+	wake_up(&sync_rcu_preempt_exp_wq);
+}
+
+/*
+ * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
+ * is to rely on the fact that there is but one CPU, and that it is
+ * illegal for a task to invoke synchronize_rcu_expedited() while in a
+ * preemptible-RCU read-side critical section.  Therefore, any such
+ * critical sections must correspond to blocked tasks, which must therefore
+ * be on the ->blkd_tasks list.  So just record the current head of the
+ * list in the ->exp_tasks pointer, and wait for all tasks including and
+ * after the task pointed to by ->exp_tasks to drain.
+ */
+void synchronize_rcu_expedited(void)
+{
+	unsigned long flags;
+	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
+	unsigned long snap;
+
+	barrier(); /* ensure prior action seen before grace period. */
+
+	WARN_ON_ONCE(rcu_preempt_running_reader());
+
+	/*
+	 * Acquire lock so that there is only one preemptible RCU grace
+	 * period in flight.  Of course, if someone does the expedited
+	 * grace period for us while we are acquiring the lock, just leave.
+	 */
+	snap = sync_rcu_preempt_exp_count + 1;
+	mutex_lock(&sync_rcu_preempt_exp_mutex);
+	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
+		goto unlock_mb_ret; /* Others did our work for us. */
+
+	local_irq_save(flags);
+
+	/*
+	 * All RCU readers have to already be on blkd_tasks because
+	 * we cannot legally be executing in an RCU read-side critical
+	 * section.
+	 */
+
+	/* Snapshot current head of ->blkd_tasks list. */
+	rpcp->exp_tasks = rpcp->blkd_tasks.next;
+	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
+		rpcp->exp_tasks = NULL;
+	local_irq_restore(flags);
+
+	/* Wait for tail of ->blkd_tasks list to drain. */
+	if (rcu_preempted_readers_exp())
+		wait_event(sync_rcu_preempt_exp_wq,
+			   !rcu_preempted_readers_exp());
+
+	/* Clean up and exit. */
+	barrier(); /* ensure expedited GP seen before counter increment. */
+	sync_rcu_preempt_exp_count++;
+unlock_mb_ret:
+	mutex_unlock(&sync_rcu_preempt_exp_mutex);
+	barrier(); /* ensure subsequent action seen after grace period. */
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
+ * Does preemptible RCU need the CPU to stay out of dynticks mode?
+ */
+int rcu_preempt_needs_cpu(void)
+{
+	if (!rcu_preempt_running_reader())
+		rcu_preempt_cpu_qs();
+	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
+}
+
+/*
+ * Check for a task exiting while in a preemptible-RCU read-side
+ * critical section, clean up if so.  No need to issue warnings,
+ * as debug_check_no_locks_held() already does this if lockdep
+ * is enabled.
+ */
+void exit_rcu(void)
+{
+	struct task_struct *t = current;
+
+	if (t->rcu_read_lock_nesting == 0)
+		return;
+	t->rcu_read_lock_nesting = 1;
+	rcu_read_unlock();
+}
+
+#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to check.
+ */
+static void rcu_preempt_check_callbacks(void)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to remove.
+ */
+static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
+{
+}
+
+/*
+ * Because preemptible RCU does not exist, it never has any callbacks
+ * to process.
+ */
+static void rcu_preempt_process_callbacks(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #include <linux/kernel_stat.h>
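To see how the primitives added above fit together from a caller's point of view, here is a hedged kernel-style sketch; my_data, my_free, my_read, my_update, and global_ptr are hypothetical names, updaters are assumed to be serialized externally, and nothing in it is part of the patch:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative RCU-protected structure, not from the patch. */
struct my_data {
	struct rcu_head rh;
	int value;
};

static struct my_data *global_ptr;	/* RCU-protected pointer */

static void my_free(struct rcu_head *rh)
{
	kfree(container_of(rh, struct my_data, rh));
}

/*
 * Reader: under TINY_PREEMPT_RCU this critical section may be preempted,
 * which is what queues the task on ->blkd_tasks until rcu_read_unlock().
 */
static int my_read(void)
{
	int v;

	rcu_read_lock();	/* __rcu_read_lock(): nesting++ */
	v = rcu_dereference(global_ptr)->value;
	rcu_read_unlock();	/* may enter rcu_read_unlock_special() */
	return v;
}

/*
 * Updater: publish a replacement, then let call_rcu(), which appends at
 * ->nexttail above, reclaim the old version after a grace period.
 */
static void my_update(struct my_data *newp)
{
	struct my_data *oldp = global_ptr;	/* updaters serialized by caller */

	rcu_assign_pointer(global_ptr, newp);
	if (oldp)
		call_rcu(&oldp->rh, my_free);
}

The design point worth noting is that the reader side stays a bare counter increment; all of the cost of preemption support is paid only when a reader actually blocks, via ->blkd_tasks and rcu_read_unlock_special().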