Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 309 |
1 files changed, 275 insertions, 34 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ef2a58c2b9d5..37fbccdf41d5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -24,16 +24,19 @@ | |||
24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 24 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/delay.h> | ||
27 | 28 | ||
28 | #ifdef CONFIG_TREE_PREEMPT_RCU | 29 | #ifdef CONFIG_TREE_PREEMPT_RCU |
29 | 30 | ||
30 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); | 31 | struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state); |
31 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); | 32 | DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data); |
32 | 33 | ||
34 | static int rcu_preempted_readers_exp(struct rcu_node *rnp); | ||
35 | |||
33 | /* | 36 | /* |
34 | * Tell them what RCU they are running. | 37 | * Tell them what RCU they are running. |
35 | */ | 38 | */ |
36 | static inline void rcu_bootup_announce(void) | 39 | static void __init rcu_bootup_announce(void) |
37 | { | 40 | { |
38 | printk(KERN_INFO | 41 | printk(KERN_INFO |
39 | "Experimental preemptable hierarchical RCU implementation.\n"); | 42 | "Experimental preemptable hierarchical RCU implementation.\n"); |
@@ -67,7 +70,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); | |||
67 | static void rcu_preempt_qs(int cpu) | 70 | static void rcu_preempt_qs(int cpu) |
68 | { | 71 | { |
69 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); | 72 | struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); |
70 | rdp->passed_quiesc_completed = rdp->completed; | 73 | rdp->passed_quiesc_completed = rdp->gpnum - 1; |
71 | barrier(); | 74 | barrier(); |
72 | rdp->passed_quiesc = 1; | 75 | rdp->passed_quiesc = 1; |
73 | } | 76 | } |
@@ -157,14 +160,58 @@ EXPORT_SYMBOL_GPL(__rcu_read_lock); | |||
157 | */ | 160 | */ |
158 | static int rcu_preempted_readers(struct rcu_node *rnp) | 161 | static int rcu_preempted_readers(struct rcu_node *rnp) |
159 | { | 162 | { |
160 | return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]); | 163 | int phase = rnp->gpnum & 0x1; |
164 | |||
165 | return !list_empty(&rnp->blocked_tasks[phase]) || | ||
166 | !list_empty(&rnp->blocked_tasks[phase + 2]); | ||
167 | } | ||
168 | |||
169 | /* | ||
170 | * Record a quiescent state for all tasks that were previously queued | ||
171 | * on the specified rcu_node structure and that were blocking the current | ||
172 | * RCU grace period. The caller must hold the specified rnp->lock with | ||
173 | * irqs disabled, and this lock is released upon return, but irqs remain | ||
174 | * disabled. | ||
175 | */ | ||
176 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
177 | __releases(rnp->lock) | ||
178 | { | ||
179 | unsigned long mask; | ||
180 | struct rcu_node *rnp_p; | ||
181 | |||
182 | if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) { | ||
183 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
184 | return; /* Still need more quiescent states! */ | ||
185 | } | ||
186 | |||
187 | rnp_p = rnp->parent; | ||
188 | if (rnp_p == NULL) { | ||
189 | /* | ||
190 | * Either there is only one rcu_node in the tree, | ||
191 | * or tasks were kicked up to root rcu_node due to | ||
192 | * CPUs going offline. | ||
193 | */ | ||
194 | rcu_report_qs_rsp(&rcu_preempt_state, flags); | ||
195 | return; | ||
196 | } | ||
197 | |||
198 | /* Report up the rest of the hierarchy. */ | ||
199 | mask = rnp->grpmask; | ||
200 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
201 | spin_lock(&rnp_p->lock); /* irqs already disabled. */ | ||
202 | rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags); | ||
161 | } | 203 | } |
162 | 204 | ||
205 | /* | ||
206 | * Handle special cases during rcu_read_unlock(), such as needing to | ||
207 | * notify RCU core processing or task having blocked during the RCU | ||
208 | * read-side critical section. | ||
209 | */ | ||
163 | static void rcu_read_unlock_special(struct task_struct *t) | 210 | static void rcu_read_unlock_special(struct task_struct *t) |
164 | { | 211 | { |
165 | int empty; | 212 | int empty; |
213 | int empty_exp; | ||
166 | unsigned long flags; | 214 | unsigned long flags; |
167 | unsigned long mask; | ||
168 | struct rcu_node *rnp; | 215 | struct rcu_node *rnp; |
169 | int special; | 216 | int special; |
170 | 217 | ||
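For reference, a minimal userspace sketch of the indexing that the new rcu_preempted_readers() relies on: readers blocking a normal grace period sit on blocked_tasks[gpnum & 0x1], and the expedited machinery added later in this patch (sync_rcu_preempt_exp_init()) splices that list two slots up, so both slots must be checked. The struct and the counters below are illustrative stand-ins, not kernel code.

#include <stdio.h>

/* Toy stand-in for struct rcu_node: counts instead of list_head lists. */
struct rnp_model {
	unsigned long gpnum;	/* current grace-period number */
	int blocked_tasks[4];	/* [0],[1]: normal GP; [2],[3]: expedited */
};

/* Mirrors the new rcu_preempted_readers(): a reader blocks the current
 * normal GP if it is queued on the list selected by gpnum & 0x1, or on
 * the expedited copy of that same list two slots later. */
static int preempted_readers(const struct rnp_model *rnp)
{
	int phase = rnp->gpnum & 0x1;

	return rnp->blocked_tasks[phase] || rnp->blocked_tasks[phase + 2];
}

int main(void)
{
	struct rnp_model rnp = { .gpnum = 7, .blocked_tasks = { 0, 2, 0, 0 } };

	printf("blocking normal GP? %d\n", preempted_readers(&rnp));	/* 1 */

	/* An expedited pass splices list [1] into list [3]; the readers
	 * still block the normal GP, so the answer must not change. */
	rnp.blocked_tasks[3] += rnp.blocked_tasks[1];
	rnp.blocked_tasks[1] = 0;
	printf("after expedited splice: %d\n", preempted_readers(&rnp)); /* 1 */
	return 0;
}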
@@ -207,36 +254,30 @@ static void rcu_read_unlock_special(struct task_struct *t) | |||
207 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | 254 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ |
208 | } | 255 | } |
209 | empty = !rcu_preempted_readers(rnp); | 256 | empty = !rcu_preempted_readers(rnp); |
257 | empty_exp = !rcu_preempted_readers_exp(rnp); | ||
258 | smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */ | ||
210 | list_del_init(&t->rcu_node_entry); | 259 | list_del_init(&t->rcu_node_entry); |
211 | t->rcu_blocked_node = NULL; | 260 | t->rcu_blocked_node = NULL; |
212 | 261 | ||
213 | /* | 262 | /* |
214 | * If this was the last task on the current list, and if | 263 | * If this was the last task on the current list, and if |
215 | * we aren't waiting on any CPUs, report the quiescent state. | 264 | * we aren't waiting on any CPUs, report the quiescent state. |
216 | * Note that both cpu_quiet_msk_finish() and cpu_quiet_msk() | 265 | * Note that rcu_report_unblock_qs_rnp() releases rnp->lock. |
217 | * drop rnp->lock and restore irq. | ||
218 | */ | 266 | */ |
219 | if (!empty && rnp->qsmask == 0 && | 267 | if (empty) |
220 | !rcu_preempted_readers(rnp)) { | ||
221 | struct rcu_node *rnp_p; | ||
222 | |||
223 | if (rnp->parent == NULL) { | ||
224 | /* Only one rcu_node in the tree. */ | ||
225 | cpu_quiet_msk_finish(&rcu_preempt_state, flags); | ||
226 | return; | ||
227 | } | ||
228 | /* Report up the rest of the hierarchy. */ | ||
229 | mask = rnp->grpmask; | ||
230 | spin_unlock_irqrestore(&rnp->lock, flags); | 268 | spin_unlock_irqrestore(&rnp->lock, flags); |
231 | rnp_p = rnp->parent; | 269 | else |
232 | spin_lock_irqsave(&rnp_p->lock, flags); | 270 | rcu_report_unblock_qs_rnp(rnp, flags); |
233 | WARN_ON_ONCE(rnp->qsmask); | 271 | |
234 | cpu_quiet_msk(mask, &rcu_preempt_state, rnp_p, flags); | 272 | /* |
235 | return; | 273 | * If this was the last task on the expedited lists, |
236 | } | 274 | * then we need to report up the rcu_node hierarchy. |
237 | spin_unlock(&rnp->lock); | 275 | */ |
276 | if (!empty_exp && !rcu_preempted_readers_exp(rnp)) | ||
277 | rcu_report_exp_rnp(&rcu_preempt_state, rnp); | ||
278 | } else { | ||
279 | local_irq_restore(flags); | ||
238 | } | 280 | } |
239 | local_irq_restore(flags); | ||
240 | } | 281 | } |
241 | 282 | ||
242 | /* | 283 | /* |
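A simplified userspace sketch of the reworked tail of rcu_read_unlock_special(): both emptiness tests are sampled before the task is dequeued, and reports are sent up only for transitions this task caused. It is only a model -- the kernel re-checks under rnp->lock, requires rnp->qsmask == 0 before the normal-GP report, and releases the lock inside rcu_report_unblock_qs_rnp().

#include <stdbool.h>
#include <stdio.h>

/* Toy rcu_node: each "list" is just a count of queued readers. */
struct rnp_model {
	int norm;	/* readers blocking the normal grace period */
	int exp;	/* readers blocking the expedited grace period */
};

/* Stand-in for rcu_report_unblock_qs_rnp(): reports only once the last
 * normal-GP blocker is gone (the kernel also checks rnp->qsmask). */
static void report_unblock_qs(struct rnp_model *rnp)
{
	if (rnp->norm == 0)
		puts("report normal-GP quiescent state");
}

/* Sample both lists, dequeue, then report only the transitions we caused. */
static void read_unlock_special(struct rnp_model *rnp, bool on_norm, bool on_exp)
{
	bool empty     = rnp->norm == 0;	/* we never blocked the normal GP */
	bool empty_exp = rnp->exp == 0;		/* we never blocked the expedited GP */

	if (on_norm)
		rnp->norm--;			/* list_del_init() stand-in */
	if (on_exp)
		rnp->exp--;

	if (!empty)
		report_unblock_qs(rnp);
	if (!empty_exp && rnp->exp == 0)
		puts("report expedited-GP quiescent state");
}

int main(void)
{
	struct rnp_model rnp = { .norm = 1, .exp = 1 };

	read_unlock_special(&rnp, true, true);	/* last reader: both reports fire */
	return 0;
}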
@@ -303,6 +344,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp) | |||
303 | * rcu_node. The reason for not just moving them to the immediate | 344 | * rcu_node. The reason for not just moving them to the immediate |
304 | * parent is to remove the need for rcu_read_unlock_special() to | 345 | * parent is to remove the need for rcu_read_unlock_special() to |
305 | * make more than two attempts to acquire the target rcu_node's lock. | 346 | * make more than two attempts to acquire the target rcu_node's lock. |
347 | * Returns true if there were tasks blocking the current RCU grace | ||
348 | * period. | ||
306 | * | 349 | * |
307 | * Returns 1 if there was previously a task blocking the current grace | 350 | * Returns 1 if there was previously a task blocking the current grace |
308 | * period on the specified rcu_node structure. | 351 | * period on the specified rcu_node structure. |
@@ -316,7 +359,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
316 | int i; | 359 | int i; |
317 | struct list_head *lp; | 360 | struct list_head *lp; |
318 | struct list_head *lp_root; | 361 | struct list_head *lp_root; |
319 | int retval = rcu_preempted_readers(rnp); | 362 | int retval = 0; |
320 | struct rcu_node *rnp_root = rcu_get_root(rsp); | 363 | struct rcu_node *rnp_root = rcu_get_root(rsp); |
321 | struct task_struct *tp; | 364 | struct task_struct *tp; |
322 | 365 | ||
@@ -326,7 +369,9 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
326 | } | 369 | } |
327 | WARN_ON_ONCE(rnp != rdp->mynode && | 370 | WARN_ON_ONCE(rnp != rdp->mynode && |
328 | (!list_empty(&rnp->blocked_tasks[0]) || | 371 | (!list_empty(&rnp->blocked_tasks[0]) || |
329 | !list_empty(&rnp->blocked_tasks[1]))); | 372 | !list_empty(&rnp->blocked_tasks[1]) || |
373 | !list_empty(&rnp->blocked_tasks[2]) || | ||
374 | !list_empty(&rnp->blocked_tasks[3]))); | ||
330 | 375 | ||
331 | /* | 376 | /* |
332 | * Move tasks up to root rcu_node. Rely on the fact that the | 377 | * Move tasks up to root rcu_node. Rely on the fact that the |
@@ -334,7 +379,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
334 | * rcu_nodes in terms of gp_num value. This fact allows us to | 379 | * rcu_nodes in terms of gp_num value. This fact allows us to |
335 | * move the blocked_tasks[] array directly, element by element. | 380 | * move the blocked_tasks[] array directly, element by element. |
336 | */ | 381 | */ |
337 | for (i = 0; i < 2; i++) { | 382 | if (rcu_preempted_readers(rnp)) |
383 | retval |= RCU_OFL_TASKS_NORM_GP; | ||
384 | if (rcu_preempted_readers_exp(rnp)) | ||
385 | retval |= RCU_OFL_TASKS_EXP_GP; | ||
386 | for (i = 0; i < 4; i++) { | ||
338 | lp = &rnp->blocked_tasks[i]; | 387 | lp = &rnp->blocked_tasks[i]; |
339 | lp_root = &rnp_root->blocked_tasks[i]; | 388 | lp_root = &rnp_root->blocked_tasks[i]; |
340 | while (!list_empty(lp)) { | 389 | while (!list_empty(lp)) { |
@@ -346,7 +395,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, | |||
346 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ | 395 | spin_unlock(&rnp_root->lock); /* irqs remain disabled */ |
347 | } | 396 | } |
348 | } | 397 | } |
349 | |||
350 | return retval; | 398 | return retval; |
351 | } | 399 | } |
352 | 400 | ||
@@ -398,14 +446,183 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)) | |||
398 | } | 446 | } |
399 | EXPORT_SYMBOL_GPL(call_rcu); | 447 | EXPORT_SYMBOL_GPL(call_rcu); |
400 | 448 | ||
449 | /** | ||
450 | * synchronize_rcu - wait until a grace period has elapsed. | ||
451 | * | ||
452 | * Control will return to the caller some time after a full grace | ||
453 | * period has elapsed, in other words after all currently executing RCU | ||
454 | * read-side critical sections have completed. RCU read-side critical | ||
455 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
456 | * and may be nested. | ||
457 | */ | ||
458 | void synchronize_rcu(void) | ||
459 | { | ||
460 | struct rcu_synchronize rcu; | ||
461 | |||
462 | if (!rcu_scheduler_active) | ||
463 | return; | ||
464 | |||
465 | init_completion(&rcu.completion); | ||
466 | /* Will wake me after RCU finished. */ | ||
467 | call_rcu(&rcu.head, wakeme_after_rcu); | ||
468 | /* Wait for it. */ | ||
469 | wait_for_completion(&rcu.completion); | ||
470 | } | ||
471 | EXPORT_SYMBOL_GPL(synchronize_rcu); | ||
472 | |||
473 | static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); | ||
474 | static long sync_rcu_preempt_exp_count; | ||
475 | static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); | ||
476 | |||
401 | /* | 477 | /* |
402 | * Wait for an rcu-preempt grace period. We are supposed to expedite the | 478 | * Return non-zero if there are any tasks in RCU read-side critical |
403 | * grace period, but this is the crude slow compatability hack, so just | 479 | * sections blocking the current preemptible-RCU expedited grace period. |
404 | * invoke synchronize_rcu(). | 480 | * If there is no preemptible-RCU expedited grace period currently in |
481 | * progress, returns zero unconditionally. | ||
482 | */ | ||
483 | static int rcu_preempted_readers_exp(struct rcu_node *rnp) | ||
484 | { | ||
485 | return !list_empty(&rnp->blocked_tasks[2]) || | ||
486 | !list_empty(&rnp->blocked_tasks[3]); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * return non-zero if there is no RCU expedited grace period in progress | ||
491 | * for the specified rcu_node structure, in other words, if all CPUs and | ||
492 | * tasks covered by the specified rcu_node structure have done their bit | ||
493 | * for the current expedited grace period. Works only for preemptible | ||
494 | * RCU -- other RCU implementation use other means. | ||
495 | * | ||
496 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
497 | */ | ||
498 | static int sync_rcu_preempt_exp_done(struct rcu_node *rnp) | ||
499 | { | ||
500 | return !rcu_preempted_readers_exp(rnp) && | ||
501 | ACCESS_ONCE(rnp->expmask) == 0; | ||
502 | } | ||
503 | |||
504 | /* | ||
505 | * Report the exit from RCU read-side critical section for the last task | ||
506 | * that queued itself during or before the current expedited preemptible-RCU | ||
507 | * grace period. This event is reported either to the rcu_node structure on | ||
508 | * which the task was queued or to one of that rcu_node structure's ancestors, | ||
509 | * recursively up the tree. (Calm down, calm down, we do the recursion | ||
510 | * iteratively!) | ||
511 | * | ||
512 | * Caller must hold sync_rcu_preempt_exp_mutex. | ||
513 | */ | ||
514 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
515 | { | ||
516 | unsigned long flags; | ||
517 | unsigned long mask; | ||
518 | |||
519 | spin_lock_irqsave(&rnp->lock, flags); | ||
520 | for (;;) { | ||
521 | if (!sync_rcu_preempt_exp_done(rnp)) | ||
522 | break; | ||
523 | if (rnp->parent == NULL) { | ||
524 | wake_up(&sync_rcu_preempt_exp_wq); | ||
525 | break; | ||
526 | } | ||
527 | mask = rnp->grpmask; | ||
528 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
529 | rnp = rnp->parent; | ||
530 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
531 | rnp->expmask &= ~mask; | ||
532 | } | ||
533 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
534 | } | ||
535 | |||
536 | /* | ||
537 | * Snapshot the tasks blocking the newly started preemptible-RCU expedited | ||
538 | * grace period for the specified rcu_node structure. If there are no such | ||
539 | * tasks, report it up the rcu_node hierarchy. | ||
540 | * | ||
541 | * Caller must hold sync_rcu_preempt_exp_mutex and rsp->onofflock. | ||
542 | */ | ||
543 | static void | ||
544 | sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp) | ||
545 | { | ||
546 | int must_wait; | ||
547 | |||
548 | spin_lock(&rnp->lock); /* irqs already disabled */ | ||
549 | list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]); | ||
550 | list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]); | ||
551 | must_wait = rcu_preempted_readers_exp(rnp); | ||
552 | spin_unlock(&rnp->lock); /* irqs remain disabled */ | ||
553 | if (!must_wait) | ||
554 | rcu_report_exp_rnp(rsp, rnp); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * Wait for an rcu-preempt grace period, but expedite it. The basic idea | ||
559 | * is to invoke synchronize_sched_expedited() to push all the tasks to | ||
560 | * the ->blocked_tasks[] lists, move all entries from the first set of | ||
561 | * ->blocked_tasks[] lists to the second set, and finally wait for this | ||
562 | * second set to drain. | ||
405 | */ | 563 | */ |
406 | void synchronize_rcu_expedited(void) | 564 | void synchronize_rcu_expedited(void) |
407 | { | 565 | { |
408 | synchronize_rcu(); | 566 | unsigned long flags; |
567 | struct rcu_node *rnp; | ||
568 | struct rcu_state *rsp = &rcu_preempt_state; | ||
569 | long snap; | ||
570 | int trycount = 0; | ||
571 | |||
572 | smp_mb(); /* Caller's modifications seen first by other CPUs. */ | ||
573 | snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; | ||
574 | smp_mb(); /* Above access cannot bleed into critical section. */ | ||
575 | |||
576 | /* | ||
577 | * Acquire lock, falling back to synchronize_rcu() if too many | ||
578 | * lock-acquisition failures. Of course, if someone does the | ||
579 | * expedited grace period for us, just leave. | ||
580 | */ | ||
581 | while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { | ||
582 | if (trycount++ < 10) | ||
583 | udelay(trycount * num_online_cpus()); | ||
584 | else { | ||
585 | synchronize_rcu(); | ||
586 | return; | ||
587 | } | ||
588 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
589 | goto mb_ret; /* Others did our work for us. */ | ||
590 | } | ||
591 | if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) | ||
592 | goto unlock_mb_ret; /* Others did our work for us. */ | ||
593 | |||
594 | /* force all RCU readers onto blocked_tasks[]. */ | ||
595 | synchronize_sched_expedited(); | ||
596 | |||
597 | spin_lock_irqsave(&rsp->onofflock, flags); | ||
598 | |||
599 | /* Initialize ->expmask for all non-leaf rcu_node structures. */ | ||
600 | rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { | ||
601 | spin_lock(&rnp->lock); /* irqs already disabled. */ | ||
602 | rnp->expmask = rnp->qsmaskinit; | ||
603 | spin_unlock(&rnp->lock); /* irqs remain disabled. */ | ||
604 | } | ||
605 | |||
606 | /* Snapshot current state of ->blocked_tasks[] lists. */ | ||
607 | rcu_for_each_leaf_node(rsp, rnp) | ||
608 | sync_rcu_preempt_exp_init(rsp, rnp); | ||
609 | if (NUM_RCU_NODES > 1) | ||
610 | sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); | ||
611 | |||
612 | spin_unlock_irqrestore(&rsp->onofflock, flags); | ||
613 | |||
614 | /* Wait for snapshotted ->blocked_tasks[] lists to drain. */ | ||
615 | rnp = rcu_get_root(rsp); | ||
616 | wait_event(sync_rcu_preempt_exp_wq, | ||
617 | sync_rcu_preempt_exp_done(rnp)); | ||
618 | |||
619 | /* Clean up and exit. */ | ||
620 | smp_mb(); /* ensure expedited GP seen before counter increment. */ | ||
621 | ACCESS_ONCE(sync_rcu_preempt_exp_count)++; | ||
622 | unlock_mb_ret: | ||
623 | mutex_unlock(&sync_rcu_preempt_exp_mutex); | ||
624 | mb_ret: | ||
625 | smp_mb(); /* ensure subsequent action seen after grace period. */ | ||
409 | } | 626 | } |
410 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 627 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
411 | 628 | ||
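The locking dance at the top of the new synchronize_rcu_expedited() is a counter-snapshot pattern: snapshot sync_rcu_preempt_exp_count, and if the counter advances past the snapshot while contending for the mutex, a full expedited grace period began and ended after the snapshot, so it already covered this caller's readers. Below is a hedged userspace sketch of just that pattern, using pthreads and GCC atomic builtins as stand-ins for the kernel primitives; do_expedited_gp() is a placeholder, and the fallback to synchronize_rcu() after repeated failures is omitted.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t exp_mutex = PTHREAD_MUTEX_INITIALIZER;
static long exp_count;			/* completed expedited grace periods */

static void do_expedited_gp(void)	/* placeholder for the real work */
{
}

void expedited_sync(void)
{
	/* Requiring count - snap > 0 means at least two completions after
	 * the snapshot, which guarantees one full grace period that both
	 * started and ended after our snapshot. */
	long snap = __atomic_load_n(&exp_count, __ATOMIC_SEQ_CST) + 1;

	while (pthread_mutex_trylock(&exp_mutex) != 0) {
		if (__atomic_load_n(&exp_count, __ATOMIC_SEQ_CST) - snap > 0)
			return;		/* others did our work for us */
		sched_yield();		/* the kernel backs off with udelay() */
	}
	if (__atomic_load_n(&exp_count, __ATOMIC_SEQ_CST) - snap > 0)
		goto unlock;		/* same check, now under the lock */

	do_expedited_gp();
	__atomic_add_fetch(&exp_count, 1, __ATOMIC_SEQ_CST);
unlock:
	pthread_mutex_unlock(&exp_mutex);
}

int main(void)
{
	expedited_sync();
	printf("expedited grace periods completed: %ld\n", exp_count);
	return 0;
}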
@@ -481,7 +698,7 @@ void exit_rcu(void) | |||
481 | /* | 698 | /* |
482 | * Tell them what RCU they are running. | 699 | * Tell them what RCU they are running. |
483 | */ | 700 | */ |
484 | static inline void rcu_bootup_announce(void) | 701 | static void __init rcu_bootup_announce(void) |
485 | { | 702 | { |
486 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); | 703 | printk(KERN_INFO "Hierarchical RCU implementation.\n"); |
487 | } | 704 | } |
@@ -512,6 +729,16 @@ static int rcu_preempted_readers(struct rcu_node *rnp) | |||
512 | return 0; | 729 | return 0; |
513 | } | 730 | } |
514 | 731 | ||
732 | #ifdef CONFIG_HOTPLUG_CPU | ||
733 | |||
734 | /* Because preemptible RCU does not exist, no quieting of tasks. */ | ||
735 | static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags) | ||
736 | { | ||
737 | spin_unlock_irqrestore(&rnp->lock, flags); | ||
738 | } | ||
739 | |||
740 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
741 | |||
515 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | 742 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR |
516 | 743 | ||
517 | /* | 744 | /* |
@@ -594,6 +821,20 @@ void synchronize_rcu_expedited(void) | |||
594 | } | 821 | } |
595 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); | 822 | EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); |
596 | 823 | ||
824 | #ifdef CONFIG_HOTPLUG_CPU | ||
825 | |||
826 | /* | ||
827 | * Because preemptable RCU does not exist, there is never any need to | ||
828 | * report on tasks preempted in RCU read-side critical sections during | ||
829 | * expedited RCU grace periods. | ||
830 | */ | ||
831 | static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp) | ||
832 | { | ||
833 | return; | ||
834 | } | ||
835 | |||
836 | #endif /* #ifdef CONFIG_HOTPLUG_CPU */ | ||
837 | |||
597 | /* | 838 | /* |
598 | * Because preemptable RCU does not exist, it never has any work to do. | 839 | * Because preemptable RCU does not exist, it never has any work to do. |
599 | */ | 840 | */ |