Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  149
 1 file changed, 114 insertions(+), 35 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 1cee04f627e..ef2a58c2b9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -150,6 +150,16 @@ void __rcu_read_lock(void)
 }
 EXPORT_SYMBOL_GPL(__rcu_read_lock);
 
+/*
+ * Check for preempted RCU readers blocking the current grace period
+ * for the specified rcu_node structure.  If the caller needs a reliable
+ * answer, it must hold the rcu_node's ->lock.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+}
+
 static void rcu_read_unlock_special(struct task_struct *t)
 {
 	int empty;
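As its header comment says, the new helper gives a reliable answer only while
the caller holds the rcu_node's ->lock. A minimal caller sketch, not part of
the patch (the function name is hypothetical; the locking mirrors the file's
own spin_lock_irqsave() usage on rnp->lock):

	/* Hypothetical diagnostic, not in the kernel tree. */
	static void example_report_blocked_readers(struct rcu_node *rnp)
	{
		unsigned long flags;

		spin_lock_irqsave(&rnp->lock, flags);
		if (rcu_preempted_readers(rnp))
			printk(KERN_INFO "rcu_node %p blocking current GP\n", rnp);
		spin_unlock_irqrestore(&rnp->lock, flags);
	}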
@@ -196,7 +206,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 			break;
 		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
-	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
+	empty = !rcu_preempted_readers(rnp);
 	list_del_init(&t->rcu_node_entry);
 	t->rcu_blocked_node = NULL;
 
@@ -207,7 +217,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 * drop rnp->lock and restore irq.
 	 */
 	if (!empty && rnp->qsmask == 0 &&
-	    list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1])) {
+	    !rcu_preempted_readers(rnp)) {
 		struct rcu_node *rnp_p;
 
 		if (rnp->parent == NULL) {
@@ -257,12 +267,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
 {
 	unsigned long flags;
 	struct list_head *lp;
-	int phase = rnp->gpnum & 0x1;
+	int phase;
 	struct task_struct *t;
 
-	if (!list_empty(&rnp->blocked_tasks[phase])) {
+	if (rcu_preempted_readers(rnp)) {
 		spin_lock_irqsave(&rnp->lock, flags);
-		phase = rnp->gpnum & 0x1; /* re-read under lock. */
+		phase = rnp->gpnum & 0x1;
 		lp = &rnp->blocked_tasks[phase];
 		list_for_each_entry(t, lp, rcu_node_entry)
 			printk(" P%d", t->pid);
@@ -281,20 +291,10 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
  */
 static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 {
-	WARN_ON_ONCE(!list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]));
+	WARN_ON_ONCE(rcu_preempted_readers(rnp));
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Check for preempted RCU readers for the specified rcu_node structure.
- * If the caller needs a reliable answer, it must hold the rcu_node's
- * ->lock.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return !list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
@@ -304,21 +304,25 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
  * parent is to remove the need for rcu_read_unlock_special() to
  * make more than two attempts to acquire the target rcu_node's lock.
  *
+ * Returns 1 if there was previously a task blocking the current grace
+ * period on the specified rcu_node structure.
+ *
  * The caller must hold rnp->lock with irqs disabled.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				      struct rcu_node *rnp,
 				      struct rcu_data *rdp)
 {
 	int i;
 	struct list_head *lp;
 	struct list_head *lp_root;
+	int retval = rcu_preempted_readers(rnp);
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 	struct task_struct *tp;
 
 	if (rnp == rnp_root) {
 		WARN_ONCE(1, "Last CPU thought to be offlined?");
-		return;  /* Shouldn't happen: at least one CPU online. */
+		return 0;  /* Shouldn't happen: at least one CPU online. */
 	}
 	WARN_ON_ONCE(rnp != rdp->mynode &&
 		     (!list_empty(&rnp->blocked_tasks[0]) ||
@@ -342,6 +346,8 @@ static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
 			spin_unlock(&rnp_root->lock); /* irqs remain disabled */
 		}
 	}
+
+	return retval;
 }
 
 /*
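With the void-to-int conversion above, the CPU-offline path can learn whether
the departing node had been blocking the current grace period. A hypothetical
caller sketch, honoring the "must hold rnp->lock with irqs disabled" contract
(the function name and the printk report are illustrative only):

	/* Hypothetical caller; entered with rnp->lock held, irqs disabled. */
	static void example_offline_tasks(struct rcu_state *rsp,
					  struct rcu_node *rnp,
					  struct rcu_data *rdp)
	{
		int need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);

		if (need_report)
			printk(KERN_INFO "offlined node was blocking current GP\n");
	}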
@@ -393,6 +399,17 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period.  We are supposed to expedite the
+ * grace period, but this is the crude slow compatibility hack, so just
+ * invoke synchronize_rcu().
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Check to see if there is any immediate preemptable-RCU-related work
  * to be done.
  */
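For context, this is the writer-side primitive an updater calls in place of
synchronize_rcu() when grace-period latency matters. A self-contained sketch
with hypothetical foo_* names, following the standard RCU list-update pattern:

	/* Hypothetical updater: unlink an element, then wait for readers. */
	struct foo {
		struct list_head list;
		int data;
	};

	static DEFINE_SPINLOCK(foo_lock);

	static void foo_remove(struct foo *p)
	{
		spin_lock(&foo_lock);
		list_del_rcu(&p->list);			/* readers may still see p */
		spin_unlock(&foo_lock);
		synchronize_rcu_expedited();		/* wait out pre-existing readers */
		kfree(p);				/* now safe to free */
	}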
@@ -410,6 +427,15 @@ static int rcu_preempt_needs_cpu(int cpu)
 	return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
 }
 
+/**
+ * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
+ */
+void rcu_barrier(void)
+{
+	_rcu_barrier(&rcu_preempt_state, call_rcu);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
 /*
  * Initialize preemptable RCU's per-CPU data.
  */
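The canonical rcu_barrier() use case is module unload: every call_rcu()
callback the module posted must run before its code and data disappear. A
hypothetical module-exit sketch (the example_* names are illustrative only):

	/* Hypothetical module cleanup; example_* names are illustrative. */
	static void __exit example_exit(void)
	{
		example_unregister();	/* stop posting new call_rcu() callbacks */
		rcu_barrier();		/* wait for in-flight callbacks to drain */
		kmem_cache_destroy(example_cachep);
	}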
@@ -419,6 +445,22 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
+ * Move preemptable RCU's callbacks to ->orphan_cbs_list.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+	rcu_send_cbs_to_orphanage(&rcu_preempt_state);
+}
+
+/*
+ * Initialize preemptable RCU's state structures.
+ */
+static void __init __rcu_init_preempt(void)
+{
+	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
+}
+
+/*
  * Check for a task exiting while in a preemptable-RCU read-side
  * critical section, clean up if so.  No need to issue warnings,
  * as debug_check_no_locks_held() already does this if lockdep
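Both new hooks are meant to be invoked from the flavor-independent core in
rcutree.c, which is not shown in this diff. A sketch of the implied init call
site follows; its exact shape is an assumption:

	/* Sketch of the implied rcutree.c call site; shape is an assumption. */
	void __init rcu_init(void)
	{
		RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
		RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
		__rcu_init_preempt();	/* empty stub when TREE_PREEMPT_RCU=n */
	}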
@@ -461,6 +503,15 @@ static void rcu_preempt_note_context_switch(int cpu)
 {
 }
 
+/*
+ * Because preemptable RCU does not exist, there are never any preempted
+ * RCU readers.
+ */
+static int rcu_preempted_readers(struct rcu_node *rnp)
+{
+	return 0;
+}
+
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 
 /*
@@ -483,25 +534,19 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 	WARN_ON_ONCE(rnp->qsmask);
 }
 
-/*
- * Because preemptable RCU does not exist, there are never any preempted
- * RCU readers.
- */
-static int rcu_preempted_readers(struct rcu_node *rnp)
-{
-	return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
  * Because preemptable RCU does not exist, it never needs to migrate
- * tasks that were blocked within RCU read-side critical sections.
+ * tasks that were blocked within RCU read-side critical sections, and
+ * such non-existent tasks cannot possibly have been blocking the current
+ * grace period.
  */
-static void rcu_preempt_offline_tasks(struct rcu_state *rsp,
+static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
 				      struct rcu_node *rnp,
 				      struct rcu_data *rdp)
 {
+	return 0;
 }
 
 /*
@@ -518,7 +563,7 @@ static void rcu_preempt_offline_cpu(int cpu)
  * Because preemptable RCU does not exist, it never has any callbacks
  * to check.
  */
-void rcu_preempt_check_callbacks(int cpu)
+static void rcu_preempt_check_callbacks(int cpu)
 {
 }
 
@@ -526,7 +571,7 @@ void rcu_preempt_check_callbacks(int cpu)
  * Because preemptable RCU does not exist, it never has any callbacks
  * to process.
  */
-void rcu_preempt_process_callbacks(void)
+static void rcu_preempt_process_callbacks(void)
 {
 }
 
@@ -540,6 +585,16 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 EXPORT_SYMBOL_GPL(call_rcu);
 
 /*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * Because preemptable RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+	synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+/*
  * Because preemptable RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
@@ -556,6 +611,16 @@ static int rcu_preempt_needs_cpu(int cpu)
 }
 
 /*
+ * Because preemptable RCU does not exist, rcu_barrier() is just
+ * another name for rcu_barrier_sched().
+ */
+void rcu_barrier(void)
+{
+	rcu_barrier_sched();
+}
+EXPORT_SYMBOL_GPL(rcu_barrier);
+
+/*
  * Because preemptable RCU does not exist, there is no per-CPU
  * data to initialize.
  */
@@ -563,4 +628,18 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 {
 }
 
+/*
+ * Because there is no preemptable RCU, there are no callbacks to move.
+ */
+static void rcu_preempt_send_cbs_to_orphanage(void)
+{
+}
+
+/*
+ * Because preemptable RCU does not exist, it need not be initialized.
+ */
+static void __init __rcu_init_preempt(void)
+{
+}
+
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */