path: root/kernel/rcutree.c
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c | 787
1 file changed, 540 insertions(+), 247 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 52b06f6e158c..53ae9598f798 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -46,30 +46,30 @@
46#include <linux/cpu.h> 46#include <linux/cpu.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/time.h> 48#include <linux/time.h>
49#include <linux/kernel_stat.h>
49 50
50#include "rcutree.h" 51#include "rcutree.h"
51 52
52#ifdef CONFIG_DEBUG_LOCK_ALLOC
53static struct lock_class_key rcu_lock_key;
54struct lockdep_map rcu_lock_map =
55 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
56EXPORT_SYMBOL_GPL(rcu_lock_map);
57#endif
58
59/* Data structures. */ 53/* Data structures. */
60 54
55static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
56
61#define RCU_STATE_INITIALIZER(name) { \ 57#define RCU_STATE_INITIALIZER(name) { \
62 .level = { &name.node[0] }, \ 58 .level = { &name.node[0] }, \
63 .levelcnt = { \ 59 .levelcnt = { \
64 NUM_RCU_LVL_0, /* root of hierarchy. */ \ 60 NUM_RCU_LVL_0, /* root of hierarchy. */ \
65 NUM_RCU_LVL_1, \ 61 NUM_RCU_LVL_1, \
66 NUM_RCU_LVL_2, \ 62 NUM_RCU_LVL_2, \
67 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \ 63 NUM_RCU_LVL_3, \
64 NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
68 }, \ 65 }, \
69 .signaled = RCU_SIGNAL_INIT, \ 66 .signaled = RCU_GP_IDLE, \
70 .gpnum = -300, \ 67 .gpnum = -300, \
71 .completed = -300, \ 68 .completed = -300, \
72 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \ 69 .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
70 .orphan_cbs_list = NULL, \
71 .orphan_cbs_tail = &name.orphan_cbs_list, \
72 .orphan_qlen = 0, \
73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \ 73 .fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
74 .n_force_qs = 0, \ 74 .n_force_qs = 0, \
75 .n_force_qs_ngp = 0, \ 75 .n_force_qs_ngp = 0, \
@@ -81,24 +81,18 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state); 81struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); 82DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
83 83
84extern long rcu_batches_completed_sched(void); 84static int rcu_scheduler_active __read_mostly;
85static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
86static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
87 struct rcu_node *rnp, unsigned long flags);
88static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
89#ifdef CONFIG_HOTPLUG_CPU
90static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
91#endif /* #ifdef CONFIG_HOTPLUG_CPU */
92static void __rcu_process_callbacks(struct rcu_state *rsp,
93 struct rcu_data *rdp);
94static void __call_rcu(struct rcu_head *head,
95 void (*func)(struct rcu_head *rcu),
96 struct rcu_state *rsp);
97static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
98static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
99 int preemptable);
100 85
101#include "rcutree_plugin.h" 86
87/*
88 * Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
89 * permit this function to be invoked without holding the root rcu_node
90 * structure's ->lock, but of course results can be subject to change.
91 */
92static int rcu_gp_in_progress(struct rcu_state *rsp)
93{
94 return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
95}
102 96
103/* 97/*
104 * Note a quiescent state. Because we do not need to know 98 * Note a quiescent state. Because we do not need to know
@@ -110,7 +104,7 @@ void rcu_sched_qs(int cpu)
110 struct rcu_data *rdp; 104 struct rcu_data *rdp;
111 105
112 rdp = &per_cpu(rcu_sched_data, cpu); 106 rdp = &per_cpu(rcu_sched_data, cpu);
113 rdp->passed_quiesc_completed = rdp->completed; 107 rdp->passed_quiesc_completed = rdp->gpnum - 1;
114 barrier(); 108 barrier();
115 rdp->passed_quiesc = 1; 109 rdp->passed_quiesc = 1;
116 rcu_preempt_note_context_switch(cpu); 110 rcu_preempt_note_context_switch(cpu);
@@ -121,7 +115,7 @@ void rcu_bh_qs(int cpu)
121 struct rcu_data *rdp; 115 struct rcu_data *rdp;
122 116
123 rdp = &per_cpu(rcu_bh_data, cpu); 117 rdp = &per_cpu(rcu_bh_data, cpu);
124 rdp->passed_quiesc_completed = rdp->completed; 118 rdp->passed_quiesc_completed = rdp->gpnum - 1;
125 barrier(); 119 barrier();
126 rdp->passed_quiesc = 1; 120 rdp->passed_quiesc = 1;
127} 121}
@@ -137,6 +131,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */
137static int qhimark = 10000; /* If this many pending, ignore blimit. */ 131static int qhimark = 10000; /* If this many pending, ignore blimit. */
138static int qlowmark = 100; /* Once only this many pending, use blimit. */ 132static int qlowmark = 100; /* Once only this many pending, use blimit. */
139 133
134module_param(blimit, int, 0);
135module_param(qhimark, int, 0);
136module_param(qlowmark, int, 0);
137
140static void force_quiescent_state(struct rcu_state *rsp, int relaxed); 138static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
141static int rcu_pending(int cpu); 139static int rcu_pending(int cpu);
142 140
@@ -173,9 +171,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
173static int 171static int
174cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) 172cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
175{ 173{
176 /* ACCESS_ONCE() because we are accessing outside of lock. */ 174 return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
177 return *rdp->nxttail[RCU_DONE_TAIL] &&
178 ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
179} 175}
180 176
181/* 177/*
@@ -345,31 +341,12 @@ void rcu_irq_exit(void)
345 set_need_resched(); 341 set_need_resched();
346} 342}
347 343
348/*
349 * Record the specified "completed" value, which is later used to validate
350 * dynticks counter manipulations. Specify "rsp->completed - 1" to
351 * unconditionally invalidate any future dynticks manipulations (which is
352 * useful at the beginning of a grace period).
353 */
354static void dyntick_record_completed(struct rcu_state *rsp, long comp)
355{
356 rsp->dynticks_completed = comp;
357}
358
359#ifdef CONFIG_SMP 344#ifdef CONFIG_SMP
360 345
361/* 346/*
362 * Recall the previously recorded value of the completion for dynticks.
363 */
364static long dyntick_recall_completed(struct rcu_state *rsp)
365{
366 return rsp->dynticks_completed;
367}
368
369/*
370 * Snapshot the specified CPU's dynticks counter so that we can later 347 * Snapshot the specified CPU's dynticks counter so that we can later
371 * credit them with an implicit quiescent state. Return 1 if this CPU 348 * credit them with an implicit quiescent state. Return 1 if this CPU
372 * is already in a quiescent state courtesy of dynticks idle mode. 349 * is in dynticks idle mode, which is an extended quiescent state.
373 */ 350 */
374static int dyntick_save_progress_counter(struct rcu_data *rdp) 351static int dyntick_save_progress_counter(struct rcu_data *rdp)
375{ 352{
@@ -429,24 +406,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
429 406
430#else /* #ifdef CONFIG_NO_HZ */ 407#else /* #ifdef CONFIG_NO_HZ */
431 408
432static void dyntick_record_completed(struct rcu_state *rsp, long comp)
433{
434}
435
436#ifdef CONFIG_SMP 409#ifdef CONFIG_SMP
437 410
438/*
439 * If there are no dynticks, then the only way that a CPU can passively
440 * be in a quiescent state is to be offline. Unlike dynticks idle, which
441 * is a point in time during the prior (already finished) grace period,
442 * an offline CPU is always in a quiescent state, and thus can be
443 * unconditionally applied. So just return the current value of completed.
444 */
445static long dyntick_recall_completed(struct rcu_state *rsp)
446{
447 return rsp->completed;
448}
449
450static int dyntick_save_progress_counter(struct rcu_data *rdp) 411static int dyntick_save_progress_counter(struct rcu_data *rdp)
451{ 412{
452 return 0; 413 return 0;
@@ -475,30 +436,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
475 long delta; 436 long delta;
476 unsigned long flags; 437 unsigned long flags;
477 struct rcu_node *rnp = rcu_get_root(rsp); 438 struct rcu_node *rnp = rcu_get_root(rsp);
478 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
479 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
480 439
481 /* Only let one CPU complain about others per time interval. */ 440 /* Only let one CPU complain about others per time interval. */
482 441
483 spin_lock_irqsave(&rnp->lock, flags); 442 spin_lock_irqsave(&rnp->lock, flags);
484 delta = jiffies - rsp->jiffies_stall; 443 delta = jiffies - rsp->jiffies_stall;
485 if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) { 444 if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
486 spin_unlock_irqrestore(&rnp->lock, flags); 445 spin_unlock_irqrestore(&rnp->lock, flags);
487 return; 446 return;
488 } 447 }
489 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; 448 rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
449
450 /*
451 * Now rat on any tasks that got kicked up to the root rcu_node
452 * due to CPU offlining.
453 */
454 rcu_print_task_stall(rnp);
490 spin_unlock_irqrestore(&rnp->lock, flags); 455 spin_unlock_irqrestore(&rnp->lock, flags);
491 456
492 /* OK, time to rat on our buddy... */ 457 /* OK, time to rat on our buddy... */
493 458
494 printk(KERN_ERR "INFO: RCU detected CPU stalls:"); 459 printk(KERN_ERR "INFO: RCU detected CPU stalls:");
495 for (; rnp_cur < rnp_end; rnp_cur++) { 460 rcu_for_each_leaf_node(rsp, rnp) {
496 rcu_print_task_stall(rnp); 461 rcu_print_task_stall(rnp);
497 if (rnp_cur->qsmask == 0) 462 if (rnp->qsmask == 0)
498 continue; 463 continue;
499 for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++) 464 for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
500 if (rnp_cur->qsmask & (1UL << cpu)) 465 if (rnp->qsmask & (1UL << cpu))
501 printk(" %d", rnp_cur->grplo + cpu); 466 printk(" %d", rnp->grplo + cpu);
502 } 467 }
503 printk(" (detected by %d, t=%ld jiffies)\n", 468 printk(" (detected by %d, t=%ld jiffies)\n",
504 smp_processor_id(), (long)(jiffies - rsp->gp_start)); 469 smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -537,8 +502,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
537 /* We haven't checked in, so go dump stack. */ 502 /* We haven't checked in, so go dump stack. */
538 print_cpu_stall(rsp); 503 print_cpu_stall(rsp);
539 504
540 } else if (rsp->gpnum != rsp->completed && 505 } else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
541 delta >= RCU_STALL_RAT_DELAY) {
542 506
543 /* They had two time units to dump stack, so complain. */ 507 /* They had two time units to dump stack, so complain. */
544 print_other_cpu_stall(rsp); 508 print_other_cpu_stall(rsp);
@@ -560,13 +524,33 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
560/* 524/*
561 * Update CPU-local rcu_data state to record the newly noticed grace period. 525 * Update CPU-local rcu_data state to record the newly noticed grace period.
562 * This is used both when we started the grace period and when we notice 526 * This is used both when we started the grace period and when we notice
563 * that someone else started the grace period. 527 * that someone else started the grace period. The caller must hold the
528 * ->lock of the leaf rcu_node structure corresponding to the current CPU,
529 * and must have irqs disabled.
564 */ 530 */
531static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
532{
533 if (rdp->gpnum != rnp->gpnum) {
534 rdp->qs_pending = 1;
535 rdp->passed_quiesc = 0;
536 rdp->gpnum = rnp->gpnum;
537 }
538}
539
565static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp) 540static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
566{ 541{
567 rdp->qs_pending = 1; 542 unsigned long flags;
568 rdp->passed_quiesc = 0; 543 struct rcu_node *rnp;
569 rdp->gpnum = rsp->gpnum; 544
545 local_irq_save(flags);
546 rnp = rdp->mynode;
547 if (rdp->gpnum == ACCESS_ONCE(rnp->gpnum) || /* outside lock. */
548 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
549 local_irq_restore(flags);
550 return;
551 }
552 __note_new_gpnum(rsp, rnp, rdp);
553 spin_unlock_irqrestore(&rnp->lock, flags);
570} 554}
571 555
572/* 556/*
@@ -590,6 +574,79 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
590} 574}
591 575
592/* 576/*
577 * Advance this CPU's callbacks, but only if the current grace period
578 * has ended. This may be called only from the CPU to whom the rdp
579 * belongs. In addition, the corresponding leaf rcu_node structure's
580 * ->lock must be held by the caller, with irqs disabled.
581 */
582static void
583__rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
584{
585 /* Did another grace period end? */
586 if (rdp->completed != rnp->completed) {
587
588 /* Advance callbacks. No harm if list empty. */
589 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
590 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
591 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
592
593 /* Remember that we saw this grace-period completion. */
594 rdp->completed = rnp->completed;
595 }
596}
597
598/*
599 * Advance this CPU's callbacks, but only if the current grace period
600 * has ended. This may be called only from the CPU to whom the rdp
601 * belongs.
602 */
603static void
604rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
605{
606 unsigned long flags;
607 struct rcu_node *rnp;
608
609 local_irq_save(flags);
610 rnp = rdp->mynode;
611 if (rdp->completed == ACCESS_ONCE(rnp->completed) || /* outside lock. */
612 !spin_trylock(&rnp->lock)) { /* irqs already off, retry later. */
613 local_irq_restore(flags);
614 return;
615 }
616 __rcu_process_gp_end(rsp, rnp, rdp);
617 spin_unlock_irqrestore(&rnp->lock, flags);
618}
619
620/*
621 * Do per-CPU grace-period initialization for running CPU. The caller
622 * must hold the lock of the leaf rcu_node structure corresponding to
623 * this CPU.
624 */
625static void
626rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_data *rdp)
627{
628 /* Prior grace period ended, so advance callbacks for current CPU. */
629 __rcu_process_gp_end(rsp, rnp, rdp);
630
631 /*
632 * Because this CPU just now started the new grace period, we know
633 * that all of its callbacks will be covered by this upcoming grace
634 * period, even the ones that were registered arbitrarily recently.
635 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
636 *
637 * Other CPUs cannot be sure exactly when the grace period started.
638 * Therefore, their recently registered callbacks must pass through
639 * an additional RCU_NEXT_READY stage, so that they will be handled
640 * by the next RCU grace period.
641 */
642 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
643 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
644
645 /* Set state so that this CPU will detect the next quiescent state. */
646 __note_new_gpnum(rsp, rnp, rdp);
647}
648
649/*
593 * Start a new RCU grace period if warranted, re-initializing the hierarchy 650 * Start a new RCU grace period if warranted, re-initializing the hierarchy
594 * in preparation for detecting the next grace period. The caller must hold 651 * in preparation for detecting the next grace period. The caller must hold
595 * the root node's ->lock, which is released before return. Hard irqs must 652 * the root node's ->lock, which is released before return. Hard irqs must
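__rcu_process_gp_end() above advances callbacks by shuffling an array of tail pointers that partition one singly linked list into "done", "wait", "next ready", and "next" segments. As a rough illustration of that tail-pointer shuffle only, here is a minimal userspace model; the names seg_list, seg_advance, and NSEG are invented for this sketch and do not appear in the kernel.

/* Minimal userspace model of a tail-pointer-segmented callback list. */
#include <stdio.h>
#include <stddef.h>

struct cb {
        struct cb *next;
        int id;
};

enum { SEG_DONE, SEG_WAIT, SEG_NEXT_READY, SEG_NEXT, NSEG };

struct seg_list {
        struct cb *head;                /* all callbacks, oldest first */
        struct cb **tail[NSEG];         /* tail[i] marks the end of segment i */
};

static void seg_init(struct seg_list *sl)
{
        int i;

        sl->head = NULL;
        for (i = 0; i < NSEG; i++)
                sl->tail[i] = &sl->head;
}

/* Enqueue a new callback at the very end (the SEG_NEXT segment). */
static void seg_enqueue(struct seg_list *sl, struct cb *c)
{
        c->next = NULL;
        *sl->tail[SEG_NEXT] = c;
        sl->tail[SEG_NEXT] = &c->next;
}

/*
 * Mirror the assignments in __rcu_process_gp_end(): when a grace period
 * is seen to end, each segment's tail pointer is pulled one step forward,
 * moving every callback one segment closer to "done" without touching
 * the list nodes themselves.
 */
static void seg_advance(struct seg_list *sl)
{
        sl->tail[SEG_DONE] = sl->tail[SEG_WAIT];
        sl->tail[SEG_WAIT] = sl->tail[SEG_NEXT_READY];
        sl->tail[SEG_NEXT_READY] = sl->tail[SEG_NEXT];
}

int main(void)
{
        struct seg_list sl;
        struct cb a = { .id = 1 }, b = { .id = 2 };
        struct cb *p;

        seg_init(&sl);
        seg_enqueue(&sl, &a);
        seg_enqueue(&sl, &b);
        /* Each call models noticing the end of one more grace period. */
        seg_advance(&sl);
        seg_advance(&sl);
        seg_advance(&sl);
        for (p = sl.head; p != *sl.tail[SEG_DONE]; p = p->next)
                printf("callback %d is ready to invoke\n", p->id);
        return 0;
}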
@@ -603,7 +660,23 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
603 struct rcu_node *rnp = rcu_get_root(rsp); 660 struct rcu_node *rnp = rcu_get_root(rsp);
604 661
605 if (!cpu_needs_another_gp(rsp, rdp)) { 662 if (!cpu_needs_another_gp(rsp, rdp)) {
606 spin_unlock_irqrestore(&rnp->lock, flags); 663 if (rnp->completed == rsp->completed) {
664 spin_unlock_irqrestore(&rnp->lock, flags);
665 return;
666 }
667 spin_unlock(&rnp->lock); /* irqs remain disabled. */
668
669 /*
670 * Propagate new ->completed value to rcu_node structures
671 * so that other CPUs don't have to wait until the start
672 * of the next grace period to process their callbacks.
673 */
674 rcu_for_each_node_breadth_first(rsp, rnp) {
675 spin_lock(&rnp->lock); /* irqs already disabled. */
676 rnp->completed = rsp->completed;
677 spin_unlock(&rnp->lock); /* irqs remain disabled. */
678 }
679 local_irq_restore(flags);
607 return; 680 return;
608 } 681 }
609 682
@@ -613,23 +686,15 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
613 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */ 686 rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
614 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 687 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
615 record_gp_stall_check_time(rsp); 688 record_gp_stall_check_time(rsp);
616 dyntick_record_completed(rsp, rsp->completed - 1);
617 note_new_gpnum(rsp, rdp);
618
619 /*
620 * Because we are first, we know that all our callbacks will
621 * be covered by this upcoming grace period, even the ones
622 * that were registered arbitrarily recently.
623 */
624 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
625 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
626 689
627 /* Special-case the common single-level case. */ 690 /* Special-case the common single-level case. */
628 if (NUM_RCU_NODES == 1) { 691 if (NUM_RCU_NODES == 1) {
629 rcu_preempt_check_blocked_tasks(rnp); 692 rcu_preempt_check_blocked_tasks(rnp);
630 rnp->qsmask = rnp->qsmaskinit; 693 rnp->qsmask = rnp->qsmaskinit;
631 rnp->gpnum = rsp->gpnum; 694 rnp->gpnum = rsp->gpnum;
695 rnp->completed = rsp->completed;
632 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */ 696 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
697 rcu_start_gp_per_cpu(rsp, rnp, rdp);
633 spin_unlock_irqrestore(&rnp->lock, flags); 698 spin_unlock_irqrestore(&rnp->lock, flags);
634 return; 699 return;
635 } 700 }
@@ -657,70 +722,51 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
657 * one corresponding to this CPU, due to the fact that we have 722 * one corresponding to this CPU, due to the fact that we have
658 * irqs disabled. 723 * irqs disabled.
659 */ 724 */
660 for (rnp = &rsp->node[0]; rnp < &rsp->node[NUM_RCU_NODES]; rnp++) { 725 rcu_for_each_node_breadth_first(rsp, rnp) {
661 spin_lock(&rnp->lock); /* irqs already disabled. */ 726 spin_lock(&rnp->lock); /* irqs already disabled. */
662 rcu_preempt_check_blocked_tasks(rnp); 727 rcu_preempt_check_blocked_tasks(rnp);
663 rnp->qsmask = rnp->qsmaskinit; 728 rnp->qsmask = rnp->qsmaskinit;
664 rnp->gpnum = rsp->gpnum; 729 rnp->gpnum = rsp->gpnum;
665 spin_unlock(&rnp->lock); /* irqs already disabled. */ 730 rnp->completed = rsp->completed;
731 if (rnp == rdp->mynode)
732 rcu_start_gp_per_cpu(rsp, rnp, rdp);
733 spin_unlock(&rnp->lock); /* irqs remain disabled. */
666 } 734 }
667 735
736 rnp = rcu_get_root(rsp);
737 spin_lock(&rnp->lock); /* irqs already disabled. */
668 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ 738 rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
739 spin_unlock(&rnp->lock); /* irqs remain disabled. */
669 spin_unlock_irqrestore(&rsp->onofflock, flags); 740 spin_unlock_irqrestore(&rsp->onofflock, flags);
670} 741}
671 742
672/* 743/*
673 * Advance this CPU's callbacks, but only if the current grace period 744 * Report a full set of quiescent states to the specified rcu_state
674 * has ended. This may be called only from the CPU to whom the rdp 745 * data structure. This involves cleaning up after the prior grace
675 * belongs. 746 * period and letting rcu_start_gp() start up the next grace period
676 */ 747 * if one is needed. Note that the caller must hold rnp->lock, as
677static void 748 * required by rcu_start_gp(), which will release it.
678rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
679{
680 long completed_snap;
681 unsigned long flags;
682
683 local_irq_save(flags);
684 completed_snap = ACCESS_ONCE(rsp->completed); /* outside of lock. */
685
686 /* Did another grace period end? */
687 if (rdp->completed != completed_snap) {
688
689 /* Advance callbacks. No harm if list empty. */
690 rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[RCU_WAIT_TAIL];
691 rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_READY_TAIL];
692 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
693
694 /* Remember that we saw this grace-period completion. */
695 rdp->completed = completed_snap;
696 }
697 local_irq_restore(flags);
698}
699
700/*
701 * Clean up after the prior grace period and let rcu_start_gp() start up
702 * the next grace period if one is needed. Note that the caller must
703 * hold rnp->lock, as required by rcu_start_gp(), which will release it.
704 */ 749 */
705static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags) 750static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
706 __releases(rnp->lock) 751 __releases(rcu_get_root(rsp)->lock)
707{ 752{
708 WARN_ON_ONCE(rsp->completed == rsp->gpnum); 753 WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
709 rsp->completed = rsp->gpnum; 754 rsp->completed = rsp->gpnum;
710 rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]); 755 rsp->signaled = RCU_GP_IDLE;
711 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ 756 rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
712} 757}
713 758
714/* 759/*
715 * Similar to cpu_quiet(), for which it is a helper function. Allows 760 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
716 * a group of CPUs to be quieted at one go, though all the CPUs in the 761 * Allows quiescent states for a group of CPUs to be reported at one go
717 * group must be represented by the same leaf rcu_node structure. 762 * to the specified rcu_node structure, though all the CPUs in the group
718 * That structure's lock must be held upon entry, and it is released 763 * must be represented by the same rcu_node structure (which need not be
719 * before return. 764 * a leaf rcu_node structure, though it often will be). That structure's
765 * lock must be held upon entry, and it is released before return.
720 */ 766 */
721static void 767static void
722cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp, 768rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
723 unsigned long flags) 769 struct rcu_node *rnp, unsigned long flags)
724 __releases(rnp->lock) 770 __releases(rnp->lock)
725{ 771{
726 struct rcu_node *rnp_c; 772 struct rcu_node *rnp_c;
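rcu_report_qs_rnp(), whose header appears above, clears the reporting CPU's bit in its leaf rcu_node and, once a node's mask empties, carries the report up to the parent, with an empty root mask ending the grace period. The sketch below is a minimal userspace model of that upward walk with all locking omitted; node, report_qs, and the two-level layout are invented for illustration.

/* Userspace model of reporting quiescent states up a two-level tree. */
#include <stdio.h>
#include <stddef.h>

struct node {
        struct node *parent;
        unsigned long qsmask;   /* children (or CPUs) still owing a QS */
        unsigned long grpmask;  /* this node's bit in its parent's qsmask */
};

/*
 * Clear @mask in @np->qsmask; if the node's mask empties, keep walking
 * toward the root.  Return 1 if this report emptied the root's mask
 * (ending the modeled grace period), 0 otherwise.
 */
static int report_qs(struct node *np, unsigned long mask)
{
        for (;;) {
                np->qsmask &= ~mask;
                if (np->qsmask != 0)
                        return 0;       /* other CPUs/groups still owe a QS */
                if (np->parent == NULL)
                        return 1;       /* root is clear: grace period ends */
                mask = np->grpmask;     /* report this whole group upward */
                np = np->parent;
        }
}

int main(void)
{
        struct node root = { .parent = NULL, .qsmask = 0x3 };
        struct node leaf0 = { .parent = &root, .qsmask = 0x3, .grpmask = 0x1 };
        struct node leaf1 = { .parent = &root, .qsmask = 0x1, .grpmask = 0x2 };

        printf("cpu0 reports: done=%d\n", report_qs(&leaf0, 0x1)); /* 0 */
        printf("cpu1 reports: done=%d\n", report_qs(&leaf0, 0x2)); /* 0 */
        printf("cpu2 reports: done=%d\n", report_qs(&leaf1, 0x1)); /* 1 */
        return 0;
}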
@@ -756,21 +802,23 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
756 802
757 /* 803 /*
758 * Get here if we are the last CPU to pass through a quiescent 804 * Get here if we are the last CPU to pass through a quiescent
759 * state for this grace period. Invoke cpu_quiet_msk_finish() 805 * state for this grace period. Invoke rcu_report_qs_rsp()
760 * to clean up and start the next grace period if one is needed. 806 * to clean up and start the next grace period if one is needed.
761 */ 807 */
762 cpu_quiet_msk_finish(rsp, flags); /* releases rnp->lock. */ 808 rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
763} 809}
764 810
765/* 811/*
766 * Record a quiescent state for the specified CPU, which must either be 812 * Record a quiescent state for the specified CPU to that CPU's rcu_data
767 * the current CPU. The lastcomp argument is used to make sure we are 813 * structure. This must be either called from the specified CPU, or
768 * still in the grace period of interest. We don't want to end the current 814 * called when the specified CPU is known to be offline (and when it is
769 * grace period based on quiescent states detected in an earlier grace 815 * also known that no other CPU is concurrently trying to help the offline
770 * period! 816 * CPU). The lastcomp argument is used to make sure we are still in the
817 * grace period of interest. We don't want to end the current grace period
818 * based on quiescent states detected in an earlier grace period!
771 */ 819 */
772static void 820static void
773cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp) 821rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
774{ 822{
775 unsigned long flags; 823 unsigned long flags;
776 unsigned long mask; 824 unsigned long mask;
@@ -778,15 +826,15 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
778 826
779 rnp = rdp->mynode; 827 rnp = rdp->mynode;
780 spin_lock_irqsave(&rnp->lock, flags); 828 spin_lock_irqsave(&rnp->lock, flags);
781 if (lastcomp != ACCESS_ONCE(rsp->completed)) { 829 if (lastcomp != rnp->completed) {
782 830
783 /* 831 /*
784 * Someone beat us to it for this grace period, so leave. 832 * Someone beat us to it for this grace period, so leave.
785 * The race with GP start is resolved by the fact that we 833 * The race with GP start is resolved by the fact that we
786 * hold the leaf rcu_node lock, so that the per-CPU bits 834 * hold the leaf rcu_node lock, so that the per-CPU bits
787 * cannot yet be initialized -- so we would simply find our 835 * cannot yet be initialized -- so we would simply find our
788 * CPU's bit already cleared in cpu_quiet_msk() if this race 836 * CPU's bit already cleared in rcu_report_qs_rnp() if this
789 * occurred. 837 * race occurred.
790 */ 838 */
791 rdp->passed_quiesc = 0; /* try again later! */ 839 rdp->passed_quiesc = 0; /* try again later! */
792 spin_unlock_irqrestore(&rnp->lock, flags); 840 spin_unlock_irqrestore(&rnp->lock, flags);
@@ -804,7 +852,7 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
804 */ 852 */
805 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 853 rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
806 854
807 cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */ 855 rcu_report_qs_rnp(mask, rsp, rnp, flags); /* rlses rnp->lock */
808 } 856 }
809} 857}
810 858
@@ -835,24 +883,73 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
835 if (!rdp->passed_quiesc) 883 if (!rdp->passed_quiesc)
836 return; 884 return;
837 885
838 /* Tell RCU we are done (but cpu_quiet() will be the judge of that). */ 886 /*
839 cpu_quiet(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed); 887 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
888 * judge of that).
889 */
890 rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesc_completed);
840} 891}
841 892
842#ifdef CONFIG_HOTPLUG_CPU 893#ifdef CONFIG_HOTPLUG_CPU
843 894
844/* 895/*
896 * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
897 * specified flavor of RCU. The callbacks will be adopted by the next
898 * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
899 * comes first. Because this is invoked from the CPU_DYING notifier,
900 * irqs are already disabled.
901 */
902static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
903{
904 int i;
905 struct rcu_data *rdp = rsp->rda[smp_processor_id()];
906
907 if (rdp->nxtlist == NULL)
908 return; /* irqs disabled, so comparison is stable. */
909 spin_lock(&rsp->onofflock); /* irqs already disabled. */
910 *rsp->orphan_cbs_tail = rdp->nxtlist;
911 rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
912 rdp->nxtlist = NULL;
913 for (i = 0; i < RCU_NEXT_SIZE; i++)
914 rdp->nxttail[i] = &rdp->nxtlist;
915 rsp->orphan_qlen += rdp->qlen;
916 rdp->qlen = 0;
917 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
918}
919
920/*
921 * Adopt previously orphaned RCU callbacks.
922 */
923static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
924{
925 unsigned long flags;
926 struct rcu_data *rdp;
927
928 spin_lock_irqsave(&rsp->onofflock, flags);
929 rdp = rsp->rda[smp_processor_id()];
930 if (rsp->orphan_cbs_list == NULL) {
931 spin_unlock_irqrestore(&rsp->onofflock, flags);
932 return;
933 }
934 *rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
935 rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
936 rdp->qlen += rsp->orphan_qlen;
937 rsp->orphan_cbs_list = NULL;
938 rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
939 rsp->orphan_qlen = 0;
940 spin_unlock_irqrestore(&rsp->onofflock, flags);
941}
942
943/*
845 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy 944 * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
846 * and move all callbacks from the outgoing CPU to the current one. 945 * and move all callbacks from the outgoing CPU to the current one.
847 */ 946 */
848static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp) 947static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
849{ 948{
850 int i;
851 unsigned long flags; 949 unsigned long flags;
852 long lastcomp;
853 unsigned long mask; 950 unsigned long mask;
951 int need_report = 0;
854 struct rcu_data *rdp = rsp->rda[cpu]; 952 struct rcu_data *rdp = rsp->rda[cpu];
855 struct rcu_data *rdp_me;
856 struct rcu_node *rnp; 953 struct rcu_node *rnp;
857 954
858 /* Exclude any attempts to start a new grace period. */ 955 /* Exclude any attempts to start a new grace period. */
@@ -865,42 +962,34 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
865 spin_lock(&rnp->lock); /* irqs already disabled. */ 962 spin_lock(&rnp->lock); /* irqs already disabled. */
866 rnp->qsmaskinit &= ~mask; 963 rnp->qsmaskinit &= ~mask;
867 if (rnp->qsmaskinit != 0) { 964 if (rnp->qsmaskinit != 0) {
868 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 965 if (rnp != rdp->mynode)
966 spin_unlock(&rnp->lock); /* irqs remain disabled. */
869 break; 967 break;
870 } 968 }
871 rcu_preempt_offline_tasks(rsp, rnp, rdp); 969 if (rnp == rdp->mynode)
970 need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
971 else
972 spin_unlock(&rnp->lock); /* irqs remain disabled. */
872 mask = rnp->grpmask; 973 mask = rnp->grpmask;
873 spin_unlock(&rnp->lock); /* irqs remain disabled. */
874 rnp = rnp->parent; 974 rnp = rnp->parent;
875 } while (rnp != NULL); 975 } while (rnp != NULL);
876 lastcomp = rsp->completed;
877
878 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
879 976
880 /* 977 /*
881 * Move callbacks from the outgoing CPU to the running CPU. 978 * We still hold the leaf rcu_node structure lock here, and
882 * Note that the outgoing CPU is now quiscent, so it is now 979 * irqs are still disabled. The reason for this subterfuge is
883 * (uncharacteristically) safe to access its rcu_data structure. 980 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
884 * Note also that we must carefully retain the order of the 981 * held leads to deadlock.
885 * outgoing CPU's callbacks in order for rcu_barrier() to work
886 * correctly. Finally, note that we start all the callbacks
887 * afresh, even those that have passed through a grace period
888 * and are therefore ready to invoke. The theory is that hotplug
889 * events are rare, and that if they are frequent enough to
890 * indefinitely delay callbacks, you have far worse things to
891 * be worrying about.
892 */ 982 */
893 rdp_me = rsp->rda[smp_processor_id()]; 983 spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
894 if (rdp->nxtlist != NULL) { 984 rnp = rdp->mynode;
895 *rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist; 985 if (need_report & RCU_OFL_TASKS_NORM_GP)
896 rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; 986 rcu_report_unblock_qs_rnp(rnp, flags);
897 rdp->nxtlist = NULL; 987 else
898 for (i = 0; i < RCU_NEXT_SIZE; i++) 988 spin_unlock_irqrestore(&rnp->lock, flags);
899 rdp->nxttail[i] = &rdp->nxtlist; 989 if (need_report & RCU_OFL_TASKS_EXP_GP)
900 rdp_me->qlen += rdp->qlen; 990 rcu_report_exp_rnp(rsp, rnp);
901 rdp->qlen = 0; 991
902 } 992 rcu_adopt_orphan_cbs(rsp);
903 local_irq_restore(flags);
904} 993}
905 994
906/* 995/*
@@ -918,6 +1007,14 @@ static void rcu_offline_cpu(int cpu)
918 1007
919#else /* #ifdef CONFIG_HOTPLUG_CPU */ 1008#else /* #ifdef CONFIG_HOTPLUG_CPU */
920 1009
1010static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
1011{
1012}
1013
1014static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
1015{
1016}
1017
921static void rcu_offline_cpu(int cpu) 1018static void rcu_offline_cpu(int cpu)
922{ 1019{
923} 1020}
@@ -928,7 +1025,7 @@ static void rcu_offline_cpu(int cpu)
928 * Invoke any RCU callbacks that have made it to the end of their grace 1025 * Invoke any RCU callbacks that have made it to the end of their grace
929 * period. Thottle as specified by rdp->blimit. 1026 * period. Thottle as specified by rdp->blimit.
930 */ 1027 */
931static void rcu_do_batch(struct rcu_data *rdp) 1028static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
932{ 1029{
933 unsigned long flags; 1030 unsigned long flags;
934 struct rcu_head *next, *list, **tail; 1031 struct rcu_head *next, *list, **tail;
@@ -981,6 +1078,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
981 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark) 1078 if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
982 rdp->blimit = blimit; 1079 rdp->blimit = blimit;
983 1080
1081 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1082 if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
1083 rdp->qlen_last_fqs_check = 0;
1084 rdp->n_force_qs_snap = rsp->n_force_qs;
1085 } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
1086 rdp->qlen_last_fqs_check = rdp->qlen;
1087
984 local_irq_restore(flags); 1088 local_irq_restore(flags);
985 1089
986 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1090 /* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1050,33 +1154,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
1050 int cpu; 1154 int cpu;
1051 unsigned long flags; 1155 unsigned long flags;
1052 unsigned long mask; 1156 unsigned long mask;
1053 struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1]; 1157 struct rcu_node *rnp;
1054 struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
1055 1158
1056 for (; rnp_cur < rnp_end; rnp_cur++) { 1159 rcu_for_each_leaf_node(rsp, rnp) {
1057 mask = 0; 1160 mask = 0;
1058 spin_lock_irqsave(&rnp_cur->lock, flags); 1161 spin_lock_irqsave(&rnp->lock, flags);
1059 if (rsp->completed != lastcomp) { 1162 if (rnp->completed != lastcomp) {
1060 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1163 spin_unlock_irqrestore(&rnp->lock, flags);
1061 return 1; 1164 return 1;
1062 } 1165 }
1063 if (rnp_cur->qsmask == 0) { 1166 if (rnp->qsmask == 0) {
1064 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1167 spin_unlock_irqrestore(&rnp->lock, flags);
1065 continue; 1168 continue;
1066 } 1169 }
1067 cpu = rnp_cur->grplo; 1170 cpu = rnp->grplo;
1068 bit = 1; 1171 bit = 1;
1069 for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) { 1172 for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
1070 if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu])) 1173 if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
1071 mask |= bit; 1174 mask |= bit;
1072 } 1175 }
1073 if (mask != 0 && rsp->completed == lastcomp) { 1176 if (mask != 0 && rnp->completed == lastcomp) {
1074 1177
1075 /* cpu_quiet_msk() releases rnp_cur->lock. */ 1178 /* rcu_report_qs_rnp() releases rnp->lock. */
1076 cpu_quiet_msk(mask, rsp, rnp_cur, flags); 1179 rcu_report_qs_rnp(mask, rsp, rnp, flags);
1077 continue; 1180 continue;
1078 } 1181 }
1079 spin_unlock_irqrestore(&rnp_cur->lock, flags); 1182 spin_unlock_irqrestore(&rnp->lock, flags);
1080 } 1183 }
1081 return 0; 1184 return 0;
1082} 1185}
@@ -1091,8 +1194,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1091 long lastcomp; 1194 long lastcomp;
1092 struct rcu_node *rnp = rcu_get_root(rsp); 1195 struct rcu_node *rnp = rcu_get_root(rsp);
1093 u8 signaled; 1196 u8 signaled;
1197 u8 forcenow;
1094 1198
1095 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) 1199 if (!rcu_gp_in_progress(rsp))
1096 return; /* No grace period in progress, nothing to force. */ 1200 return; /* No grace period in progress, nothing to force. */
1097 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) { 1201 if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
1098 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ 1202 rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */
@@ -1103,19 +1207,20 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1103 goto unlock_ret; /* no emergency and done recently. */ 1207 goto unlock_ret; /* no emergency and done recently. */
1104 rsp->n_force_qs++; 1208 rsp->n_force_qs++;
1105 spin_lock(&rnp->lock); 1209 spin_lock(&rnp->lock);
1106 lastcomp = rsp->completed; 1210 lastcomp = rsp->gpnum - 1;
1107 signaled = rsp->signaled; 1211 signaled = rsp->signaled;
1108 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; 1212 rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
1109 if (lastcomp == rsp->gpnum) { 1213 if(!rcu_gp_in_progress(rsp)) {
1110 rsp->n_force_qs_ngp++; 1214 rsp->n_force_qs_ngp++;
1111 spin_unlock(&rnp->lock); 1215 spin_unlock(&rnp->lock);
1112 goto unlock_ret; /* no GP in progress, time updated. */ 1216 goto unlock_ret; /* no GP in progress, time updated. */
1113 } 1217 }
1114 spin_unlock(&rnp->lock); 1218 spin_unlock(&rnp->lock);
1115 switch (signaled) { 1219 switch (signaled) {
1220 case RCU_GP_IDLE:
1116 case RCU_GP_INIT: 1221 case RCU_GP_INIT:
1117 1222
1118 break; /* grace period still initializing, ignore. */ 1223 break; /* grace period idle or initializing, ignore. */
1119 1224
1120 case RCU_SAVE_DYNTICK: 1225 case RCU_SAVE_DYNTICK:
1121 1226
@@ -1126,20 +1231,29 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
1126 if (rcu_process_dyntick(rsp, lastcomp, 1231 if (rcu_process_dyntick(rsp, lastcomp,
1127 dyntick_save_progress_counter)) 1232 dyntick_save_progress_counter))
1128 goto unlock_ret; 1233 goto unlock_ret;
1234 /* fall into next case. */
1235
1236 case RCU_SAVE_COMPLETED:
1129 1237
1130 /* Update state, record completion counter. */ 1238 /* Update state, record completion counter. */
1239 forcenow = 0;
1131 spin_lock(&rnp->lock); 1240 spin_lock(&rnp->lock);
1132 if (lastcomp == rsp->completed) { 1241 if (lastcomp + 1 == rsp->gpnum &&
1242 lastcomp == rsp->completed &&
1243 rsp->signaled == signaled) {
1133 rsp->signaled = RCU_FORCE_QS; 1244 rsp->signaled = RCU_FORCE_QS;
1134 dyntick_record_completed(rsp, lastcomp); 1245 rsp->completed_fqs = lastcomp;
1246 forcenow = signaled == RCU_SAVE_COMPLETED;
1135 } 1247 }
1136 spin_unlock(&rnp->lock); 1248 spin_unlock(&rnp->lock);
1137 break; 1249 if (!forcenow)
1250 break;
1251 /* fall into next case. */
1138 1252
1139 case RCU_FORCE_QS: 1253 case RCU_FORCE_QS:
1140 1254
1141 /* Check dyntick-idle state, send IPI to laggarts. */ 1255 /* Check dyntick-idle state, send IPI to laggarts. */
1142 if (rcu_process_dyntick(rsp, dyntick_recall_completed(rsp), 1256 if (rcu_process_dyntick(rsp, rsp->completed_fqs,
1143 rcu_implicit_dynticks_qs)) 1257 rcu_implicit_dynticks_qs))
1144 goto unlock_ret; 1258 goto unlock_ret;
1145 1259
@@ -1195,7 +1309,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1195 } 1309 }
1196 1310
1197 /* If there are callbacks ready, invoke them. */ 1311 /* If there are callbacks ready, invoke them. */
1198 rcu_do_batch(rdp); 1312 rcu_do_batch(rsp, rdp);
1199} 1313}
1200 1314
1201/* 1315/*
@@ -1251,7 +1365,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1251 rdp->nxttail[RCU_NEXT_TAIL] = &head->next; 1365 rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
1252 1366
1253 /* Start a new grace period if one not already started. */ 1367 /* Start a new grace period if one not already started. */
1254 if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) { 1368 if (!rcu_gp_in_progress(rsp)) {
1255 unsigned long nestflag; 1369 unsigned long nestflag;
1256 struct rcu_node *rnp_root = rcu_get_root(rsp); 1370 struct rcu_node *rnp_root = rcu_get_root(rsp);
1257 1371
@@ -1259,10 +1373,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1259 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */ 1373 rcu_start_gp(rsp, nestflag); /* releases rnp_root->lock. */
1260 } 1374 }
1261 1375
1262 /* Force the grace period if too many callbacks or too long waiting. */ 1376 /*
1263 if (unlikely(++rdp->qlen > qhimark)) { 1377 * Force the grace period if too many callbacks or too long waiting.
1378 * Enforce hysteresis, and don't invoke force_quiescent_state()
1379 * if some other CPU has recently done so. Also, don't bother
1380 * invoking force_quiescent_state() if the newly enqueued callback
1381 * is the only one waiting for a grace period to complete.
1382 */
1383 if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
1264 rdp->blimit = LONG_MAX; 1384 rdp->blimit = LONG_MAX;
1265 force_quiescent_state(rsp, 0); 1385 if (rsp->n_force_qs == rdp->n_force_qs_snap &&
1386 *rdp->nxttail[RCU_DONE_TAIL] != head)
1387 force_quiescent_state(rsp, 0);
1388 rdp->n_force_qs_snap = rsp->n_force_qs;
1389 rdp->qlen_last_fqs_check = rdp->qlen;
1266 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0) 1390 } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
1267 force_quiescent_state(rsp, 1); 1391 force_quiescent_state(rsp, 1);
1268 local_irq_restore(flags); 1392 local_irq_restore(flags);
@@ -1286,6 +1410,68 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
1286} 1410}
1287EXPORT_SYMBOL_GPL(call_rcu_bh); 1411EXPORT_SYMBOL_GPL(call_rcu_bh);
1288 1412
1413/**
1414 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
1415 *
1416 * Control will return to the caller some time after a full rcu-sched
1417 * grace period has elapsed, in other words after all currently executing
1418 * rcu-sched read-side critical sections have completed. These read-side
1419 * critical sections are delimited by rcu_read_lock_sched() and
1420 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
1421 * local_irq_disable(), and so on may be used in place of
1422 * rcu_read_lock_sched().
1423 *
1424 * This means that all preempt_disable code sequences, including NMI and
1425 * hardware-interrupt handlers, in progress on entry will have completed
1426 * before this primitive returns. However, this does not guarantee that
1427 * softirq handlers will have completed, since in some kernels, these
1428 * handlers can run in process context, and can block.
1429 *
1430 * This primitive provides the guarantees made by the (now removed)
1431 * synchronize_kernel() API. In contrast, synchronize_rcu() only
1432 * guarantees that rcu_read_lock() sections will have completed.
1433 * In "classic RCU", these two guarantees happen to be one and
1434 * the same, but can differ in realtime RCU implementations.
1435 */
1436void synchronize_sched(void)
1437{
1438 struct rcu_synchronize rcu;
1439
1440 if (rcu_blocking_is_gp())
1441 return;
1442
1443 init_completion(&rcu.completion);
1444 /* Will wake me after RCU finished. */
1445 call_rcu_sched(&rcu.head, wakeme_after_rcu);
1446 /* Wait for it. */
1447 wait_for_completion(&rcu.completion);
1448}
1449EXPORT_SYMBOL_GPL(synchronize_sched);
1450
1451/**
1452 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
1453 *
1454 * Control will return to the caller some time after a full rcu_bh grace
1455 * period has elapsed, in other words after all currently executing rcu_bh
1456 * read-side critical sections have completed. RCU read-side critical
1457 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
1458 * and may be nested.
1459 */
1460void synchronize_rcu_bh(void)
1461{
1462 struct rcu_synchronize rcu;
1463
1464 if (rcu_blocking_is_gp())
1465 return;
1466
1467 init_completion(&rcu.completion);
1468 /* Will wake me after RCU finished. */
1469 call_rcu_bh(&rcu.head, wakeme_after_rcu);
1470 /* Wait for it. */
1471 wait_for_completion(&rcu.completion);
1472}
1473EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
1474
1289/* 1475/*
1290 * Check to see if there is any immediate RCU-related work to be done 1476 * Check to see if there is any immediate RCU-related work to be done
1291 * by the current CPU, for the specified type of RCU, returning 1 if so. 1477 * by the current CPU, for the specified type of RCU, returning 1 if so.
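The kernel-doc above for synchronize_sched() and synchronize_rcu_bh() describes when these blocking primitives apply. As a rough usage sketch only, assuming a kernel build context and using invented names (my_config, cur_config, my_set_threshold), an updater might publish a new version of a structure and retire the old one like this:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct my_config {
        int threshold;
};

static struct my_config *cur_config;    /* read under rcu_read_lock_sched() */
static DEFINE_SPINLOCK(cfg_lock);       /* serializes updaters */

/* Reader: disabling preemption delimits an rcu-sched read-side section. */
static int my_read_threshold(void)
{
        struct my_config *cfg;
        int val;

        rcu_read_lock_sched();
        cfg = rcu_dereference(cur_config);
        val = cfg ? cfg->threshold : -1;
        rcu_read_unlock_sched();
        return val;
}

/* Updater: publish the new version, wait a grace period, then free. */
static int my_set_threshold(int threshold)
{
        struct my_config *newc, *oldc;

        newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        if (!newc)
                return -ENOMEM;
        newc->threshold = threshold;

        spin_lock(&cfg_lock);
        oldc = cur_config;
        rcu_assign_pointer(cur_config, newc);
        spin_unlock(&cfg_lock);

        synchronize_sched();    /* all pre-existing rcu-sched readers are done */
        kfree(oldc);            /* kfree(NULL) is a no-op on first update */
        return 0;
}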
@@ -1295,6 +1481,8 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
1295 */ 1481 */
1296static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) 1482static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1297{ 1483{
1484 struct rcu_node *rnp = rdp->mynode;
1485
1298 rdp->n_rcu_pending++; 1486 rdp->n_rcu_pending++;
1299 1487
1300 /* Check for CPU stalls, if enabled. */ 1488 /* Check for CPU stalls, if enabled. */
@@ -1319,19 +1507,19 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
1319 } 1507 }
1320 1508
1321 /* Has another RCU grace period completed? */ 1509 /* Has another RCU grace period completed? */
1322 if (ACCESS_ONCE(rsp->completed) != rdp->completed) { /* outside lock */ 1510 if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
1323 rdp->n_rp_gp_completed++; 1511 rdp->n_rp_gp_completed++;
1324 return 1; 1512 return 1;
1325 } 1513 }
1326 1514
1327 /* Has a new RCU grace period started? */ 1515 /* Has a new RCU grace period started? */
1328 if (ACCESS_ONCE(rsp->gpnum) != rdp->gpnum) { /* outside lock */ 1516 if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum) { /* outside lock */
1329 rdp->n_rp_gp_started++; 1517 rdp->n_rp_gp_started++;
1330 return 1; 1518 return 1;
1331 } 1519 }
1332 1520
1333 /* Has an RCU GP gone long enough to send resched IPIs &c? */ 1521 /* Has an RCU GP gone long enough to send resched IPIs &c? */
1334 if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) && 1522 if (rcu_gp_in_progress(rsp) &&
1335 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) { 1523 ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
1336 rdp->n_rp_need_fqs++; 1524 rdp->n_rp_need_fqs++;
1337 return 1; 1525 return 1;
@@ -1369,6 +1557,97 @@ int rcu_needs_cpu(int cpu)
1369} 1557}
1370 1558
1371/* 1559/*
1560 * This function is invoked towards the end of the scheduler's initialization
1561 * process. Before this is called, the idle task might contain
1562 * RCU read-side critical sections (during which time, this idle
1563 * task is booting the system). After this function is called, the
1564 * idle tasks are prohibited from containing RCU read-side critical
1565 * sections.
1566 */
1567void rcu_scheduler_starting(void)
1568{
1569 WARN_ON(num_online_cpus() != 1);
1570 WARN_ON(nr_context_switches() > 0);
1571 rcu_scheduler_active = 1;
1572}
1573
1574static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
1575static atomic_t rcu_barrier_cpu_count;
1576static DEFINE_MUTEX(rcu_barrier_mutex);
1577static struct completion rcu_barrier_completion;
1578
1579static void rcu_barrier_callback(struct rcu_head *notused)
1580{
1581 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1582 complete(&rcu_barrier_completion);
1583}
1584
1585/*
1586 * Called with preemption disabled, and from cross-cpu IRQ context.
1587 */
1588static void rcu_barrier_func(void *type)
1589{
1590 int cpu = smp_processor_id();
1591 struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
1592 void (*call_rcu_func)(struct rcu_head *head,
1593 void (*func)(struct rcu_head *head));
1594
1595 atomic_inc(&rcu_barrier_cpu_count);
1596 call_rcu_func = type;
1597 call_rcu_func(head, rcu_barrier_callback);
1598}
1599
1600/*
1601 * Orchestrate the specified type of RCU barrier, waiting for all
1602 * RCU callbacks of the specified type to complete.
1603 */
1604static void _rcu_barrier(struct rcu_state *rsp,
1605 void (*call_rcu_func)(struct rcu_head *head,
1606 void (*func)(struct rcu_head *head)))
1607{
1608 BUG_ON(in_interrupt());
1609 /* Take mutex to serialize concurrent rcu_barrier() requests. */
1610 mutex_lock(&rcu_barrier_mutex);
1611 init_completion(&rcu_barrier_completion);
1612 /*
1613 * Initialize rcu_barrier_cpu_count to 1, then invoke
1614 * rcu_barrier_func() on each CPU, so that each CPU also has
1615 * incremented rcu_barrier_cpu_count. Only then is it safe to
1616 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
1617 * might complete its grace period before all of the other CPUs
1618 * did their increment, causing this function to return too
1619 * early.
1620 */
1621 atomic_set(&rcu_barrier_cpu_count, 1);
1622 preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
1623 rcu_adopt_orphan_cbs(rsp);
1624 on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
1625 preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
1626 if (atomic_dec_and_test(&rcu_barrier_cpu_count))
1627 complete(&rcu_barrier_completion);
1628 wait_for_completion(&rcu_barrier_completion);
1629 mutex_unlock(&rcu_barrier_mutex);
1630}
1631
1632/**
1633 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
1634 */
1635void rcu_barrier_bh(void)
1636{
1637 _rcu_barrier(&rcu_bh_state, call_rcu_bh);
1638}
1639EXPORT_SYMBOL_GPL(rcu_barrier_bh);
1640
1641/**
1642 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
1643 */
1644void rcu_barrier_sched(void)
1645{
1646 _rcu_barrier(&rcu_sched_state, call_rcu_sched);
1647}
1648EXPORT_SYMBOL_GPL(rcu_barrier_sched);
1649
1650/*
1372 * Do boot-time initialization of a CPU's per-CPU RCU data. 1651 * Do boot-time initialization of a CPU's per-CPU RCU data.
1373 */ 1652 */
1374static void __init 1653static void __init
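_rcu_barrier() above pre-sets rcu_barrier_cpu_count to 1 so that the completion cannot fire until every CPU has registered its callback and the initial reference is dropped. Below is a minimal userspace model of that counting pattern using POSIX threads and C11 atomics; the names pending, complete_one, and worker are invented for this sketch.

/* Userspace model of the count-starts-at-one completion trick. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static atomic_int pending;
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cond = PTHREAD_COND_INITIALIZER;
static int done;

/* One "callback" has finished; the final decrement signals completion. */
static void complete_one(void)
{
        if (atomic_fetch_sub(&pending, 1) == 1) {
                pthread_mutex_lock(&done_lock);
                done = 1;
                pthread_cond_signal(&done_cond);
                pthread_mutex_unlock(&done_lock);
        }
}

static void *worker(void *arg)
{
        (void)arg;
        usleep(1000);           /* pretend the callback runs later */
        complete_one();
        return NULL;
}

int main(void)
{
        pthread_t tid[NWORKERS];
        int i;

        /*
         * Start at 1 so the count cannot reach zero until after every
         * worker has been registered and the initial reference dropped.
         */
        atomic_store(&pending, 1);
        for (i = 0; i < NWORKERS; i++) {
                atomic_fetch_add(&pending, 1);  /* register worker i */
                pthread_create(&tid[i], NULL, worker, NULL);
        }
        complete_one();         /* drop the initial reference */

        pthread_mutex_lock(&done_lock);
        while (!done)
                pthread_cond_wait(&done_cond, &done_lock);
        pthread_mutex_unlock(&done_lock);

        for (i = 0; i < NWORKERS; i++)
                pthread_join(tid[i], NULL);
        printf("all callbacks completed\n");    /* build with: cc -pthread */
        return 0;
}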
@@ -1403,21 +1682,18 @@ static void __cpuinit
1403rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable) 1682rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1404{ 1683{
1405 unsigned long flags; 1684 unsigned long flags;
1406 long lastcomp;
1407 unsigned long mask; 1685 unsigned long mask;
1408 struct rcu_data *rdp = rsp->rda[cpu]; 1686 struct rcu_data *rdp = rsp->rda[cpu];
1409 struct rcu_node *rnp = rcu_get_root(rsp); 1687 struct rcu_node *rnp = rcu_get_root(rsp);
1410 1688
1411 /* Set up local state, ensuring consistent view of global state. */ 1689 /* Set up local state, ensuring consistent view of global state. */
1412 spin_lock_irqsave(&rnp->lock, flags); 1690 spin_lock_irqsave(&rnp->lock, flags);
1413 lastcomp = rsp->completed;
1414 rdp->completed = lastcomp;
1415 rdp->gpnum = lastcomp;
1416 rdp->passed_quiesc = 0; /* We could be racing with new GP, */ 1691 rdp->passed_quiesc = 0; /* We could be racing with new GP, */
1417 rdp->qs_pending = 1; /* so set up to respond to current GP. */ 1692 rdp->qs_pending = 1; /* so set up to respond to current GP. */
1418 rdp->beenonline = 1; /* We have now been online. */ 1693 rdp->beenonline = 1; /* We have now been online. */
1419 rdp->preemptable = preemptable; 1694 rdp->preemptable = preemptable;
1420 rdp->passed_quiesc_completed = lastcomp - 1; 1695 rdp->qlen_last_fqs_check = 0;
1696 rdp->n_force_qs_snap = rsp->n_force_qs;
1421 rdp->blimit = blimit; 1697 rdp->blimit = blimit;
1422 spin_unlock(&rnp->lock); /* irqs remain disabled. */ 1698 spin_unlock(&rnp->lock); /* irqs remain disabled. */
1423 1699
@@ -1437,6 +1713,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
1437 spin_lock(&rnp->lock); /* irqs already disabled. */ 1713 spin_lock(&rnp->lock); /* irqs already disabled. */
1438 rnp->qsmaskinit |= mask; 1714 rnp->qsmaskinit |= mask;
1439 mask = rnp->grpmask; 1715 mask = rnp->grpmask;
1716 if (rnp == rdp->mynode) {
1717 rdp->gpnum = rnp->completed; /* if GP in progress... */
1718 rdp->completed = rnp->completed;
1719 rdp->passed_quiesc_completed = rnp->completed - 1;
1720 }
1440 spin_unlock(&rnp->lock); /* irqs already disabled. */ 1721 spin_unlock(&rnp->lock); /* irqs already disabled. */
1441 rnp = rnp->parent; 1722 rnp = rnp->parent;
1442 } while (rnp != NULL && !(rnp->qsmaskinit & mask)); 1723 } while (rnp != NULL && !(rnp->qsmaskinit & mask));
@@ -1454,8 +1735,8 @@ static void __cpuinit rcu_online_cpu(int cpu)
1454/* 1735/*
1455 * Handle CPU online/offline notification events. 1736 * Handle CPU online/offline notification events.
1456 */ 1737 */
1457int __cpuinit rcu_cpu_notify(struct notifier_block *self, 1738static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1458 unsigned long action, void *hcpu) 1739 unsigned long action, void *hcpu)
1459{ 1740{
1460 long cpu = (long)hcpu; 1741 long cpu = (long)hcpu;
1461 1742
@@ -1464,6 +1745,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
1464 case CPU_UP_PREPARE_FROZEN: 1745 case CPU_UP_PREPARE_FROZEN:
1465 rcu_online_cpu(cpu); 1746 rcu_online_cpu(cpu);
1466 break; 1747 break;
1748 case CPU_DYING:
1749 case CPU_DYING_FROZEN:
1750 /*
1751 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
1752 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
1753 * returns, all online cpus have queued rcu_barrier_func().
1754 * The dying CPU clears its cpu_online_mask bit and
1755 * moves all of its RCU callbacks to ->orphan_cbs_list
1756 * in the context of stop_machine(), so subsequent calls
1757 * to _rcu_barrier() will adopt these callbacks and only
1758 * then queue rcu_barrier_func() on all remaining CPUs.
1759 */
1760 rcu_send_cbs_to_orphanage(&rcu_bh_state);
1761 rcu_send_cbs_to_orphanage(&rcu_sched_state);
1762 rcu_preempt_send_cbs_to_orphanage();
1763 break;
1467 case CPU_DEAD: 1764 case CPU_DEAD:
1468 case CPU_DEAD_FROZEN: 1765 case CPU_DEAD_FROZEN:
1469 case CPU_UP_CANCELED: 1766 case CPU_UP_CANCELED:
@@ -1527,6 +1824,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1527 rnp = rsp->level[i]; 1824 rnp = rsp->level[i];
1528 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) { 1825 for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
1529 spin_lock_init(&rnp->lock); 1826 spin_lock_init(&rnp->lock);
1827 lockdep_set_class(&rnp->lock, &rcu_node_class[i]);
1530 rnp->gpnum = 0; 1828 rnp->gpnum = 0;
1531 rnp->qsmask = 0; 1829 rnp->qsmask = 0;
1532 rnp->qsmaskinit = 0; 1830 rnp->qsmaskinit = 0;
@@ -1547,6 +1845,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1547 rnp->level = i; 1845 rnp->level = i;
1548 INIT_LIST_HEAD(&rnp->blocked_tasks[0]); 1846 INIT_LIST_HEAD(&rnp->blocked_tasks[0]);
1549 INIT_LIST_HEAD(&rnp->blocked_tasks[1]); 1847 INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
1848 INIT_LIST_HEAD(&rnp->blocked_tasks[2]);
1849 INIT_LIST_HEAD(&rnp->blocked_tasks[3]);
1550 } 1850 }
1551 } 1851 }
1552} 1852}
@@ -1558,6 +1858,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
1558 */ 1858 */
1559#define RCU_INIT_FLAVOR(rsp, rcu_data) \ 1859#define RCU_INIT_FLAVOR(rsp, rcu_data) \
1560do { \ 1860do { \
1861 int i; \
1862 int j; \
1863 struct rcu_node *rnp; \
1864 \
1561 rcu_init_one(rsp); \ 1865 rcu_init_one(rsp); \
1562 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \ 1866 rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
1563 j = 0; \ 1867 j = 0; \
@@ -1570,41 +1874,30 @@ do { \
1570 } \ 1874 } \
1571} while (0) 1875} while (0)
1572 1876
1573#ifdef CONFIG_TREE_PREEMPT_RCU 1877void __init rcu_init(void)
1574
1575void __init __rcu_init_preempt(void)
1576{
1577 int i; /* All used by RCU_INIT_FLAVOR(). */
1578 int j;
1579 struct rcu_node *rnp;
1580
1581 RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
1582}
1583
1584#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
1585
1586void __init __rcu_init_preempt(void)
1587{ 1878{
1588} 1879 int i;
1589
1590#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
1591
1592void __init __rcu_init(void)
1593{
1594 int i; /* All used by RCU_INIT_FLAVOR(). */
1595 int j;
1596 struct rcu_node *rnp;
1597 1880
1598 rcu_bootup_announce(); 1881 rcu_bootup_announce();
1599#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 1882#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
1600 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); 1883 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
1601#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 1884#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
1885#if NUM_RCU_LVL_4 != 0
1886 printk(KERN_INFO "Experimental four-level hierarchy is enabled.\n");
1887#endif /* #if NUM_RCU_LVL_4 != 0 */
1602 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data); 1888 RCU_INIT_FLAVOR(&rcu_sched_state, rcu_sched_data);
1603 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data); 1889 RCU_INIT_FLAVOR(&rcu_bh_state, rcu_bh_data);
1604 __rcu_init_preempt(); 1890 __rcu_init_preempt();
1605 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); 1891 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
1892
1893 /*
1894 * We don't need protection against CPU-hotplug here because
1895 * this is called early in boot, before either interrupts
1896 * or the scheduler are operational.
1897 */
1898 cpu_notifier(rcu_cpu_notify, 0);
1899 for_each_online_cpu(i)
1900 rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)i);
1606} 1901}
1607 1902
1608module_param(blimit, int, 0); 1903#include "rcutree_plugin.h"
1609module_param(qhimark, int, 0);
1610module_param(qlowmark, int, 0);