path: root/kernel/rcutree.c
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	485
1 files changed, 298 insertions, 187 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b11b07cfe7f..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -25,7 +25,7 @@
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  *	Documentation/RCU
  */
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -49,13 +49,6 @@
 
 #include "rcutree.h"
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
-	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
 /* Data structures. */
 
 #define RCU_STATE_INITIALIZER(name) { \
@@ -66,10 +59,13 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	NUM_RCU_LVL_2, \
 	NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+	.orphan_cbs_list = NULL, \
+	.orphan_cbs_tail = &name.orphan_cbs_list, \
+	.orphan_qlen = 0, \
 	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
 	.n_force_qs = 0, \
 	.n_force_qs_ngp = 0, \
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-extern long rcu_batches_completed_sched(void);
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
-static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags);
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
-#ifdef CONFIG_HOTPLUG_CPU
-static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void __rcu_process_callbacks(struct rcu_state *rsp,
-				    struct rcu_data *rdp);
-static void __call_rcu(struct rcu_head *head,
-		       void (*func)(struct rcu_head *rcu),
-		       struct rcu_state *rsp);
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
-					   int preemptable);
 
-#include "rcutree_plugin.h"
+/*
+ * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * permit this function to be invoked without holding the root rcu_node
+ * structure's ->lock, but of course results can be subject to change.
+ */
+static int rcu_gp_in_progress(struct rcu_state *rsp)
+{
+	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+}
 
 /*
  * Note a quiescent state.  Because we do not need to know
@@ -107,27 +95,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
  */
 void rcu_sched_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_sched_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	rcu_preempt_qs(cpu);
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
+	rcu_preempt_note_context_switch(cpu);
 }
 
 void rcu_bh_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
 }
 
 #ifdef CONFIG_NO_HZ
@@ -141,6 +125,10 @@ static int blimit = 10; /* Maximum callbacks per softirq. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
@@ -177,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* ACCESS_ONCE() because we are accessing outside of lock. */
-	return *rdp->nxttail[RCU_DONE_TAIL] &&
-	       ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
 }
 
 /*
@@ -373,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state.  Return 1 if this CPU
- * is already in a quiescent state courtesy of dynticks idle mode.
+ * is in dynticks idle mode, which is an extended quiescent state.
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
@@ -479,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	long delta;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
 
 	/* Only let one CPU complain about others per time interval. */
 
 	spin_lock_irqsave(&rnp->lock, flags);
 	delta = jiffies - rsp->jiffies_stall;
-	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+
+	/*
+	 * Now rat on any tasks that got kicked up to the root rcu_node
+	 * due to CPU offlining.
+	 */
+	rcu_print_task_stall(rnp);
 	spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_print_task_stall(rnp);
-		if (rnp_cur->qsmask == 0)
+		if (rnp->qsmask == 0)
 			continue;
-		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
-			if (rnp_cur->qsmask & (1UL << cpu))
-				printk(" %d", rnp_cur->grplo + cpu);
+		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			if (rnp->qsmask & (1UL << cpu))
+				printk(" %d", rnp->grplo + cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -541,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rsp->gpnum != rsp->completed &&
-		   delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
 
 		/* They had two time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
@@ -605,8 +594,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 {
 	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur;
-	struct rcu_node *rnp_end;
 
 	if (!cpu_needs_another_gp(rsp, rdp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -615,6 +602,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
+	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
@@ -622,16 +610,24 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	note_new_gpnum(rsp, rdp);
 
 	/*
-	 * Because we are first, we know that all our callbacks will
-	 * be covered by this upcoming grace period, even the ones
-	 * that were registered arbitrarily recently.
+	 * Because this CPU just now started the new grace period, we know
+	 * that all of its callbacks will be covered by this upcoming grace
+	 * period, even the ones that were registered arbitrarily recently.
+	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+	 *
+	 * Other CPUs cannot be sure exactly when the grace period started.
+	 * Therefore, their recently registered callbacks must pass through
+	 * an additional RCU_NEXT_READY stage, so that they will be handled
+	 * by the next RCU grace period.
 	 */
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
@@ -644,45 +640,34 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
 
 	/*
-	 * Set the quiescent-state-needed bits in all the non-leaf RCU
-	 * nodes for all currently online CPUs.  This operation relies
-	 * on the layout of the hierarchy within the rsp->node[] array.
-	 * Note that other CPUs will access only the leaves of the
-	 * hierarchy, which still indicate that no grace period is in
-	 * progress.  In addition, we have excluded CPU-hotplug operations.
-	 *
-	 * We therefore do not need to hold any locks.  Any required
-	 * memory barriers will be supplied by the locks guarding the
-	 * leaf rcu_nodes in the hierarchy.
-	 */
-
-	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
-	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-
-	/*
-	 * Now set up the leaf nodes.  Here we must be careful.  First,
-	 * we need to hold the lock in order to exclude other CPUs, which
-	 * might be contending for the leaf nodes' locks.  Second, as
-	 * soon as we initialize a given leaf node, its CPUs might run
-	 * up the rest of the hierarchy.  We must therefore acquire locks
-	 * for each node that we touch during this stage.  (But we still
-	 * are excluding CPU-hotplug operations.)
+	 * Set the quiescent-state-needed bits in all the rcu_node
+	 * structures for all currently online CPUs in breadth-first
+	 * order, starting from the root rcu_node structure.  This
+	 * operation relies on the layout of the hierarchy within the
+	 * rsp->node[] array.  Note that other CPUs will access only
+	 * the leaves of the hierarchy, which still indicate that no
+	 * grace period is in progress, at least until the corresponding
+	 * leaf node has been initialized.  In addition, we have excluded
+	 * CPU-hotplug operations.
 	 *
 	 * Note that the grace period cannot complete until we finish
 	 * the initialization process, as there will be at least one
 	 * qsmask bit set in the root node until that time, namely the
-	 * one corresponding to this CPU.
+	 * one corresponding to this CPU, due to the fact that we have
+	 * irqs disabled.
 	 */
-	rnp_end = &rsp->node[NUM_RCU_NODES];
-	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	for (; rnp_cur < rnp_end; rnp_cur++) {
-		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
+	rcu_for_each_node_breadth_first(rsp, rnp) {
+		spin_lock(&rnp->lock);		/* irqs already disabled. */
+		rcu_preempt_check_blocked_tasks(rnp);
+		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
+		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);			/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -720,9 +705,11 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  * hold rnp->lock, as required by rcu_start_gp(), which will release it.
  */
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
-	__releases(rnp->lock)
+	__releases(rcu_get_root(rsp)->lock)
 {
+	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
 }
@@ -739,6 +726,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 	      unsigned long flags)
 	__releases(rnp->lock)
 {
+	struct rcu_node *rnp_c;
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask)) {
@@ -762,8 +751,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
+		rnp_c = rnp;
 		rnp = rnp->parent;
 		spin_lock_irqsave(&rnp->lock, flags);
+		WARN_ON_ONCE(rnp_c->qsmask);
 	}
 
 	/*
@@ -776,10 +767,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Record a quiescent state for the specified CPU, which must either be
- * the current CPU or an offline CPU.  The lastcomp argument is used to
- * make sure we are still in the grace period of interest.  We don't want
- * to end the current grace period based on quiescent states detected in
- * an earlier grace period!
+ * the current CPU.  The lastcomp argument is used to make sure we are
+ * still in the grace period of interest.  We don't want to end the current
+ * grace period based on quiescent states detected in an earlier grace
+ * period!
  */
 static void
 cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
@@ -814,7 +805,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 		 * This GP can't end until cpu checks in, so all of our
 		 * callbacks can be processed during the next GP.
 		 */
-		rdp = rsp->rda[smp_processor_id()];
 		rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
 		cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -855,24 +845,70 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
+ * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
+ * specified flavor of RCU.  The callbacks will be adopted by the next
+ * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
+ * comes first.  Because this is invoked from the CPU_DYING notifier,
+ * irqs are already disabled.
+ */
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+	int i;
+	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+
+	if (rdp->nxtlist == NULL)
+		return;  /* irqs disabled, so comparison is stable. */
+	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+	*rsp->orphan_cbs_tail = rdp->nxtlist;
+	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rsp->orphan_qlen += rdp->qlen;
+	rdp->qlen = 0;
+	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
+}
+
+/*
+ * Adopt previously orphaned RCU callbacks.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	spin_lock_irqsave(&rsp->onofflock, flags);
+	rdp = rsp->rda[smp_processor_id()];
+	if (rsp->orphan_cbs_list == NULL) {
+		spin_unlock_irqrestore(&rsp->onofflock, flags);
+		return;
+	}
+	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
+	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
+	rdp->qlen += rsp->orphan_qlen;
+	rsp->orphan_cbs_list = NULL;
+	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
+	rsp->orphan_qlen = 0;
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
-	int i;
 	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
-	struct rcu_data *rdp_me;
 	struct rcu_node *rnp;
 
 	/* Exclude any attempts to start a new grace period. */
 	spin_lock_irqsave(&rsp->onofflock, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	rnp = rdp->mynode;
+	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
@@ -881,42 +917,29 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp);
+
+		/*
+		 * If there was a task blocking the current grace period,
+		 * and if all CPUs have checked in, we need to propagate
+		 * the quiescent state up the rcu_node hierarchy.  But that
+		 * is inconvenient at the moment due to deadlock issues if
+		 * this should end the current grace period.  So set the
+		 * offlined CPU's bit in ->qsmask in order to force the
+		 * next force_quiescent_state() invocation to clean up this
+		 * mess in a deadlock-free manner.
+		 */
+		if (rcu_preempt_offline_tasks(rsp, rnp, rdp) && !rnp->qsmask)
+			rnp->qsmask |= mask;
+
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
 
-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/* Being offline is a quiescent state, so go record it. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 
-	/*
-	 * Move callbacks from the outgoing CPU to the running CPU.
-	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access its rcu_data structure.
-	 * Note also that we must carefully retain the order of the
-	 * outgoing CPU's callbacks in order for rcu_barrier() to work
-	 * correctly.  Finally, note that we start all the callbacks
-	 * afresh, even those that have passed through a grace period
-	 * and are therefore ready to invoke.  The theory is that hotplug
-	 * events are rare, and that if they are frequent enough to
-	 * indefinitely delay callbacks, you have far worse things to
-	 * be worrying about.
-	 */
-	rdp_me = rsp->rda[smp_processor_id()];
-	if (rdp->nxtlist != NULL) {
-		*rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-		rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-		rdp->nxtlist = NULL;
-		for (i = 0; i < RCU_NEXT_SIZE; i++)
-			rdp->nxttail[i] = &rdp->nxtlist;
-		rdp_me->qlen += rdp->qlen;
-		rdp->qlen = 0;
-	}
-	local_irq_restore(flags);
+	rcu_adopt_orphan_cbs(rsp);
 }
 
 /*
@@ -934,6 +957,14 @@ static void rcu_offline_cpu(int cpu)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+}
+
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_offline_cpu(int cpu)
 {
 }
@@ -944,7 +975,7 @@ static void rcu_offline_cpu(int cpu)
  * Invoke any RCU callbacks that have made it to the end of their grace
  * period.  Thottle as specified by rdp->blimit.
  */
-static void rcu_do_batch(struct rcu_data *rdp)
+static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
@@ -997,6 +1028,13 @@ static void rcu_do_batch(struct rcu_data *rdp)
 	if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
 		rdp->blimit = blimit;
 
+	/* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
+	if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
+		rdp->qlen_last_fqs_check = 0;
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+	} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
+		rdp->qlen_last_fqs_check = rdp->qlen;
+
 	local_irq_restore(flags);
 
 	/* Re-raise the RCU softirq if there are callbacks remaining. */
@@ -1066,33 +1104,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+	struct rcu_node *rnp;
 
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
-		spin_lock_irqsave(&rnp_cur->lock, flags);
+		spin_lock_irqsave(&rnp->lock, flags);
 		if (rsp->completed != lastcomp) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
-		if (rnp_cur->qsmask == 0) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		if (rnp->qsmask == 0) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;
 		}
-		cpu = rnp_cur->grplo;
+		cpu = rnp->grplo;
 		bit = 1;
-		for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
-			if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
 		if (mask != 0 && rsp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp_cur->lock. */
-			cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+			/* cpu_quiet_msk() releases rnp->lock. */
+			cpu_quiet_msk(mask, rsp, rnp, flags);
 			continue;
 		}
-		spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	return 0;
 }
@@ -1108,7 +1145,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
 	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
@@ -1129,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1145,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
@@ -1211,7 +1250,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* If there are callbacks ready, invoke them. */
-	rcu_do_batch(rdp);
+	rcu_do_batch(rsp, rdp);
 }
 
 /*
@@ -1267,7 +1306,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	/* Start a new grace period if one not already started. */
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+	if (!rcu_gp_in_progress(rsp)) {
 		unsigned long nestflag;
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
@@ -1275,10 +1314,20 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		rcu_start_gp(rsp, nestflag);  /* releases rnp_root->lock. */
 	}
 
-	/* Force the grace period if too many callbacks or too long waiting. */
-	if (unlikely(++rdp->qlen > qhimark)) {
+	/*
+	 * Force the grace period if too many callbacks or too long waiting.
+	 * Enforce hysteresis, and don't invoke force_quiescent_state()
+	 * if some other CPU has recently done so.  Also, don't bother
+	 * invoking force_quiescent_state() if the newly enqueued callback
+	 * is the only one waiting for a grace period to complete.
+	 */
+	if (unlikely(++rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
 		rdp->blimit = LONG_MAX;
-		force_quiescent_state(rsp, 0);
+		if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+		    *rdp->nxttail[RCU_DONE_TAIL] != head)
+			force_quiescent_state(rsp, 0);
+		rdp->n_force_qs_snap = rsp->n_force_qs;
+		rdp->qlen_last_fqs_check = rdp->qlen;
 	} else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
 		force_quiescent_state(rsp, 1);
 	local_irq_restore(flags);
@@ -1347,7 +1396,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+	if (rcu_gp_in_progress(rsp) &&
 	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
@@ -1384,6 +1433,82 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(struct rcu_state *rsp,
+			 void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take mutex to serialize concurrent rcu_barrier() requests. */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
+	rcu_adopt_orphan_cbs(rsp);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+}
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
@@ -1434,6 +1559,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 	rdp->beenonline = 1;	 /* We have now been online. */
 	rdp->preemptable = preemptable;
 	rdp->passed_quiesc_completed = lastcomp - 1;
+	rdp->qlen_last_fqs_check = 0;
+	rdp->n_force_qs_snap = rsp->n_force_qs;
 	rdp->blimit = blimit;
 	spin_unlock(&rnp->lock);		/* irqs remain disabled. */
 
@@ -1457,20 +1584,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 		rnp = rnp->parent;
 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
 
-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/*
-	 * A new grace period might start here.  If so, we will be part of
-	 * it, and its gpnum will be greater than ours, so we will
-	 * participate.  It is also possible for the gpnum to have been
-	 * incremented before this function was called, and the bitmasks
-	 * to not be filled out until now, in which case we will also
-	 * participate due to our gpnum being behind.
-	 */
-
-	/* Since it is coming online, the CPU is in a quiescent state. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
 static void __cpuinit rcu_online_cpu(int cpu)
@@ -1493,6 +1607,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func().
+		 * The dying CPU clears its cpu_online_mask bit and
+		 * moves all of its RCU callbacks to ->orphan_cbs_list
+		 * in the context of stop_machine(), so subsequent calls
+		 * to _rcu_barrier() will adopt these callbacks and only
+		 * then queue rcu_barrier_func() on all remaining CPUs.
+		 */
+		rcu_send_cbs_to_orphanage(&rcu_bh_state);
+		rcu_send_cbs_to_orphanage(&rcu_sched_state);
+		rcu_preempt_send_cbs_to_orphanage();
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
@@ -1555,7 +1685,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= rsp->levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-			spin_lock_init(&rnp->lock);
+			if (rnp != rcu_get_root(rsp))
+				spin_lock_init(&rnp->lock);
 			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
@@ -1578,6 +1709,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
+	spin_lock_init(&rcu_get_root(rsp)->lock);
 }
 
 /*
@@ -1587,6 +1719,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
  */
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	int i; \
+	int j; \
+	struct rcu_node *rnp; \
+	\
 	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
@@ -1599,31 +1735,8 @@ do { \
 	} \
 } while (0)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-void __init __rcu_init_preempt(void)
-{
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
-}
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-void __init __rcu_init_preempt(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
@@ -1634,6 +1747,4 @@ void __init __rcu_init(void)
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
+#include "rcutree_plugin.h"
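
The rcu_barrier_bh() and rcu_barrier_sched() primitives added by this diff let code that has posted call_rcu_bh()/call_rcu_sched() callbacks wait for all of them to be invoked, most commonly on a module-unload path before the callback functions themselves disappear. A minimal sketch of that usage pattern (a hypothetical module, not part of this patch) might look like the following:

/* Hypothetical example module -- not part of kernel/rcutree.c. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo {
	struct rcu_head rcu;
	int data;
};

/* Invoked after a grace period; frees the structure. */
static void foo_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Defer freeing of fp until all pre-existing readers have finished. */
static void foo_release(struct foo *fp)
{
	call_rcu_sched(&fp->rcu, foo_reclaim);
}

static void __exit foo_exit(void)
{
	/*
	 * Wait for every call_rcu_sched() callback this module has
	 * queued; otherwise foo_reclaim() could run after the module
	 * text has been unloaded.
	 */
	rcu_barrier_sched();
}
module_exit(foo_exit);
MODULE_LICENSE("GPL");

rcu_barrier_bh() plays the same role for call_rcu_bh() callbacks.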