path: root/kernel/rcutree.c
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	431
1 file changed, 252 insertions(+), 179 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 6b11b07cfe7f..705f02ac7433 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -25,7 +25,7 @@
  * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
  *
  * For detailed explanation of Read-Copy Update mechanism see -
  * Documentation/RCU
  */
 #include <linux/types.h>
 #include <linux/kernel.h>
@@ -49,13 +49,6 @@
 
 #include "rcutree.h"
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-static struct lock_class_key rcu_lock_key;
-struct lockdep_map rcu_lock_map =
-	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
-EXPORT_SYMBOL_GPL(rcu_lock_map);
-#endif
-
 /* Data structures. */
 
 #define RCU_STATE_INITIALIZER(name) { \
@@ -70,6 +63,9 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
+	.orphan_cbs_list = NULL, \
+	.orphan_cbs_tail = &name.orphan_cbs_list, \
+	.orphan_qlen = 0, \
 	.fqslock = __SPIN_LOCK_UNLOCKED(&name.fqslock), \
 	.n_force_qs = 0, \
 	.n_force_qs_ngp = 0, \
@@ -81,24 +77,16 @@ DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh_state);
 DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
 
-extern long rcu_batches_completed_sched(void);
-static struct rcu_node *rcu_get_root(struct rcu_state *rsp);
-static void cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp,
-			  struct rcu_node *rnp, unsigned long flags);
-static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags);
-#ifdef CONFIG_HOTPLUG_CPU
-static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void __rcu_process_callbacks(struct rcu_state *rsp,
-				    struct rcu_data *rdp);
-static void __call_rcu(struct rcu_head *head,
-		       void (*func)(struct rcu_head *rcu),
-		       struct rcu_state *rsp);
-static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp);
-static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
-					   int preemptable);
 
-#include "rcutree_plugin.h"
+/*
+ * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * permit this function to be invoked without holding the root rcu_node
+ * structure's ->lock, but of course results can be subject to change.
+ */
+static int rcu_gp_in_progress(struct rcu_state *rsp)
+{
+	return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+}
 
 /*
  * Note a quiescent state.  Because we do not need to know
@@ -107,27 +95,23 @@ static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_state *rsp,
  */
 void rcu_sched_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_sched_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	rcu_preempt_qs(cpu);
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
+	rcu_preempt_note_context_switch(cpu);
 }
 
 void rcu_bh_qs(int cpu)
 {
-	unsigned long flags;
 	struct rcu_data *rdp;
 
-	local_irq_save(flags);
 	rdp = &per_cpu(rcu_bh_data, cpu);
-	rdp->passed_quiesc = 1;
 	rdp->passed_quiesc_completed = rdp->completed;
-	local_irq_restore(flags);
+	barrier();
+	rdp->passed_quiesc = 1;
 }
 
 #ifdef CONFIG_NO_HZ
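The rewritten rcu_sched_qs()/rcu_bh_qs() above drop the local_irq_save()/restore() pair and rely on store ordering instead: ->passed_quiesc_completed is written before ->passed_quiesc, with barrier() keeping the compiler from reordering the two stores relative to an interrupt arriving on the same CPU. A minimal sketch of that idiom follows; the names are hypothetical and the snippet is illustration only, not taken from this patch:

#include <linux/compiler.h>	/* barrier() */

/* Hypothetical per-CPU record, standing in for the real rcu_data fields. */
struct qs_record {
	long passed_quiesc_completed;	/* which grace period this QS belongs to */
	int passed_quiesc;		/* written last: "the field above is valid" */
};

static void record_qs(struct qs_record *q, long completed)
{
	q->passed_quiesc_completed = completed;
	barrier();		/* keep the compiler from sinking the store above */
	q->passed_quiesc = 1;	/* an interrupt seeing this flag may trust the value */
}
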
@@ -141,6 +125,10 @@ static int blimit = 10;	/* Maximum callbacks per softirq. */
 static int qhimark = 10000;	/* If this many pending, ignore blimit. */
 static int qlowmark = 100;	/* Once only this many pending, use blimit. */
 
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
+
 static void force_quiescent_state(struct rcu_state *rsp, int relaxed);
 static int rcu_pending(int cpu);
 
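Because kernel/rcutree.c is built into the kernel, these module_param() declarations also make the tunables settable at boot. Assuming the usual KBUILD_MODNAME-derived prefix for this file, a command line such as the following would override the defaults (the values here are purely illustrative):

	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200
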
@@ -177,9 +165,7 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-	/* ACCESS_ONCE() because we are accessing outside of lock. */
-	return *rdp->nxttail[RCU_DONE_TAIL] &&
-	       ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum);
+	return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
 }
 
 /*
@@ -373,7 +359,7 @@ static long dyntick_recall_completed(struct rcu_state *rsp)
 /*
  * Snapshot the specified CPU's dynticks counter so that we can later
  * credit them with an implicit quiescent state.  Return 1 if this CPU
- * is already in a quiescent state courtesy of dynticks idle mode.
+ * is in dynticks idle mode, which is an extended quiescent state.
  */
 static int dyntick_save_progress_counter(struct rcu_data *rdp)
 {
@@ -479,30 +465,34 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	long delta;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
 
 	/* Only let one CPU complain about others per time interval. */
 
 	spin_lock_irqsave(&rnp->lock, flags);
 	delta = jiffies - rsp->jiffies_stall;
-	if (delta < RCU_STALL_RAT_DELAY || rsp->gpnum == rsp->completed) {
+	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	rsp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+
+	/*
+	 * Now rat on any tasks that got kicked up to the root rcu_node
+	 * due to CPU offlining.
+	 */
+	rcu_print_task_stall(rnp);
 	spin_unlock_irqrestore(&rnp->lock, flags);
 
 	/* OK, time to rat on our buddy... */
 
 	printk(KERN_ERR "INFO: RCU detected CPU stalls:");
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		rcu_print_task_stall(rnp);
-		if (rnp_cur->qsmask == 0)
+		if (rnp->qsmask == 0)
 			continue;
-		for (cpu = 0; cpu <= rnp_cur->grphi - rnp_cur->grplo; cpu++)
-			if (rnp_cur->qsmask & (1UL << cpu))
-				printk(" %d", rnp_cur->grplo + cpu);
+		for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+			if (rnp->qsmask & (1UL << cpu))
+				printk(" %d", rnp->grplo + cpu);
 	}
 	printk(" (detected by %d, t=%ld jiffies)\n",
 	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
@@ -541,8 +531,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
-	} else if (rsp->gpnum != rsp->completed &&
-		   delta >= RCU_STALL_RAT_DELAY) {
+	} else if (rcu_gp_in_progress(rsp) && delta >= RCU_STALL_RAT_DELAY) {
 
 		/* They had two time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp);
@@ -605,8 +594,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 {
 	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
 	struct rcu_node *rnp = rcu_get_root(rsp);
-	struct rcu_node *rnp_cur;
-	struct rcu_node *rnp_end;
 
 	if (!cpu_needs_another_gp(rsp, rdp)) {
 		spin_unlock_irqrestore(&rnp->lock, flags);
@@ -615,6 +602,7 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
 	/* Advance to a new grace period and initialize state. */
 	rsp->gpnum++;
+	WARN_ON_ONCE(rsp->signaled == RCU_GP_INIT);
 	rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
 	rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
 	record_gp_stall_check_time(rsp);
@@ -622,16 +610,24 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	note_new_gpnum(rsp, rdp);
 
 	/*
-	 * Because we are first, we know that all our callbacks will
-	 * be covered by this upcoming grace period, even the ones
-	 * that were registered arbitrarily recently.
+	 * Because this CPU just now started the new grace period, we know
+	 * that all of its callbacks will be covered by this upcoming grace
+	 * period, even the ones that were registered arbitrarily recently.
+	 * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL.
+	 *
+	 * Other CPUs cannot be sure exactly when the grace period started.
+	 * Therefore, their recently registered callbacks must pass through
+	 * an additional RCU_NEXT_READY stage, so that they will be handled
+	 * by the next RCU grace period.
 	 */
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 	rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
 	/* Special-case the common single-level case. */
 	if (NUM_RCU_NODES == 1) {
+		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
 		rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state OK. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
@@ -644,42 +640,28 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
 
 	/*
-	 * Set the quiescent-state-needed bits in all the non-leaf RCU
-	 * nodes for all currently online CPUs.  This operation relies
-	 * on the layout of the hierarchy within the rsp->node[] array.
-	 * Note that other CPUs will access only the leaves of the
-	 * hierarchy, which still indicate that no grace period is in
-	 * progress.  In addition, we have excluded CPU-hotplug operations.
-	 *
-	 * We therefore do not need to hold any locks.  Any required
-	 * memory barriers will be supplied by the locks guarding the
-	 * leaf rcu_nodes in the hierarchy.
-	 */
-
-	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
-	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++)
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-
-	/*
-	 * Now set up the leaf nodes.  Here we must be careful.  First,
-	 * we need to hold the lock in order to exclude other CPUs, which
-	 * might be contending for the leaf nodes' locks.  Second, as
-	 * soon as we initialize a given leaf node, its CPUs might run
-	 * up the rest of the hierarchy.  We must therefore acquire locks
-	 * for each node that we touch during this stage.  (But we still
-	 * are excluding CPU-hotplug operations.)
+	 * Set the quiescent-state-needed bits in all the rcu_node
+	 * structures for all currently online CPUs in breadth-first
+	 * order, starting from the root rcu_node structure.  This
+	 * operation relies on the layout of the hierarchy within the
+	 * rsp->node[] array.  Note that other CPUs will access only
+	 * the leaves of the hierarchy, which still indicate that no
+	 * grace period is in progress, at least until the corresponding
+	 * leaf node has been initialized.  In addition, we have excluded
+	 * CPU-hotplug operations.
 	 *
 	 * Note that the grace period cannot complete until we finish
 	 * the initialization process, as there will be at least one
 	 * qsmask bit set in the root node until that time, namely the
-	 * one corresponding to this CPU.
+	 * one corresponding to this CPU, due to the fact that we have
+	 * irqs disabled.
 	 */
-	rnp_end = &rsp->node[NUM_RCU_NODES];
-	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	for (; rnp_cur < rnp_end; rnp_cur++) {
-		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-		spin_unlock(&rnp_cur->lock);	/* irqs already disabled. */
+	rcu_for_each_node_breadth_first(rsp, rnp) {
+		spin_lock(&rnp->lock);		/* irqs already disabled. */
+		rcu_preempt_check_blocked_tasks(rnp);
+		rnp->qsmask = rnp->qsmaskinit;
+		rnp->gpnum = rsp->gpnum;
+		spin_unlock(&rnp->lock);	/* irqs already disabled. */
 	}
 
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
@@ -720,8 +702,9 @@ rcu_process_gp_end(struct rcu_state *rsp, struct rcu_data *rdp)
  * hold rnp->lock, as required by rcu_start_gp(), which will release it.
  */
 static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
-	__releases(rnp->lock)
+	__releases(rcu_get_root(rsp)->lock)
 {
+	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags);  /* releases root node's rnp->lock. */
@@ -739,6 +722,8 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 	      unsigned long flags)
 	__releases(rnp->lock)
 {
+	struct rcu_node *rnp_c;
+
 	/* Walk up the rcu_node hierarchy. */
 	for (;;) {
 		if (!(rnp->qsmask & mask)) {
@@ -762,8 +747,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 			break;
 		}
 		spin_unlock_irqrestore(&rnp->lock, flags);
+		rnp_c = rnp;
 		rnp = rnp->parent;
 		spin_lock_irqsave(&rnp->lock, flags);
+		WARN_ON_ONCE(rnp_c->qsmask);
 	}
 
 	/*
@@ -776,10 +763,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Record a quiescent state for the specified CPU, which must either be
- * the current CPU or an offline CPU.  The lastcomp argument is used to
- * make sure we are still in the grace period of interest.  We don't want
- * to end the current grace period based on quiescent states detected in
- * an earlier grace period!
+ * the current CPU.  The lastcomp argument is used to make sure we are
+ * still in the grace period of interest.  We don't want to end the current
+ * grace period based on quiescent states detected in an earlier grace
+ * period!
  */
 static void
 cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
@@ -814,7 +801,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 	 * This GP can't end until cpu checks in, so all of our
 	 * callbacks can be processed during the next GP.
 	 */
-	rdp = rsp->rda[smp_processor_id()];
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
 	cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -855,24 +841,70 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
+ * Move a dying CPU's RCU callbacks to the ->orphan_cbs_list for the
+ * specified flavor of RCU.  The callbacks will be adopted by the next
+ * _rcu_barrier() invocation or by the CPU_DEAD notifier, whichever
+ * comes first.  Because this is invoked from the CPU_DYING notifier,
+ * irqs are already disabled.
+ */
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+	int i;
+	struct rcu_data *rdp = rsp->rda[smp_processor_id()];
+
+	if (rdp->nxtlist == NULL)
+		return;  /* irqs disabled, so comparison is stable. */
+	spin_lock(&rsp->onofflock);  /* irqs already disabled. */
+	*rsp->orphan_cbs_tail = rdp->nxtlist;
+	rsp->orphan_cbs_tail = rdp->nxttail[RCU_NEXT_TAIL];
+	rdp->nxtlist = NULL;
+	for (i = 0; i < RCU_NEXT_SIZE; i++)
+		rdp->nxttail[i] = &rdp->nxtlist;
+	rsp->orphan_qlen += rdp->qlen;
+	rdp->qlen = 0;
+	spin_unlock(&rsp->onofflock);  /* irqs remain disabled. */
+}
+
+/*
+ * Adopt previously orphaned RCU callbacks.
+ */
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+	unsigned long flags;
+	struct rcu_data *rdp;
+
+	spin_lock_irqsave(&rsp->onofflock, flags);
+	rdp = rsp->rda[smp_processor_id()];
+	if (rsp->orphan_cbs_list == NULL) {
+		spin_unlock_irqrestore(&rsp->onofflock, flags);
+		return;
+	}
+	*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_list;
+	rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_cbs_tail;
+	rdp->qlen += rsp->orphan_qlen;
+	rsp->orphan_cbs_list = NULL;
+	rsp->orphan_cbs_tail = &rsp->orphan_cbs_list;
+	rsp->orphan_qlen = 0;
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
+}
+
+/*
  * Remove the outgoing CPU from the bitmasks in the rcu_node hierarchy
  * and move all callbacks from the outgoing CPU to the current one.
  */
 static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 {
-	int i;
 	unsigned long flags;
 	long lastcomp;
 	unsigned long mask;
 	struct rcu_data *rdp = rsp->rda[cpu];
-	struct rcu_data *rdp_me;
 	struct rcu_node *rnp;
 
 	/* Exclude any attempts to start a new grace period. */
 	spin_lock_irqsave(&rsp->onofflock, flags);
 
 	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	rnp = rdp->mynode;
+	rnp = rdp->mynode;	/* this is the outgoing CPU's rnp. */
 	mask = rdp->grpmask;	/* rnp->grplo is constant. */
 	do {
 		spin_lock(&rnp->lock);		/* irqs already disabled. */
@@ -881,42 +913,16 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 			spin_unlock(&rnp->lock); /* irqs remain disabled. */
 			break;
 		}
-		rcu_preempt_offline_tasks(rsp, rnp);
+		rcu_preempt_offline_tasks(rsp, rnp, rdp);
 		mask = rnp->grpmask;
 		spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 		rnp = rnp->parent;
 	} while (rnp != NULL);
 	lastcomp = rsp->completed;
 
-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/* Being offline is a quiescent state, so go record it. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 
-	/*
-	 * Move callbacks from the outgoing CPU to the running CPU.
-	 * Note that the outgoing CPU is now quiscent, so it is now
-	 * (uncharacteristically) safe to access its rcu_data structure.
-	 * Note also that we must carefully retain the order of the
-	 * outgoing CPU's callbacks in order for rcu_barrier() to work
-	 * correctly.  Finally, note that we start all the callbacks
-	 * afresh, even those that have passed through a grace period
-	 * and are therefore ready to invoke.  The theory is that hotplug
-	 * events are rare, and that if they are frequent enough to
-	 * indefinitely delay callbacks, you have far worse things to
-	 * be worrying about.
-	 */
-	rdp_me = rsp->rda[smp_processor_id()];
-	if (rdp->nxtlist != NULL) {
-		*rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxtlist;
-		rdp_me->nxttail[RCU_NEXT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
-		rdp->nxtlist = NULL;
-		for (i = 0; i < RCU_NEXT_SIZE; i++)
-			rdp->nxttail[i] = &rdp->nxtlist;
-		rdp_me->qlen += rdp->qlen;
-		rdp->qlen = 0;
-	}
-	local_irq_restore(flags);
+	rcu_adopt_orphan_cbs(rsp);
 }
 
 /*
@@ -934,6 +940,14 @@ static void rcu_offline_cpu(int cpu)
 
 #else /* #ifdef CONFIG_HOTPLUG_CPU */
 
+static void rcu_send_cbs_to_orphanage(struct rcu_state *rsp)
+{
+}
+
+static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
+{
+}
+
 static void rcu_offline_cpu(int cpu)
 {
 }
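Both rcu_send_cbs_to_orphanage() and rcu_adopt_orphan_cbs() above lean on the callback lists' "pointer to the last ->next pointer" tail, which is what makes each hand-off an O(1) splice that preserves callback order. A stand-alone sketch of that list idiom, using hypothetical types rather than the kernel's rcu_head machinery:

struct cb {
	struct cb *next;
};

struct cb_list {
	struct cb *head;	/* plays the role of rdp->nxtlist / rsp->orphan_cbs_list */
	struct cb **tail;	/* plays the role of rdp->nxttail[] / rsp->orphan_cbs_tail */
};

static void cb_list_init(struct cb_list *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* empty list: tail points at the head pointer */
}

/* Splice everything on @from onto the end of @to in O(1), keeping order. */
static void cb_list_splice_tail(struct cb_list *to, struct cb_list *from)
{
	if (from->head == NULL)
		return;
	*to->tail = from->head;	/* hook donor's first element onto our last ->next */
	to->tail = from->tail;	/* our tail is now the donor's old tail */
	cb_list_init(from);	/* donor list is empty again */
}
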
@@ -1066,33 +1080,32 @@ static int rcu_process_dyntick(struct rcu_state *rsp, long lastcomp,
 	int cpu;
 	unsigned long flags;
 	unsigned long mask;
-	struct rcu_node *rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	struct rcu_node *rnp_end = &rsp->node[NUM_RCU_NODES];
+	struct rcu_node *rnp;
 
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	rcu_for_each_leaf_node(rsp, rnp) {
 		mask = 0;
-		spin_lock_irqsave(&rnp_cur->lock, flags);
+		spin_lock_irqsave(&rnp->lock, flags);
 		if (rsp->completed != lastcomp) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			return 1;
 		}
-		if (rnp_cur->qsmask == 0) {
-			spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		if (rnp->qsmask == 0) {
+			spin_unlock_irqrestore(&rnp->lock, flags);
 			continue;
 		}
-		cpu = rnp_cur->grplo;
+		cpu = rnp->grplo;
 		bit = 1;
-		for (; cpu <= rnp_cur->grphi; cpu++, bit <<= 1) {
-			if ((rnp_cur->qsmask & bit) != 0 && f(rsp->rda[cpu]))
+		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+			if ((rnp->qsmask & bit) != 0 && f(rsp->rda[cpu]))
 				mask |= bit;
 		}
 		if (mask != 0 && rsp->completed == lastcomp) {
 
-			/* cpu_quiet_msk() releases rnp_cur->lock. */
-			cpu_quiet_msk(mask, rsp, rnp_cur, flags);
+			/* cpu_quiet_msk() releases rnp->lock. */
+			cpu_quiet_msk(mask, rsp, rnp, flags);
 			continue;
 		}
-		spin_unlock_irqrestore(&rnp_cur->lock, flags);
+		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	return 0;
 }
@@ -1108,7 +1121,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	struct rcu_node *rnp = rcu_get_root(rsp);
 	u8 signaled;
 
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum))
+	if (!rcu_gp_in_progress(rsp))
 		return;  /* No grace period in progress, nothing to force. */
 	if (!spin_trylock_irqsave(&rsp->fqslock, flags)) {
 		rsp->n_force_qs_lh++; /* Inexact, can lose counts.  Tough! */
@@ -1267,7 +1280,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;
 
 	/* Start a new grace period if one not already started. */
-	if (ACCESS_ONCE(rsp->completed) == ACCESS_ONCE(rsp->gpnum)) {
+	if (!rcu_gp_in_progress(rsp)) {
 		unsigned long nestflag;
 		struct rcu_node *rnp_root = rcu_get_root(rsp);
 
@@ -1347,7 +1360,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 
 	/* Has an RCU GP gone long enough to send resched IPIs &c? */
-	if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
+	if (rcu_gp_in_progress(rsp) &&
 	    ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)) {
 		rdp->n_rp_need_fqs++;
 		return 1;
@@ -1384,6 +1397,82 @@ int rcu_needs_cpu(int cpu)
 	       rcu_preempt_needs_cpu(cpu);
 }
 
+static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
+static atomic_t rcu_barrier_cpu_count;
+static DEFINE_MUTEX(rcu_barrier_mutex);
+static struct completion rcu_barrier_completion;
+
+static void rcu_barrier_callback(struct rcu_head *notused)
+{
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+}
+
+/*
+ * Called with preemption disabled, and from cross-cpu IRQ context.
+ */
+static void rcu_barrier_func(void *type)
+{
+	int cpu = smp_processor_id();
+	struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
+	void (*call_rcu_func)(struct rcu_head *head,
+			      void (*func)(struct rcu_head *head));
+
+	atomic_inc(&rcu_barrier_cpu_count);
+	call_rcu_func = type;
+	call_rcu_func(head, rcu_barrier_callback);
+}
+
+/*
+ * Orchestrate the specified type of RCU barrier, waiting for all
+ * RCU callbacks of the specified type to complete.
+ */
+static void _rcu_barrier(struct rcu_state *rsp,
+			 void (*call_rcu_func)(struct rcu_head *head,
+					       void (*func)(struct rcu_head *head)))
+{
+	BUG_ON(in_interrupt());
+	/* Take mutex to serialize concurrent rcu_barrier() requests. */
+	mutex_lock(&rcu_barrier_mutex);
+	init_completion(&rcu_barrier_completion);
+	/*
+	 * Initialize rcu_barrier_cpu_count to 1, then invoke
+	 * rcu_barrier_func() on each CPU, so that each CPU also has
+	 * incremented rcu_barrier_cpu_count.  Only then is it safe to
+	 * decrement rcu_barrier_cpu_count -- otherwise the first CPU
+	 * might complete its grace period before all of the other CPUs
+	 * did their increment, causing this function to return too
+	 * early.
+	 */
+	atomic_set(&rcu_barrier_cpu_count, 1);
+	preempt_disable(); /* stop CPU_DYING from filling orphan_cbs_list */
+	rcu_adopt_orphan_cbs(rsp);
+	on_each_cpu(rcu_barrier_func, (void *)call_rcu_func, 1);
+	preempt_enable(); /* CPU_DYING can again fill orphan_cbs_list */
+	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
+		complete(&rcu_barrier_completion);
+	wait_for_completion(&rcu_barrier_completion);
+	mutex_unlock(&rcu_barrier_mutex);
+}
+
+/**
+ * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
+ */
+void rcu_barrier_bh(void)
+{
+	_rcu_barrier(&rcu_bh_state, call_rcu_bh);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+
+/**
+ * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
+ */
+void rcu_barrier_sched(void)
+{
+	_rcu_barrier(&rcu_sched_state, call_rcu_sched);
+}
+EXPORT_SYMBOL_GPL(rcu_barrier_sched);
+
 /*
  * Do boot-time initialization of a CPU's per-CPU RCU data.
  */
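The new rcu_barrier_bh() and rcu_barrier_sched() exist mainly so that code which queued callbacks with call_rcu_bh()/call_rcu_sched() can wait for them before the callback functions themselves disappear, typically on module unload. A minimal, hypothetical sketch of such an exit path (names invented for illustration, not part of this patch):

#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
	struct rcu_head rcu;
	int payload;
};

static void example_reclaim(struct rcu_head *head)
{
	kfree(container_of(head, struct example_obj, rcu));
}

/* Elsewhere: call_rcu_sched(&obj->rcu, example_reclaim); */

static void __exit example_exit(void)
{
	/*
	 * Once no new callbacks can be posted, wait for those already
	 * queued: after rcu_barrier_sched() returns, example_reclaim()
	 * can no longer run, so the module text may safely go away.
	 */
	rcu_barrier_sched();
}
module_exit(example_exit);
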
@@ -1457,20 +1546,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 		rnp = rnp->parent;
 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
 
-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/*
-	 * A new grace period might start here.  If so, we will be part of
-	 * it, and its gpnum will be greater than ours, so we will
-	 * participate.  It is also possible for the gpnum to have been
-	 * incremented before this function was called, and the bitmasks
-	 * to not be filled out until now, in which case we will also
-	 * participate due to our gpnum being behind.
-	 */
-
-	/* Since it is coming online, the CPU is in a quiescent state. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
 static void __cpuinit rcu_online_cpu(int cpu)
@@ -1493,6 +1569,22 @@ int __cpuinit rcu_cpu_notify(struct notifier_block *self,
 	case CPU_UP_PREPARE_FROZEN:
 		rcu_online_cpu(cpu);
 		break;
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		/*
+		 * preempt_disable() in _rcu_barrier() prevents stop_machine(),
+		 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
+		 * returns, all online cpus have queued rcu_barrier_func().
+		 * The dying CPU clears its cpu_online_mask bit and
+		 * moves all of its RCU callbacks to ->orphan_cbs_list
+		 * in the context of stop_machine(), so subsequent calls
+		 * to _rcu_barrier() will adopt these callbacks and only
+		 * then queue rcu_barrier_func() on all remaining CPUs.
+		 */
+		rcu_send_cbs_to_orphanage(&rcu_bh_state);
+		rcu_send_cbs_to_orphanage(&rcu_sched_state);
+		rcu_preempt_send_cbs_to_orphanage();
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	case CPU_UP_CANCELED:
@@ -1555,7 +1647,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 		cpustride *= rsp->levelspread[i];
 		rnp = rsp->level[i];
 		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
-			spin_lock_init(&rnp->lock);
+			if (rnp != rcu_get_root(rsp))
+				spin_lock_init(&rnp->lock);
 			rnp->gpnum = 0;
 			rnp->qsmask = 0;
 			rnp->qsmaskinit = 0;
@@ -1578,6 +1671,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 			INIT_LIST_HEAD(&rnp->blocked_tasks[1]);
 		}
 	}
+	spin_lock_init(&rcu_get_root(rsp)->lock);
 }
 
 /*
@@ -1587,6 +1681,10 @@ static void __init rcu_init_one(struct rcu_state *rsp)
  */
 #define RCU_INIT_FLAVOR(rsp, rcu_data) \
 do { \
+	int i; \
+	int j; \
+	struct rcu_node *rnp; \
+	\
 	rcu_init_one(rsp); \
 	rnp = (rsp)->level[NUM_RCU_LVLS - 1]; \
 	j = 0; \
@@ -1599,31 +1697,8 @@ do { \
 	} \
 } while (0)
 
-#ifdef CONFIG_TREE_PREEMPT_RCU
-
-void __init __rcu_init_preempt(void)
-{
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
-	RCU_INIT_FLAVOR(&rcu_preempt_state, rcu_preempt_data);
-}
-
-#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-
-void __init __rcu_init_preempt(void)
-{
-}
-
-#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
-
 void __init __rcu_init(void)
 {
-	int i;			/* All used by RCU_INIT_FLAVOR(). */
-	int j;
-	struct rcu_node *rnp;
-
 	rcu_bootup_announce();
 #ifdef CONFIG_RCU_CPU_STALL_DETECTOR
 	printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
@@ -1634,6 +1709,4 @@ void __init __rcu_init(void)
 	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 }
 
-module_param(blimit, int, 0);
-module_param(qhimark, int, 0);
-module_param(qlowmark, int, 0);
+#include "rcutree_plugin.h"