Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	41
1 file changed, 12 insertions(+), 29 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index c634a92d1217..da301e2fd84f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -645,41 +645,24 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	spin_lock(&rsp->onofflock);	/* irqs already disabled. */
 
 	/*
-	 * Set the quiescent-state-needed bits in all the non-leaf RCU
-	 * nodes for all currently online CPUs.  This operation relies
-	 * on the layout of the hierarchy within the rsp->node[] array.
-	 * Note that other CPUs will access only the leaves of the
-	 * hierarchy, which still indicate that no grace period is in
-	 * progress.  In addition, we have excluded CPU-hotplug operations.
-	 *
-	 * We therefore do not need to hold any locks.  Any required
-	 * memory barriers will be supplied by the locks guarding the
-	 * leaf rcu_nodes in the hierarchy.
-	 */
-
-	rnp_end = rsp->level[NUM_RCU_LVLS - 1];
-	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
-		rnp_cur->qsmask = rnp_cur->qsmaskinit;
-		rnp->gpnum = rsp->gpnum;
-	}
-
-	/*
-	 * Now set up the leaf nodes.  Here we must be careful.  First,
-	 * we need to hold the lock in order to exclude other CPUs, which
-	 * might be contending for the leaf nodes' locks.  Second, as
-	 * soon as we initialize a given leaf node, its CPUs might run
-	 * up the rest of the hierarchy.  We must therefore acquire locks
-	 * for each node that we touch during this stage.  (But we still
-	 * are excluding CPU-hotplug operations.)
+	 * Set the quiescent-state-needed bits in all the rcu_node
+	 * structures for all currently online CPUs in breadth-first
+	 * order, starting from the root rcu_node structure.  This
+	 * operation relies on the layout of the hierarchy within the
+	 * rsp->node[] array.  Note that other CPUs will access only
+	 * the leaves of the hierarchy, which still indicate that no
+	 * grace period is in progress, at least until the corresponding
+	 * leaf node has been initialized.  In addition, we have excluded
+	 * CPU-hotplug operations.
 	 *
 	 * Note that the grace period cannot complete until we finish
 	 * the initialization process, as there will be at least one
 	 * qsmask bit set in the root node until that time, namely the
-	 * one corresponding to this CPU.
+	 * one corresponding to this CPU, due to the fact that we have
+	 * irqs disabled.
 	 */
 	rnp_end = &rsp->node[NUM_RCU_NODES];
-	rnp_cur = rsp->level[NUM_RCU_LVLS - 1];
-	for (; rnp_cur < rnp_end; rnp_cur++) {
+	for (rnp_cur = &rsp->node[0]; rnp_cur < rnp_end; rnp_cur++) {
 		spin_lock(&rnp_cur->lock);	/* irqs already disabled. */
 		rnp_cur->qsmask = rnp_cur->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
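
The consolidation above leans on the property that rsp->node[] stores the rcu_node tree level by level, so a single linear pass over the array is a breadth-first walk: every parent is initialized before any of its children. The following standalone sketch illustrates only that layout property; the toy_node structure, FANOUT, and NUM_LEAVES constants are made up for the example and are not kernel definitions.

#include <stdio.h>

#define FANOUT		2
#define NUM_LEAVES	4
#define NUM_NODES	(1 + FANOUT + NUM_LEAVES)	/* root + interior + leaves */

struct toy_node {
	int parent;	/* index of parent node, -1 for the root */
	int gpnum;	/* stands in for a per-node grace-period number */
};

int main(void)
{
	struct toy_node node[NUM_NODES];
	int i;

	/*
	 * Lay the tree out level by level: node 0 is the root, nodes
	 * 1..2 are interior, nodes 3..6 are leaves, mirroring how a
	 * flattened array stores a tree breadth-first.
	 */
	node[0].parent = -1;
	for (i = 1; i < NUM_NODES; i++)
		node[i].parent = (i - 1) / FANOUT;
	for (i = 0; i < NUM_NODES; i++)
		node[i].gpnum = 0;

	/*
	 * A single linear sweep over the flattened array visits each
	 * parent before any of its children, so by the time a leaf is
	 * marked, the whole path up to the root already carries the
	 * new grace-period number.
	 */
	for (i = 0; i < NUM_NODES; i++) {
		node[i].gpnum = 1;
		if (node[i].parent >= 0 && node[node[i].parent].gpnum != 1)
			printf("ordering violated at node %d\n", i);
	}
	printf("all %d nodes initialized in breadth-first order\n", NUM_NODES);
	return 0;
}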