author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2009-09-18 12:50:18 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 02:53:21 -0400
commit		e7d8842ed34a7fe19d1ed90f84c211fb056ac523
tree		d49d5b8ff8829e525b8f80d60a18ef1f37e09529 /kernel
parent		28ecd58020409be8eb176c716f957fc3386fa2fa
rcu: Apply results of code inspection of kernel/rcutree_plugin.h
o Drop the calls to cpu_quiet() from the online/offline code.  These
  are unnecessary, since force_quiescent_state() will clean up, and
  removing them simplifies the code a bit.

o Add a warning to check that we don't enqueue the same blocked task
  twice onto the ->blocked_tasks[] lists.

o Rework the phase computation in rcu_preempt_note_context_switch()
  to be more readable, as suggested by Josh Triplett.

o Disable irqs to close a race between the scheduling clock interrupt
  and rcu_preempt_note_context_switch() WRT the
  ->rcu_read_unlock_special field.

o Add comments to rnp->lock acquisition and release within
  rcu_read_unlock_special() noting that irqs are already disabled.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12532926201851-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
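The double-enqueue warning relies on the kernel convention that a list entry which is off every list is self-linked, so list_empty() applied to the entry itself detects a second enqueue. Below is a minimal userspace sketch of that guard, using a stripped-down stand-in for <linux/list.h> rather than the kernel's implementation; all names here are illustrative:

#include <stdio.h>

struct list_node {
	struct list_node *next, *prev;
};

static void list_init(struct list_node *n)
{
	n->next = n->prev = n;	/* off-list entries are self-linked */
}

static int list_empty(const struct list_node *n)
{
	return n->next == n;
}

static void list_add(struct list_node *n, struct list_node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

int main(void)
{
	struct list_node blocked_tasks, entry;

	list_init(&blocked_tasks);
	list_init(&entry);

	/* The guard: an entry must be off every list before enqueue. */
	if (!list_empty(&entry))
		fprintf(stderr, "WARN: task already queued\n");
	list_add(&entry, &blocked_tasks);

	/* A second add without a list_del_init() now trips the check. */
	if (!list_empty(&entry))
		fprintf(stderr, "WARN: task already queued\n");
	return 0;
}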
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/rcutree.c	| 27
-rw-r--r--	kernel/rcutree_plugin.h	| 10
2 files changed, 11 insertions(+), 26 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e8624ebf2320..ae4a553e37ce 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -767,10 +767,10 @@ cpu_quiet_msk(unsigned long mask, struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Record a quiescent state for the specified CPU, which must either be
- * the current CPU or an offline CPU.  The lastcomp argument is used to
- * make sure we are still in the grace period of interest.  We don't want
- * to end the current grace period based on quiescent states detected in
- * an earlier grace period!
+ * the current CPU.  The lastcomp argument is used to make sure we are
+ * still in the grace period of interest.  We don't want to end the current
+ * grace period based on quiescent states detected in an earlier grace
+ * period!
  */
 static void
 cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
@@ -805,7 +805,6 @@ cpu_quiet(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastcomp)
 	 * This GP can't end until cpu checks in, so all of our
 	 * callbacks can be processed during the next GP.
 	 */
-	rdp = rsp->rda[smp_processor_id()];
 	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
 
 	cpu_quiet_msk(mask, rsp, rnp, flags); /* releases rnp->lock */
@@ -881,9 +880,6 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 
 	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
 
-	/* Being offline is a quiescent state, so go record it. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-
 	/*
 	 * Move callbacks from the outgoing CPU to the running CPU.
 	 * Note that the outgoing CPU is now quiscent, so it is now
@@ -1448,20 +1444,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptable)
 		rnp = rnp->parent;
 	} while (rnp != NULL && !(rnp->qsmaskinit & mask));
 
-	spin_unlock(&rsp->onofflock);		/* irqs remain disabled. */
-
-	/*
-	 * A new grace period might start here.  If so, we will be part of
-	 * it, and its gpnum will be greater than ours, so we will
-	 * participate.  It is also possible for the gpnum to have been
-	 * incremented before this function was called, and the bitmasks
-	 * to not be filled out until now, in which case we will also
-	 * participate due to our gpnum being behind.
-	 */
-
-	/* Since it is coming online, the CPU is in a quiescent state. */
-	cpu_quiet(cpu, rsp, rdp, lastcomp);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
 static void __cpuinit rcu_online_cpu(int cpu)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5f94619450af..cd6047cc7fc2 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 		 * on line!
 		 */
 		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
-		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
+		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
 		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-		smp_mb();  /* Ensure later ctxt swtch seen after above. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
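For the one-bit quantities involved, addition modulo 2 and exclusive OR coincide, so the rewritten phase expression indexes the same ->blocked_tasks[] element as the one it replaces. The standalone sketch below checks that exhaustively; it is illustrative userspace code, not kernel code:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long gpnum;
	int blocked;	/* models !(rnp->qsmask & rdp->grpmask): 0 or 1 */

	for (gpnum = 0; gpnum < 4; gpnum++) {
		for (blocked = 0; blocked <= 1; blocked++) {
			int old_phase = blocked ^ (int)(gpnum & 0x1);
			int new_phase = (int)((gpnum + blocked) & 0x1);

			/* (g + b) & 1 == b ^ (g & 1) for b in {0, 1} */
			assert(old_phase == new_phase);
		}
	}
	printf("old and new phase computations agree\n");
	return 0;
}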
@@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * means that we continue to block the current grace period.
 	 */
 	rcu_preempt_qs(cpu);
+	local_irq_save(flags);
 	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+	local_irq_restore(flags);
 }
 
 /*
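The &= on ->rcu_read_unlock_special compiles to a separate load, modify, and store, so a scheduling clock interrupt that fires between the load and the store can have its own update to the field silently overwritten; wrapping the sequence in local_irq_save()/local_irq_restore() closes that window. A userspace sketch of the lost update, with the interrupt's action inlined at the racy point and hypothetical bit values standing in for the kernel's RCU_READ_UNLOCK_* flags:

#include <stdio.h>

#define NEED_QS	0x1	/* hypothetical stand-ins for the kernel's */
#define BLOCKED	0x2	/* RCU_READ_UNLOCK_* bit values            */

int main(void)
{
	unsigned int special = NEED_QS;

	/* "special &= ~NEED_QS" is really load/modify/store: */
	unsigned int tmp = special;		/* load */

	/* Suppose an interrupt fires here and sets another bit... */
	special |= BLOCKED;

	special = tmp & ~NEED_QS;		/* modify + store */

	/* ...the interrupt's update has been overwritten. */
	printf("special = %#x; BLOCKED bit lost: %s\n", special,
	       (special & BLOCKED) ? "no" : "yes");
	return 0;
}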
@@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	for (;;) {
 		rnp = t->rcu_blocked_node;
-		spin_lock(&rnp->lock);
+		spin_lock(&rnp->lock);  /* irqs already disabled. */
 		if (rnp == t->rcu_blocked_node)
 			break;
-		spin_unlock(&rnp->lock);
+		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
 	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
 	list_del_init(&t->rcu_node_entry);
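The for (;;) loop that these comments annotate exists because t->rcu_blocked_node may be retargeted at any time until the corresponding rnp->lock is held, so the pointer is snapshotted, the lock taken, and the snapshot revalidated, dropping the lock and retrying on a mismatch. A standalone pthread sketch of the same lock-and-revalidate idiom, with illustrative names and types rather than the kernel's (build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

struct node {
	pthread_mutex_t lock;
};

struct task {
	struct node *blocked_node;	/* may change until its lock is held */
};

/* Return the task's node with its lock held, retrying on races. */
static struct node *lock_blocked_node(struct task *t)
{
	struct node *np;

	for (;;) {
		np = t->blocked_node;			/* unsynchronized snapshot */
		pthread_mutex_lock(&np->lock);
		if (np == t->blocked_node)		/* still the right node?   */
			return np;
		pthread_mutex_unlock(&np->lock);	/* raced: retry            */
	}
}

int main(void)
{
	struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct task t = { .blocked_node = &n };
	struct node *np = lock_blocked_node(&t);

	printf("node %p locked for task\n", (void *)np);
	pthread_mutex_unlock(&np->lock);
	return 0;
}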