author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2009-09-18 12:50:18 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-09-19 02:53:21 -0400
commit    e7d8842ed34a7fe19d1ed90f84c211fb056ac523
tree      d49d5b8ff8829e525b8f80d60a18ef1f37e09529 /kernel/rcutree_plugin.h
parent    28ecd58020409be8eb176c716f957fc3386fa2fa
rcu: Apply results of code inspection of kernel/rcutree_plugin.h
o Drop the calls to cpu_quiet() from the online/offline code. These
  are unnecessary, since force_quiescent_state() will clean up, and
  removing them simplifies the code a bit.

o Add a warning to check that we don't enqueue the same blocked task
  twice onto the ->blocked_tasks[] lists.

o Rework the phase computation in rcu_preempt_note_context_switch()
  to be more readable, as suggested by Josh Triplett.

o Disable irqs to close a race between the scheduling clock interrupt
  and rcu_preempt_note_context_switch() WRT the
  ->rcu_read_unlock_special field.

o Add comments to rnp->lock acquisition and release within
  rcu_read_unlock_special() noting that irqs are already disabled.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: akpm@linux-foundation.org
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
LKML-Reference: <12532926201851-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
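A quick standalone check of the phase rework mentioned above (plain C,
not kernel code; the variable bit stands in for the one-bit value
!(rnp->qsmask & rdp->grpmask)): the old XOR form and the new form agree
for every input, because adding two one-bit values modulo 2 is exactly
exclusive-or.

#include <assert.h>

int main(void)
{
	unsigned long gpnum;
	int bit;	/* stands in for !(rnp->qsmask & rdp->grpmask) */

	for (gpnum = 0; gpnum < 4; gpnum++)
		for (bit = 0; bit <= 1; bit++) {
			int old_phase = bit ^ (int)(gpnum & 0x1);
			int new_phase = (int)((gpnum + bit) & 0x1);

			assert(old_phase == new_phase); /* mod-2 add == XOR */
		}
	return 0;
}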
Diffstat (limited to 'kernel/rcutree_plugin.h')
 kernel/rcutree_plugin.h | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5f94619450af..cd6047cc7fc2 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -117,9 +117,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 		 * on line!
 		 */
 		WARN_ON_ONCE((rdp->grpmask & rnp->qsmaskinit) == 0);
-		phase = !(rnp->qsmask & rdp->grpmask) ^ (rnp->gpnum & 0x1);
+		WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
+		phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
 		list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-		smp_mb();  /* Ensure later ctxt swtch seen after above. */
 		spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 
@@ -133,7 +133,9 @@ static void rcu_preempt_note_context_switch(int cpu)
 	 * means that we continue to block the current grace period.
 	 */
 	rcu_preempt_qs(cpu);
+	local_irq_save(flags);
 	t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
+	local_irq_restore(flags);
 }
 
 /*
@@ -189,10 +191,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
 	 */
 	for (;;) {
 		rnp = t->rcu_blocked_node;
-		spin_lock(&rnp->lock);
+		spin_lock(&rnp->lock);  /* irqs already disabled. */
 		if (rnp == t->rcu_blocked_node)
 			break;
-		spin_unlock(&rnp->lock);
+		spin_unlock(&rnp->lock);  /* irqs remain disabled. */
 	}
 	empty = list_empty(&rnp->blocked_tasks[rnp->gpnum & 0x1]);
 	list_del_init(&t->rcu_node_entry);
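A side note on the retry loop in the last hunk: t->rcu_blocked_node can
change while the task spins for the lock (for instance when blocked tasks
are migrated during CPU-hotplug handling), so the pointer is re-read and
revalidated after acquisition. Below is a minimal userspace sketch of the
same lock-then-revalidate idiom; the names are hypothetical and a pthread
mutex stands in for the raw rnp->lock spinlock, with irq state not modeled.

#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	/* ... per-node state ... */
};

struct task {
	struct node *blocked_node;	/* may be retargeted concurrently */
};

/*
 * Lock the node this task is currently queued on.  Because blocked_node
 * can change between reading the pointer and acquiring the lock, re-check
 * it after locking and retry on mismatch -- once the lock is held and the
 * pointer still matches, it can no longer change underneath us.
 */
static struct node *lock_blocked_node(struct task *t)
{
	struct node *np;

	for (;;) {
		np = t->blocked_node;
		pthread_mutex_lock(&np->lock);
		if (np == t->blocked_node)
			return np;
		pthread_mutex_unlock(&np->lock);
	}
}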