author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-02-22 20:05:02 -0500
committer  Ingo Molnar <mingo@elte.hu>                    2010-02-25 04:34:58 -0500
commit     1304afb225288a2e250d6a7495462c28e5509cbb (patch)
tree       1ba76e304f718f7ce89532e1c4d276b4af439c07 /kernel/rcutree_plugin.h
parent     20133cfce7d0bbdcc0c398301030c091f5675c88 (diff)
rcu: Convert to raw_spinlocks
The spinlocks in rcutree need to be real spinlocks in preempt-rt.
Convert them to raw_spinlocks.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1266887105-1528-18-git-send-email-paulmck@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
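For readers unfamiliar with the distinction the commit message relies on, a minimal
sketch of the two lock flavors follows. It is not part of the patch, and the example_*
identifiers are made up for illustration: under PREEMPT_RT a spinlock_t may be replaced
by a sleeping lock, whereas a raw_spinlock_t always remains a true busy-waiting
spinlock, which is what locks taken in non-sleepable RCU core paths require.

#include <linux/spinlock.h>

/* Sketch only: example_* names are illustrative, not taken from rcutree. */
static DEFINE_RAW_SPINLOCK(example_raw_lock);  /* stays a spinlock on -rt */
static DEFINE_SPINLOCK(example_lock);          /* may become a sleeping lock on -rt */

static void example_critical_sections(void)
{
        unsigned long flags;

        /* Safe in contexts that must not sleep, on -rt and mainline alike. */
        raw_spin_lock_irqsave(&example_raw_lock, flags);
        /* ... short, non-sleeping critical section ... */
        raw_spin_unlock_irqrestore(&example_raw_lock, flags);

        /* On -rt this critical section is preemptible and may sleep. */
        spin_lock(&example_lock);
        /* ... critical section ... */
        spin_unlock(&example_lock);
}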
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--  kernel/rcutree_plugin.h  46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index a82566696b0b..a8b2e834fd3a 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -111,7 +111,7 @@ static void rcu_preempt_note_context_switch(int cpu)
                 /* Possibly blocking in an RCU read-side critical section. */
                 rdp = rcu_preempt_state.rda[cpu];
                 rnp = rdp->mynode;
-                spin_lock_irqsave(&rnp->lock, flags);
+                raw_spin_lock_irqsave(&rnp->lock, flags);
                 t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
                 t->rcu_blocked_node = rnp;
 
@@ -132,7 +132,7 @@ static void rcu_preempt_note_context_switch(int cpu)
                 WARN_ON_ONCE(!list_empty(&t->rcu_node_entry));
                 phase = (rnp->gpnum + !(rnp->qsmask & rdp->grpmask)) & 0x1;
                 list_add(&t->rcu_node_entry, &rnp->blocked_tasks[phase]);
-                spin_unlock_irqrestore(&rnp->lock, flags);
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
         }
 
         /*
@@ -189,7 +189,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
         struct rcu_node *rnp_p;
 
         if (rnp->qsmask != 0 || rcu_preempted_readers(rnp)) {
-                spin_unlock_irqrestore(&rnp->lock, flags);
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return; /* Still need more quiescent states! */
         }
 
@@ -206,8 +206,8 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 
         /* Report up the rest of the hierarchy. */
         mask = rnp->grpmask;
-        spin_unlock(&rnp->lock); /* irqs remain disabled. */
-        spin_lock(&rnp_p->lock); /* irqs already disabled. */
+        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+        raw_spin_lock(&rnp_p->lock); /* irqs already disabled. */
         rcu_report_qs_rnp(mask, &rcu_preempt_state, rnp_p, flags);
 }
 
@@ -257,10 +257,10 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  */
                 for (;;) {
                         rnp = t->rcu_blocked_node;
-                        spin_lock(&rnp->lock); /* irqs already disabled. */
+                        raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                         if (rnp == t->rcu_blocked_node)
                                 break;
-                        spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                 }
                 empty = !rcu_preempted_readers(rnp);
                 empty_exp = !rcu_preempted_readers_exp(rnp);
@@ -274,7 +274,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
                  * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                  */
                 if (empty)
-                        spin_unlock_irqrestore(&rnp->lock, flags);
+                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 else
                         rcu_report_unblock_qs_rnp(rnp, flags);
 
@@ -324,12 +324,12 @@ static void rcu_print_task_stall(struct rcu_node *rnp)
         struct task_struct *t;
 
         if (rcu_preempted_readers(rnp)) {
-                spin_lock_irqsave(&rnp->lock, flags);
+                raw_spin_lock_irqsave(&rnp->lock, flags);
                 phase = rnp->gpnum & 0x1;
                 lp = &rnp->blocked_tasks[phase];
                 list_for_each_entry(t, lp, rcu_node_entry)
                         printk(" P%d", t->pid);
-                spin_unlock_irqrestore(&rnp->lock, flags);
+                raw_spin_unlock_irqrestore(&rnp->lock, flags);
         }
 }
 
@@ -400,11 +400,11 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
                 lp_root = &rnp_root->blocked_tasks[i];
                 while (!list_empty(lp)) {
                         tp = list_entry(lp->next, typeof(*tp), rcu_node_entry);
-                        spin_lock(&rnp_root->lock); /* irqs already disabled */
+                        raw_spin_lock(&rnp_root->lock); /* irqs already disabled */
                         list_del(&tp->rcu_node_entry);
                         tp->rcu_blocked_node = rnp_root;
                         list_add(&tp->rcu_node_entry, lp_root);
-                        spin_unlock(&rnp_root->lock); /* irqs remain disabled */
+                        raw_spin_unlock(&rnp_root->lock); /* irqs remain disabled */
                 }
         }
         return retval;
@@ -528,7 +528,7 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
         unsigned long flags;
         unsigned long mask;
 
-        spin_lock_irqsave(&rnp->lock, flags);
+        raw_spin_lock_irqsave(&rnp->lock, flags);
         for (;;) {
                 if (!sync_rcu_preempt_exp_done(rnp))
                         break;
@@ -537,12 +537,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                         break;
                 }
                 mask = rnp->grpmask;
-                spin_unlock(&rnp->lock); /* irqs remain disabled */
+                raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
                 rnp = rnp->parent;
-                spin_lock(&rnp->lock); /* irqs already disabled */
+                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                 rnp->expmask &= ~mask;
         }
-        spin_unlock_irqrestore(&rnp->lock, flags);
+        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -557,11 +557,11 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
         int must_wait;
 
-        spin_lock(&rnp->lock); /* irqs already disabled */
+        raw_spin_lock(&rnp->lock); /* irqs already disabled */
         list_splice_init(&rnp->blocked_tasks[0], &rnp->blocked_tasks[2]);
         list_splice_init(&rnp->blocked_tasks[1], &rnp->blocked_tasks[3]);
         must_wait = rcu_preempted_readers_exp(rnp);
-        spin_unlock(&rnp->lock); /* irqs remain disabled */
+        raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
         if (!must_wait)
                 rcu_report_exp_rnp(rsp, rnp);
 }
@@ -606,13 +606,13 @@ void synchronize_rcu_expedited(void)
         /* force all RCU readers onto blocked_tasks[]. */
         synchronize_sched_expedited();
 
-        spin_lock_irqsave(&rsp->onofflock, flags);
+        raw_spin_lock_irqsave(&rsp->onofflock, flags);
 
         /* Initialize ->expmask for all non-leaf rcu_node structures. */
         rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) {
-                spin_lock(&rnp->lock); /* irqs already disabled. */
+                raw_spin_lock(&rnp->lock); /* irqs already disabled. */
                 rnp->expmask = rnp->qsmaskinit;
-                spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
         }
 
         /* Snapshot current state of ->blocked_tasks[] lists. */
@@ -621,7 +621,7 @@ void synchronize_rcu_expedited(void)
         if (NUM_RCU_NODES > 1)
                 sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp));
 
-        spin_unlock_irqrestore(&rsp->onofflock, flags);
+        raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
 
         /* Wait for snapshotted ->blocked_tasks[] lists to drain. */
         rnp = rcu_get_root(rsp);
@@ -756,7 +756,7 @@ static int rcu_preempted_readers(struct rcu_node *rnp)
 /* Because preemptible RCU does not exist, no quieting of tasks. */
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 {
-        spin_unlock_irqrestore(&rnp->lock, flags);
+        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */