author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-07-17 05:05:49 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-07-20 00:38:51 -0400
commit     131906b0062ddde7f85bbe67754983c754648bd8 (patch)
tree       1b92eacb707befb294b4d9985f10aa44715cba16 /kernel
parent     b0d304172f49061b4ff78f9e2b02719ac69c8a7e (diff)
rcu: decrease rcu_report_exp_rnp coupling with scheduler
PREEMPT_RCU read-side critical sections blocking an expedited grace period invoke rcu_report_exp_rnp(). When the last such critical section has completed, rcu_report_exp_rnp() invokes the scheduler to wake up the task that invoked synchronize_rcu_expedited() -- needlessly holding the root rcu_node structure's lock while doing so, thus needlessly providing a way for RCU and the scheduler to deadlock. This commit therefore releases the root rcu_node structure's lock before calling wake_up().

Reported-by: Ed Tomlinson <edt@aei.ca>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
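The fix follows a common locking pattern: finish all state updates while holding the lock, drop the lock, and only then wake the waiter, so the wakeup path never runs with the rcu_node lock held. The following is a minimal user-space sketch of the same idea using POSIX threads; every identifier here (waiter, reporter, done, done_cv) is illustrative and none of it comes from the kernel sources.

/*
 * Sketch of the "drop the lock before waking" pattern. The reporter
 * plays the role of rcu_report_exp_rnp(); the waiter plays the role of
 * the task blocked in synchronize_rcu_expedited(). Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static int done;

/* Waiter: blocks until the reporter signals completion. */
static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter: woken after state was updated\n");
	return NULL;
}

/* Reporter: updates state under the lock, releases it, then wakes. */
static void *reporter(void *arg)
{
	pthread_mutex_lock(&lock);
	done = 1;                       /* update state under the lock ...  */
	pthread_mutex_unlock(&lock);    /* ... release it ...               */
	pthread_cond_signal(&done_cv);  /* ... and only then wake the waiter */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, waiter, NULL);
	pthread_create(&r, NULL, reporter, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Signaling after the unlock is safe because the predicate (done) was already set while the lock was held; this mirrors why wake_up() in the patched code does not need to run under rnp->lock.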
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/rcutree_plugin.h   6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 75113cb7c4f..6abef3cfcbc 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -695,9 +695,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	for (;;) {
-		if (!sync_rcu_preempt_exp_done(rnp))
+		if (!sync_rcu_preempt_exp_done(rnp)) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			break;
+		}
 		if (rnp->parent == NULL) {
+			raw_spin_unlock_irqrestore(&rnp->lock, flags);
 			wake_up(&sync_rcu_preempt_exp_wq);
 			break;
 		}
@@ -707,7 +710,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 		raw_spin_lock(&rnp->lock); /* irqs already disabled */
 		rnp->expmask &= ~mask;
 	}
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*