author    Paul E. McKenney <paul.mckenney@linaro.org>    2011-05-05 00:43:49 -0400
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-08 01:50:45 -0400
commit    1217ed1ba5c67393293dfb0f03c353b118dadeb4 (patch)
tree      a765356c8418e134de85fd05d9fe6eda41de859c /kernel/rcutree.c
parent    29ce831000081dd757d3116bf774aafffc4b6b20 (diff)
rcu: permit rcu_read_unlock() to be called while holding runqueue locks
Avoid calling into the scheduler while holding core RCU locks. This allows rcu_read_unlock() to be called while holding the runqueue locks, but only as long as there was no chance of the RCU read-side critical section having been preempted. (Otherwise, if RCU priority boosting is enabled, rcu_read_unlock() might call into the scheduler in order to unboost itself, which might allow self-deadlock on the runqueue locks within the scheduler.)

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
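The deadlock shape described in the parenthetical above can be sketched in a few lines of userspace C. This is a hypothetical analogy, not kernel code: a pthread mutex stands in for the runqueue lock, and unboost() stands in for the deboost path that rcu_read_unlock() may take when the critical section was preempted and RCU priority boosting is enabled.

/*
 * Hypothetical userspace analogy of the self-deadlock this commit
 * avoids; all names here are illustrative, not the kernel's.  A
 * non-recursive lock held by the "scheduler" must not be re-acquired
 * by a callback it reaches -- which is exactly what the deboost path
 * could do.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for rq->lock */

static void unboost(void)
{
	/* Stand-in for rcu_read_unlock() re-entering the scheduler to
	 * deboost itself: taking rq_lock again here would self-deadlock. */
	pthread_mutex_lock(&rq_lock);
	pthread_mutex_unlock(&rq_lock);
}

static void read_unlock_analogy(int was_preempted)
{
	if (was_preempted)
		unboost();	/* only the preempted path is dangerous */
}

int main(void)
{
	pthread_mutex_lock(&rq_lock);	/* the "scheduler" holds the lock */
	read_unlock_analogy(0);		/* safe: no deboost needed */
	/* read_unlock_analogy(1) here would hang on rq_lock. */
	pthread_mutex_unlock(&rq_lock);
	printf("non-preempted path completed without deadlock\n");
	return 0;
}

In these terms, the commit guarantees that only the non-preempted path can run while runqueue locks are held, so the dangerous re-entry never happens there.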
Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--	kernel/rcutree.c	44
1 file changed, 13 insertions(+), 31 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 54ff7eb92819..5616b17e4a22 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1133,22 +1133,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
-
-	/*
-	 * If there are no more online CPUs for this rcu_node structure,
-	 * kill the rcu_node structure's kthread.  Otherwise, adjust its
-	 * affinity.
-	 */
-	t = rnp->node_kthread_task;
-	if (t != NULL &&
-	    rnp->qsmaskinit == 0) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = NULL;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		kthread_stop(t);
-		rcu_stop_boost_kthread(rnp);
-	} else
-		rcu_node_kthread_setaffinity(rnp, -1);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1320,8 +1305,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			rcu_initiate_boost(rnp);
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1340,10 +1324,10 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (rnp->qsmask == 0)
-		rcu_initiate_boost(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1497,7 +1481,8 @@ static void invoke_rcu_cpu_kthread(void)
 
 /*
  * Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
  */
 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
@@ -1546,8 +1531,8 @@ static void rcu_cpu_kthread_timer(unsigned long arg)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->wakemask |= rdp->grpmask;
-	invoke_rcu_node_kthread(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
 }
 
 /*
@@ -1694,16 +1679,12 @@ static int rcu_node_kthread(void *arg)
 
 	for (;;) {
 		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
-						       kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
 		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = rnp->wakemask;
 		rnp->wakemask = 0;
-		rcu_initiate_boost(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
 		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
 			if ((mask & 0x1) == 0)
 				continue;
@@ -1719,6 +1700,7 @@ static int rcu_node_kthread(void *arg)
 			preempt_enable();
 		}
 	}
+	/* NOTREACHED */
 	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
 	return 0;
 }
@@ -1738,7 +1720,7 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	int cpu;
 	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL || mask == 0)
+	if (rnp->node_kthread_task == NULL)
 		return;
 	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
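A pattern worth noting across the hunks above: rcu_initiate_boost() now takes the caller's saved flags and releases rnp->lock itself (via raw_spin_unlock_irqrestore()), and invoke_rcu_node_kthread() is moved to after the unlock. A minimal userspace sketch of that "callee releases the caller's lock" hand-off, with hypothetical names:

#include <pthread.h>

struct node {
	pthread_mutex_t lock;
	int need_boost;
};

/*
 * Hypothetical sketch: the callee inherits responsibility for the
 * unlock, so by the time anything that might reschedule runs, the
 * lock is already dropped.
 */
static void initiate_boost(struct node *np)	/* releases np->lock */
{
	int boost = np->need_boost;

	np->need_boost = 0;
	pthread_mutex_unlock(&np->lock);	/* drop the lock first ... */
	if (boost) {
		/* ... then wake a worker / enter the "scheduler" with
		 * no locks held. */
	}
}

static void caller(struct node *np)
{
	pthread_mutex_lock(&np->lock);
	initiate_boost(np);			/* returns with lock released */
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 1 };

	caller(&n);	/* n.lock is free again on return */
	return 0;
}

Transferring the unlock into the callee keeps the lock drop and the potentially rescheduling work on one side of the call boundary, which is what lets rcu_read_unlock() be called with runqueue locks held.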