 kernel/rcutree.c        | 44
 kernel/rcutree.h        |  5
 kernel/rcutree_plugin.h | 64
 3 files changed, 34 insertions(+), 79 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 54ff7eb92819..5616b17e4a22 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1133,22 +1133,7 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	if (need_report & RCU_OFL_TASKS_EXP_GP)
 		rcu_report_exp_rnp(rsp, rnp);
-
-	/*
-	 * If there are no more online CPUs for this rcu_node structure,
-	 * kill the rcu_node structure's kthread.  Otherwise, adjust its
-	 * affinity.
-	 */
-	t = rnp->node_kthread_task;
-	if (t != NULL &&
-	    rnp->qsmaskinit == 0) {
-		raw_spin_lock_irqsave(&rnp->lock, flags);
-		rnp->node_kthread_task = NULL;
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
-		kthread_stop(t);
-		rcu_stop_boost_kthread(rnp);
-	} else
-		rcu_node_kthread_setaffinity(rnp, -1);
+	rcu_node_kthread_setaffinity(rnp, -1);
 }
 
 /*
@@ -1320,8 +1305,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 			return;
 		}
 		if (rnp->qsmask == 0) {
-			rcu_initiate_boost(rnp);
-			raw_spin_unlock_irqrestore(&rnp->lock, flags);
+			rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 			continue;
 		}
 		cpu = rnp->grplo;
@@ -1340,10 +1324,10 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	}
 	rnp = rcu_get_root(rsp);
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	if (rnp->qsmask == 0)
-		rcu_initiate_boost(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	if (rnp->qsmask == 0) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+	}
 }
 
 /*
@@ -1497,7 +1481,8 @@ static void invoke_rcu_cpu_kthread(void)
 
 /*
  * Wake up the specified per-rcu_node-structure kthread.
- * The caller must hold ->lock.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
  */
 static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
@@ -1546,8 +1531,8 @@ static void rcu_cpu_kthread_timer(unsigned long arg)
 
 	raw_spin_lock_irqsave(&rnp->lock, flags);
 	rnp->wakemask |= rdp->grpmask;
-	invoke_rcu_node_kthread(rnp);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread(rnp);
 }
 
 /*
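The hunk above moves the node-kthread wakeup out of the rnp->lock critical section: the timer handler now publishes its bit in ->wakemask under the lock, drops the lock, and only then calls invoke_rcu_node_kthread(). A plausible reading is that the wakeup path can take scheduler locks, which are better not nested inside rnp->lock. A minimal user-space sketch of the same ordering; all names here are invented for illustration, not kernel code:

	/*
	 * Record the work under the lock, drop the lock, then issue
	 * the wakeup -- mirroring the reordering in
	 * rcu_cpu_kthread_timer().  Build with -lpthread.
	 */
	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
	static unsigned long wakemask;

	static void timer_handler(unsigned long grpmask)
	{
		pthread_mutex_lock(&lock);
		wakemask |= grpmask;          /* publish work under the lock */
		pthread_mutex_unlock(&lock);  /* drop the lock first ...     */
		pthread_cond_signal(&wq);     /* ... then wake the worker    */
	}

	int main(void)
	{
		timer_handler(0x1);
		return 0;
	}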
@@ -1694,16 +1679,12 @@ static int rcu_node_kthread(void *arg)
 
 	for (;;) {
 		rnp->node_kthread_status = RCU_KTHREAD_WAITING;
-		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0 ||
-						       kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+		wait_event_interruptible(rnp->node_wq, rnp->wakemask != 0);
 		rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
 		raw_spin_lock_irqsave(&rnp->lock, flags);
 		mask = rnp->wakemask;
 		rnp->wakemask = 0;
-		rcu_initiate_boost(rnp);
-		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
 		for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
 			if ((mask & 0x1) == 0)
 				continue;
@@ -1719,6 +1700,7 @@ static int rcu_node_kthread(void *arg)
 			preempt_enable();
 		}
 	}
+	/* NOTREACHED */
 	rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
 	return 0;
 }
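With the stop protocol gone, rcu_node_kthread() waits only for ->wakemask and loops forever; the /* NOTREACHED */ annotation records that the status assignment below it can no longer execute. A minimal pthread sketch of such an "immortal" worker loop, with illustrative names throughout:

	/* Build with -lpthread. */
	#include <pthread.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
	static unsigned long wakemask;

	static void *node_worker(void *arg)
	{
		unsigned long mask;

		(void)arg;
		for (;;) {                      /* no exit path: thread is immortal */
			pthread_mutex_lock(&lock);
			while (wakemask == 0)   /* wait_event_interruptible() analogue */
				pthread_cond_wait(&wq, &lock);
			mask = wakemask;
			wakemask = 0;
			pthread_mutex_unlock(&lock);
			(void)mask;             /* ... fan work out per bit of mask ... */
		}
		return NULL;                    /* NOTREACHED */
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, node_worker, NULL);
		pthread_mutex_lock(&lock);
		wakemask |= 0x1;                /* mark one CPU as needing attention */
		pthread_mutex_unlock(&lock);
		pthread_cond_signal(&wq);       /* wake outside the lock, as in the patch */
		usleep(10000);
		return 0;                       /* process exit reaps the immortal worker */
	}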
@@ -1738,7 +1720,7 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
 	int cpu;
 	unsigned long mask = rnp->qsmaskinit;
 
-	if (rnp->node_kthread_task == NULL || mask == 0)
+	if (rnp->node_kthread_task == NULL)
 		return;
 	if (!alloc_cpumask_var(&cm, GFP_KERNEL))
 		return;
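Dropping the mask == 0 early return matches the new lifetime rules: the kthread is no longer killed when its last CPU goes offline, so its affinity may need adjusting even then. A rough user-space analogue of rcu_node_kthread_setaffinity(); the kernel uses cpumask_var_t and set_cpus_allowed_ptr(), and the names and GNU pthread call here are purely illustrative:

	#define _GNU_SOURCE
	#include <pthread.h>
	#include <sched.h>

	/* Rebuild the worker's CPU mask, skipping the outgoing CPU. */
	static void set_worker_affinity(pthread_t worker, unsigned long mask,
					int grplo, int outgoingcpu)
	{
		cpu_set_t cm;
		int cpu;

		CPU_ZERO(&cm);
		for (cpu = grplo; mask != 0; cpu++, mask >>= 1)
			if ((mask & 0x1) && cpu != outgoingcpu)
				CPU_SET(cpu, &cm);
		pthread_setaffinity_np(worker, sizeof(cm), &cm);
	}

	int main(void)
	{
		/* Group covers CPUs 0-3 (mask 0xf); CPU 2 is going offline. */
		set_worker_affinity(pthread_self(), 0xf, 0, 2);
		return 0;
	}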
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index a6a97171dac6..93d4a1c2e88b 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -444,15 +444,12 @@ static void rcu_preempt_send_cbs_to_online(void);
 static void __init __rcu_init_preempt(void);
 static void rcu_needs_cpu_flush(void);
 static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp);
-static void rcu_initiate_boost(struct rcu_node *rnp);
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
 					  cpumask_var_t cm);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp,
 						 int rnp_index);
-#ifdef CONFIG_HOTPLUG_CPU
-static void rcu_stop_boost_kthread(struct rcu_node *rnp);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 #endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f629479d4b1f..ed339702481d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -711,15 +711,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
+	unsigned long flags;
 	int must_wait = 0;
 
-	raw_spin_lock(&rnp->lock); /* irqs already disabled */
-	if (!list_empty(&rnp->blkd_tasks)) {
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	if (list_empty(&rnp->blkd_tasks))
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
-		rcu_initiate_boost(rnp);
+		rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
 		must_wait = 1;
 	}
-	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 	if (!must_wait)
 		rcu_report_exp_rnp(rsp, rnp);
 }
@@ -1179,12 +1181,7 @@ static int rcu_boost(struct rcu_node *rnp)
  */
 static void rcu_boost_kthread_timer(unsigned long arg)
 {
-	unsigned long flags;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	invoke_rcu_node_kthread(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread((struct rcu_node *)arg);
 }
 
 /*
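Since invoke_rcu_node_kthread() no longer requires the caller to hold ->lock, the boost timer handler collapses to a cast-and-call trampoline. A toy sketch of that shape, with invented names:

	#include <stdio.h>

	struct node { const char *name; };

	static void invoke_node_worker(struct node *np)
	{
		printf("poke %s\n", np->name);  /* stands in for the wakeup */
	}

	/* Timer callbacks receive an unsigned long cookie, as in the kernel. */
	static void boost_timer(unsigned long arg)
	{
		invoke_node_worker((struct node *)arg);
	}

	int main(void)
	{
		struct node n = { "rcu_node 0" };
		boost_timer((unsigned long)&n);
		return 0;
	}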
@@ -1200,10 +1197,7 @@ static int rcu_boost_kthread(void *arg)
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-					 rnp->exp_tasks ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+					 rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1215,7 +1209,7 @@ static int rcu_boost_kthread(void *arg)
 			spincnt = 0;
 		}
 	}
-	rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+	/* NOTREACHED */
 	return 0;
 }
 
@@ -1225,14 +1219,17 @@ static int rcu_boost_kthread(void *arg)
  * kthread to start boosting them.  If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled.  The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
  */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1242,11 +1239,14 @@ static void rcu_initiate_boost(struct rcu_node *rnp)
 	       ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
 		if (t != NULL)
 			wake_up_process(t);
-	} else
+	} else {
 		rcu_initiate_boost_trace(rnp);
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
 }
 
 /*
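These two hunks establish the patch's central convention: rcu_initiate_boost() is entered with rnp->lock held and releases it on every return path, and the wake_up_process() call now happens after the unlock. A small pthread sketch of an API with this "callee releases the caller's lock" shape; every name is invented for illustration:

	/* Build with -lpthread. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int work_pending;

	/* Entered with 'lock' held; returns with it released on every path. */
	static void initiate_work(void)
	{
		if (!work_pending) {
			pthread_mutex_unlock(&lock);  /* balk path unlocks too */
			return;
		}
		work_pending = 0;
		pthread_mutex_unlock(&lock);          /* unlock before the wakeup */
		printf("waking worker outside the lock\n");
	}

	int main(void)
	{
		pthread_mutex_lock(&lock);
		work_pending = 1;
		initiate_work();                      /* no unlock after the call */
		return 0;
	}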
@@ -1312,27 +1312,11 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-	unsigned long flags;
-	struct task_struct *t;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	t = rnp->boost_kthread_task;
-	rnp->boost_kthread_task = NULL;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	if (t != NULL)
-		kthread_stop(t);
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
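Note that even the !CONFIG_RCU_BOOST stub must release the lock, since callers now uniformly rely on rcu_initiate_boost() to do the unlock. A toy illustration of why the do-nothing configuration still unlocks (invented names; build with -lpthread):

	#include <pthread.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	#ifdef WITH_BOOST
	static void initiate_boost(void)
	{
		/* ... boost work elided ... */
		pthread_mutex_unlock(&lock);
	}
	#else
	static void initiate_boost(void)
	{
		pthread_mutex_unlock(&lock);  /* stub still honors the contract */
	}
	#endif

	int main(void)
	{
		pthread_mutex_lock(&lock);
		initiate_boost();             /* lock released in either config */
		return 0;
	}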
@@ -1355,14 +1339,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP