author     Paul E. McKenney <paul.mckenney@linaro.org>    2011-05-05 00:43:49 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2011-05-08 01:50:45 -0400
commit     1217ed1ba5c67393293dfb0f03c353b118dadeb4
tree       a765356c8418e134de85fd05d9fe6eda41de859c /kernel/rcutree_plugin.h
parent     29ce831000081dd757d3116bf774aafffc4b6b20
rcu: permit rcu_read_unlock() to be called while holding runqueue locks
Avoid calling into the scheduler while holding core RCU locks. This
allows rcu_read_unlock() to be called while holding the runqueue locks,
but only as long as there was no chance of the RCU read-side critical
section having been preempted. (Otherwise, if RCU priority boosting
is enabled, rcu_read_unlock() might call into the scheduler in order to
unboost itself, which might allow self-deadlock on the runqueue locks
within the scheduler.)
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
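
The pattern the change enforces: rcu_initiate_boost() is now entered with rnp->lock held, releases that lock itself, and only afterwards calls wake_up_process(), so the scheduler is never entered while an RCU node lock is held. Below is a rough userspace analogue of this "drop the lock before waking" pattern, with hypothetical names (node_lock, boost_wq, initiate_boost) and pthreads standing in for the kernel primitives; it is an illustrative sketch, not the kernel code.

/*
 * Hypothetical userspace sketch of the "release the lock before waking
 * the worker" pattern.  Names are invented for the example.
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t boost_wq = PTHREAD_COND_INITIALIZER;
static int boost_needed;

/* Caller must hold node_lock; this function releases it. */
static void initiate_boost(void)
{
	boost_needed = 1;
	pthread_mutex_unlock(&node_lock);  /* drop the lock first... */
	pthread_cond_signal(&boost_wq);    /* ...then do the wakeup */
}

static void *boost_worker(void *arg)
{
	pthread_mutex_lock(&node_lock);
	while (!boost_needed)
		pthread_cond_wait(&boost_wq, &node_lock);
	pthread_mutex_unlock(&node_lock);
	printf("worker woken, boosting\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, boost_worker, NULL);
	pthread_mutex_lock(&node_lock);
	initiate_boost();                  /* returns with node_lock released */
	pthread_join(worker, NULL);
	return 0;
}

In the kernel version the unlock is folded into rcu_initiate_boost() itself (the caller passes along its saved irq flags), which is what makes this path safe to reach from rcu_read_unlock() while runqueue locks are held.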
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--   kernel/rcutree_plugin.h | 64
1 file changed, 20 insertions(+), 44 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f629479d4b1f..ed339702481d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -711,15 +711,17 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
+	unsigned long flags;
 	int must_wait = 0;
 
-	raw_spin_lock(&rnp->lock); /* irqs already disabled */
-	if (!list_empty(&rnp->blkd_tasks)) {
+	raw_spin_lock_irqsave(&rnp->lock, flags);
+	if (list_empty(&rnp->blkd_tasks))
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	else {
 		rnp->exp_tasks = rnp->blkd_tasks.next;
-		rcu_initiate_boost(rnp);
+		rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
 		must_wait = 1;
 	}
-	raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
 	if (!must_wait)
 		rcu_report_exp_rnp(rsp, rnp);
 }
@@ -1179,12 +1181,7 @@ static int rcu_boost(struct rcu_node *rnp)
  */
 static void rcu_boost_kthread_timer(unsigned long arg)
 {
-	unsigned long flags;
-	struct rcu_node *rnp = (struct rcu_node *)arg;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	invoke_rcu_node_kthread(rnp);
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	invoke_rcu_node_kthread((struct rcu_node *)arg);
 }
 
 /*
@@ -1200,10 +1197,7 @@ static int rcu_boost_kthread(void *arg)
 	for (;;) {
 		rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
 		wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-					 rnp->exp_tasks ||
-					 kthread_should_stop());
-		if (kthread_should_stop())
-			break;
+					 rnp->exp_tasks);
 		rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
 		more2boost = rcu_boost(rnp);
 		if (more2boost)
@@ -1215,7 +1209,7 @@ static int rcu_boost_kthread(void *arg)
 			spincnt = 0;
 		}
 	}
-	rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+	/* NOTREACHED */
 	return 0;
 }
 
@@ -1225,14 +1219,17 @@ static int rcu_boost_kthread(void *arg)
  * kthread to start boosting them.  If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled.  The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
  */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
 	struct task_struct *t;
 
 	if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
 		rnp->n_balk_exp_gp_tasks++;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 	if (rnp->exp_tasks != NULL ||
@@ -1242,11 +1239,14 @@ static void rcu_initiate_boost(struct rcu_node *rnp)
 	    ULONG_CMP_GE(jiffies, rnp->boost_time))) {
 		if (rnp->exp_tasks == NULL)
 			rnp->boost_tasks = rnp->gp_tasks;
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		t = rnp->boost_kthread_task;
 		if (t != NULL)
 			wake_up_process(t);
-	} else
+	} else {
 		rcu_initiate_boost_trace(rnp);
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
 }
 
 /*
@@ -1312,27 +1312,11 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-	unsigned long flags;
-	struct task_struct *t;
-
-	raw_spin_lock_irqsave(&rnp->lock, flags);
-	t = rnp->boost_kthread_task;
-	rnp->boost_kthread_task = NULL;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
-	if (t != NULL)
-		kthread_stop(t);
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #else /* #ifdef CONFIG_RCU_BOOST */
 
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
+	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
@@ -1355,14 +1339,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 	return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
-{
-}
-
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifndef CONFIG_SMP