path: root/kernel/rcutree_plugin.h
author	Paul E. McKenney <paul.mckenney@linaro.org>	2011-08-04 10:55:34 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 00:38:39 -0400
commit	82e78d80fc392ac7e98326bc8beeb8a679913ffd (patch)
tree	4999d3c03ed711056474c3a31512e7d3cf253b87 /kernel/rcutree_plugin.h
parent	5c51dd7349d4bb26f845f17f85daa168f5fa03f2 (diff)
rcu: Simplify unboosting checks
Commit 7765be (Fix RCU_BOOST race handling current->rcu_read_unlock_special)
introduced a new ->rcu_boosted field in the task structure.  This is
redundant because the existing ->rcu_boost_mutex will be non-NULL at
any time that ->rcu_boosted is nonzero.  Therefore, this commit removes
->rcu_boosted and tests ->rcu_boost_mutex instead.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
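The invariant the commit message relies on is easiest to see in isolation.
Below is a minimal user-space sketch (not kernel code; struct and function
names are illustrative, not from the source): because ->rcu_boost_mutex is
non-NULL exactly when the task has been boosted, the pointer itself can
double as the "boosted" flag, and snapshotting/clearing it while the
relevant lock is held replaces the separate ->rcu_boosted boolean.

	#include <stddef.h>

	struct rt_mutex;                       /* opaque here; the real one is in the kernel */

	struct task_sketch {                   /* stand-in for the relevant task_struct bits */
		struct rt_mutex *rcu_boost_mutex;  /* non-NULL iff this task was boosted */
	};

	/*
	 * Caller is assumed to hold the lock that the booster also holds
	 * (the rcu_node lock in the patch), so the test and the clearing
	 * of ->rcu_boost_mutex cannot race.  The returned pointer, if
	 * non-NULL, is unlocked by the caller after that lock is dropped.
	 */
	static struct rt_mutex *snapshot_boost_mutex(struct task_sketch *t)
	{
		struct rt_mutex *rbmp = NULL;

		if (t->rcu_boost_mutex) {          /* pointer doubles as "boosted" flag */
			rbmp = t->rcu_boost_mutex;     /* snapshot ...                     */
			t->rcu_boost_mutex = NULL;     /* ... and clear under the lock     */
		}
		return rbmp;
	}

As the hunks below show, the kernel code follows the same pattern: it
snapshots the pointer into the local rbmp with the rcu_node lock held and
calls rt_mutex_unlock(rbmp) only after that lock has been dropped.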
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r--	kernel/rcutree_plugin.h	| 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index ed70f6bf4c31..eeb38ee8ebba 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -306,6 +306,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	int empty_exp;
 	unsigned long flags;
 	struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+	struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
 	struct rcu_node *rnp;
 	int special;
 
@@ -351,6 +354,7 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 	smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 	np = rcu_next_node_entry(t, rnp);
 	list_del_init(&t->rcu_node_entry);
+	t->rcu_blocked_node = NULL;
 	trace_rcu_unlock_preempted_task("rcu_preempt",
 					rnp->gpnum, t->pid);
 	if (&t->rcu_node_entry == rnp->gp_tasks)
@@ -360,13 +364,12 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
 	if (&t->rcu_node_entry == rnp->boost_tasks)
 		rnp->boost_tasks = np;
-	/* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
-	if (t->rcu_boosted) {
-		special |= RCU_READ_UNLOCK_BOOSTED;
-		t->rcu_boosted = 0;
+	/* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+	if (t->rcu_boost_mutex) {
+		rbmp = t->rcu_boost_mutex;
+		t->rcu_boost_mutex = NULL;
 	}
 #endif /* #ifdef CONFIG_RCU_BOOST */
-	t->rcu_blocked_node = NULL;
 
 	/*
 	 * If this was the last task on the current list, and if
@@ -387,10 +390,8 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
 	/* Unboost if we were boosted. */
-	if (special & RCU_READ_UNLOCK_BOOSTED) {
-		rt_mutex_unlock(t->rcu_boost_mutex);
-		t->rcu_boost_mutex = NULL;
-	}
+	if (rbmp)
+		rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/*
@@ -1206,7 +1207,6 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
-	t->rcu_boosted = 1;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */