author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-06-10 19:31:55 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2014-07-09 12:15:00 -0400
commit	dfeb9765ce3c33cb3cbc5f16db423f1c58a4cc55 (patch)
tree	beffed3c9b871cb3ce35e1271820da5cc16c8f00 /kernel/rcu/tree_plugin.h
parent	1146edcbef3789228454c4aa42c08ddc2c275990 (diff)
rcu: Allow post-unlock reference for rt_mutex
The current approach to RCU priority boosting uses an rt_mutex strictly for its priority-boosting side effects. The rt_mutex_init_proxy_locked() function is used by the booster to initialize the lock as held by the boostee. The booster then uses rt_mutex_lock() to acquire this rt_mutex, which priority-boosts the boostee. When the boostee reaches the end of its outermost RCU read-side critical section, it checks a field in its task structure to see whether it has been boosted, and, if so, uses rt_mutex_unlock() to release the rt_mutex. The booster can then go on to boost the next task that is blocking the current RCU grace period.

But reasonable implementations of rt_mutex_unlock() might result in the boostee referencing the rt_mutex's data after releasing it. But the booster might have re-initialized the rt_mutex between the time that the boostee released it and the time that it later referenced it. This is clearly asking for trouble, so this commit introduces a completion that forces the booster to wait until the boostee has completely finished with the rt_mutex, thus avoiding the case where the booster is re-initializing the rt_mutex before the last boostee's last reference to that rt_mutex.

This of course does introduce some overhead, but the priority-boosting code paths are miles from any possible fastpath, and the overhead of executing the completion will normally be quite small compared to the overhead of priority boosting and deboosting, so this should be OK.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
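To make the handshake concrete, here is a minimal userspace sketch of the booster/boostee protocol, assuming nothing beyond POSIX threads: struct boost_ctx, boostee(), and the booster role in main() are invented names for illustration, sem_t stands in for both the proxy-locked rt_mutex and the completion, and the priority-inheritance side effect itself is not modeled. What it shows is the invariant the patch enforces: the booster must not reinitialize or reuse the lock's storage until the boostee signals that its last reference to that storage is done.

/* Hypothetical userspace model of the booster/boostee handshake; the
 * names and semaphore-based "lock" are illustration only, not kernel APIs. */
#include <pthread.h>
#include <semaphore.h>

struct boost_ctx {
	sem_t mtx;		/* stands in for the on-stack rt_mutex "mtx" */
	sem_t completion;	/* stands in for rnp->boost_completion */
};

static void *boostee(void *arg)
{
	struct boost_ctx *ctx = arg;
	int v;

	/* End of outermost read-side critical section: drop the "lock". */
	sem_post(&ctx->mtx);		/* ~ rt_mutex_unlock(rbmp) */
	/* Stand-in for an unlock path that touches the lock after release;
	 * this is the post-unlock reference the commit message describes. */
	sem_getvalue(&ctx->mtx, &v);
	sem_post(&ctx->completion);	/* ~ complete(&rnp->boost_completion) */
	return NULL;
}

int main(void)
{
	struct boost_ctx ctx;	/* lives on the booster's stack, like mtx */
	pthread_t t;

	sem_init(&ctx.mtx, 0, 0);	 /* ~ rt_mutex_init_proxy_locked(): born held */
	sem_init(&ctx.completion, 0, 0); /* ~ init_completion() */
	pthread_create(&t, NULL, boostee, &ctx);

	sem_wait(&ctx.mtx);		/* ~ rt_mutex_lock(&mtx): boostee dropped it */
	/* Without the next wait, reinitializing ctx here could race with the
	 * boostee's trailing accesses (its sem_getvalue() and second sem_post()). */
	sem_wait(&ctx.completion);	/* ~ wait_for_completion(): now safe to reuse ctx */

	pthread_join(t, NULL);		/* model-only cleanup for a clean exit */
	sem_destroy(&ctx.mtx);
	sem_destroy(&ctx.completion);
	return 0;
}

Compile with cc -pthread. In the kernel the booster does not join the boostee; it loops around in rcu_boost() and reinitializes the same storage for the next boostee, which is why the completion, not thread teardown, is what makes the reuse safe.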
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--	kernel/rcu/tree_plugin.h	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 5da9f9b3abc9..9c811879d31e 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -427,8 +427,10 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
 	/* Unboost if we were boosted. */
-	if (rbmp)
+	if (rbmp) {
 		rt_mutex_unlock(rbmp);
+		complete(&rnp->boost_completion);
+	}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 	/*
@@ -1202,10 +1204,14 @@ static int rcu_boost(struct rcu_node *rnp)
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
 	t->rcu_boost_mutex = &mtx;
+	init_completion(&rnp->boost_completion);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
 	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
 
+	/* Wait until boostee is done accessing mtx before reinitializing. */
+	wait_for_completion(&rnp->boost_completion);
+
 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
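One piece of context this page filters out: the diffstat above is limited to kernel/rcu/tree_plugin.h, so the declaration of the rnp->boost_completion field does not appear. Presumably the same commit adds a struct completion to struct rcu_node in kernel/rcu/tree.h, along these lines (a sketch of the companion declaration, not taken from this page):

#ifdef CONFIG_RCU_BOOST
	/* Presumed companion field in struct rcu_node: lets the booster */
	/* wait until the boostee is completely done with the rt_mutex   */
	/* before that rt_mutex is re-initialized for the next boostee.  */
	struct completion boost_completion;
#endif /* #ifdef CONFIG_RCU_BOOST */

init_completion(), complete(), and wait_for_completion() are the standard <linux/completion.h> primitives: complete() wakes the waiter, and wait_for_completion() blocks until that has happened, which is what makes it safe for the booster to reuse the on-stack mtx on its next pass through rcu_boost().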