author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-06-12 16:30:25 -0400
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2014-07-09 12:15:01 -0400
commit     abaa93d9e1de2c29297e69ddba8ddd38f15064cf (patch)
tree       127f64c80be6f30fed7265d42cbfc5b449853618 /kernel/rcu/tree_plugin.h
parent     48bd8e9b82a750b983823f391c67e70553757afa (diff)

rcu: Simplify priority boosting by putting rt_mutex in rcu_node
RCU priority boosting currently checks for boosting via a pointer in
task_struct. However, this is not needed: As Oleg noted, if the
rt_mutex is placed in the rcu_node instead of on the booster's stack,
the boostee can simply check it to see if it owns the lock.  This commit
makes this change, shrinking task_struct by one pointer and the kernel
by thirteen lines.
Suggested-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
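
For readers outside the kernel tree, the sketch below (plain user-space C, not part of this commit) illustrates the idiom the patch switches to: instead of each task carrying a pointer to the rt_mutex that may have been proxy-locked on its behalf, the lock lives in the shared node and the task simply asks whether it is the current owner. All names here (boost_node, boost_owner, and so on) are invented for the illustration, and a pthread mutex does not record its owner the way an rt_mutex does, so the sketch tracks ownership by hand.

/* Illustration only -- not kernel code, and single-threaded on purpose. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct boost_node {                     /* stand-in for struct rcu_node    */
        pthread_mutex_t boost_mtx;      /* stand-in for rnp->boost_mtx     */
        pthread_t boost_owner;          /* an rt_mutex records this itself */
        bool boosted;
};

static struct boost_node node = {
        .boost_mtx = PTHREAD_MUTEX_INITIALIZER,
        .boosted = false,
};

/* "Booster" side: take the node's lock on behalf of a boostee (proxy lock). */
static void boost(pthread_t boostee)
{
        pthread_mutex_lock(&node.boost_mtx);
        node.boost_owner = boostee;
        node.boosted = true;
}

/* "Boostee" side: drop the node's lock only if it is held on our behalf. */
static void unboost_if_needed(void)
{
        bool drop_boost_mutex = node.boosted &&
                                pthread_equal(node.boost_owner, pthread_self());

        if (drop_boost_mutex) {
                node.boosted = false;
                pthread_mutex_unlock(&node.boost_mtx);
                printf("released node.boost_mtx\n");
        }
}

int main(void)
{
        boost(pthread_self());  /* same thread, so the unlock below is legal */
        unboost_if_needed();
        return 0;
}

In the patched rcu_read_unlock_special() the equivalent test is rt_mutex_owner(&rnp->boost_mtx) == t, taken while the rcu_node lock is held so the ownership snapshot cannot race with a concurrent rcu_boost().
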
Diffstat (limited to 'kernel/rcu/tree_plugin.h')
-rw-r--r--    kernel/rcu/tree_plugin.h | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 9c811879d31e..719587af7b10 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -33,6 +33,7 @@
 #define RCU_KTHREAD_PRIO 1
 
 #ifdef CONFIG_RCU_BOOST
+#include "../locking/rtmutex_common.h"
 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
 #else
 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
@@ -336,7 +337,7 @@ void rcu_read_unlock_special(struct task_struct *t)
         unsigned long flags;
         struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
-        struct rt_mutex *rbmp = NULL;
+        bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
         struct rcu_node *rnp;
         int special;
@@ -398,11 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                 if (&t->rcu_node_entry == rnp->boost_tasks)
                         rnp->boost_tasks = np;
-                /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
-                if (t->rcu_boost_mutex) {
-                        rbmp = t->rcu_boost_mutex;
-                        t->rcu_boost_mutex = NULL;
-                }
+                /* Snapshot ->boost_mtx ownership with rcu_node lock held. */
+                drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                 /*
@@ -427,8 +425,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
                 /* Unboost if we were boosted. */
-                if (rbmp) {
-                        rt_mutex_unlock(rbmp);
+                if (drop_boost_mutex) {
+                        rt_mutex_unlock(&rnp->boost_mtx);
                         complete(&rnp->boost_completion);
                 }
 #endif /* #ifdef CONFIG_RCU_BOOST */
@@ -1151,7 +1149,6 @@ static void rcu_wake_cond(struct task_struct *t, int status)
 static int rcu_boost(struct rcu_node *rnp)
 {
         unsigned long flags;
-        struct rt_mutex mtx;
         struct task_struct *t;
         struct list_head *tb;
 
@@ -1202,14 +1199,14 @@ static int rcu_boost(struct rcu_node *rnp)
          * section.
          */
         t = container_of(tb, struct task_struct, rcu_node_entry);
-        rt_mutex_init_proxy_locked(&mtx, t);
-        t->rcu_boost_mutex = &mtx;
+        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
         init_completion(&rnp->boost_completion);
         raw_spin_unlock_irqrestore(&rnp->lock, flags);
-        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
-        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
+        /* Lock only for side effect: boosts task t's priority. */
+        rt_mutex_lock(&rnp->boost_mtx);
+        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-        /* Wait until boostee is done accessing mtx before reinitializing. */
+        /* Wait for boostee to be done w/boost_mtx before reinitializing. */
         wait_for_completion(&rnp->boost_completion);
 
         return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
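
As a companion to the final hunk, here is a hedged user-space sketch of the booster-side handshake after this change: proxy-lock the node's mutex so the blocked reader owns it, block acquiring it (in the kernel, that blocking is what transmits the priority boost through the rt_mutex's priority inheritance), and wait on the node's completion before the mutex can be reused. This is a sketch under stated assumptions, not kernel code: POSIX mutexes cannot be proxy-locked, so a semaphore stands in for rnp->boost_mtx and a second one for rnp->boost_completion; all other names are invented.

/* Illustration only -- not kernel code. */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

struct node {
        sem_t boost_mtx;        /* plays rnp->boost_mtx (starts "locked")   */
        sem_t boost_done;       /* plays rnp->boost_completion              */
        pthread_t owner;        /* whom the proxy lock was taken for        */
};

static struct node rnp;
static sem_t proxy_ready;       /* booster signals that the proxy lock is set up */

static void *boostee(void *arg)
{
        (void)arg;
        sem_wait(&proxy_ready);                 /* proxy lock now in place  */

        /* ... tail of the reader's RCU read-side critical section ...      */

        if (pthread_equal(rnp.owner, pthread_self())) { /* do we own it?    */
                sem_post(&rnp.boost_mtx);       /* rt_mutex_unlock()         */
                sem_post(&rnp.boost_done);      /* complete(&rnp->boost_completion) */
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        sem_init(&rnp.boost_mtx, 0, 0);         /* held on the boostee's behalf */
        sem_init(&rnp.boost_done, 0, 0);
        sem_init(&proxy_ready, 0, 0);

        pthread_create(&t, NULL, boostee, NULL);
        rnp.owner = t;                          /* rt_mutex_init_proxy_locked() */
        sem_post(&proxy_ready);

        sem_wait(&rnp.boost_mtx);               /* rt_mutex_lock(): blocks until */
                                                /* the boostee drops the lock    */
        sem_post(&rnp.boost_mtx);               /* rt_mutex_unlock(): leave it   */
                                                /* free, as the real code does   */
        sem_wait(&rnp.boost_done);              /* wait_for_completion()         */

        pthread_join(t, NULL);
        printf("boost handshake complete; boost_mtx may be reused\n");
        return 0;
}

Because the rt_mutex is now embedded in the rcu_node, the completion is what guarantees the boostee has finished touching it before the next boosting cycle reinitializes it; the sketch mirrors that ordering with boost_done.
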