commit 5342e269b2b58ee0b0b4168a94087faaa60d0567
author:    Paul E. McKenney <paul.mckenney@linaro.org>      2011-08-16 20:46:46 -0400
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>    2011-09-29 00:38:43 -0400
tree:      77467d64948328e506f06eaf5a719de7fade4b83  /kernel/rcutree_plugin.h
parent:    06ae115a1d551cd952d80df06eaf8b5153351875
rcu: Permit rt_mutex_unlock() with irqs disabled
Create a separate lockdep class for the rt_mutex used for RCU priority
boosting and enable use of rt_mutex_lock() with irqs disabled. This
prevents RCU priority boosting from falling prey to deadlocks when
someone begins an RCU read-side critical section in preemptible state,
but releases it with an irq-disabled lock held.
Unfortunately, the scheduler's runqueue and priority-inheritance locks
still must either completely enclose or be completely enclosed by any
overlapping RCU read-side critical section.
This version removes a redundant local_irq_restore() noted by
Yong Zhang.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_plugin.h')
-rw-r--r-- | kernel/rcutree_plugin.h | 5 |
1 files changed, 5 insertions, 0 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index d3127e8764cb..28422767d854 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1149,6 +1149,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1211,6 +1213,9 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
+	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
+	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */