author		Paul E. McKenney <paul.mckenney@linaro.org>	2011-08-16 20:46:46 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-09-29 00:38:43 -0400
commit		5342e269b2b58ee0b0b4168a94087faaa60d0567 (patch)
tree		77467d64948328e506f06eaf5a719de7fade4b83
parent		06ae115a1d551cd952d80df06eaf8b5153351875 (diff)
rcu: Permit rt_mutex_unlock() with irqs disabled
Create a separate lockdep class for the rt_mutex used for RCU priority
boosting and enable use of rt_mutex_lock() with irqs disabled.  This
prevents RCU priority boosting from falling prey to deadlocks when
someone begins an RCU read-side critical section in preemptible state,
but releases it with an irq-disabled lock held.

Unfortunately, the scheduler's runqueue and priority-inheritance locks
still must either completely enclose or be completely enclosed by any
overlapping RCU read-side critical section.

This version removes a redundant local_irq_restore() noted by
Yong Zhang.

Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
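[Editor's illustration, not part of the patch.] A minimal sketch of the
usage pattern this commit makes safe; my_lock and boosted_reader are
hypothetical names.  The RCU read-side critical section begins while
preemptible, but ends while an irq-disabling lock is held.  If the
reader was preempted and priority-boosted in between, rcu_read_unlock()
must release the boost rt_mutex with irqs disabled:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);	/* hypothetical lock */

static void boosted_reader(void)
{
	unsigned long flags;

	rcu_read_lock();		/* preemptible here; task may be boosted */
	spin_lock_irqsave(&my_lock, flags);
	rcu_read_unlock();		/* may call rt_mutex_unlock() with irqs off */
	spin_unlock_irqrestore(&my_lock, flags);
}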
-rw-r--r--	kernel/rcutree_plugin.h	5
-rw-r--r--	kernel/rtmutex.c	8
2 files changed, 13 insertions(+), 0 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index d3127e8764cb..28422767d854 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1149,6 +1149,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1211,6 +1213,9 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
+	/* Avoid lockdep false positives.  This rt_mutex is its own thing. */
+	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
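[Editor's note.] The lockdep_set_class_and_name() call above gives the
on-stack boost mutex's wait_lock its own lockdep class, so its
acquisitions are not conflated with those of ordinary rt_mutex
wait_locks ("this rt_mutex is its own thing").  A generic sketch of the
same pattern, with my_class and my_lock_init as hypothetical names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static struct lock_class_key my_class;	/* one key per distinct lock class */

static void my_lock_init(spinlock_t *lock)
{
	spin_lock_init(lock);
	/* Track this lock separately from other locks of the same type. */
	lockdep_set_class_and_name(lock, &my_class, "my_lock");
}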
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 255e1662acdb..5e8d9cce7470 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,6 +579,7 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
+	int was_disabled;
 
 	for (;;) {
 		/* Try to acquire the lock: */
@@ -601,10 +602,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 		raw_spin_unlock(&lock->wait_lock);
 
+		was_disabled = irqs_disabled();
+		if (was_disabled)
+			local_irq_enable();
+
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule_rt_mutex(lock);
 
+		if (was_disabled)
+			local_irq_disable();
+
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}
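[Editor's note.] The hunk above lets a caller that holds irqs disabled
still block in __rt_mutex_slowlock(): the task's irq-disabled state is
recorded, irqs are enabled only for the duration of the sleep, and the
original state is restored afterward.  The pattern in isolation, as a
sketch with sleep_reenabling_irqs as a hypothetical name:

#include <linux/irqflags.h>
#include <linux/sched.h>

static void sleep_reenabling_irqs(void)
{
	int was_disabled = irqs_disabled();	/* record caller's irq state */

	if (was_disabled)
		local_irq_enable();		/* cannot sleep with irqs off */

	schedule();				/* block until woken */

	if (was_disabled)
		local_irq_disable();		/* restore caller's irq state */
}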