author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-09 17:00:06 -0500
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2011-12-11 13:33:18 -0500
commit	70321d447aa1a7cc2d60db16234f43c5a65630e7 (patch)
tree	a97749ef35045fc41585d121f595cb0644b319c2
parent	d493011a376f9df3b8b3da1102509b343b1a4ef2 (diff)
Revert "rcu: Permit rt_mutex_unlock() with irqs disabled"
This reverts commit 5342e269b2b58ee0b0b4168a94087faaa60d0567.

The approach taken in that patch was deemed too abusive to mutexes, and thus too likely to result in maintenance problems in the future. Instead, we will disallow RCU read-side critical sections that partially overlap with interrupt-disabled code segments.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
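A short sketch of the usage rule implied by the commit message (illustration only, not part of the patch; the function names are hypothetical, and it assumes a CONFIG_RCU_BOOST kernel in which rcu_read_unlock() may call rt_mutex_unlock() to deboost the current task):

#include <linux/irqflags.h>
#include <linux/rcupdate.h>

/*
 * OK after this revert: the read-side critical section is fully nested
 * inside the irqs-disabled region, so the task cannot be preempted (and
 * therefore boosted) while inside it, and rcu_read_unlock() never needs
 * to call rt_mutex_unlock().
 */
static void fully_nested_example(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_read_lock();
	/* ... access RCU-protected data ... */
	rcu_read_unlock();
	local_irq_restore(flags);
}

/*
 * Disallowed: the critical section partially overlaps the irqs-disabled
 * region.  The task may have been preempted and boosted after
 * rcu_read_lock(), so rcu_read_unlock() may need rt_mutex_unlock(), which
 * once again requires interrupts to be enabled.
 */
static void partial_overlap_example(void)
{
	unsigned long flags;

	rcu_read_lock();
	local_irq_save(flags);
	/* ... */
	rcu_read_unlock();	/* BAD: irqs are still disabled here. */
	local_irq_restore(flags);
}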
-rw-r--r--	kernel/rcutree_plugin.h	5
-rw-r--r--	kernel/rtmutex.c	8
2 files changed, 0 insertions, 13 deletions
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 8cd9efe7e81f..f55f10ba01f5 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1165,8 +1165,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-static struct lock_class_key rcu_boost_class;
-
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1229,9 +1227,6 @@ static int rcu_boost(struct rcu_node *rnp)
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&mtx, t);
-	/* Avoid lockdep false positives. This rt_mutex is its own thing. */
-	lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
-				   "rcu_boost_mutex");
 	t->rcu_boost_mutex = &mtx;
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index f9d8482dd487..a242e691c993 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -579,7 +579,6 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct rt_mutex_waiter *waiter)
 {
 	int ret = 0;
-	int was_disabled;
 
 	for (;;) {
 		/* Try to acquire the lock: */
@@ -602,17 +601,10 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 		raw_spin_unlock(&lock->wait_lock);
 
-		was_disabled = irqs_disabled();
-		if (was_disabled)
-			local_irq_enable();
-
 		debug_rt_mutex_print_deadlock(waiter);
 
 		schedule_rt_mutex(lock);
 
-		if (was_disabled)
-			local_irq_disable();
-
 		raw_spin_lock(&lock->wait_lock);
 		set_current_state(state);
 	}