author		Roland Dreier <rolandd@cisco.com>	2009-09-07 11:27:50 -0400
committer	Roland Dreier <rolandd@cisco.com>	2009-09-07 11:27:50 -0400
commit		6b2eef8fd78ff909c3396b8671d57c42559cc51d (patch)
tree		98557140c16bc825a82bfd414fedda46749dbbf7 /drivers/infiniband/core
parent		60f2b652f54aa4ac4127a538abad05235fb9c469 (diff)
IB/mad: Fix possible lock-lock-timer deadlock
Lockdep reported a possible deadlock with cm_id_priv->lock,
mad_agent_priv->lock and mad_agent_priv->timed_work.timer; this happens
because the mad module does cancel_delayed_work(&mad_agent_priv->timed_work)
while holding mad_agent_priv->lock.  cancel_delayed_work() internally does
del_timer_sync(&mad_agent_priv->timed_work.timer).

This can turn into a deadlock because mad_agent_priv->lock is taken inside
cm_id_priv->lock, so we can get the following set of contexts that deadlock
each other:

 A: holding cm_id_priv->lock, waiting for mad_agent_priv->lock
 B: holding mad_agent_priv->lock, waiting for del_timer_sync()
 C: interrupt during mad_agent_priv->timed_work.timer that takes
    cm_id_priv->lock

Fix this by using the new __cancel_delayed_work() interface (which
internally does del_timer() instead of del_timer_sync()) in all the
places where we are holding a lock.

Addresses: http://bugzilla.kernel.org/show_bug.cgi?id=13757
Reported-by: Bart Van Assche <bart.vanassche@gmail.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
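For illustration only, a minimal sketch of the pattern this patch targets:
cancelling the delayed work with the timer-synchronizing helper while a
spinlock is held, versus the non-waiting variant.  The struct and function
names below are hypothetical and not the actual ib_mad code; only the
spinlock/workqueue helpers are real interfaces of this kernel era.

#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct fake_agent {			/* hypothetical, for illustration */
	spinlock_t lock;
	struct delayed_work timed_work;
};

static void broken_cancel(struct fake_agent *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->lock, flags);
	/*
	 * cancel_delayed_work() does del_timer_sync(), i.e. it may wait
	 * on the timer while this context holds a->lock.  With the lock
	 * ordering described in the commit message (the agent lock nests
	 * inside cm_id_priv->lock, and an interrupt during the timer
	 * takes cm_id_priv->lock), contexts A, B and C can all end up
	 * waiting on each other.
	 */
	cancel_delayed_work(&a->timed_work);
	spin_unlock_irqrestore(&a->lock, flags);
}

static void fixed_cancel(struct fake_agent *a)
{
	unsigned long flags;

	spin_lock_irqsave(&a->lock, flags);
	/*
	 * __cancel_delayed_work() only does del_timer(), which never
	 * waits, so it is safe to call while holding the lock.
	 */
	__cancel_delayed_work(&a->timed_work);
	spin_unlock_irqrestore(&a->lock, flags);
}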
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--	drivers/infiniband/core/mad.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index de922a04ca2d..bc30c00c5d7a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1974,7 +1974,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 	unsigned long delay;
 
 	if (list_empty(&mad_agent_priv->wait_list)) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
 	} else {
 		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
 					 struct ib_mad_send_wr_private,
@@ -1983,7 +1983,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
 		if (time_after(mad_agent_priv->timeout,
 			       mad_send_wr->timeout)) {
 			mad_agent_priv->timeout = mad_send_wr->timeout;
-			cancel_delayed_work(&mad_agent_priv->timed_work);
+			__cancel_delayed_work(&mad_agent_priv->timed_work);
 			delay = mad_send_wr->timeout - jiffies;
 			if ((long)delay <= 0)
 				delay = 1;
@@ -2023,7 +2023,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
 
 	/* Reschedule a work item if we have a shorter timeout */
 	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
 		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
 				   &mad_agent_priv->timed_work, delay);
 	}
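For reference, the difference between the two interfaces boils down to which
timer-deletion primitive is used.  A simplified sketch, assuming only what the
commit message states (the real <linux/workqueue.h> helpers of this era also
clear the work's pending state when the timer was deleted):

#include <linux/timer.h>
#include <linux/workqueue.h>

/* Simplified sketch, not the verbatim workqueue.h implementation. */
static inline int sketch_cancel_delayed_work(struct delayed_work *dwork)
{
	return del_timer_sync(&dwork->timer);	/* may wait for the timer */
}

static inline int sketch___cancel_delayed_work(struct delayed_work *dwork)
{
	return del_timer(&dwork->timer);	/* never waits */
}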