author    Hal Rosenstock <halr@voltaire.com>    2005-07-27 14:45:26 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2005-07-27 19:26:10 -0400
commit    6a0c435ef9e2473934442282054d0f58235d1de2 (patch)
tree      71a5d054bde7447f10c611e359ecbcc36462a312 /drivers/infiniband
parent    d760ce8f71ec5336c4a750a1293f26c0eb938c8a (diff)
[PATCH] IB: Fix timeout/cancelled MAD handling
Fixes an issue processing a sent MAD after it has timed out or been
canceled.  The race occurs when a response MAD matches with the send
request.  The request could time out or be canceled after the response
MAD matches with the request, but before the request completion can be
processed.

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
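To make the race easier to follow, here is a minimal user-space sketch of the
locking pattern the patch relies on.  The names (send_req, recv_done,
timeout_sweep) are hypothetical and this is not the kernel code; the real
patch moves the request onto a new done_list under the agent lock via
ib_mark_req_done(), whereas the sketch models the same "claim the request
under the lock before completing it" idea with a simple flag.

/*
 * Illustrative sketch only: the receive path claims a matched request
 * under the shared lock before completing it, so a concurrent
 * timeout/cancel sweep cannot complete the same request twice.
 */
#include <pthread.h>
#include <stdio.h>

struct send_req {
	pthread_mutex_t *lock;	/* stands in for the agent-wide lock */
	int timeout;		/* 0 means "no longer waiting for a response" */
	int status;		/* 1 = completed by response, -1 = timed out */
};

/* Response matched: mark the request done while still holding the lock. */
static void recv_done(struct send_req *req)
{
	pthread_mutex_lock(req->lock);
	req->timeout = 0;	/* analogous to ib_mark_req_done() */
	pthread_mutex_unlock(req->lock);

	req->status = 1;	/* complete the send outside the lock */
}

/* Timeout/cancel sweep: skip requests already claimed by a response. */
static void timeout_sweep(struct send_req *req)
{
	pthread_mutex_lock(req->lock);
	if (req->timeout == 0) {
		pthread_mutex_unlock(req->lock);
		return;		/* the response won the race; nothing to do */
	}
	pthread_mutex_unlock(req->lock);

	req->status = -1;	/* report the send as timed out */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct send_req req = { .lock = &lock, .timeout = 1000, .status = 0 };

	recv_done(&req);	/* response arrives first */
	timeout_sweep(&req);	/* must not overwrite the completion */
	printf("status = %d\n", req.status);	/* prints 1 */
	return 0;
}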
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/core/mad.c         14
-rw-r--r--    drivers/infiniband/core/mad_priv.h     1
2 files changed, 13 insertions, 2 deletions
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index d1898b30c345..7af8f7f87849 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -341,6 +341,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	spin_lock_init(&mad_agent_priv->lock);
 	INIT_LIST_HEAD(&mad_agent_priv->send_list);
 	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
+	INIT_LIST_HEAD(&mad_agent_priv->done_list);
 	INIT_WORK(&mad_agent_priv->timed_work, timeout_sends, mad_agent_priv);
 	INIT_LIST_HEAD(&mad_agent_priv->local_list);
 	INIT_WORK(&mad_agent_priv->local_work, local_completions,
@@ -1559,6 +1560,16 @@ find_send_req(struct ib_mad_agent_private *mad_agent_priv,
 	return NULL;
 }
 
+static void ib_mark_req_done(struct ib_mad_send_wr_private *mad_send_wr)
+{
+	mad_send_wr->timeout = 0;
+	if (mad_send_wr->refcount == 1) {
+		list_del(&mad_send_wr->agent_list);
+		list_add_tail(&mad_send_wr->agent_list,
+			      &mad_send_wr->mad_agent_priv->done_list);
+	}
+}
+
 static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 				 struct ib_mad_recv_wc *mad_recv_wc)
 {
@@ -1580,8 +1591,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
 		wake_up(&mad_agent_priv->wait);
 		return;
 	}
-	/* Timeout = 0 means that we won't wait for a response */
-	mad_send_wr->timeout = 0;
+	ib_mark_req_done(mad_send_wr);
 	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
 
 	/* Defined behavior is to complete response before request */
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index 96f1b5b610c2..6fcab0009bb9 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -92,6 +92,7 @@ struct ib_mad_agent_private {
 	spinlock_t lock;
 	struct list_head send_list;
 	struct list_head wait_list;
+	struct list_head done_list;
 	struct work_struct timed_work;
 	unsigned long timeout;
 	struct list_head local_list;