author    Ralph Campbell <ralph.campbell@qlogic.com>  2007-10-19 18:04:10 -0400
committer Roland Dreier <rolandd@cisco.com>  2007-10-30 13:57:24 -0400
commit    fffbfeaa680e2b87a591e141f2aa7e9e91184956
tree      6a087d79165f626e6ad13dddf4dfb17ba128d1f4 /drivers/infiniband
parent    96db0e0335c7981911bd7efc5c79e82d2358c0fc
IB/ipath: Fix a race where s_last is updated without lock held
There is a small window where a send work queue entry could be overwritten
by ib_post_send() because s_last is updated before the entry is read. This
patch closes the window by acquiring the lock and updating the last send
work queue entry index after reading the wr_id.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
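The race boils down to a ring index (s_last) being retired before the entry it guards has been read, which lets the posting path reuse the slot. Below is a minimal user-space sketch of the corrected ordering, not driver code: a pthread mutex stands in for qp->s_lock, and the ring/ring_post/ring_complete names are made up for the illustration.

/*
 * Sketch of the pattern this patch enforces: read the work request ID
 * first, then advance s_last under the lock so the poster cannot reuse
 * the slot early.  All names here are hypothetical.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 4

struct ring {
        uint64_t wr_id[RING_SIZE];      /* per-slot work request IDs */
        uint32_t s_head;                /* next slot the poster will fill */
        uint32_t s_last;                /* oldest slot not yet completed */
        pthread_mutex_t lock;           /* stand-in for qp->s_lock */
};

/* Poster side (analogous to ib_post_send): only reuse slots past s_last. */
static int ring_post(struct ring *r, uint64_t wr_id)
{
        int ret = -1;

        pthread_mutex_lock(&r->lock);
        uint32_t next = r->s_head + 1 == RING_SIZE ? 0 : r->s_head + 1;
        if (next != r->s_last) {        /* ring not full */
                r->wr_id[r->s_head] = wr_id;
                r->s_head = next;
                ret = 0;
        }
        pthread_mutex_unlock(&r->lock);
        return ret;
}

/*
 * Completion side (analogous to ipath_send_complete); caller guarantees
 * the ring is non-empty.  Advancing s_last before the read (the old code)
 * would let ring_post() overwrite the slot while it is still in use.
 */
static void ring_complete(struct ring *r)
{
        uint64_t wr_id = r->wr_id[r->s_last];   /* read before retiring slot */
        printf("completed wr_id %llu\n", (unsigned long long)wr_id);

        pthread_mutex_lock(&r->lock);
        uint32_t last = r->s_last;
        if (++last >= RING_SIZE)
                last = 0;
        r->s_last = last;               /* slot may be reused from here on */
        pthread_mutex_unlock(&r->lock);
}

int main(void)
{
        struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

        ring_post(&r, 42);
        ring_complete(&r);
        return 0;
}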
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_ruc.c | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 4b6b7ee8e5c1..54c61a972de2 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -630,11 +630,8 @@ bail:;
 void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                          enum ib_wc_status status)
 {
-        u32 last = qp->s_last;
-
-        if (++last == qp->s_size)
-                last = 0;
-        qp->s_last = last;
+        unsigned long flags;
+        u32 last;
 
         /* See ch. 11.2.4.1 and 10.7.3.1 */
         if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
@@ -658,4 +655,11 @@ void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                 wc.port_num = 0;
                 ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
         }
+
+        spin_lock_irqsave(&qp->s_lock, flags);
+        last = qp->s_last;
+        if (++last >= qp->s_size)
+                last = 0;
+        qp->s_last = last;
+        spin_unlock_irqrestore(&qp->s_lock, flags);
 }