about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/cxgb4
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2014-03-21 11:10:37 -0400
committerRoland Dreier <roland@purestorage.com>2014-04-02 11:53:54 -0400
commitc529fb50463992982c246155e095577aa0485f57 (patch)
treea25ce0a83d90860a7266419baddff6f3da55a581 /drivers/infiniband/hw/cxgb4
parent977116c69862a6062f302395cb3546544d7e1bc1 (diff)
RDMA/cxgb4: rx_data() needs to hold the ep mutex
To avoid racing with other threads doing close/flush/whatever, rx_data() should hold the endpoint mutex.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8a645d872483..26046c23334c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1170,7 +1170,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1170 * the connection. 1170 * the connection.
1171 */ 1171 */
1172 stop_ep_timer(ep); 1172 stop_ep_timer(ep);
1173 if (state_read(&ep->com) != MPA_REQ_SENT) 1173 if (ep->com.state != MPA_REQ_SENT)
1174 return; 1174 return;
1175 1175
1176 /* 1176 /*
@@ -1245,7 +1245,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1245 * start reply message including private data. And 1245 * start reply message including private data. And
1246 * the MPA header is valid. 1246 * the MPA header is valid.
1247 */ 1247 */
1248 state_set(&ep->com, FPDU_MODE); 1248 __state_set(&ep->com, FPDU_MODE);
1249 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1249 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1250 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1250 ep->mpa_attr.recv_marker_enabled = markers_enabled;
1251 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1251 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -1360,7 +1360,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
1360 } 1360 }
1361 goto out; 1361 goto out;
1362err: 1362err:
1363 state_set(&ep->com, ABORTING); 1363 __state_set(&ep->com, ABORTING);
1364 send_abort(ep, skb, GFP_KERNEL); 1364 send_abort(ep, skb, GFP_KERNEL);
1365out: 1365out:
1366 connect_reply_upcall(ep, err); 1366 connect_reply_upcall(ep, err);
@@ -1375,7 +1375,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1375 1375
1376 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1376 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1377 1377
1378 if (state_read(&ep->com) != MPA_REQ_WAIT) 1378 if (ep->com.state != MPA_REQ_WAIT)
1379 return; 1379 return;
1380 1380
1381 /* 1381 /*
@@ -1496,7 +1496,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1496 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1496 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1497 ep->mpa_attr.p2p_type); 1497 ep->mpa_attr.p2p_type);
1498 1498
1499 state_set(&ep->com, MPA_REQ_RCVD); 1499 __state_set(&ep->com, MPA_REQ_RCVD);
1500 stop_ep_timer(ep); 1500 stop_ep_timer(ep);
1501 1501
1502 /* drive upcall */ 1502 /* drive upcall */
@@ -1526,11 +1526,12 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1526 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1526 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1527 skb_pull(skb, sizeof(*hdr)); 1527 skb_pull(skb, sizeof(*hdr));
1528 skb_trim(skb, dlen); 1528 skb_trim(skb, dlen);
1529 mutex_lock(&ep->com.mutex);
1529 1530
1530 /* update RX credits */ 1531 /* update RX credits */
1531 update_rx_credits(ep, dlen); 1532 update_rx_credits(ep, dlen);
1532 1533
1533 switch (state_read(&ep->com)) { 1534 switch (ep->com.state) {
1534 case MPA_REQ_SENT: 1535 case MPA_REQ_SENT:
1535 ep->rcv_seq += dlen; 1536 ep->rcv_seq += dlen;
1536 process_mpa_reply(ep, skb); 1537 process_mpa_reply(ep, skb);
@@ -1546,7 +1547,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1546 pr_err("%s Unexpected streaming data." \ 1547 pr_err("%s Unexpected streaming data." \
1547 " qpid %u ep %p state %d tid %u status %d\n", 1548 " qpid %u ep %p state %d tid %u status %d\n",
1548 __func__, ep->com.qp->wq.sq.qid, ep, 1549 __func__, ep->com.qp->wq.sq.qid, ep,
1549 state_read(&ep->com), ep->hwtid, status); 1550 ep->com.state, ep->hwtid, status);
1550 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1551 attrs.next_state = C4IW_QP_STATE_TERMINATE;
1551 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1552 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1552 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1553 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
@@ -1555,6 +1556,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1555 default: 1556 default:
1556 break; 1557 break;
1557 } 1558 }
1559 mutex_unlock(&ep->com.mutex);
1558 return 0; 1560 return 0;
1559} 1561}
1560 1562