about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/cxgb4
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2014-03-21 11:10:35 -0400
committerRoland Dreier <roland@purestorage.com>2014-04-02 11:52:45 -0400
commita7db89eb89cd6a444b16fdd602e818eed34d8222 (patch)
tree0800a65ce5fd09476ab2d6c30d0a0cc2d4972772 /drivers/infiniband/hw/cxgb4
parent9c88aa003d26e9f1e9ea6e08511768c2ef666654 (diff)
RDMA/cxgb4: Lock around accept/reject downcalls
There is a race between ULP threads doing an accept/reject, and the ingress processing thread handling close/abort for the same connection. The accept/reject path needs to hold the lock to serialize these paths.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
[ Fold in locking fix found by Dan Carpenter <dan.carpenter@oracle.com>. - Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c31
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a1bc41d04620..6836d114d75a 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -760,7 +760,7 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
760 ep->mpa_skb = skb; 760 ep->mpa_skb = skb;
761 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 761 c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
762 start_ep_timer(ep); 762 start_ep_timer(ep);
763 state_set(&ep->com, MPA_REQ_SENT); 763 __state_set(&ep->com, MPA_REQ_SENT);
764 ep->mpa_attr.initiator = 1; 764 ep->mpa_attr.initiator = 1;
765 ep->snd_seq += mpalen; 765 ep->snd_seq += mpalen;
766 return; 766 return;
@@ -926,7 +926,7 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
926 skb_get(skb); 926 skb_get(skb);
927 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 927 t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
928 ep->mpa_skb = skb; 928 ep->mpa_skb = skb;
929 state_set(&ep->com, MPA_REP_SENT); 929 __state_set(&ep->com, MPA_REP_SENT);
930 ep->snd_seq += mpalen; 930 ep->snd_seq += mpalen;
931 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 931 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
932} 932}
@@ -944,6 +944,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
944 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 944 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
945 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 945 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
946 946
947 mutex_lock(&ep->com.mutex);
947 dst_confirm(ep->dst); 948 dst_confirm(ep->dst);
948 949
949 /* setup the hwtid for this connection */ 950 /* setup the hwtid for this connection */
@@ -967,7 +968,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
967 send_mpa_req(ep, skb, 1); 968 send_mpa_req(ep, skb, 1);
968 else 969 else
969 send_mpa_req(ep, skb, mpa_rev); 970 send_mpa_req(ep, skb, mpa_rev);
970 971 mutex_unlock(&ep->com.mutex);
971 return 0; 972 return 0;
972} 973}
973 974
@@ -2511,22 +2512,28 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2511 2512
2512int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2513int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2513{ 2514{
2514 int err; 2515 int err = 0;
2516 int disconnect = 0;
2515 struct c4iw_ep *ep = to_ep(cm_id); 2517 struct c4iw_ep *ep = to_ep(cm_id);
2516 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2518 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2517 2519
2518 if (state_read(&ep->com) == DEAD) { 2520 mutex_lock(&ep->com.mutex);
2521 if (ep->com.state == DEAD) {
2522 mutex_unlock(&ep->com.mutex);
2519 c4iw_put_ep(&ep->com); 2523 c4iw_put_ep(&ep->com);
2520 return -ECONNRESET; 2524 return -ECONNRESET;
2521 } 2525 }
2522 set_bit(ULP_REJECT, &ep->com.history); 2526 set_bit(ULP_REJECT, &ep->com.history);
2523 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2527 BUG_ON(ep->com.state != MPA_REQ_RCVD);
2524 if (mpa_rev == 0) 2528 if (mpa_rev == 0)
2525 abort_connection(ep, NULL, GFP_KERNEL); 2529 abort_connection(ep, NULL, GFP_KERNEL);
2526 else { 2530 else {
2527 err = send_mpa_reject(ep, pdata, pdata_len); 2531 err = send_mpa_reject(ep, pdata, pdata_len);
2528 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2532 disconnect = 1;
2529 } 2533 }
2534 mutex_unlock(&ep->com.mutex);
2535 if (disconnect)
2536 err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2530 c4iw_put_ep(&ep->com); 2537 c4iw_put_ep(&ep->com);
2531 return 0; 2538 return 0;
2532} 2539}
@@ -2541,12 +2548,14 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2541 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2548 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2542 2549
2543 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2550 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2544 if (state_read(&ep->com) == DEAD) { 2551
2552 mutex_lock(&ep->com.mutex);
2553 if (ep->com.state == DEAD) {
2545 err = -ECONNRESET; 2554 err = -ECONNRESET;
2546 goto err; 2555 goto err;
2547 } 2556 }
2548 2557
2549 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 2558 BUG_ON(ep->com.state != MPA_REQ_RCVD);
2550 BUG_ON(!qp); 2559 BUG_ON(!qp);
2551 2560
2552 set_bit(ULP_ACCEPT, &ep->com.history); 2561 set_bit(ULP_ACCEPT, &ep->com.history);
@@ -2615,14 +2624,16 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2615 if (err) 2624 if (err)
2616 goto err1; 2625 goto err1;
2617 2626
2618 state_set(&ep->com, FPDU_MODE); 2627 __state_set(&ep->com, FPDU_MODE);
2619 established_upcall(ep); 2628 established_upcall(ep);
2629 mutex_unlock(&ep->com.mutex);
2620 c4iw_put_ep(&ep->com); 2630 c4iw_put_ep(&ep->com);
2621 return 0; 2631 return 0;
2622err1: 2632err1:
2623 ep->com.cm_id = NULL; 2633 ep->com.cm_id = NULL;
2624 cm_id->rem_ref(cm_id); 2634 cm_id->rem_ref(cm_id);
2625err: 2635err:
2636 mutex_unlock(&ep->com.mutex);
2626 c4iw_put_ep(&ep->com); 2637 c4iw_put_ep(&ep->com);
2627 return err; 2638 return err;
2628} 2639}