Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 drivers/infiniband/hw/cxgb4/cm.c | 112 ++++++++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 30 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 768a0fb67dd6..c2fb71c182a8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+		 "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -474,7 +475,8 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 					   16)) | FW_WR_FLOWID(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
+	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+					    (ep->com.dev->rdev.lldi.pf));
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@ -821,6 +823,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
+		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		     ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1190,8 +1194,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 				sizeof(struct mpa_v2_conn_params);
 	} else {
 		/* this means MPA_v1 is used. Send max supported */
-		event.ord = c4iw_max_read_depth;
-		event.ird = c4iw_max_read_depth;
+		event.ord = cur_max_read_depth(ep->com.dev);
+		event.ird = cur_max_read_depth(ep->com.dev);
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
@@ -1255,6 +1259,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
@@ -1366,17 +1372,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 					MPA_V2_IRD_ORD_MASK;
 			resp_ord = ntohs(mpa_v2_params->ord) &
 					MPA_V2_IRD_ORD_MASK;
+			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
 			/*
 			 * This is a double-check. Ideally, below checks are
 			 * not required since ird/ord stuff has been taken
 			 * care of in c4iw_accept_cr
 			 */
-			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+			if (ep->ird < resp_ord) {
+				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+				    ep->com.dev->rdev.lldi.max_ordird_qp)
+					ep->ird = resp_ord;
+				else
+					insuff_ird = 1;
+			} else if (ep->ird > resp_ord) {
+				ep->ird = resp_ord;
+			}
+			if (ep->ord > resp_ird) {
+				if (RELAXED_IRD_NEGOTIATION)
+					ep->ord = resp_ird;
+				else
+					insuff_ird = 1;
+			}
+			if (insuff_ird) {
 				err = -ENOMEM;
 				ep->ird = resp_ord;
 				ep->ord = resp_ird;
-				insuff_ird = 1;
 			}
 
 			if (ntohs(mpa_v2_params->ird) &
@@ -1579,6 +1601,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 					 MPA_V2_IRD_ORD_MASK;
 			ep->ord = ntohs(mpa_v2_params->ord) &
 					 MPA_V2_IRD_ORD_MASK;
+			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -1798,6 +1822,20 @@ static int is_neg_adv(unsigned int status)
 	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
 }
 
+static char *neg_adv_str(unsigned int status)
+{
+	switch (status) {
+	case CPL_ERR_RTX_NEG_ADVICE:
+		return "Retransmit timeout";
+	case CPL_ERR_PERSIST_NEG_ADVICE:
+		return "Persist timeout";
+	case CPL_ERR_KEEPALV_NEG_ADVICE:
+		return "Keepalive timeout";
+	default:
+		return "Unknown";
+	}
+}
+
 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 {
 	ep->snd_win = snd_win;
@@ -1996,8 +2034,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	     status, status2errno(status));
 
 	if (is_neg_adv(status)) {
-		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
-			atid);
+		dev_warn(&dev->rdev.lldi.pdev->dev,
+			 "Connection problems for atid %u status %u (%s)\n",
+			 atid, status, neg_adv_str(status));
 		return 0;
 	}
 
@@ -2472,8 +2511,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_tid(t, tid);
 	if (is_neg_adv(req->status)) {
-		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-		     ep->hwtid);
+		dev_warn(&dev->rdev.lldi.pdev->dev,
+			 "Negative advice on abort - tid %u status %d (%s)\n",
+			 ep->hwtid, req->status, neg_adv_str(req->status));
 		return 0;
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -2731,8 +2771,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -2740,31 +2780,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
-			ep->ird = conn_param->ird;
-			ep->ord = conn_param->ord;
-			send_mpa_reject(ep, conn_param->private_data,
-					conn_param->private_data_len);
-			abort_connection(ep, NULL, GFP_KERNEL);
-			err = -ENOMEM;
-			goto err;
+			if (RELAXED_IRD_NEGOTIATION) {
+				ep->ord = ep->ird;
+			} else {
+				ep->ird = conn_param->ird;
+				ep->ord = conn_param->ord;
+				send_mpa_reject(ep, conn_param->private_data,
+						conn_param->private_data_len);
+				abort_connection(ep, NULL, GFP_KERNEL);
+				err = -ENOMEM;
+				goto err;
+			}
 		}
-		if (conn_param->ird > ep->ord) {
-			if (!ep->ord)
-				conn_param->ird = 1;
-			else {
+		if (conn_param->ird < ep->ord) {
+			if (RELAXED_IRD_NEGOTIATION &&
+			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
+				conn_param->ird = ep->ord;
+			} else {
 				abort_connection(ep, NULL, GFP_KERNEL);
 				err = -ENOMEM;
 				goto err;
 			}
 		}
-
 	}
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
 
-	if (ep->mpa_attr.version != 2)
+	if (ep->mpa_attr.version == 1) {
 		if (peer2peer && ep->ird == 0)
 			ep->ird = 1;
+	} else {
+		if (peer2peer &&
+		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+			ep->ird = 1;
+	}
 
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2803,6 +2853,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
+	abort_connection(ep, NULL, GFP_KERNEL);
 	cm_id->rem_ref(cm_id);
 err:
 	mutex_unlock(&ep->com.mutex);
@@ -2886,8 +2937,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	int iptype;
 	int iwpm_err = 0;
 
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(dev)) ||
+	    (conn_param->ird > cur_max_read_depth(dev))) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -3867,8 +3918,9 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 	if (is_neg_adv(req->status)) {
-		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-		     ep->hwtid);
+		dev_warn(&dev->rdev.lldi.pdev->dev,
+			 "Negative advice on abort - tid %u status %d (%s)\n",
+			 ep->hwtid, req->status, neg_adv_str(req->status));
 		kfree_skb(skb);
 		return 0;
 	}