author		Hariprasad Shenai <hariprasad@chelsio.com>	2014-07-14 12:04:52 -0400
committer	David S. Miller <davem@davemloft.net>	2014-07-15 19:25:16 -0400
commit		4c2c5763227a14ce111d6f35df708459d2443cc3 (patch)
tree		b261cf8eab4279d5a4162b7417e10162eeb2d755
parent		04e10e2164fcfa05e14eff3c2757a5097f11d258 (diff)
cxgb4/iw_cxgb4: use firmware ord/ird resource limits
Advertise a larger max read queue depth for qps, and gather the resource
limits from fw and use them to avoid exhausting all the resources.

Design:

cxgb4:

Obtain the max_ordird_qp and max_ird_adapter device params from FW at init
time and pass them up to the ULDs when they attach. If these parameters are
not available, due to older firmware, then hard-code the values based on the
known values for older firmware.

iw_cxgb4:

Fix c4iw_query_device() to report the correct values based on adapter
parameters. ibv_query_device() will always return:

max_qp_rd_atom = max_qp_init_rd_atom = min(module_max, max_ordird_qp)
max_res_rd_atom = max_ird_adapter

Bump up the per-qp max module option to 32, allowing it to be increased by
the user up to the device max of max_ordird_qp. 32 seems to be sufficient to
maximize throughput for streaming read benchmarks.

Fail connection setup if the negotiated IRD exhausts the available adapter
ird resources. The driver will track the amount of ird resource in use and
not send an RI_WR/INIT to FW that would reduce the available ird resources
below zero.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
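For illustration of the reporting rule: with the new module default of 32 and a
firmware that (hypothetically) reports max_ordird_qp = 16, ibv_query_device()
would return max_qp_rd_atom = max_qp_init_rd_atom = min(32, 16) = 16. The
adapter-wide IRD accounting can be sketched as below; this is a minimal
userspace model, not the driver code: the spinlock, the dev_warn, and the
firmware interaction are elided, and the pool size of 64 is invented for the
demo (the real pool is seeded from max_ird_adapter).

	#include <stdio.h>

	/* adapter-wide pool; the driver seeds this from max_ird_adapter */
	static unsigned int avail_ird = 64;	/* invented size for the demo */

	/* reserve a QP's IRD, refusing to let the pool go below zero */
	static int alloc_ird(unsigned int ird)
	{
		if (ird > avail_ird)
			return -1;	/* the driver returns -ENOMEM here */
		avail_ird -= ird;
		return 0;
	}

	/* give the reservation back when the QP is destroyed */
	static void free_ird(unsigned int ird)
	{
		avail_ird += ird;
	}

	int main(void)
	{
		printf("qp0 ird=32: %s\n", alloc_ird(32) ? "rejected" : "granted");
		printf("qp1 ird=48: %s\n", alloc_ird(48) ? "rejected" : "granted");
		free_ird(32);	/* qp0 torn down */
		printf("qp2 ird=48: %s\n", alloc_ird(48) ? "rejected" : "granted");
		return 0;
	}

In the patch itself, rdma_init() reserves the QP's max_ird via alloc_ird()
before posting the RI_WR/INIT to firmware, and c4iw_destroy_qp() returns it
via free_ird(), so the IRD in use across all QPs never exceeds max_ird_adapter.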
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c	80
-rw-r--r--	drivers/infiniband/hw/cxgb4/device.c	2
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h	9
-rw-r--r--	drivers/infiniband/hw/cxgb4/provider.c	6
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	54
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4.h	3
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c	18
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h	2
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h	2
9 files changed, 142 insertions, 34 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d62a0f9dd11a..df5bd3df08a2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+		 "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -813,6 +814,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
+		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		     ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1182,8 +1185,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 				sizeof(struct mpa_v2_conn_params);
 	} else {
 		/* this means MPA_v1 is used. Send max supported */
-		event.ord = c4iw_max_read_depth;
-		event.ird = c4iw_max_read_depth;
+		event.ord = cur_max_read_depth(ep->com.dev);
+		event.ird = cur_max_read_depth(ep->com.dev);
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
@@ -1247,6 +1250,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
@@ -1358,17 +1363,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			resp_ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+			     __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
 			/*
 			 * This is a double-check. Ideally, below checks are
 			 * not required since ird/ord stuff has been taken
 			 * care of in c4iw_accept_cr
 			 */
-			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+			if (ep->ird < resp_ord) {
+				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+				    ep->com.dev->rdev.lldi.max_ordird_qp)
+					ep->ird = resp_ord;
+				else
+					insuff_ird = 1;
+			} else if (ep->ird > resp_ord) {
+				ep->ird = resp_ord;
+			}
+			if (ep->ord > resp_ird) {
+				if (RELAXED_IRD_NEGOTIATION)
+					ep->ord = resp_ird;
+				else
+					insuff_ird = 1;
+			}
+			if (insuff_ird) {
 				err = -ENOMEM;
 				ep->ird = resp_ord;
 				ep->ord = resp_ird;
-				insuff_ird = 1;
 			}
 
 			if (ntohs(mpa_v2_params->ird) &
@@ -1571,6 +1592,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -2724,8 +2747,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -2733,31 +2756,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
-			ep->ird = conn_param->ird;
-			ep->ord = conn_param->ord;
-			send_mpa_reject(ep, conn_param->private_data,
-					conn_param->private_data_len);
-			abort_connection(ep, NULL, GFP_KERNEL);
-			err = -ENOMEM;
-			goto err;
+			if (RELAXED_IRD_NEGOTIATION) {
+				ep->ord = ep->ird;
+			} else {
+				ep->ird = conn_param->ird;
+				ep->ord = conn_param->ord;
+				send_mpa_reject(ep, conn_param->private_data,
+						conn_param->private_data_len);
+				abort_connection(ep, NULL, GFP_KERNEL);
+				err = -ENOMEM;
+				goto err;
+			}
 		}
-		if (conn_param->ird > ep->ord) {
-			if (!ep->ord)
-				conn_param->ird = 1;
-			else {
+		if (conn_param->ird < ep->ord) {
+			if (RELAXED_IRD_NEGOTIATION &&
+			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
+				conn_param->ird = ep->ord;
+			} else {
 				abort_connection(ep, NULL, GFP_KERNEL);
 				err = -ENOMEM;
 				goto err;
 			}
 		}
-
 	}
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
 
-	if (ep->mpa_attr.version != 2)
+	if (ep->mpa_attr.version == 1) {
 		if (peer2peer && ep->ird == 0)
 			ep->ird = 1;
+	} else {
+		if (peer2peer &&
+		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+			ep->ird = 1;
+	}
 
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2796,6 +2829,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
+	abort_connection(ep, NULL, GFP_KERNEL);
 	cm_id->rem_ref(cm_id);
 err:
 	mutex_unlock(&ep->com.mutex);
@@ -2879,8 +2913,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	int iptype;
 	int iwpm_err = 0;
 
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(dev)) ||
+	    (conn_param->ird > cur_max_read_depth(dev))) {
 		err = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 88291ef82941..e76358efcaa1 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -348,6 +348,7 @@ static int stats_show(struct seq_file *seq, void *v)
 		   dev->rdev.stats.act_ofld_conn_fails);
 	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.pas_ofld_conn_fails);
+	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
 	return 0;
 }
 
@@ -839,6 +840,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
+	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9b9754c69ea0..75541cb833c6 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -249,6 +249,7 @@ struct c4iw_dev {
 	struct idr atid_idr;
 	struct idr stid_idr;
 	struct list_head db_fc_list;
+	u32 avail_ird;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -330,6 +331,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp,
 	_remove_handle(rhp, idr, id, 0);
 }
 
+extern uint c4iw_max_read_depth;
+
+static inline int cur_max_read_depth(struct c4iw_dev *dev)
+{
+	return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
+}
+
 struct c4iw_pd {
 	struct ib_pd ibpd;
 	u32 pdid;
@@ -1003,7 +1011,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
-extern int c4iw_max_read_depth;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 1d41b92caaf5..67c4a6908021 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -322,8 +322,10 @@ static int c4iw_query_device(struct ib_device *ibdev,
 	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
 	props->max_sge = T4_MAX_RECV_SGE;
 	props->max_sge_rd = 1;
-	props->max_qp_rd_atom = c4iw_max_read_depth;
-	props->max_qp_init_rd_atom = c4iw_max_read_depth;
+	props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
+	props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
+				    c4iw_max_read_depth);
+	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->max_cq = T4_MAX_NUM_CQ;
 	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
 	props->max_mr = c4iw_num_stags(&dev->rdev);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 6f74e0e9a022..0de3cf64eb5e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD;
 module_param(max_fr_immd, int, 0644);
 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
 
+static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+{
+	int ret = 0;
+
+	spin_lock_irq(&dev->lock);
+	if (ird <= dev->avail_ird)
+		dev->avail_ird -= ird;
+	else
+		ret = -ENOMEM;
+	spin_unlock_irq(&dev->lock);
+
+	if (ret)
+		dev_warn(&dev->rdev.lldi.pdev->dev,
+			 "device IRD resources exhausted\n");
+
+	return ret;
+}
+
+static void free_ird(struct c4iw_dev *dev, int ird)
+{
+	spin_lock_irq(&dev->lock);
+	dev->avail_ird += ird;
+	spin_unlock_irq(&dev->lock);
+}
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
 	unsigned long flag;
@@ -1204,12 +1229,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	int ret;
 	struct sk_buff *skb;
 
-	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+	     qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = alloc_ird(rhp, qhp->attr.max_ird);
+	if (ret) {
+		qhp->attr.max_ird = 0;
+		kfree_skb(skb);
+		goto out;
+	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1260,10 +1293,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
-		goto out;
+		goto err1;
 
 	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
 				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+	if (!ret)
+		goto out;
+err1:
+	free_ird(rhp, qhp->attr.max_ird);
 out:
 	PDBG("%s ret %d\n", __func__, ret);
 	return ret;
@@ -1308,7 +1345,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		newattr.max_ord = attrs->max_ord;
 	}
 	if (mask & C4IW_QP_ATTR_MAX_IRD) {
-		if (attrs->max_ird > c4iw_max_read_depth) {
+		if (attrs->max_ird > cur_max_read_depth(rhp)) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1531,6 +1568,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	if (!list_empty(&qhp->db_fc_entry))
 		list_del_init(&qhp->db_fc_entry);
 	spin_unlock_irq(&rhp->lock);
+	free_ird(rhp, qhp->attr.max_ird);
 
 	ucontext = ib_qp->uobject ?
 		to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
@@ -1621,8 +1659,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.enable_rdma_read = 1;
 	qhp->attr.enable_rdma_write = 1;
 	qhp->attr.enable_bind = 1;
-	qhp->attr.max_ord = 1;
-	qhp->attr.max_ird = 1;
+	qhp->attr.max_ord = 0;
+	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
 	mutex_init(&qhp->mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index f338a7fcebf7..46156210df34 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -310,6 +310,9 @@ struct adapter_params {
 
 	unsigned int ofldq_wr_cred;
 	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */
+
+	unsigned int max_ordird_qp;	/* Max read depth per RDMA QP */
+	unsigned int max_ird_adapter;	/* Max read depth per adapter */
 };
 
 #include "t4fw_api.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a7ce996630ed..767cbbaa3d1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4113,6 +4113,8 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.sge_egrstatuspagesize = adap->sge.stat_len;
 	lli.sge_pktshift = adap->sge.pktshift;
 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lli.max_ordird_qp = adap->params.max_ordird_qp;
+	lli.max_ird_adapter = adap->params.max_ird_adapter;
 	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 
 	handle = ulds[uld].add(&lli);
@@ -5877,6 +5879,22 @@ static int adap_init0(struct adapter *adap)
 		adap->vres.cq.size = val[3] - val[2] + 1;
 		adap->vres.ocq.start = val[4];
 		adap->vres.ocq.size = val[5] - val[4] + 1;
+
+		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
+		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+		if (ret < 0) {
+			adap->params.max_ordird_qp = 8;
+			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
+			ret = 0;
+		} else {
+			adap->params.max_ordird_qp = val[0];
+			adap->params.max_ird_adapter = val[1];
+		}
+		dev_info(adap->pdev_dev,
+			 "max_ordird_qp %d max_ird_adapter %d\n",
+			 adap->params.max_ordird_qp,
+			 adap->params.max_ird_adapter);
 	}
 	if (caps_cmd.iscsicaps) {
 		params[0] = FW_PARAM_PFVF(ISCSI_START);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 962458f5d5b3..df1d9446768a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -258,6 +258,8 @@ struct cxgb4_lld_info {
 	unsigned int pf;		     /* Physical Function we're using */
 	bool enable_fw_ofld_conn;            /* Enable connection through fw */
 					     /* WR */
+	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
+	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
 	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 4a6ae4db7397..ff709e3b3e7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -934,6 +934,8 @@ enum fw_params_param_dev {
 	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
 	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
 	FW_PARAMS_PARAM_DEV_CF = 0x0D,
+	FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+	FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
 	FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
 };
 