author     David S. Miller <davem@davemloft.net>  2014-07-15 19:25:23 -0400
committer  David S. Miller <davem@davemloft.net>  2014-07-15 19:25:23 -0400
commit     7cf66041eda5841a5a0b74a10275c8ef4fea567a (patch)
tree       91a71211347fda589fb5670d5eeb790aeb53fc10
parent     5ee2c941b5969eb1b5592f9731b3ee76a784641f (diff)
parent     7730b4c7e32c0ab4d7db746a9c3a84cf715161fa (diff)
Merge branch 'cxgb4-next'
Hariprasad Shenai says:

====================
Misc. fixes for iw_cxgb4

This patch series adds support for determining the ingress padding boundary at
runtime, advertises a larger max read queue depth for QPs, gathers the resource
limits from firmware and uses them to avoid exhausting those resources,
displays the TPTE on errors, and adds support for the work request logging
feature.

The patch series is created against the 'net-next' tree and includes patches
for the cxgb4 and iw_cxgb4 drivers.

Since this patch series contains changes which depend on commit fc5ab02
("cxgb4: Replaced the backdoor mechanism to access the HW memory with PCIe
Window method"), we would like to request that it be merged via David Miller's
'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know in case of any review comments.

V2: Optimized the alloc_ird function, plus several other changes to debug
prints, based on review comments from Yann Droneaud.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/infiniband/hw/cxgb4/cm.c                      80
-rw-r--r--   drivers/infiniband/hw/cxgb4/cq.c                       8
-rw-r--r--   drivers/infiniband/hw/cxgb4/device.c                 188
-rw-r--r--   drivers/infiniband/hw/cxgb4/ev.c                      55
-rw-r--r--   drivers/infiniband/hw/cxgb4/iw_cxgb4.h                38
-rw-r--r--   drivers/infiniband/hw/cxgb4/provider.c                10
-rw-r--r--   drivers/infiniband/hw/cxgb4/qp.c                      76
-rw-r--r--   drivers/infiniband/hw/cxgb4/t4.h                      16
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/cxgb4.h             3
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c      100
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h         7
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/t4_regs.h           6
-rw-r--r--   drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h          2
13 files changed, 528 insertions, 61 deletions
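As a reading aid for the diff below, here is a minimal, self-contained user-space sketch (not the kernel code, and not part of the patch) of two of the ideas the series describes: capping a connection's ORD/IRD at the limit the firmware reports, and charging each QP's IRD against a per-adapter budget so the resource cannot be exhausted. All names (adapter_limits, etc.) are illustrative.

/*
 * Illustrative sketch only: firmware-capped read depth plus a per-adapter
 * IRD budget, mirroring the shape of the kernel changes below.
 */
#include <stdio.h>

struct adapter_limits {
	unsigned int max_ordird_qp;	/* per-QP limit reported by firmware */
	unsigned int avail_ird;		/* remaining per-adapter IRD budget */
};

/* Cap the effective per-connection read depth at the firmware limit. */
static unsigned int cur_max_read_depth(const struct adapter_limits *lim,
				       unsigned int module_max)
{
	return lim->max_ordird_qp < module_max ? lim->max_ordird_qp : module_max;
}

/* Reserve IRD for one QP; fail instead of over-committing the adapter. */
static int alloc_ird(struct adapter_limits *lim, unsigned int ird)
{
	if (ird > lim->avail_ird)
		return -1;		/* would exhaust the adapter budget */
	lim->avail_ird -= ird;
	return 0;
}

static void free_ird(struct adapter_limits *lim, unsigned int ird)
{
	lim->avail_ird += ird;
}

int main(void)
{
	struct adapter_limits lim = { .max_ordird_qp = 64, .avail_ird = 128 };
	unsigned int requested = 256;	/* application asks for too much */
	unsigned int granted = cur_max_read_depth(&lim, 32);

	if (requested > granted)
		requested = granted;	/* shrink rather than reject outright */

	if (alloc_ird(&lim, requested) == 0)
		printf("granted ird %u, %u left on adapter\n",
		       requested, lim.avail_ird);

	free_ird(&lim, requested);
	return 0;
}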
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index d62a0f9dd11a..df5bd3df08a2 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+		 "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -813,6 +814,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
 	if (mpa_rev_to_use == 2) {
 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
 					       sizeof (struct mpa_v2_conn_params));
+		PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+		     ep->ord);
 		mpa_v2_params.ird = htons((u16)ep->ird);
 		mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1182,8 +1185,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
 				       sizeof(struct mpa_v2_conn_params);
 	} else {
 		/* this means MPA_v1 is used. Send max supported */
-		event.ord = c4iw_max_read_depth;
-		event.ird = c4iw_max_read_depth;
+		event.ord = cur_max_read_depth(ep->com.dev);
+		event.ird = cur_max_read_depth(ep->com.dev);
 		event.private_data_len = ep->plen;
 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 	}
@@ -1247,6 +1250,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
@@ -1358,17 +1363,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 					MPA_V2_IRD_ORD_MASK;
 				resp_ord = ntohs(mpa_v2_params->ord) &
 					MPA_V2_IRD_ORD_MASK;
+				PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+				     __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
 				/*
 				 * This is a double-check. Ideally, below checks are
 				 * not required since ird/ord stuff has been taken
 				 * care of in c4iw_accept_cr
 				 */
-				if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+				if (ep->ird < resp_ord) {
+					if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+					    ep->com.dev->rdev.lldi.max_ordird_qp)
+						ep->ird = resp_ord;
+					else
+						insuff_ird = 1;
+				} else if (ep->ird > resp_ord) {
+					ep->ird = resp_ord;
+				}
+				if (ep->ord > resp_ird) {
+					if (RELAXED_IRD_NEGOTIATION)
+						ep->ord = resp_ird;
+					else
+						insuff_ird = 1;
+				}
+				if (insuff_ird) {
 					err = -ENOMEM;
 					ep->ird = resp_ord;
 					ep->ord = resp_ird;
-					insuff_ird = 1;
 				}
 
 				if (ntohs(mpa_v2_params->ird) &
@@ -1571,6 +1592,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				MPA_V2_IRD_ORD_MASK;
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
 				if (peer2peer) {
 					if (ntohs(mpa_v2_params->ord) &
@@ -2724,8 +2747,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(!qp);
 
 	set_bit(ULP_ACCEPT, &ep->com.history);
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -2733,31 +2756,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
-			ep->ird = conn_param->ird;
-			ep->ord = conn_param->ord;
-			send_mpa_reject(ep, conn_param->private_data,
-					conn_param->private_data_len);
-			abort_connection(ep, NULL, GFP_KERNEL);
-			err = -ENOMEM;
-			goto err;
+			if (RELAXED_IRD_NEGOTIATION) {
+				ep->ord = ep->ird;
+			} else {
+				ep->ird = conn_param->ird;
+				ep->ord = conn_param->ord;
+				send_mpa_reject(ep, conn_param->private_data,
+						conn_param->private_data_len);
+				abort_connection(ep, NULL, GFP_KERNEL);
+				err = -ENOMEM;
+				goto err;
+			}
 		}
-		if (conn_param->ird > ep->ord) {
-			if (!ep->ord)
-				conn_param->ird = 1;
-			else {
+		if (conn_param->ird < ep->ord) {
+			if (RELAXED_IRD_NEGOTIATION &&
+			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
+				conn_param->ird = ep->ord;
+			} else {
 				abort_connection(ep, NULL, GFP_KERNEL);
 				err = -ENOMEM;
 				goto err;
 			}
 		}
-
 	}
 	ep->ird = conn_param->ird;
 	ep->ord = conn_param->ord;
 
-	if (ep->mpa_attr.version != 2)
+	if (ep->mpa_attr.version == 1) {
 		if (peer2peer && ep->ird == 0)
 			ep->ird = 1;
+	} else {
+		if (peer2peer &&
+		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+			ep->ird = 1;
+	}
 
 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2796,6 +2829,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
+	abort_connection(ep, NULL, GFP_KERNEL);
 	cm_id->rem_ref(cm_id);
 err:
 	mutex_unlock(&ep->com.mutex);
@@ -2879,8 +2913,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	int iptype;
 	int iwpm_err = 0;
 
-	if ((conn_param->ord > c4iw_max_read_depth) ||
-	    (conn_param->ird > c4iw_max_read_depth)) {
+	if ((conn_param->ord > cur_max_read_depth(dev)) ||
+	    (conn_param->ird > cur_max_read_depth(dev))) {
 		err = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c04292c950f1..de9bcf2e6d30 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -633,11 +633,15 @@ proc_cqe:
 		wq->sq.cidx = (uint16_t)idx;
 		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+		if (c4iw_wr_log)
+			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_sq_consume(wq);
 	} else {
 		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
 		BUG_ON(t4_rq_empty(wq));
+		if (c4iw_wr_log)
+			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_rq_consume(wq);
 		goto skip_cqe;
 	}
@@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	/*
 	 * Make actual HW queue 2x to avoid cdix_inc overflows.
 	 */
-	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
 	/*
 	 * Make HW queue at least 64 entries so GTS updates aren't too
@@ -912,7 +916,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
 		hwentries = memsize / sizeof *chp->cq.queue;
-		while (hwentries > T4_MAX_IQ_SIZE) {
+		while (hwentries > rhp->rdev.hw_queue.t4_max_iq_size) {
 			memsize -= PAGE_SIZE;
 			hwentries = memsize / sizeof *chp->cq.queue;
 		}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aadc996e..df1f1b52c7ec 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -55,6 +55,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644);
 MODULE_PARM_DESC(allow_db_coalescing_on_t5,
 		 "Allow DB Coalescing on T5 (default = 0)");
 
+int c4iw_wr_log = 0;
+module_param(c4iw_wr_log, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
+
+int c4iw_wr_log_size_order = 12;
+module_param(c4iw_wr_log_size_order, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log_size_order,
+		 "Number of entries (log2) in the work request timing log.");
+
 struct uld_ctx {
 	struct list_head entry;
 	struct cxgb4_lld_info lldi;
@@ -103,6 +112,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
 	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
 }
 
+void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+{
+	struct wr_log_entry le;
+	int idx;
+
+	if (!wq->rdev->wr_log)
+		return;
+
+	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
+		(wq->rdev->wr_log_size - 1);
+	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+	getnstimeofday(&le.poll_host_ts);
+	le.valid = 1;
+	le.cqe_sge_ts = CQE_TS(cqe);
+	if (SQ_TYPE(cqe)) {
+		le.qid = wq->sq.qid;
+		le.opcode = CQE_OPCODE(cqe);
+		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
+		le.wr_id = CQE_WRID_SQ_IDX(cqe);
+	} else {
+		le.qid = wq->rq.qid;
+		le.opcode = FW_RI_RECEIVE;
+		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
+		le.wr_id = CQE_WRID_MSN(cqe);
+	}
+	wq->rdev->wr_log[idx] = le;
+}
+
+static int wr_log_show(struct seq_file *seq, void *v)
+{
+	struct c4iw_dev *dev = seq->private;
+	struct timespec prev_ts = {0, 0};
+	struct wr_log_entry *lep;
+	int prev_ts_set = 0;
+	int idx, end;
+
+#define ts2ns(ts) ((ts) * dev->rdev.lldi.cclk_ps / 1000)
+
+	idx = atomic_read(&dev->rdev.wr_log_idx) &
+		(dev->rdev.wr_log_size - 1);
+	end = idx - 1;
+	if (end < 0)
+		end = dev->rdev.wr_log_size - 1;
+	lep = &dev->rdev.wr_log[idx];
+	while (idx != end) {
+		if (lep->valid) {
+			if (!prev_ts_set) {
+				prev_ts_set = 1;
+				prev_ts = lep->poll_host_ts;
+			}
+			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
+				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
+				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
+				   "cqe_poll_delta_ns %llu\n",
+				   idx,
+				   timespec_sub(lep->poll_host_ts,
+						prev_ts).tv_sec,
+				   timespec_sub(lep->poll_host_ts,
+						prev_ts).tv_nsec,
+				   lep->qid, lep->opcode,
+				   lep->opcode == FW_RI_RECEIVE ?
+							"msn" : "wrid",
+				   lep->wr_id,
+				   timespec_sub(lep->poll_host_ts,
+						lep->post_host_ts).tv_sec,
+				   timespec_sub(lep->poll_host_ts,
+						lep->post_host_ts).tv_nsec,
+				   lep->post_sge_ts, lep->cqe_sge_ts,
+				   lep->poll_sge_ts,
+				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
+				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
+			prev_ts = lep->poll_host_ts;
+		}
+		idx++;
+		if (idx > (dev->rdev.wr_log_size - 1))
+			idx = 0;
+		lep = &dev->rdev.wr_log[idx];
+	}
+#undef ts2ns
+	return 0;
+}
+
+static int wr_log_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, wr_log_show, inode->i_private);
+}
+
+static ssize_t wr_log_clear(struct file *file, const char __user *buf,
+			    size_t count, loff_t *pos)
+{
+	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+	int i;
+
+	if (dev->rdev.wr_log)
+		for (i = 0; i < dev->rdev.wr_log_size; i++)
+			dev->rdev.wr_log[i].valid = 0;
+	return count;
+}
+
+static const struct file_operations wr_log_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = wr_log_open,
+	.release = single_release,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.write   = wr_log_clear,
+};
+
 static int dump_qp(int id, void *p, void *data)
 {
 	struct c4iw_qp *qp = p;
@@ -241,12 +361,32 @@ static int dump_stag(int id, void *p, void *data)
 	struct c4iw_debugfs_data *stagd = data;
 	int space;
 	int cc;
+	struct fw_ri_tpte tpte;
+	int ret;
 
 	space = stagd->bufsize - stagd->pos - 1;
 	if (space == 0)
 		return 1;
 
-	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
+			      (__be32 *)&tpte);
+	if (ret) {
+		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
+			"%s cxgb4_read_tpte err %d\n", __func__, ret);
+		return ret;
+	}
+	cc = snprintf(stagd->buf + stagd->pos, space,
+		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
+		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+		      (u32)id<<8,
+		      G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+		      G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+		      G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+		      G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+		      G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+		      G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
 	if (cc < space)
 		stagd->pos += cc;
 	return 0;
@@ -259,7 +399,7 @@ static int stag_release(struct inode *inode, struct file *file)
 		printk(KERN_INFO "%s null stagd?\n", __func__);
 		return 0;
 	}
-	kfree(stagd->buf);
+	vfree(stagd->buf);
 	kfree(stagd);
 	return 0;
 }
@@ -282,8 +422,8 @@ static int stag_open(struct inode *inode, struct file *file)
 	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
 	spin_unlock_irq(&stagd->devp->lock);
 
-	stagd->bufsize = count * sizeof("0x12345678\n");
-	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+	stagd->bufsize = count * 256;
+	stagd->buf = vmalloc(stagd->bufsize);
 	if (!stagd->buf) {
 		ret = -ENOMEM;
 		goto err1;
@@ -348,6 +488,7 @@ static int stats_show(struct seq_file *seq, void *v)
 		   dev->rdev.stats.act_ofld_conn_fails);
 	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
 		   dev->rdev.stats.pas_ofld_conn_fails);
+	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
 	return 0;
 }
 
@@ -583,6 +724,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
 	if (de && de->d_inode)
 		de->d_inode->i_size = 4096;
 
+	if (c4iw_wr_log) {
+		de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
+					 (void *)devp, &wr_log_debugfs_fops);
+		if (de && de->d_inode)
+			de->d_inode->i_size = 4096;
+	}
 	return 0;
 }
 
@@ -696,6 +843,16 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 		pr_err(MOD "error allocating status page\n");
 		goto err4;
 	}
+	if (c4iw_wr_log) {
+		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
+				       sizeof(*rdev->wr_log), GFP_KERNEL);
+		if (rdev->wr_log) {
+			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+			atomic_set(&rdev->wr_log_idx, 0);
+		} else {
+			pr_err(MOD "error allocating wr_log. Logging disabled\n");
+		}
+	}
 	return 0;
 err4:
 	c4iw_rqtpool_destroy(rdev);
@@ -709,6 +866,7 @@ err1:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+	kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
@@ -768,6 +926,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	}
 	devp->rdev.lldi = *infop;
 
+	/* init various hw-queue params based on lld info */
+	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+	     __func__, devp->rdev.lldi.sge_ingpadboundary,
+	     devp->rdev.lldi.sge_egrstatuspagesize);
+
+	devp->rdev.hw_queue.t4_eq_status_entries =
+		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+	devp->rdev.hw_queue.t4_max_eq_size =
+		65520 - devp->rdev.hw_queue.t4_eq_status_entries;
+	devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
+	devp->rdev.hw_queue.t4_max_rq_size =
+		8192 - devp->rdev.hw_queue.t4_eq_status_entries;
+	devp->rdev.hw_queue.t4_max_sq_size =
+		devp->rdev.hw_queue.t4_max_eq_size - 1;
+	devp->rdev.hw_queue.t4_max_qp_depth =
+		devp->rdev.hw_queue.t4_max_rq_size - 1;
+	devp->rdev.hw_queue.t4_max_cq_depth =
+		devp->rdev.hw_queue.t4_max_iq_size - 1;
+	devp->rdev.hw_queue.t4_stat_len =
+		devp->rdev.lldi.sge_egrstatuspagesize;
+
 	/*
 	 * For T5 devices, we map all of BAR2 with WC.
 	 * For T4 devices with onchip qp mem, we map only that part
@@ -818,6 +997,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	mutex_init(&devp->rdev.stats.lock);
 	mutex_init(&devp->db_mutex);
 	INIT_LIST_HEAD(&devp->db_fc_list);
+	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index d61d0a18f784..fbe6051af254 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -35,6 +35,55 @@
 
 #include "iw_cxgb4.h"
 
+static void print_tpte(struct c4iw_dev *dev, u32 stag)
+{
+	int ret;
+	struct fw_ri_tpte tpte;
+
+	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
+			      (__be32 *)&tpte);
+	if (ret) {
+		dev_err(&dev->rdev.lldi.pdev->dev,
+			"%s cxgb4_read_tpte err %d\n", __func__, ret);
+		return;
+	}
+	PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
+	     "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+	     stag & 0xffffff00,
+	     G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+	     G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+	     G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+	     G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+	     G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+	     G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+	     ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+	     ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+}
+
+static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+	__be64 *p = (void *)err_cqe;
+
+	dev_err(&dev->rdev.lldi.pdev->dev,
+		"AE qpid %d opcode %d status 0x%x "
+		"type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
+		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
+		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+	PDBG("%016llx %016llx %016llx %016llx\n",
+	     be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+	     be64_to_cpu(p[3]));
+
+	/*
+	 * Ingress WRITE and READ_RESP errors provide
+	 * the offending stag, so parse and log it.
+	 */
+	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
+				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
+		print_tpte(dev, CQE_WRID_STAG(err_cqe));
+}
+
 static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 			  struct c4iw_qp *qhp,
 			  struct t4_cqe *err_cqe,
@@ -44,11 +93,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	struct c4iw_qp_attributes attrs;
 	unsigned long flag;
 
-	printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
-	       "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
-	       CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
-	       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
-	       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+	dump_err_cqe(dev, err_cqe);
 
 	if (qhp->attr.state == C4IW_QP_STATE_RTS) {
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d1e175..69f047cdba6a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -139,6 +139,29 @@ struct c4iw_stats {
 	u64 pas_ofld_conn_fails;
 };
 
+struct c4iw_hw_queue {
+	int t4_eq_status_entries;
+	int t4_max_eq_size;
+	int t4_max_iq_size;
+	int t4_max_rq_size;
+	int t4_max_sq_size;
+	int t4_max_qp_depth;
+	int t4_max_cq_depth;
+	int t4_stat_len;
+};
+
+struct wr_log_entry {
+	struct timespec post_host_ts;
+	struct timespec poll_host_ts;
+	u64 post_sge_ts;
+	u64 cqe_sge_ts;
+	u64 poll_sge_ts;
+	u16 qid;
+	u16 wr_id;
+	u8 opcode;
+	u8 valid;
+};
+
 struct c4iw_rdev {
 	struct c4iw_resource resource;
 	unsigned long qpshift;
@@ -156,7 +179,11 @@ struct c4iw_rdev {
 	unsigned long oc_mw_pa;
 	void __iomem *oc_mw_kva;
 	struct c4iw_stats stats;
+	struct c4iw_hw_queue hw_queue;
 	struct t4_dev_status_page *status_page;
+	atomic_t wr_log_idx;
+	struct wr_log_entry *wr_log;
+	int wr_log_size;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -237,6 +264,7 @@ struct c4iw_dev {
 	struct idr atid_idr;
 	struct idr stid_idr;
 	struct list_head db_fc_list;
+	u32 avail_ird;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -318,6 +346,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp,
 	_remove_handle(rhp, idr, id, 0);
 }
 
+extern uint c4iw_max_read_depth;
+
+static inline int cur_max_read_depth(struct c4iw_dev *dev)
+{
+	return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
+}
+
 struct c4iw_pd {
 	struct ib_pd ibpd;
 	u32 pdid;
@@ -991,7 +1026,8 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
-extern int c4iw_max_read_depth;
+extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
+extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index b1d305338de6..67c4a6908021 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -319,13 +319,15 @@ static int c4iw_query_device(struct ib_device *ibdev,
 	props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
 	props->max_mr_size = T4_MAX_MR_SIZE;
 	props->max_qp = T4_MAX_NUM_QP;
-	props->max_qp_wr = T4_MAX_QP_DEPTH;
+	props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
 	props->max_sge = T4_MAX_RECV_SGE;
 	props->max_sge_rd = 1;
-	props->max_qp_rd_atom = c4iw_max_read_depth;
-	props->max_qp_init_rd_atom = c4iw_max_read_depth;
+	props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
+	props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
+				    c4iw_max_read_depth);
+	props->max_qp_init_rd_atom = props->max_qp_rd_atom;
 	props->max_cq = T4_MAX_NUM_CQ;
-	props->max_cqe = T4_MAX_CQ_DEPTH;
+	props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
 	props->max_mr = c4iw_num_stags(&dev->rdev);
 	props->max_pd = T4_MAX_NUM_PD;
 	props->local_ca_ack_delay = 0;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 086f62f5dc9e..fd66bd9a9db0 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD;
 module_param(max_fr_immd, int, 0644);
 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
 
+static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+{
+	int ret = 0;
+
+	spin_lock_irq(&dev->lock);
+	if (ird <= dev->avail_ird)
+		dev->avail_ird -= ird;
+	else
+		ret = -ENOMEM;
+	spin_unlock_irq(&dev->lock);
+
+	if (ret)
+		dev_warn(&dev->rdev.lldi.pdev->dev,
+			 "device IRD resources exhausted\n");
+
+	return ret;
+}
+
+static void free_ird(struct c4iw_dev *dev, int ird)
+{
+	spin_lock_irq(&dev->lock);
+	dev->avail_ird += ird;
+	spin_unlock_irq(&dev->lock);
+}
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
 	unsigned long flag;
@@ -258,7 +283,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	/*
 	 * eqsize is the number of 64B entries plus the status page size.
 	 */
-	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
+		rdev->hw_queue.t4_eq_status_entries;
 
 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
@@ -283,7 +309,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	/*
 	 * eqsize is the number of 64B entries plus the status page size.
 	 */
-	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+		rdev->hw_queue.t4_eq_status_entries;
 	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
 		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
@@ -796,6 +823,11 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			       qhp->sq_sig_all;
 		swsqe->flushed = 0;
 		swsqe->wr_id = wr->wr_id;
+		if (c4iw_wr_log) {
+			swsqe->sge_ts = cxgb4_read_sge_timestamp(
+					qhp->rhp->rdev.lldi.ports[0]);
+			getnstimeofday(&swsqe->host_ts);
+		}
 
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 
@@ -859,6 +891,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		}
 
 		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+		if (c4iw_wr_log) {
+			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
+				cxgb4_read_sge_timestamp(
+						qhp->rhp->rdev.lldi.ports[0]);
+			getnstimeofday(
+				&qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+		}
 
 		wqe->recv.opcode = FW_RI_RECV_WR;
 		wqe->recv.r1 = 0;
@@ -1202,12 +1241,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	int ret;
 	struct sk_buff *skb;
 
-	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+	     qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = alloc_ird(rhp, qhp->attr.max_ird);
+	if (ret) {
+		qhp->attr.max_ird = 0;
+		kfree_skb(skb);
+		goto out;
+	}
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1258,10 +1305,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
 	ret = c4iw_ofld_send(&rhp->rdev, skb);
 	if (ret)
-		goto out;
+		goto err1;
 
 	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
 				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+	if (!ret)
+		goto out;
+err1:
+	free_ird(rhp, qhp->attr.max_ird);
 out:
 	PDBG("%s ret %d\n", __func__, ret);
 	return ret;
@@ -1306,7 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			newattr.max_ord = attrs->max_ord;
 		}
 		if (mask & C4IW_QP_ATTR_MAX_IRD) {
-			if (attrs->max_ird > c4iw_max_read_depth) {
+			if (attrs->max_ird > cur_max_read_depth(rhp)) {
 				ret = -EINVAL;
 				goto out;
 			}
@@ -1529,6 +1580,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	if (!list_empty(&qhp->db_fc_entry))
 		list_del_init(&qhp->db_fc_entry);
 	spin_unlock_irq(&rhp->lock);
+	free_ird(rhp, qhp->attr.max_ird);
 
 	ucontext = ib_qp->uobject ?
 		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
@@ -1570,11 +1622,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 		return ERR_PTR(-EINVAL);
 
 	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
-	if (rqsize > T4_MAX_RQ_SIZE)
+	if (rqsize > rhp->rdev.hw_queue.t4_max_rq_size)
 		return ERR_PTR(-E2BIG);
 
 	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
-	if (sqsize > T4_MAX_SQ_SIZE)
+	if (sqsize > rhp->rdev.hw_queue.t4_max_sq_size)
 		return ERR_PTR(-E2BIG);
 
 	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
@@ -1619,8 +1671,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->attr.enable_rdma_read = 1;
 	qhp->attr.enable_rdma_write = 1;
 	qhp->attr.enable_bind = 1;
-	qhp->attr.max_ord = 1;
-	qhp->attr.max_ird = 1;
+	qhp->attr.max_ord = 0;
+	qhp->attr.max_ird = 0;
 	qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	spin_lock_init(&qhp->lock);
 	mutex_init(&qhp->mutex);
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 68b0a6bf4eb0..c9f7034e6647 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -39,19 +39,11 @@
 #define T4_MAX_NUM_QP 65536
 #define T4_MAX_NUM_CQ 65536
 #define T4_MAX_NUM_PD 65536
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
 #define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define A_PCIE_MA_SYNC 0x30b4
 
 struct t4_status_page {
@@ -244,8 +236,8 @@ struct t4_cqe {
 #define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx)
 
 /* generic accessor macros */
-#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
 #define S_CQE_GENBIT 63
@@ -277,6 +269,8 @@ struct t4_swsqe {
 	int signaled;
 	u16 idx;
 	int flushed;
+	struct timespec host_ts;
+	u64 sge_ts;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -314,6 +308,8 @@ struct t4_sq {
 
 struct t4_swrqe {
 	u64 wr_id;
+	struct timespec host_ts;
+	u64 sge_ts;
 };
 
 struct t4_rq {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index f338a7fcebf7..46156210df34 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -310,6 +310,9 @@ struct adapter_params {
 
 	unsigned int ofldq_wr_cred;
 	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */
+
+	unsigned int max_ordird_qp;	/* Max read depth per RDMA QP */
+	unsigned int max_ird_adapter;	/* Max read depth per adapter */
 };
 
 #include "t4fw_api.h"
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2b438bd68c73..9c7e4f0a7683 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3832,6 +3832,85 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
 
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
+{
+	struct adapter *adap;
+	u32 offset, memtype, memaddr;
+	u32 edc0_size, edc1_size, mc0_size, mc1_size;
+	u32 edc0_end, edc1_end, mc0_end, mc1_end;
+	int ret;
+
+	adap = netdev2adap(dev);
+
+	offset = ((stag >> 8) * 32) + adap->vres.stag.start;
+
+	/* Figure out where the offset lands in the Memory Type/Address scheme.
+	 * This code assumes that the memory is laid out starting at offset 0
+	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
+	 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
+	 * MC0, and some have both MC0 and MC1.
+	 */
+	edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
+	edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
+	mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
+
+	edc0_end = edc0_size;
+	edc1_end = edc0_end + edc1_size;
+	mc0_end = edc1_end + mc0_size;
+
+	if (offset < edc0_end) {
+		memtype = MEM_EDC0;
+		memaddr = offset;
+	} else if (offset < edc1_end) {
+		memtype = MEM_EDC1;
+		memaddr = offset - edc0_end;
+	} else {
+		if (offset < mc0_end) {
+			memtype = MEM_MC0;
+			memaddr = offset - edc1_end;
+		} else if (is_t4(adap->params.chip)) {
+			/* T4 only has a single memory channel */
+			goto err;
+		} else {
+			mc1_size = EXT_MEM_SIZE_GET(
+					t4_read_reg(adap,
+						    MA_EXT_MEMORY1_BAR)) << 20;
+			mc1_end = mc0_end + mc1_size;
+			if (offset < mc1_end) {
+				memtype = MEM_MC1;
+				memaddr = offset - mc0_end;
+			} else {
+				/* offset beyond the end of any memory */
+				goto err;
+			}
+		}
+	}
+
+	spin_lock(&adap->win0_lock);
+	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
+	spin_unlock(&adap->win0_lock);
+	return ret;
+
+err:
+	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
+		stag, offset);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(cxgb4_read_tpte);
+
+u64 cxgb4_read_sge_timestamp(struct net_device *dev)
+{
+	u32 hi, lo;
+	struct adapter *adap;
+
+	adap = netdev2adap(dev);
+	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
+	hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+
+	return ((u64)hi << 32) | (u64)lo;
+}
+EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+
 static struct pci_driver cxgb4_driver;
 
 static void check_neigh_update(struct neighbour *neigh)
@@ -4095,6 +4174,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.wr_cred = adap->params.ofldq_wr_cred;
 	lli.adapter_type = adap->params.chip;
 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+	lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
 			(adap->fn * 4));
@@ -4109,8 +4189,12 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
 	lli.fw_vers = adap->params.fw_vers;
 	lli.dbfifo_int_thresh = dbfifo_int_thresh;
+	lli.sge_ingpadboundary = adap->sge.fl_align;
+	lli.sge_egrstatuspagesize = adap->sge.stat_len;
 	lli.sge_pktshift = adap->sge.pktshift;
 	lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+	lli.max_ordird_qp = adap->params.max_ordird_qp;
+	lli.max_ird_adapter = adap->params.max_ird_adapter;
 	lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 
 	handle = ulds[uld].add(&lli);
@@ -5875,6 +5959,22 @@ static int adap_init0(struct adapter *adap)
 		adap->vres.cq.size = val[3] - val[2] + 1;
 		adap->vres.ocq.start = val[4];
 		adap->vres.ocq.size = val[5] - val[4] + 1;
+
+		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
+		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+		if (ret < 0) {
+			adap->params.max_ordird_qp = 8;
+			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
+			ret = 0;
+		} else {
+			adap->params.max_ordird_qp = val[0];
+			adap->params.max_ird_adapter = val[1];
+		}
+		dev_info(adap->pdev_dev,
+			 "max_ordird_qp %d max_ird_adapter %d\n",
+			 adap->params.max_ordird_qp,
+			 adap->params.max_ird_adapter);
 	}
 	if (caps_cmd.iscsicaps) {
 		params[0] = FW_PARAM_PFVF(ISCSI_START);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 8f60851b75ad..79a84de1d204 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -243,6 +243,7 @@ struct cxgb4_lld_info {
 	unsigned char fw_api_ver;            /* FW API version */
 	unsigned int fw_vers;                /* FW version */
 	unsigned int iscsi_iolen;            /* iSCSI max I/O length */
+	unsigned int cclk_ps;                /* Core clock period in psec */
 	unsigned short udb_density;          /* # of user DB/page */
 	unsigned short ucq_density;          /* # of user CQs/page */
 	unsigned short filt_mode;            /* filter optional components */
@@ -251,11 +252,15 @@ struct cxgb4_lld_info {
 	void __iomem *gts_reg;               /* address of GTS register */
 	void __iomem *db_reg;                /* address of kernel doorbell */
 	int dbfifo_int_thresh;		     /* doorbell fifo int threshold */
+	unsigned int sge_ingpadboundary;     /* SGE ingress padding boundary */
+	unsigned int sge_egrstatuspagesize;  /* SGE egress status page size */
 	unsigned int sge_pktshift;           /* Padding between CPL and */
 					     /* packet data */
 	unsigned int pf;                     /* Physical Function we're using */
 	bool enable_fw_ofld_conn;            /* Enable connection through fw */
 					     /* WR */
+	unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
+	unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
 	bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };
 
@@ -292,5 +297,7 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
 int cxgb4_flush_eq_cache(struct net_device *dev);
 void cxgb4_disable_db_coalescing(struct net_device *dev);
 void cxgb4_enable_db_coalescing(struct net_device *dev);
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
+u64 cxgb4_read_sge_timestamp(struct net_device *dev);
 
 #endif  /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index ae7776471ceb..3b244abbf907 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -251,6 +251,12 @@
 #define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
 #define F_NOCOALESCE    V_NOCOALESCE(1U)
 
+#define SGE_TIMESTAMP_LO 0x1098
+#define SGE_TIMESTAMP_HI 0x109c
+#define S_TSVAL    0
+#define M_TSVAL    0xfffffffU
+#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
+
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define TIMERVALUE0_MASK   0xffff0000U
 #define TIMERVALUE0_SHIFT  16
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 4a6ae4db7397..ff709e3b3e7e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -934,6 +934,8 @@ enum fw_params_param_dev {
 	FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
 	FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
 	FW_PARAMS_PARAM_DEV_CF = 0x0D,
+	FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+	FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
 	FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
 };
 
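A footnote on the work-request logging feature added above: the wr_log entries store raw SGE timestamp ticks, and the debugfs dump converts tick deltas to nanoseconds using the core clock period (cclk_ps) exported through the LLD info (the patch computes lli.cclk_ps = 1000000000 / adap->params.vpd.cclk). Below is a stand-alone sketch of that conversion only, with made-up numbers and an assumed kHz core-clock value purely for illustration; it is not part of the patch.

/* Illustrative sketch: SGE timestamp ticks -> nanoseconds via cclk_ps. */
#include <stdio.h>
#include <stdint.h>

static uint64_t sge_ticks_to_ns(uint64_t ticks, uint64_t cclk_ps)
{
	/* same shape as the driver's ts2ns(): ticks * period_ps / 1000 */
	return ticks * cclk_ps / 1000;
}

int main(void)
{
	uint64_t cclk_khz = 500000;                    /* assumed 500 MHz core clock */
	uint64_t cclk_ps = 1000000000ULL / cclk_khz;   /* clock period in picoseconds */
	uint64_t post_ts = 1000, poll_ts = 2500;       /* hypothetical SGE tick values */

	printf("post->poll delta: %llu ns\n",
	       (unsigned long long)sge_ticks_to_ns(poll_ts - post_ts, cclk_ps));
	return 0;
}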