author     Ingo Molnar <mingo@kernel.org>  2014-05-22 04:28:56 -0400
committer  Ingo Molnar <mingo@kernel.org>  2014-05-22 04:28:56 -0400
commit     65c2ce70046c779974af8b5dfc25a0df489089b5
tree       b16f152eb62b71cf5a1edc51da865b357c989922  /drivers/infiniband/hw/cxgb4
parent     842514849a616e9b61acad65771c7afe01e651f9
parent     4b660a7f5c8099d88d1a43d8ae138965112592c7
Merge tag 'v3.15-rc6' into sched/core, to pick up the latest fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
 drivers/infiniband/hw/cxgb4/Kconfig       |  6
 drivers/infiniband/hw/cxgb4/cm.c          | 39
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h    |  1
 drivers/infiniband/hw/cxgb4/qp.c          | 13
 drivers/infiniband/hw/cxgb4/t4fw_ri_api.h | 14
 5 files changed, 55 insertions(+), 18 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index d4e8983fba53..23f38cf2c5cd 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-	tristate "Chelsio T4 RDMA Driver"
+	tristate "Chelsio T4/T5 RDMA Driver"
 	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
 	select GENERIC_ALLOCATOR
 	---help---
-	  This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-	  10GbE adapters.
+	  This is an iWARP/RDMA driver for the Chelsio T4 and T5
+	  1GbE, 10GbE adapters and T5 40GbE adapter.
 
 	  For general information about Chelsio and our products, visit
 	  our website at <http://www.chelsio.com>.
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 185452abf32c..1f863a96a480 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
 	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	state_set(&ep->com, ABORTING);
+	__state_set(&ep->com, ABORTING);
 	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 	return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
 	struct mpa_message *mpa;
 	struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	enum c4iw_qp_attr_mask mask;
 	int err;
+	int disconnect = 0;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * will abort the connection.
 	 */
 	if (stop_ep_timer(ep))
-		return;
+		return 0;
 
 	/*
 	 * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * if we don't even have the mpa message, then bail.
 	 */
 	if (ep->mpa_pkt_len < sizeof(*mpa))
-		return;
+		return 0;
 	mpa = (struct mpa_message *) ep->mpa_pkt;
 
 	/* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * We'll continue process when more data arrives.
 	 */
 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-		return;
+		return 0;
 
 	if (mpa->flags & MPA_REJECT) {
 		err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_NOMATCH_RTR;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 			attrs.layer_etype = LAYER_MPA | DDP_LLP;
 			attrs.ecode = MPA_INSUFF_IRD;
 			attrs.next_state = C4IW_QP_STATE_TERMINATE;
+			attrs.send_term = 1;
 			err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-					C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 			err = -ENOMEM;
+			disconnect = 1;
 			goto out;
 		}
 		goto out;
@@ -1366,7 +1375,7 @@ err:
 	send_abort(ep, skb, GFP_KERNEL);
 out:
 	connect_reply_upcall(ep, err);
-	return;
+	return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
 	__u8 status = hdr->status;
+	int disconnect = 0;
 
 	ep = lookup_tid(t, tid);
 	if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		ep->rcv_seq += dlen;
-		process_mpa_reply(ep, skb);
+		disconnect = process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
 		ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 			       ep->com.state, ep->hwtid, status);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		disconnect = 1;
 		break;
 	}
 	default:
 		break;
 	}
 	mutex_unlock(&ep->com.mutex);
+	if (disconnect)
+		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		if (tcph->ece && tcph->cwr)
 			opt2 |= CCTRL_ECN(1);
 	}
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
 		       __func__, ep, ep->hwtid, ep->com.state);
 		abort = 0;
 	}
-	mutex_unlock(&ep->com.mutex);
 	if (abort)
 		abort_connection(ep, NULL, GFP_KERNEL);
+	mutex_unlock(&ep->com.mutex);
 	c4iw_put_ep(&ep->com);
 }
 
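Note: the common thread in the cm.c hunks above is that process_mpa_reply() now reports, via its return value, whether the caller must tear the connection down, and rx_data() acts on that flag only after dropping ep->com.mutex, since c4iw_ep_disconnect() acquires that same mutex itself. A minimal stand-alone sketch of this decide-under-lock, act-after-unlock pattern (the names conn, process_event_locked, and do_disconnect are hypothetical, not driver code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct conn {
	pthread_mutex_t lock;
	int state;		/* < 0 stands in for a fatal negotiation error */
};

/* Decide under the lock, but defer the action. */
static bool process_event_locked(struct conn *c)
{
	return c->state < 0;
}

/* Takes c->lock itself, so it must not be called with the lock held. */
static void do_disconnect(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->state = 0;
	pthread_mutex_unlock(&c->lock);
	puts("disconnected");
}

static void rx_event(struct conn *c)
{
	bool disconnect;

	pthread_mutex_lock(&c->lock);
	disconnect = process_event_locked(c);
	pthread_mutex_unlock(&c->lock);
	if (disconnect)		/* no lock held here: no self-deadlock */
		do_disconnect(c);
}

int main(void)
{
	struct conn c = { PTHREAD_MUTEX_INITIALIZER, -1 };

	rx_event(&c);		/* prints "disconnected" */
	return 0;
}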
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7b8c5806a09d..7474b490760a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
 	u8 ecode;
 	u16 sq_db_inc;
 	u16 rq_db_inc;
+	u8 send_term;
 };
 
 struct c4iw_qp {
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 7b5114cb486f..086f62f5dc9e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			qhp->attr.layer_etype = attrs->layer_etype;
 			qhp->attr.ecode = attrs->ecode;
 			ep = qhp->ep;
-			disconnect = 1;
-			c4iw_get_ep(&qhp->ep->com);
-			if (!internal)
+			if (!internal) {
+				c4iw_get_ep(&qhp->ep->com);
 				terminate = 1;
-			else {
+				disconnect = 1;
+			} else {
+				terminate = qhp->attr.send_term;
 				ret = rdma_fini(rhp, qhp, ep);
 				if (ret)
 					goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	/*
 	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
 	 * ringing the queue db when we're in DB_FULL mode.
+	 * Only allow this on T4 devices.
 	 */
 	attrs.sq_db_inc = attr->sq_psn;
 	attrs.rq_db_inc = attr->rq_psn;
 	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
 	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+	if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+	    (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+		return -EINVAL;
 
 	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
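Note: the second qp.c hunk rejects user-initiated doorbell ringing (the SQ_PSN/RQ_PSN IDX_INC path) on T5 adapters, matching the new "Only allow this on T4 devices." comment; the DB_FULL recovery flow it serves appears to be a T4-only mechanism. A hedged sketch of that guard in isolation (the mask bit values below are stand-ins, not the driver's real C4IW_QP_ATTR_* values):

#include <errno.h>
#include <stdbool.h>

/* Hypothetical attribute-mask bits standing in for C4IW_QP_ATTR_*. */
enum {
	QP_ATTR_SQ_DB = 1 << 0,
	QP_ATTR_RQ_DB = 1 << 1,
};

/* Mirrors the check added to c4iw_ib_modify_qp(): doorbell-ring
 * attributes are only honored on T4-generation hardware. */
static int check_db_ring(bool is_t5_adapter, unsigned int mask)
{
	if (is_t5_adapter && (mask & (QP_ATTR_SQ_DB | QP_ATTR_RQ_DB)))
		return -EINVAL;
	return 0;
}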
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index dc193c292671..6121ca08fe58 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE V_RX_DACK_CHANGE(1U)
 
+enum { /* TCP congestion control algorithms */
+	CONG_ALG_RENO,
+	CONG_ALG_TAHOE,
+	CONG_ALG_NEWRENO,
+	CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL	14
+#define M_CONG_CNTRL	0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID	(1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
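Note: the S_/M_/V_/G_ definitions above follow Chelsio's usual field-macro convention: S_ is the field's shift, M_ its width mask, V_ packs a value into the field, and G_ extracts it. A stand-alone sketch of the opt2 bits that the T5 branches in send_connect()/accept_cr() build (macros copied from the patch; T5_OPT_2_VALID is written 1U << 31 here so the shift stays in unsigned range when compiled outside the kernel):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { /* TCP congestion control algorithms */
	CONG_ALG_RENO,
	CONG_ALG_TAHOE,
	CONG_ALG_NEWRENO,
	CONG_ALG_HIGHSPEED
};

#define S_CONG_CNTRL	14
#define M_CONG_CNTRL	0x3
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)

#define T5_OPT_2_VALID	(1U << 31)

int main(void)
{
	uint32_t opt2 = 0;

	/* What the T5 branches in send_connect()/accept_cr() do: */
	opt2 |= T5_OPT_2_VALID;
	opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);

	/* G_ round-trips the 2-bit field packed at bit 14 by V_. */
	assert(G_CONG_CNTRL(opt2) == CONG_ALG_TAHOE);
	printf("opt2 = 0x%08x (CONG_CNTRL = %u)\n",
	       opt2, (unsigned)G_CONG_CNTRL(opt2));
	return 0;
}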