author    Vipul Pandya <vipul@chelsio.com>    2012-12-10 04:30:56 -0500
committer Roland Dreier <roland@purestorage.com>    2012-12-20 02:03:12 -0500
commit    793dad94e7455c113e391bd3d418c7b95a4c2687 (patch)
tree      c5352be56d70a9ac46b81da71fb6c8c0e06a2566 /drivers
parent    1cab775c3e75f1250c965feafd061d696df36e53 (diff)
RDMA/cxgb4: Fix bug for active and passive LE hash collision path
Retries active opens for INUSE errors.

Logs any active ofld_connect_wr error replies.

Sends the ofld_connect_wr on the same ctrlq. It needs to go on the same
control txq as regular CPL active/passive messages.

Retries on active open replies with EADDRINUSE.

Uses the active open fw wr only if the active filter region is set.

Adds a stat for ofld_connect_wr failures.

This patch also adds a debugfs file to show endpoints.

Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
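For background, the bounded retry this patch introduces for address-in-use failures can be modeled in isolation. The stand-alone C sketch below is illustrative only, not driver code: try_active_open() is a fake stand-in for the hardware reply path, and the real handlers (act_open_rpl() and active_ofld_conn_reply() in the diff) also tear down and reallocate the atid, the route, and the L2T entry before each retry.

#include <stdio.h>
#include <errno.h>

#define ACT_OPEN_RETRY_COUNT 2          /* same bound the patch uses */

struct ep {
        unsigned int retry_count;       /* mirrors c4iw_ep::retry_count */
};

/* Fake hardware active-open that fails twice with EADDRINUSE. */
static int try_active_open(struct ep *ep)
{
        static int failures = 2;
        return failures-- > 0 ? -EADDRINUSE : 0;
}

/* Bounded retry on EADDRINUSE, then report the last error upward. */
static int active_open(struct ep *ep)
{
        int err;

        while ((err = try_active_open(ep)) == -EADDRINUSE) {
                if (ep->retry_count++ >= ACT_OPEN_RETRY_COUNT)
                        break;
                printf("EADDRINUSE, retry %u\n", ep->retry_count);
        }
        return err;
}

int main(void)
{
        struct ep ep = { .retry_count = 0 };

        return active_open(&ep) ? 1 : 0;
}

Note the driver's test, ep->retry_count++ < ACT_OPEN_RETRY_COUNT, allows two retries (three attempts in total) before the failure is surfaced via connect_reply_upcall().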
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c                 | 277
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c             | 125
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h           |  32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h       |   1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  |  19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h   |   8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h     |   2
7 files changed, 376 insertions, 88 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 036ddd281529..c13745cde7fa 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -275,6 +275,7 @@ void _c4iw_free_ep(struct kref *kref)
                 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
                 dst_release(ep->dst);
                 cxgb4_l2t_release(ep->l2t);
+                remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
         }
         kfree(ep);
 }
@@ -550,6 +551,7 @@ static int send_connect(struct c4iw_ep *ep)
         req->opt0 = cpu_to_be64(opt0);
         req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
         req->opt2 = cpu_to_be32(opt2);
+        set_bit(ACT_OPEN_REQ, &ep->com.history);
         return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
@@ -826,6 +828,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
         /* setup the hwtid for this connection */
         ep->hwtid = tid;
         cxgb4_insert_tid(t, ep, tid);
+        insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
         ep->snd_seq = be32_to_cpu(req->snd_isn);
         ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -833,7 +836,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
         set_emss(ep, ntohs(req->tcp_opt));
 
         /* dealloc the atid */
+        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
         cxgb4_free_atid(t, atid);
+        set_bit(ACT_ESTAB, &ep->com.history);
 
         /* start MPA negotiation */
         send_flowc(ep, NULL);
@@ -859,6 +864,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
                 ep->com.cm_id->rem_ref(ep->com.cm_id);
                 ep->com.cm_id = NULL;
                 ep->com.qp = NULL;
+                set_bit(CLOSE_UPCALL, &ep->com.history);
         }
 }
 
@@ -867,6 +873,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
         PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
         close_complete_upcall(ep);
         state_set(&ep->com, ABORTING);
+        set_bit(ABORT_CONN, &ep->com.history);
         return send_abort(ep, skb, gfp);
 }
 
@@ -881,6 +888,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
881 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 888 PDBG("peer close delivered ep %p cm_id %p tid %u\n",
882 ep, ep->com.cm_id, ep->hwtid); 889 ep, ep->com.cm_id, ep->hwtid);
883 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 890 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
891 set_bit(DISCONN_UPCALL, &ep->com.history);
884 } 892 }
885} 893}
886 894
@@ -899,6 +907,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
                 ep->com.cm_id->rem_ref(ep->com.cm_id);
                 ep->com.cm_id = NULL;
                 ep->com.qp = NULL;
+                set_bit(ABORT_UPCALL, &ep->com.history);
         }
 }
 
@@ -931,6 +940,7 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 
         PDBG("%s ep %p tid %u status %d\n", __func__, ep,
              ep->hwtid, status);
+        set_bit(CONN_RPL_UPCALL, &ep->com.history);
         ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 
         if (status < 0) {
@@ -971,6 +981,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
                                 ep->parent_ep->com.cm_id,
                                 &event);
         }
+        set_bit(CONNREQ_UPCALL, &ep->com.history);
         c4iw_put_ep(&ep->parent_ep->com);
         ep->parent_ep = NULL;
 }
@@ -987,6 +998,7 @@ static void established_upcall(struct c4iw_ep *ep)
         if (ep->com.cm_id) {
                 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
                 ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+                set_bit(ESTAB_UPCALL, &ep->com.history);
         }
 }
 
@@ -1372,6 +1384,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
         unsigned int dlen = ntohs(hdr->len);
         unsigned int tid = GET_TID(hdr);
         struct tid_info *t = dev->rdev.lldi.tids;
+        __u8 status = hdr->status;
 
         ep = lookup_tid(t, tid);
         PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
@@ -1394,9 +1407,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
         case MPA_REP_SENT:
                 break;
         default:
-                printk(KERN_ERR MOD "%s Unexpected streaming data."
-                       " ep %p state %d tid %u\n",
-                       __func__, ep, state_read(&ep->com), ep->hwtid);
+                pr_err("%s Unexpected streaming data." \
+                       " ep %p state %d tid %u status %d\n",
+                       __func__, ep, state_read(&ep->com), ep->hwtid, status);
 
                 /*
                  * The ep will timeout and inform the ULP of the failure.
@@ -1463,6 +1476,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
         req->tcb.cplrxdataack_cplpassacceptrpl =
                 htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
         req->tcb.tx_max = jiffies;
+        req->tcb.rcv_adv = htons(1);
         cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
         wscale = compute_wscale(rcv_win);
         req->tcb.opt0 = TCAM_BYPASS(1) |
@@ -1490,7 +1504,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
                 req->tcb.opt2 |= WND_SCALE_EN(1);
         req->tcb.opt0 = cpu_to_be64(req->tcb.opt0);
         req->tcb.opt2 = cpu_to_be32(req->tcb.opt2);
-        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+        set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+        set_bit(ACT_OFLD_CONN, &ep->com.history);
         c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
@@ -1503,6 +1518,111 @@ static inline int act_open_has_tid(int status)
                status != CPL_ERR_ARP_MISS;
 }
 
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+        int err = 0;
+        struct rtable *rt;
+        struct port_info *pi;
+        struct net_device *pdev;
+        int step;
+        struct neighbour *neigh;
+
+        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+        init_timer(&ep->timer);
+
+        /*
+         * Allocate an active TID to initiate a TCP connection.
+         */
+        ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+        if (ep->atid == -1) {
+                pr_err("%s - cannot alloc atid.\n", __func__);
+                err = -ENOMEM;
+                goto fail2;
+        }
+        insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+        /* find a route */
+        rt = find_route(ep->com.dev,
+                        ep->com.cm_id->local_addr.sin_addr.s_addr,
+                        ep->com.cm_id->remote_addr.sin_addr.s_addr,
+                        ep->com.cm_id->local_addr.sin_port,
+                        ep->com.cm_id->remote_addr.sin_port, 0);
+        if (!rt) {
+                pr_err("%s - cannot find route.\n", __func__);
+                err = -EHOSTUNREACH;
+                goto fail3;
+        }
+        ep->dst = &rt->dst;
+
+        neigh = dst_neigh_lookup(ep->dst,
+                        &ep->com.cm_id->remote_addr.sin_addr.s_addr);
+        /* get a l2t entry */
+        if (neigh->dev->flags & IFF_LOOPBACK) {
+                PDBG("%s LOOPBACK\n", __func__);
+                pdev = ip_dev_find(&init_net,
+                                   ep->com.cm_id->remote_addr.sin_addr.s_addr);
+                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+                                        neigh, pdev, 0);
+                pi = (struct port_info *)netdev_priv(pdev);
+                ep->mtu = pdev->mtu;
+                ep->tx_chan = cxgb4_port_chan(pdev);
+                ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+                dev_put(pdev);
+        } else {
+                ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+                                        neigh, neigh->dev, 0);
+                pi = (struct port_info *)netdev_priv(neigh->dev);
+                ep->mtu = dst_mtu(ep->dst);
+                ep->tx_chan = cxgb4_port_chan(neigh->dev);
+                ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+                                0x7F) << 1;
+        }
+
+        step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+        ep->txq_idx = pi->port_id * step;
+        ep->ctrlq_idx = pi->port_id;
+        step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+        ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+        if (!ep->l2t) {
+                pr_err("%s - cannot alloc l2e.\n", __func__);
+                err = -ENOMEM;
+                goto fail4;
+        }
+
+        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+             ep->l2t->idx);
+
+        state_set(&ep->com, CONNECTING);
+        ep->tos = 0;
+
+        /* send connect request to rnic */
+        err = send_connect(ep);
+        if (!err)
+                goto out;
+
+        cxgb4_l2t_release(ep->l2t);
+fail4:
+        dst_release(ep->dst);
+fail3:
+        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+        /*
+         * remember to send notification to upper layer.
+         * We are in here so the upper layer is not aware that this is
+         * re-connect attempt and so, upper layer is still waiting for
+         * response of 1st connect request.
+         */
+        connect_reply_upcall(ep, -ECONNRESET);
+        c4iw_put_ep(&ep->com);
+out:
+        return err;
+}
+
 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
         struct c4iw_ep *ep;
@@ -1523,6 +1643,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
                 return 0;
         }
 
+        set_bit(ACT_OPEN_RPL, &ep->com.history);
+
         /*
          * Log interesting failures.
          */
@@ -1531,12 +1653,27 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
         case CPL_ERR_CONN_TIMEDOUT:
                 break;
         case CPL_ERR_TCAM_FULL:
-                mutex_lock(&dev->rdev.stats.lock);
-                dev->rdev.stats.tcam_full++;
-                mutex_unlock(&dev->rdev.stats.lock);
-                send_fw_act_open_req(ep,
-                GET_TID_TID(GET_AOPEN_ATID(ntohl(rpl->atid_status))));
-                return 0;
+                if (dev->rdev.lldi.enable_fw_ofld_conn) {
+                        mutex_lock(&dev->rdev.stats.lock);
+                        dev->rdev.stats.tcam_full++;
+                        mutex_unlock(&dev->rdev.stats.lock);
+                        send_fw_act_open_req(ep,
+                                             GET_TID_TID(GET_AOPEN_ATID(
+                                             ntohl(rpl->atid_status))));
+                        return 0;
+                }
+                break;
+        case CPL_ERR_CONN_EXIST:
+                if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+                        set_bit(ACT_RETRY_INUSE, &ep->com.history);
+                        remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+                                      atid);
+                        cxgb4_free_atid(t, atid);
+                        dst_release(ep->dst);
+                        cxgb4_l2t_release(ep->l2t);
+                        c4iw_reconnect(ep);
+                        return 0;
+                }
                 break;
         default:
                 printk(KERN_INFO MOD "Active open failure - "
@@ -1555,6 +1692,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
         if (status && act_open_has_tid(status))
                 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
 
+        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
         cxgb4_free_atid(t, atid);
         dst_release(ep->dst);
         cxgb4_l2t_release(ep->l2t);
@@ -1775,7 +1913,7 @@ out:
 
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
-        struct c4iw_ep *child_ep, *parent_ep;
+        struct c4iw_ep *child_ep = NULL, *parent_ep;
         struct cpl_pass_accept_req *req = cplhdr(skb);
         unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
         struct tid_info *t = dev->rdev.lldi.tids;
@@ -1856,6 +1994,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
         init_timer(&child_ep->timer);
         cxgb4_insert_tid(t, child_ep, hwtid);
         accept_cr(child_ep, peer_ip, skb, req);
+        set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
         goto out;
 reject:
         reject_cr(dev, hwtid, peer_ip, skb);
@@ -1879,11 +2018,13 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
              ntohs(req->tcp_opt));
 
         set_emss(ep, ntohs(req->tcp_opt));
+        insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
         dst_confirm(ep->dst);
         state_set(&ep->com, MPA_REQ_WAIT);
         start_ep_timer(ep);
         send_flowc(ep, skb);
+        set_bit(PASS_ESTAB, &ep->com.history);
 
         return 0;
 }
@@ -1903,6 +2044,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
         PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
         dst_confirm(ep->dst);
 
+        set_bit(PEER_CLOSE, &ep->com.history);
         mutex_lock(&ep->com.mutex);
         switch (ep->com.state) {
         case MPA_REQ_WAIT:
@@ -1982,74 +2124,6 @@ static int is_neg_adv_abort(unsigned int status)
                status == CPL_ERR_PERSIST_NEG_ADVICE;
 }
 
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
-        struct rtable *rt;
-        int err = 0;
-
-        PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
-        init_timer(&ep->timer);
-
-        /*
-         * Allocate an active TID to initiate a TCP connection.
-         */
-        ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
-        if (ep->atid == -1) {
-                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
-                err = -ENOMEM;
-                goto fail2;
-        }
-
-        /* find a route */
-        rt = find_route(ep->com.dev,
-                        ep->com.cm_id->local_addr.sin_addr.s_addr,
-                        ep->com.cm_id->remote_addr.sin_addr.s_addr,
-                        ep->com.cm_id->local_addr.sin_port,
-                        ep->com.cm_id->remote_addr.sin_port, 0);
-        if (!rt) {
-                printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
-                err = -EHOSTUNREACH;
-                goto fail3;
-        }
-        ep->dst = &rt->dst;
-
-        err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
-                        ep->dst, ep->com.dev, false);
-        if (err) {
-                printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-                goto fail4;
-        }
-
-        PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-             __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
-             ep->l2t->idx);
-
-        state_set(&ep->com, CONNECTING);
-        ep->tos = 0;
-
-        /* send connect request to rnic */
-        err = send_connect(ep);
-        if (!err)
-                goto out;
-
-        cxgb4_l2t_release(ep->l2t);
-fail4:
-        dst_release(ep->dst);
-fail3:
-        cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
-        /*
-         * remember to send notification to upper layer.
-         * We are in here so the upper layer is not aware that this is
-         * re-connect attempt and so, upper layer is still waiting for
-         * response of 1st connect request.
-         */
-        connect_reply_upcall(ep, -ECONNRESET);
-        c4iw_put_ep(&ep->com);
-out:
-        return err;
-}
-
 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
         struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -2070,6 +2144,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
         }
         PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
              ep->com.state);
+        set_bit(PEER_ABORT, &ep->com.history);
 
         /*
          * Wake up any threads in rdma_init() or rdma_fini().
@@ -2284,6 +2359,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
                 c4iw_put_ep(&ep->com);
                 return -ECONNRESET;
         }
+        set_bit(ULP_REJECT, &ep->com.history);
         BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
         if (mpa_rev == 0)
                 abort_connection(ep, NULL, GFP_KERNEL);
@@ -2313,6 +2389,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
         BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
         BUG_ON(!qp);
 
+        set_bit(ULP_ACCEPT, &ep->com.history);
         if ((conn_param->ord > c4iw_max_read_depth) ||
             (conn_param->ird > c4iw_max_read_depth)) {
                 abort_connection(ep, NULL, GFP_KERNEL);
@@ -2436,6 +2513,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                 err = -ENOMEM;
                 goto fail2;
         }
+        insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
         PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
              ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2481,6 +2559,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 fail4:
         dst_release(ep->dst);
 fail3:
+        remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
         cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
 fail2:
         cm_id->rem_ref(cm_id);
@@ -2523,6 +2602,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                 err = -ENOMEM;
                 goto fail2;
         }
+        insert_handle(dev, &dev->stid_idr, ep, ep->stid);
         state_set(&ep->com, LISTEN);
         if (dev->rdev.lldi.enable_fw_ofld_conn) {
                 do {
@@ -2530,7 +2610,10 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                                 ep->com.dev->rdev.lldi.ports[0], ep->stid,
                                 ep->com.local_addr.sin_addr.s_addr,
                                 ep->com.local_addr.sin_port,
-                                ep->com.dev->rdev.lldi.rxq_ids[0]);
+                                0,
+                                ep->com.dev->rdev.lldi.rxq_ids[0],
+                                0,
+                                0);
                         if (err == -EBUSY) {
                                 set_current_state(TASK_UNINTERRUPTIBLE);
                                 schedule_timeout(usecs_to_jiffies(100));
@@ -2541,6 +2624,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
                 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
                                 ep->stid, ep->com.local_addr.sin_addr.s_addr,
                                 ep->com.local_addr.sin_port,
+                                0,
                                 ep->com.dev->rdev.lldi.rxq_ids[0]);
                 if (!err)
                         err = c4iw_wait_for_reply(&ep->com.dev->rdev,
@@ -2586,6 +2670,7 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
                 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
                                           0, 0, __func__);
         }
+        remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
         cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
         cm_id->rem_ref(cm_id);
@@ -2649,10 +2734,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 
         if (close) {
                 if (abrupt) {
+                        set_bit(EP_DISC_ABORT, &ep->com.history);
                         close_complete_upcall(ep);
                         ret = send_abort(ep, NULL, gfp);
-                } else
+                } else {
+                        set_bit(EP_DISC_CLOSE, &ep->com.history);
                         ret = send_halfclose(ep, gfp);
+                }
                 if (ret)
                         fatal = 1;
         }
@@ -2666,6 +2754,7 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
                         struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
 {
         struct c4iw_ep *ep;
+        int atid = be32_to_cpu(req->tid);
 
         ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
         if (!ep)
@@ -2673,15 +2762,35 @@ static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
 
         switch (req->retval) {
         case FW_ENOMEM:
+                set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+                if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+                        send_fw_act_open_req(ep, atid);
+                        return;
+                }
         case FW_EADDRINUSE:
-                PDBG("%s ofld conn wr ret %d\n", __func__, req->retval);
+                set_bit(ACT_RETRY_INUSE, &ep->com.history);
+                if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+                        send_fw_act_open_req(ep, atid);
+                        return;
+                }
                 break;
         default:
                 pr_info("%s unexpected ofld conn wr retval %d\n",
                         __func__, req->retval);
                 break;
         }
+        pr_err("active ofld_connect_wr failure %d atid %d\n",
+               req->retval, atid);
+        mutex_lock(&dev->rdev.stats.lock);
+        dev->rdev.stats.act_ofld_conn_fails++;
+        mutex_unlock(&dev->rdev.stats.lock);
         connect_reply_upcall(ep, status2errno(req->retval));
+        state_set(&ep->com, DEAD);
+        remove_handle(dev, &dev->atid_idr, atid);
+        cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+        dst_release(ep->dst);
+        cxgb4_l2t_release(ep->l2t);
+        c4iw_put_ep(&ep->com);
 }
 
 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
@@ -2695,6 +2804,9 @@ static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
         BUG_ON(!rpl_skb);
         if (req->retval) {
                 PDBG("%s passive open failure %d\n", __func__, req->retval);
+                mutex_lock(&dev->rdev.stats.lock);
+                dev->rdev.stats.pas_ofld_conn_fails++;
+                mutex_unlock(&dev->rdev.stats.lock);
                 kfree_skb(rpl_skb);
         } else {
                 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
@@ -2989,6 +3101,7 @@ static void process_timeout(struct c4iw_ep *ep)
         mutex_lock(&ep->com.mutex);
         PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
              ep->com.state);
+        set_bit(TIMEDOUT, &ep->com.history);
         switch (ep->com.state) {
         case MPA_REQ_SENT:
                 __state_set(&ep->com, ABORTING);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 6b5b3d15e48d..ba11c76c0b5a 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -280,6 +280,10 @@ static int stats_show(struct seq_file *seq, void *v)
                    db_state_str[dev->db_state],
                    dev->rdev.stats.db_state_transitions);
         seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+        seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+                   dev->rdev.stats.act_ofld_conn_fails);
+        seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+                   dev->rdev.stats.pas_ofld_conn_fails);
         return 0;
 }
 
@@ -310,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
         dev->rdev.stats.db_empty = 0;
         dev->rdev.stats.db_drop = 0;
         dev->rdev.stats.db_state_transitions = 0;
+        dev->rdev.stats.tcam_full = 0;
+        dev->rdev.stats.act_ofld_conn_fails = 0;
+        dev->rdev.stats.pas_ofld_conn_fails = 0;
         mutex_unlock(&dev->rdev.stats.lock);
         return count;
 }
@@ -323,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
         .write = stats_clear,
 };
 
+static int dump_ep(int id, void *p, void *data)
+{
+        struct c4iw_ep *ep = p;
+        struct c4iw_debugfs_data *epd = data;
+        int space;
+        int cc;
+
+        space = epd->bufsize - epd->pos - 1;
+        if (space == 0)
+                return 1;
+
+        cc = snprintf(epd->buf + epd->pos, space,
+                      "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+                      "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+                      ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+                      ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+                      &ep->com.local_addr.sin_addr.s_addr,
+                      ntohs(ep->com.local_addr.sin_port),
+                      &ep->com.remote_addr.sin_addr.s_addr,
+                      ntohs(ep->com.remote_addr.sin_port));
+        if (cc < space)
+                epd->pos += cc;
+        return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+        struct c4iw_listen_ep *ep = p;
+        struct c4iw_debugfs_data *epd = data;
+        int space;
+        int cc;
+
+        space = epd->bufsize - epd->pos - 1;
+        if (space == 0)
+                return 1;
+
+        cc = snprintf(epd->buf + epd->pos, space,
+                      "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+                      "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+                      ep->com.flags, ep->stid, ep->backlog,
+                      &ep->com.local_addr.sin_addr.s_addr,
+                      ntohs(ep->com.local_addr.sin_port));
+        if (cc < space)
+                epd->pos += cc;
+        return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+        struct c4iw_debugfs_data *epd = file->private_data;
+        if (!epd) {
+                pr_info("%s null qpd?\n", __func__);
+                return 0;
+        }
+        vfree(epd->buf);
+        kfree(epd);
+        return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+        struct c4iw_debugfs_data *epd;
+        int ret = 0;
+        int count = 1;
+
+        epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+        if (!epd) {
+                ret = -ENOMEM;
+                goto out;
+        }
+        epd->devp = inode->i_private;
+        epd->pos = 0;
+
+        spin_lock_irq(&epd->devp->lock);
+        idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+        idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+        idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+        spin_unlock_irq(&epd->devp->lock);
+
+        epd->bufsize = count * 160;
+        epd->buf = vmalloc(epd->bufsize);
+        if (!epd->buf) {
+                ret = -ENOMEM;
+                goto err1;
+        }
+
+        spin_lock_irq(&epd->devp->lock);
+        idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+        idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+        idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+        spin_unlock_irq(&epd->devp->lock);
+
+        file->private_data = epd;
+        goto out;
+err1:
+        kfree(epd);
+out:
+        return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+        .owner = THIS_MODULE,
+        .open = ep_open,
+        .release = ep_release,
+        .read = debugfs_read,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
         struct dentry *de;
@@ -345,6 +459,11 @@ static int setup_debugfs(struct c4iw_dev *devp)
         if (de && de->d_inode)
                 de->d_inode->i_size = 4096;
 
+        de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
+                                 (void *)devp, &ep_debugfs_fops);
+        if (de && de->d_inode)
+                de->d_inode->i_size = 4096;
+
         return 0;
 }
 
@@ -476,6 +595,9 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
         idr_destroy(&ctx->dev->cqidr);
         idr_destroy(&ctx->dev->qpidr);
         idr_destroy(&ctx->dev->mmidr);
+        idr_destroy(&ctx->dev->hwtid_idr);
+        idr_destroy(&ctx->dev->stid_idr);
+        idr_destroy(&ctx->dev->atid_idr);
         iounmap(ctx->dev->rdev.oc_mw_kva);
         ib_dealloc_device(&ctx->dev->ibdev);
         ctx->dev = NULL;
@@ -533,6 +655,9 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
         idr_init(&devp->cqidr);
         idr_init(&devp->qpidr);
         idr_init(&devp->mmidr);
+        idr_init(&devp->hwtid_idr);
+        idr_init(&devp->stid_idr);
+        idr_init(&devp->atid_idr);
         spin_lock_init(&devp->lock);
         mutex_init(&devp->rdev.stats.lock);
         mutex_init(&devp->db_mutex);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 6a17fde51eae..9c1644fb0259 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -131,6 +131,8 @@ struct c4iw_stats {
         u64 db_drop;
         u64 db_state_transitions;
         u64 tcam_full;
+        u64 act_ofld_conn_fails;
+        u64 pas_ofld_conn_fails;
 };
 
 struct c4iw_rdev {
@@ -224,6 +226,9 @@ struct c4iw_dev {
         struct dentry *debugfs_root;
         enum db_state db_state;
         int qpcnt;
+        struct idr hwtid_idr;
+        struct idr atid_idr;
+        struct idr stid_idr;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -713,6 +718,31 @@ enum c4iw_ep_flags {
         CLOSE_SENT = 3,
 };
 
+enum c4iw_ep_history {
+        ACT_OPEN_REQ = 0,
+        ACT_OFLD_CONN = 1,
+        ACT_OPEN_RPL = 2,
+        ACT_ESTAB = 3,
+        PASS_ACCEPT_REQ = 4,
+        PASS_ESTAB = 5,
+        ABORT_UPCALL = 6,
+        ESTAB_UPCALL = 7,
+        CLOSE_UPCALL = 8,
+        ULP_ACCEPT = 9,
+        ULP_REJECT = 10,
+        TIMEDOUT = 11,
+        PEER_ABORT = 12,
+        PEER_CLOSE = 13,
+        CONNREQ_UPCALL = 14,
+        ABORT_CONN = 15,
+        DISCONN_UPCALL = 16,
+        EP_DISC_CLOSE = 17,
+        EP_DISC_ABORT = 18,
+        CONN_RPL_UPCALL = 19,
+        ACT_RETRY_NOMEM = 20,
+        ACT_RETRY_INUSE = 21
+};
+
 struct c4iw_ep_common {
         struct iw_cm_id *cm_id;
         struct c4iw_qp *qp;
@@ -724,6 +754,7 @@ struct c4iw_ep_common {
         struct sockaddr_in remote_addr;
         struct c4iw_wr_wait wr_wait;
         unsigned long flags;
+        unsigned long history;
 };
 
 struct c4iw_listen_ep {
@@ -761,6 +792,7 @@ struct c4iw_ep {
         u8 tos;
         u8 retry_with_mpa_v1;
         u8 tried_with_mpa_v1;
+        unsigned int retry_count;
 };
 
 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 46ddd432cdf2..6db997c78a5f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -530,6 +530,7 @@ struct adapter {
         struct net_device *port[MAX_NPORTS];
         u8 chan_map[NCHAN];                   /* channel -> port map */
 
+        u32 filter_mode;
         unsigned int l2t_start;
         unsigned int l2t_end;
         struct l2t_data *l2t;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index a7830a1d8b89..f0718e1a8369 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2670,7 +2670,8 @@ static int tid_init(struct tid_info *t)
  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
  */
 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
-                        __be32 sip, __be16 sport, unsigned int queue)
+                        __be32 sip, __be16 sport, __be16 vlan,
+                        unsigned int queue)
 {
         unsigned int chan;
         struct sk_buff *skb;
@@ -3043,7 +3044,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
         lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
                         t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
                         (adap->fn * 4));
-        lli.filt_mode = tp_vlan_pri_map;
+        lli.filt_mode = adap->filter_mode;
         /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
         for (i = 0; i < NCHAN; i++)
                 lli.tx_modq[i] = i;
@@ -3307,7 +3308,8 @@ static int delete_filter(struct adapter *adapter, unsigned int fidx)
 }
 
 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
-                __be32 sip, __be16 sport, unsigned int queue)
+                __be32 sip, __be16 sport, __be16 vlan,
+                unsigned int queue, unsigned char port, unsigned char mask)
 {
         int ret;
         struct filter_entry *f;
@@ -3339,11 +3341,16 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
         f->fs.val.lport = cpu_to_be16(sport);
         f->fs.mask.lport = ~0;
         val = (u8 *)&sip;
-        if ((val[0] | val[1] | val[2] | val[3]) != 0)
+        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
                 for (i = 0; i < 4; i++) {
                         f->fs.val.lip[i] = val[i];
                         f->fs.mask.lip[i] = ~0;
                 }
+                if (adap->filter_mode & F_PORT) {
+                        f->fs.val.iport = port;
+                        f->fs.mask.iport = mask;
+                }
+        }
 
         f->fs.dirsteer = 1;
         f->fs.iq = queue;
@@ -4450,6 +4457,10 @@ static int adap_init0(struct adapter *adap)
         for (j = 0; j < NCHAN; j++)
                 adap->params.tp.tx_modq[j] = j;
 
+        t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+                         &adap->filter_mode, 1,
+                         TP_VLAN_PRI_MAP);
+
         adap->flags |= FW_OK;
         return 0;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 065bbd5b2411..e2bbc7f3e2de 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -38,6 +38,7 @@
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/skbuff.h>
+#include <linux/inetdevice.h>
 #include <linux/atomic.h>
 
 /* CPL message priority levels */
@@ -151,9 +152,12 @@ void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
 struct in6_addr;
 
 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
-                        __be32 sip, __be16 sport, unsigned int queue);
+                        __be32 sip, __be16 sport, __be16 vlan,
+                        unsigned int queue);
 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
-                               __be32 sip, __be16 sport, unsigned int queue);
+                               __be32 sip, __be16 sport, __be16 vlan,
+                               unsigned int queue,
+                               unsigned char port, unsigned char mask);
 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                                unsigned int queue, bool ipv6);
 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a2c29f7b7aa1..83ec5f7844ac 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1098,5 +1098,7 @@
 #define A_TP_TX_SCHED_PCMD 0x25
 
 #define S_PORT 1
+#define V_PORT(x) ((x) << S_PORT)
+#define F_PORT V_PORT(1U)
 
 #endif /* __T4_REGS_H */
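A note on reading the new "eps" debugfs file: the history 0x%lx value printed by dump_ep() is a plain bitmask whose bit positions come from enum c4iw_ep_history above (the driver sets them with set_bit() on ep->com.history). A minimal user-space decoder might look like the following sketch; the bit names are copied from the enum, everything else is illustrative and not part of the patch.

#include <stdio.h>
#include <stdlib.h>

/* Bit positions from enum c4iw_ep_history in iw_cxgb4.h (this patch). */
static const char *const ep_history_names[] = {
        "ACT_OPEN_REQ", "ACT_OFLD_CONN", "ACT_OPEN_RPL", "ACT_ESTAB",
        "PASS_ACCEPT_REQ", "PASS_ESTAB", "ABORT_UPCALL", "ESTAB_UPCALL",
        "CLOSE_UPCALL", "ULP_ACCEPT", "ULP_REJECT", "TIMEDOUT",
        "PEER_ABORT", "PEER_CLOSE", "CONNREQ_UPCALL", "ABORT_CONN",
        "DISCONN_UPCALL", "EP_DISC_CLOSE", "EP_DISC_ABORT",
        "CONN_RPL_UPCALL", "ACT_RETRY_NOMEM", "ACT_RETRY_INUSE",
};

/* Decode a "history 0x..." value as printed by the eps debugfs file. */
int main(int argc, char **argv)
{
        unsigned long history;
        unsigned int i;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <history-bitmask>\n", argv[0]);
                return 1;
        }
        history = strtoul(argv[1], NULL, 0);
        for (i = 0; i < sizeof(ep_history_names) / sizeof(ep_history_names[0]); i++)
                if (history & (1UL << i))
                        printf("%s\n", ep_history_names[i]);
        return 0;
}

For example, "./decode 0x9" prints ACT_OPEN_REQ and ACT_ESTAB (bits 0 and 3), i.e. an active open that reached the established state.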