author		David S. Miller <davem@davemloft.net>	2014-06-11 01:49:59 -0400
committer	David S. Miller <davem@davemloft.net>	2014-06-11 01:49:59 -0400
commit		c4d4c255d85310dc720c5535c164ca0f6d5b3b81 (patch)
tree		c59a7d41979ef120e61d8c7ef57509291e4d31a2
parent		f8c1b7ce00254a5bb75d5b5e5ef1601326a0e08e (diff)
parent		c887ad0e226b54b33670e22b3bffb53c8d0e3d28 (diff)
Merge branch 'cxgb4'
Hariprasad Shenai says:
====================
Adds support for CIQ and other misc. fixes for rdma/cxgb4

This patch series adds support for allocating and using ingress queues (IQs)
dedicated to indirect interrupts, aligns the ISS for iWARP connections, and
fixes the TCP snd/rcv window handling for Chelsio T4/T5 adapters in iw_cxgb4.
It also changes the interrupt holdoff packet count threshold of the cxgb4
driver's response queues.

The patch series is created against the 'net-next' tree and includes patches
for both the cxgb4 and iw_cxgb4 drivers. Since the series touches both
drivers, we would like to request that it be merged via David Miller's
'net-next' tree.

We have included all the maintainers of the respective drivers. Kindly review
the changes and let us know if you have any review comments.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/infiniband/hw/cxgb4/cm.c			127
-rw-r--r--	drivers/infiniband/hw/cxgb4/cq.c			  7
-rw-r--r--	drivers/infiniband/hw/cxgb4/iw_cxgb4.h			  2
-rw-r--r--	drivers/infiniband/hw/cxgb4/provider.c			  2
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4.h			  1
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4fw_ri_api.h		  1
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4.h		 14
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c		236
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h		  7
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/sge.c		  5
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_hw.h		  1
-rw-r--r--	drivers/net/ethernet/chelsio/cxgb4/t4_msg.h		 10
12 files changed, 356 insertions(+), 57 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 1f863a96a480..965eaafd5851 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -232,12 +232,16 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 
 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+		   sizeof(struct iphdr) - sizeof(struct tcphdr);
 	ep->mss = ep->emss;
 	if (GET_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
 	if (ep->emss < 128)
 		ep->emss = 128;
+	if (ep->emss & 7)
+		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
 	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
 	     ep->mss, ep->emss);
 }
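A stand-alone sketch of the effective-MSS computation above (illustrative
only, not part of the patch; the 20-byte constants stand in for the
sizeof(struct iphdr) and sizeof(struct tcphdr) terms the patch subtracts):

	static unsigned int effective_mss(unsigned short mtu_entry, int tstamps)
	{
		unsigned int emss = mtu_entry - 20 - 20; /* IPv4 + TCP headers */

		if (tstamps)
			emss -= 12;	/* TCP timestamp option */
		if (emss < 128)
			emss = 128;	/* lower bound kept by the driver */
		return emss;		/* aligned payloads want emss % 8 == 0 */
	}

With the adjusted MTU table entry of 1488 (see the adap_init0() hunk further
down), effective_mss(1488, 0) is 1448, a multiple of 8, which is exactly the
property the new misalignment warning checks for.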
@@ -468,7 +472,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
 	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
 	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
-	flowc->mnemval[6].val = cpu_to_be32(snd_win);
+	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
 	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
 	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
 	/* Pad WR to 16 byte boundary */
@@ -528,6 +532,17 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
+static void best_mtu(const unsigned short *mtus, unsigned short mtu,
+		     unsigned int *idx, int use_ts)
+{
+	unsigned short hdr_size = sizeof(struct iphdr) +
+				  sizeof(struct tcphdr) +
+				  (use_ts ? 12 : 0);
+	unsigned short data_size = mtu - hdr_size;
+
+	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -550,6 +565,7 @@ static int send_connect(struct c4iw_ep *ep)
 	struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
 	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+	int win;
 
 	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
 			roundup(sizev4, 16) :
@@ -565,8 +581,18 @@ static int send_connect(struct c4iw_ep *ep)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
 
-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
+
 	opt0 = (nocong ? NO_CONG(1) : 0) |
 	       KEEP_ALIVE(1) |
 	       DELACK(1) |
@@ -577,7 +603,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(rcv_win>>10);
+	       RCV_BUFSIZ(win);
 	opt2 = RX_CHANNEL(0) |
 	       CCTRL_ECN(enable_ecn) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -633,6 +659,13 @@ static int send_connect(struct c4iw_ep *ep)
 			req6->opt2 = cpu_to_be32(opt2);
 		}
 	} else {
+		u32 isn = (prandom_u32() & ~7UL) - 1;
+
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		if (peer2peer)
+			isn += 4;
+
 		if (ep->com.remote_addr.ss_family == AF_INET) {
 			t5_req = (struct cpl_t5_act_open_req *)
 				 skb_put(skb, wrlen);
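The ISS arithmetic above is easy to sanity-check in user space (a hedged
stand-in, not driver code: rand() replaces prandom_u32(); the modular
properties are the point):

	#include <assert.h>
	#include <stdint.h>
	#include <stdlib.h>

	int main(void)
	{
		uint32_t isn = ((uint32_t)rand() & ~7U) - 1;

		assert(isn % 8 == 7);		/* base case: ends ...111 */
		assert((isn + 4) % 8 == 3);	/* peer2peer adjustment */
		return 0;
	}

Picking the initial send sequence at a fixed offset mod 8 (shifted by 4 when
peer2peer mode adds its extra MPA handshake bytes) pairs with the 8-byte data
alignment the rest of the series works toward.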
@@ -649,6 +682,9 @@ static int send_connect(struct c4iw_ep *ep)
 					cxgb4_select_ntuple(
 					ep->com.dev->rdev.lldi.ports[0],
 					ep->l2t)));
+			t5_req->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__,
+			     be32_to_cpu(t5_req->rsvd));
 			t5_req->opt2 = cpu_to_be32(opt2);
 		} else {
 			t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -672,6 +708,9 @@ static int send_connect(struct c4iw_ep *ep)
 					cxgb4_select_ntuple(
 					ep->com.dev->rdev.lldi.ports[0],
 					ep->l2t));
+			t5_req6->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__,
+			     be32_to_cpu(t5_req6->rsvd));
 			t5_req6->opt2 = cpu_to_be32(opt2);
 		}
 	}
@@ -1145,6 +1184,14 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 		return 0;
 	}
 
+	/*
+	 * If we couldn't specify the entire rcv window at connection setup
+	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
+	 * then add the overage in to the credits returned.
+	 */
+	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
+		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+
 	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
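Taken together with the clamp in send_connect(), the receive window is split
in two: at most RCV_BUFSIZ_MASK 1KB units go into opt0, and the remainder is
returned here as credits. A minimal sketch of that split (illustrative, not
driver code):

	#define RCV_BUFSIZ_MASK 0x3FFU

	static void split_rcv_win(unsigned int rcv_win_bytes,
				  unsigned int *opt0_kb,
				  unsigned int *credit_overage)
	{
		unsigned int win = rcv_win_bytes >> 10;	/* bytes -> 1KB units */

		if (win > RCV_BUFSIZ_MASK)
			win = RCV_BUFSIZ_MASK;
		*opt0_kb = win;				/* programmed at setup */
		*credit_overage = rcv_win_bytes > RCV_BUFSIZ_MASK * 1024 ?
				  rcv_win_bytes - RCV_BUFSIZ_MASK * 1024 : 0;
	}

For a 2MB rcv_win, opt0 advertises 1023KB and the remaining ~1MB flows back
through the first CPL_RX_DATA_ACK.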
@@ -1618,6 +1665,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	unsigned int mtu_idx;
 	int wscale;
 	struct sockaddr_in *sin;
+	int win;
 
 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1640,8 +1688,18 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
 	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
+
 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
 		(nocong ? NO_CONG(1) : 0) |
 		KEEP_ALIVE(1) |
@@ -1653,7 +1711,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		SMAC_SEL(ep->smac_idx) |
 		DSCP(ep->tos) |
 		ULP_MODE(ULP_MODE_TCPDDP) |
-		RCV_BUFSIZ(rcv_win >> 10));
+		RCV_BUFSIZ(win));
 	req->tcb.opt2 = (__force __be32) (PACE(1) |
 		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
 		RX_CHANNEL(0) |
@@ -1690,6 +1748,13 @@ static int is_neg_adv(unsigned int status)
 	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
 }
 
+static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
+{
+	ep->snd_win = snd_win;
+	ep->rcv_win = rcv_win;
+	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
+}
+
 #define ACT_OPEN_RETRY_COUNT 2
 
 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1738,6 +1803,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
 			      cxgb4_port_idx(pdev) * step];
+		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
 		dev_put(pdev);
 	} else {
 		pdev = get_real_dev(n->dev);
@@ -1756,6 +1822,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 			      cdev->rdev.lldi.nchan;
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
 			      cxgb4_port_idx(n->dev) * step];
+		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
 
 		if (clear_mpa_v1) {
 			ep->retry_with_mpa_v1 = 0;
@@ -1986,13 +2053,36 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	u64 opt0;
 	u32 opt2;
 	int wscale;
+	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
+	int win;
 
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	BUG_ON(skb_cloned(skb));
-	skb_trim(skb, sizeof(*rpl));
+
 	skb_get(skb);
-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	rpl = cplhdr(skb);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		skb_trim(skb, roundup(sizeof(*rpl5), 16));
+		rpl5 = (void *)rpl;
+		INIT_TP_WR(rpl5, ep->hwtid);
+	} else {
+		skb_trim(skb, sizeof(*rpl));
+		INIT_TP_WR(rpl, ep->hwtid);
+	}
+	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+						    ep->hwtid));
+
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps && req->tcpopt.tstamp);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
 	opt0 = (nocong ? NO_CONG(1) : 0) |
 	       KEEP_ALIVE(1) |
 	       DELACK(1) |
@@ -2003,7 +2093,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos >> 2) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(rcv_win>>10);
+	       RCV_BUFSIZ(win);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
 
@@ -2023,14 +2113,18 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		opt2 |= CCTRL_ECN(1);
 	}
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID;
 		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		rpl5 = (void *)rpl;
+		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+		if (peer2peer)
+			isn += 4;
+		rpl5->iss = cpu_to_be32(isn);
+		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
 	}
 
-	rpl = cplhdr(skb);
-	INIT_TP_WR(rpl, ep->hwtid);
-	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-						    ep->hwtid));
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -2095,6 +2189,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	int err;
 	u16 peer_mss = ntohs(req->tcpopt.mss);
 	int iptype;
+	unsigned short hdrs;
 
 	parent_ep = lookup_stid(t, stid);
 	if (!parent_ep) {
@@ -2152,8 +2247,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	if (peer_mss && child_ep->mtu > (peer_mss + 40))
-		child_ep->mtu = peer_mss + 40;
+	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
+	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
+		child_ep->mtu = peer_mss + hdrs;
 
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index cfaa56ada189..71fc2ef203fb 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			V_FW_RI_RES_WR_IQANUS(0) |
 			V_FW_RI_RES_WR_IQANUD(1) |
 			F_FW_RI_RES_WR_IQANDST |
-			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+			V_FW_RI_RES_WR_IQANDSTINDEX(
+				rdev->lldi.ciq_ids[cq->vector]));
 	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
 			F_FW_RI_RES_WR_IQDROPRSS |
 			V_FW_RI_RES_WR_IQPCIECH(2) |
@@ -870,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
 	rhp = to_c4iw_dev(ibdev);
 
+	if (vector >= rhp->rdev.lldi.nciq)
+		return ERR_PTR(-EINVAL);
+
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
@@ -915,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
+	chp->cq.vector = vector;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
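Because provider.c (below) now reports lldi.nciq completion vectors, a verbs
consumer can spread completion processing across the CIQs. A hypothetical
caller might round-robin like this (only ib_create_cq() and num_comp_vectors
are real names here; the helper itself is invented for the example):

	static struct ib_cq *cq_on_next_vector(struct ib_device *ibdev, int cqe,
					       unsigned int *rr_counter)
	{
		int vector = (*rr_counter)++ % ibdev->num_comp_vectors;

		/* c4iw_create_cq() now rejects vector >= lldi.nciq with -EINVAL */
		return ib_create_cq(ibdev, NULL, NULL, NULL, cqe, vector);
	}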
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7474b490760a..7493dfeb812a 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -805,6 +805,8 @@ struct c4iw_ep {
 	u8 retry_with_mpa_v1;
 	u8 tried_with_mpa_v1;
 	unsigned int retry_count;
+	int snd_win;
+	int rcv_win;
 };
 
 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index a94a3e12c349..31cd1882c47b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -499,7 +499,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.node_type = RDMA_NODE_RNIC;
 	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
 	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
-	dev->ibdev.num_comp_vectors = 1;
+	dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
 	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
 	dev->ibdev.query_device = c4iw_query_device;
 	dev->ibdev.query_port = c4iw_query_port;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 2178f3198410..68b0a6bf4eb0 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -542,6 +542,7 @@ struct t4_cq {
 	size_t memsize;
 	__be64 bits_type_ts;
 	u32 cqid;
+	int vector;
 	u16 size; /* including status page */
 	u16 cidx;
 	u16 sw_pidx;
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 6121ca08fe58..91289a051af9 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,7 @@ enum {                     /* TCP congestion control algorithms */
 #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
 #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
 
+#define CONG_CNTRL_VALID   (1 << 18)
 #define T5_OPT_2_VALID	   (1 << 31)
 
 #endif /* _T4FW_RI_API_H_ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db37709263..f503dce4ab17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
 	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
 	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
 	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
+	MAX_RDMA_CIQS = NCHAN,        /* # of RDMA concentrator IQs */
+	MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
 };
 
 enum {
-	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
-	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
+	INGQ_EXTRAS = 2,        /* firmware event queue and */
+				/*   forwarded interrupts */
+	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
+	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };
 
 struct adapter;
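As a back-of-envelope check of the new bounds (assuming MAX_ETH_QSETS is 32
and NCHAN is 4, their values elsewhere in this header at the time; treat the
numbers as illustrative):

	enum {	/* illustrative expansion, not part of the patch */
		EX_MAX_EGRQ = 32 * 2 + 16 * 2 + 4 + 4 + 4,	/* = 108 */
		EX_MAX_INGQ = 32 + 16 + 4 + 4 + 4 + 2,		/* =  62 */
	};

So both limits now track the queue enums automatically instead of the old
hard-coded 128/64.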
@@ -538,6 +544,7 @@ struct sge {
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
 	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
 
 	struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
 	u16 ethtxq_rover;           /* Tx queue to clean up next */
 	u16 ofldqsets;              /* # of active offload queue sets */
 	u16 rdmaqs;                 /* # of available RDMA Rx queues */
+	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
 	u16 ofld_rxq[MAX_OFLD_QSETS];
 	u16 rdma_rxq[NCHAN];
+	u16 rdma_ciq[NCHAN];
 	u16 timer_val[SGE_NTIMERS];
 	u8 counter_val[SGE_NCOUNTERS];
 	u32 fl_pg_order;            /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
 #define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
 #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
 
 struct l2t_data;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8cf6be93f491..2f8d6b910383 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
 		for_each_rdmarxq(&adap->sge, i)
 			snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 				 adap->port[0]->name, i);
+
+		for_each_rdmaciq(&adap->sge, i)
+			snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+				 adap->port[0]->name, i);
 	}
 
 static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
+	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+	int msi_index = 2;
 
 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
 			  adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
 			goto unwind;
 		msi_index++;
 	}
+	for_each_rdmaciq(s, rdmaciqqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->rdmaciq[rdmaciqqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
 	return 0;
 
 unwind:
+	while (--rdmaciqqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->rdmaciq[rdmaciqqidx].rspq);
 	while (--rdmaqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
 	for_each_rdmarxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+	for_each_rdmaciq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
 }
 
 /**
@@ -1047,7 +1066,8 @@ freeout:	t4_free_sge_resources(adap);
 		if (msi_idx > 0)
 			msi_idx++;
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
-				       &q->fl, uldrx_handler);
+				       q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
 		if (err)
 			goto freeout;
 		memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout:	t4_free_sge_resources(adap);
 		if (msi_idx > 0)
 			msi_idx++;
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-				       msi_idx, &q->fl, uldrx_handler);
+				       msi_idx, q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
 		if (err)
 			goto freeout;
 		memset(&q->stats, 0, sizeof(q->stats));
 		s->rdma_rxq[i] = q->rspq.abs_id;
 	}
 
+	for_each_rdmaciq(s, i) {
+		struct sge_ofld_rxq *q = &s->rdmaciq[i];
+
+		if (msi_idx > 0)
+			msi_idx++;
+		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+				       msi_idx, q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
+		if (err)
+			goto freeout;
+		memset(&q->stats, 0, sizeof(q->stats));
+		s->rdma_ciq[i] = q->rspq.abs_id;
+	}
+
 	for_each_port(adap, i) {
 		/*
 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2468,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
 }
 
 /**
- *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
- *	@adap: the adapter
+ *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
@@ -2477,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
-static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+static int set_rspq_intr_params(struct sge_rspq *q,
 			       unsigned int us, unsigned int cnt)
 {
+	struct adapter *adap = q->adap;
+
 	if ((us | cnt) == 0)
 		cnt = 1;
 
@@ -2506,24 +2542,34 @@ static int set_rspq_intr_params(struct sge_rspq *q,
 	return 0;
 }
 
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/**
+ *	set_rx_intr_params - set a net devices's RX interrupt holdoff parameters
+ *	@dev: the network device
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+			      unsigned int us, unsigned int cnt)
 {
-	const struct port_info *pi = netdev_priv(dev);
+	int i, err;
+	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
-	struct sge_rspq *q;
-	int i;
-	int r = 0;
-
-	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
-		q = &adap->sge.ethrxq[i].rspq;
-		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
-				c->rx_max_coalesced_frames);
-		if (r) {
-			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
-			break;
-		}
+	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+	for (i = 0; i < pi->nqsets; i++, q++) {
+		err = set_rspq_intr_params(&q->rspq, us, cnt);
+		if (err)
+			return err;
 	}
-	return r;
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+				  c->rx_max_coalesced_frames);
 }
 
 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
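set_rspq_intr_params() folds the (us, cnt) pair into the queue's intr_params
word much as the code removed from init_rspq() further down did. A rough
model of that encoding, with the QINTR_* shift values assumed from sge.c's
local defines (illustrative only):

	#define EX_QINTR_CNT_EN       0x1		/* assumed, as in sge.c */
	#define EX_QINTR_TIMER_IDX(x) ((x) << 1)	/* assumed, as in sge.c */
	#define EX_SGE_NCOUNTERS      4

	static unsigned int encode_intr_params(unsigned int timer_idx,
					       unsigned int pkt_cnt_idx)
	{
		return EX_QINTR_TIMER_IDX(timer_idx) |
		       (pkt_cnt_idx < EX_SGE_NCOUNTERS ? EX_QINTR_CNT_EN : 0);
	}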
@@ -3393,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 EXPORT_SYMBOL(cxgb4_best_mtu);
 
 /**
+ *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ *     @mtus: the HW MTU table
+ *     @header_size: Header Size
+ *     @data_size_max: maximum Data Segment Size
+ *     @data_size_align: desired Data Segment Size Alignment (2^N)
+ *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ *     MTU Table based solely on a Maximum MTU parameter, we break that
+ *     parameter up into a Header Size and Maximum Data Segment Size, and
+ *     provide a desired Data Segment Size Alignment.  If we find an MTU in
+ *     the Hardware MTU Table which will result in a Data Segment Size with
+ *     the requested alignment _and_ that MTU isn't "too far" from the
+ *     closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp)
+{
+	unsigned short max_mtu = header_size + data_size_max;
+	unsigned short data_size_align_mask = data_size_align - 1;
+	int mtu_idx, aligned_mtu_idx;
+
+	/* Scan the MTU Table till we find an MTU which is larger than our
+	 * Maximum MTU or we reach the end of the table.  Along the way,
+	 * record the last MTU found, if any, which will result in a Data
+	 * Segment Length matching the requested alignment.
+	 */
+	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+		unsigned short data_size = mtus[mtu_idx] - header_size;
+
+		/* If this MTU minus the Header Size would result in a
+		 * Data Segment Size of the desired alignment, remember it.
+		 */
+		if ((data_size & data_size_align_mask) == 0)
+			aligned_mtu_idx = mtu_idx;
+
+		/* If we're not at the end of the Hardware MTU Table and the
+		 * next element is larger than our Maximum MTU, drop out of
+		 * the loop.
+		 */
+		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+			break;
+	}
+
+	/* If we fell out of the loop because we ran to the end of the table,
+	 * then we just have to use the last [largest] entry.
+	 */
+	if (mtu_idx == NMTUS)
+		mtu_idx--;
+
+	/* If we found an MTU which resulted in the requested Data Segment
+	 * Length alignment and that's "not far" from the largest MTU which is
+	 * less than or equal to the maximum MTU, then use that.
+	 */
+	if (aligned_mtu_idx >= 0 &&
+	    mtu_idx - aligned_mtu_idx <= 1)
+		mtu_idx = aligned_mtu_idx;
+
+	/* If the caller has passed in an MTU Index pointer, pass the
+	 * MTU Index back.  Return the MTU value.
+	 */
+	if (mtu_idxp)
+		*mtu_idxp = mtu_idx;
+	return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
+
+/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
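A worked example of the helper (hypothetical toy table; the real one has
NMTUS entries): with header_size 40 and an alignment of 8, max_mtu is
40 + 1460 = 1500. Entry 1500 yields a 1460-byte segment (not 8-aligned)
while 1488 yields 1448 (aligned), and since index 1 is within one step of
the closest index 2, the aligned entry wins:

	static const unsigned short toy_mtus[] = { 576, 1488, 1500, 9000 };

	void example(void)
	{
		unsigned int idx;
		unsigned int mtu = cxgb4_best_aligned_mtu(toy_mtus,
							  40,	/* iphdr + tcphdr */
							  1460,	/* max data segment */
							  8, &idx);
		/* mtu == 1488, idx == 1 */
	}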
@@ -3789,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.mtus = adap->params.mtus;
 	if (uld == CXGB4_ULD_RDMA) {
 		lli.rxq_ids = adap->sge.rdma_rxq;
+		lli.ciq_ids = adap->sge.rdma_ciq;
 		lli.nrxq = adap->sge.rdmaqs;
+		lli.nciq = adap->sge.rdmaciqs;
 	} else if (uld == CXGB4_ULD_ISCSI) {
 		lli.rxq_ids = adap->sge.ofld_rxq;
 		lli.nrxq = adap->sge.ofldqsets;
@@ -5535,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV
 
-	/*
-	 * These are finalized by FW initialization, load their values now.
+	/* The MTU/MSS Table is initialized by now, so load their values.  If
+	 * we're initializing the adapter, then we'll make any modifications
+	 * we want to the MTU/MSS Table and also initialize the congestion
+	 * parameters.
 	 */
 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
-	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
-		     adap->params.b_wnd);
+	if (state != DEV_STATE_INIT) {
+		int i;
+
+		/* The default MTU Table contains values 1492 and 1500.
+		 * However, for TCP, it's better to have two values which are
+		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+		 * This allows us to have a TCP Data Payload which is a
+		 * multiple of 8 regardless of what combination of TCP Options
+		 * are in use (always a multiple of 4 bytes) which is
+		 * important for performance reasons.  For instance, if no
+		 * options are in use, then we have a 20-byte IP header and a
+		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
+		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+		 * which is not a multiple of 8.  So using an MSS of 1488 in
+		 * this case results in a TCP Data Payload of 1448 bytes which
+		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
+		 * Stamps have been negotiated, then an MTU of 1500 bytes
+		 * results in a TCP Data Payload of 1448 bytes which, as
+		 * above, is a multiple of 8 bytes ...
+		 */
+		for (i = 0; i < NMTUS; i++)
+			if (adap->params.mtus[i] == 1492) {
+				adap->params.mtus[i] = 1488;
+				break;
+			}
 
+		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+			     adap->params.b_wnd);
+	}
 	t4_init_tp_params(adap);
 	adap->flags |= FW_OK;
 	return 0;
@@ -5676,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
 }
 
-static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+			     unsigned int us, unsigned int cnt,
 			     unsigned int size, unsigned int iqe_size)
 {
-	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
-			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
-	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+	q->adap = adap;
+	set_rspq_intr_params(q, us, cnt);
 	q->iqe_len = iqe_size;
 	q->size = size;
 }
@@ -5695,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 	int i, q10g = 0, n10g = 0, qidx = 0;
+	int ciq_size;
 
 	for_each_port(adap, i)
 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5733,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
 		s->ofldqsets = adap->params.nports;
 		/* For RDMA one Rx queue per channel suffices */
 		s->rdmaqs = adap->params.nports;
+		s->rdmaciqs = adap->params.nports;
 	}
 
 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
 		struct sge_eth_rxq *r = &s->ethrxq[i];
 
-		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
 		r->fl.size = 72;
 	}
 
@@ -5754,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
 
-		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
 		r->rspq.uld = CXGB4_ULD_ISCSI;
 		r->fl.size = 72;
 	}
@@ -5762,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
 
-		init_rspq(&r->rspq, 0, 0, 511, 64);
+		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
 		r->rspq.uld = CXGB4_ULD_RDMA;
 		r->fl.size = 72;
 	}
 
-	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
-	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+	if (ciq_size > SGE_MAX_IQ_SIZE) {
+		CH_WARN(adap, "CIQ size too small for available IQs\n");
+		ciq_size = SGE_MAX_IQ_SIZE;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+		struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+		r->rspq.uld = CXGB4_ULD_RDMA;
+	}
+
+	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
 }
 
 /*
@@ -5815,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
 
 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->ofldqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
 		/* need nchan for each possible ULD */
-		ofld_need = 2 * nchan;
+		ofld_need = 3 * nchan;
 	}
 	need = adap->params.nports + EXTRA_VECS + ofld_need;
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a047528f..55e9daf7f9d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
 	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
 	const unsigned short *mtus;          /* MTU table */
 	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
+	const unsigned short *ciq_ids;       /* the ULD's concentrator IQ ids */
 	unsigned short nrxq;                 /* # of Rx queues */
 	unsigned short ntxq;                 /* # of Tx queues */
+	unsigned short nciq;                 /* # of concentrator IQ */
 	unsigned char nchan:4;               /* # of channels */
 	unsigned char nports:4;              /* # of ports */
 	unsigned char wr_cred;               /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 			    unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp);
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
 			 struct tp_tcp_stats *v6);
 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index cced1a3d5181..58bd213fcaf2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	iq->cntxt_id = ntohs(c.iqid);
 	iq->abs_id = ntohs(c.physiqid);
 	iq->size--;                           /* subtract status entry */
-	iq->adap = adap;
 	iq->netdev = dev;
 	iq->handler = hnd;
 
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
 		if (oq->rspq.desc)
 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
 	}
+	for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
 
 	/* clean up offload Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623be9f1e..71b799b5b0f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
 	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
 	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
 	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+	SGE_MAX_IQ_SIZE = 65520,
 
 	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
 	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c710789..973eb11aa98a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
 #define DELACK(x)     ((x) << 5)
 #define ULP_MODE(x)   ((x) << 8)
 #define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
 #define DSCP(x)       ((x) << 22)
 #define SMAC_SEL(x)   ((u64)(x) << 28)
 #define L2T_IDX(x)    ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
 	__be64 opt0;
 };
 
+struct cpl_t5_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+	__be32 iss;
+	__be32 rsvd;
+};
+
 struct cpl_act_open_req {
 	WR_HDR;
 	union opcode_tid ot;