 drivers/infiniband/hw/cxgb4/cm.c                | 412 ++++++++++++++++++++---
 drivers/infiniband/hw/cxgb4/device.c            |  85 ++++--
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c |   9 +
 drivers/net/ethernet/chelsio/cxgb4/t4_msg.h     |  46 +++
 4 files changed, 497 insertions(+), 55 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4878704b6d70..036ddd281529 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
 #include <linux/inetdevice.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/if_vlan.h>

 #include <net/neighbour.h>
 #include <net/netevent.h>
 #include <net/route.h>
+#include <net/tcp.h>

 #include "iw_cxgb4.h"

@@ -1569,13 +1571,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);

 	if (!ep) {
-		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
-		return 0;
+		PDBG("%s stid %d lookup failure!\n", __func__, stid);
+		goto out;
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));

+out:
 	return 0;
 }

@@ -1779,15 +1782,23 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int hwtid = GET_TID(req);
 	struct dst_entry *dst;
 	struct rtable *rt;
-	__be32 local_ip, peer_ip;
+	__be32 local_ip, peer_ip = 0;
 	__be16 local_port, peer_port;
 	int err;
+	u16 peer_mss = ntohs(req->tcpopt.mss);

 	parent_ep = lookup_stid(t, stid);
-	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+	if (!parent_ep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
 	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);

+	PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+	     "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+	     ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+	     ntohs(peer_port), peer_mss);
+
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
 		       __func__);
@@ -1821,6 +1832,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}

+	if (peer_mss && child_ep->mtu > (peer_mss + 40))
+		child_ep->mtu = peer_mss + 40;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -1861,6 +1875,9 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

+	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+	     ntohs(req->tcp_opt));
+
 	set_emss(ep, ntohs(req->tcp_opt));

 	dst_confirm(ep->dst);
@@ -2478,7 +2495,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
 	struct c4iw_listen_ep *ep;

-
 	might_sleep();

 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2497,30 +2513,49 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+	if (dev->rdev.lldi.enable_fw_ofld_conn)
+		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+	else
+		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
 	if (ep->stid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
-
 	state_set(&ep->com, LISTEN);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
-				  ep->com.local_addr.sin_addr.s_addr,
-				  ep->com.local_addr.sin_port,
-				  ep->com.dev->rdev.lldi.rxq_ids[0]);
-	if (err)
-		goto fail3;
-
-	/* wait for pass_open_rpl */
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (dev->rdev.lldi.enable_fw_ofld_conn) {
+		do {
+			err = cxgb4_create_server_filter(
+				ep->com.dev->rdev.lldi.ports[0], ep->stid,
+				ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				ep->com.dev->rdev.lldi.rxq_ids[0]);
+			if (err == -EBUSY) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(usecs_to_jiffies(100));
+			}
+		} while (err == -EBUSY);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+				ep->stid, ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				ep->com.dev->rdev.lldi.rxq_ids[0]);
+		if (!err)
+			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+						  &ep->com.wr_wait,
+						  0, 0, __func__);
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
 	}
-fail3:
+	pr_err("%s cxgb4_create_server/filter failed err %d " \
+	       "stid %d laddr %08x lport %d\n", \
+	       __func__, err, ep->stid,
+	       ntohl(ep->com.local_addr.sin_addr.s_addr),
+	       ntohs(ep->com.local_addr.sin_port));
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 fail2:
 	cm_id->rem_ref(cm_id);
@@ -2539,12 +2574,18 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)

 	might_sleep();
 	state_set(&ep->com, DEAD);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = listen_stop(ep);
-	if (err)
-		goto done;
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+		err = cxgb4_remove_server_filter(
+			ep->com.dev->rdev.lldi.ports[0], ep->stid,
+			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = listen_stop(ep);
+		if (err)
+			goto done;
+		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+					  0, 0, __func__);
+	}
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
 	cm_id->rem_ref(cm_id);
@@ -2621,10 +2662,299 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	return ret;
 }

-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct c4iw_ep *ep;
+
+	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, req->tid);
+	if (!ep)
+		return;
+
+	switch (req->retval) {
+	case FW_ENOMEM:
+	case FW_EADDRINUSE:
+		PDBG("%s ofld conn wr ret %d\n", __func__, req->retval);
+		break;
+	default:
+		pr_info("%s unexpected ofld conn wr retval %d\n",
+			__func__, req->retval);
+		break;
+	}
+	connect_reply_upcall(ep, status2errno(req->retval));
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct sk_buff *rpl_skb;
+	struct cpl_pass_accept_req *cpl;
+	int ret;
+
+	rpl_skb = (struct sk_buff *)cpu_to_be64(req->cookie);
+	BUG_ON(!rpl_skb);
+	if (req->retval) {
+		PDBG("%s passive open failure %d\n", __func__, req->retval);
+		kfree_skb(rpl_skb);
+	} else {
+		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+						      htonl(req->tid)));
+		ret = pass_accept_req(dev, rpl_skb);
+		if (!ret)
+			kfree_skb(rpl_skb);
+	}
+	return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+	switch (rpl->type) {
+	case FW6_TYPE_CQE:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+		switch (req->t_state) {
+		case TCP_SYN_SENT:
+			active_ofld_conn_reply(dev, skb, req);
+			break;
+		case TCP_SYN_RECV:
+			passive_ofld_conn_reply(dev, skb, req);
+			break;
+		default:
+			pr_err("%s unexpected ofld conn wr state %d\n",
+			       __func__, req->t_state);
+			break;
+		}
+		break;
+	}
+	return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
+{
+	u32 l2info;
+	u16 vlantag, len, hdr_len;
+	u8 intf;
+	struct cpl_rx_pkt *cpl = cplhdr(skb);
+	struct cpl_pass_accept_req *req;
+	struct tcp_options_received tmp_opt;
+
+	/* Store values from cpl_rx_pkt in temporary location. */
+	vlantag = cpl->vlan;
+	len = cpl->len;
+	l2info = cpl->l2info;
+	hdr_len = cpl->hdr_len;
+	intf = cpl->iff;
+
+	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+	/*
+	 * We need to parse the TCP options from the SYN packet
+	 * to generate cpl_pass_accept_req.
+	 */
+	memset(&tmp_opt, 0, sizeof(tmp_opt));
+	tcp_clear_options(&tmp_opt);
+	tcp_parse_options(skb, &tmp_opt, 0, 0, NULL);
+
+	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+			 V_SYN_MAC_IDX(G_RX_MACIDX(htonl(l2info))) |
+			 F_SYN_XACT_MATCH);
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(htonl(l2info))) |
+			 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(htons(hdr_len))) |
+			 V_IP_HDR_LEN(G_RX_IPHDR_LEN(htons(hdr_len))) |
+			 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(htonl(l2info))));
+	req->vlan = vlantag;
+	req->len = len;
+	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+				    PASS_OPEN_TOS(tos));
+	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+	if (tmp_opt.wscale_ok)
+		req->tcpopt.wsf = tmp_opt.snd_wscale;
+	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+	if (tmp_opt.sack_ok)
+		req->tcpopt.sack = 1;
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+	return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+				  __be32 laddr, __be16 lport,
+				  __be32 raddr, __be16 rport,
+				  u32 rcv_isn, u32 filter, u16 window,
+				  u32 rss_qid, u8 port_id)
+{
+	struct sk_buff *req_skb;
+	struct fw_ofld_connection_wr *req;
+	struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+	req->le.filter = filter;
+	req->le.lport = lport;
+	req->le.pport = rport;
+	req->le.u.ipv4.lip = laddr;
+	req->le.u.ipv4.pip = raddr;
+	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+	req->tcb.rcv_adv = htons(window);
+	req->tcb.t_state_to_astid =
+		htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+		      V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+		      V_FW_OFLD_CONNECTION_WR_ASTID(
+			GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+	/*
+	 * We store the qid in opt2 which will be used by the firmware
+	 * to send us the wr response.
+	 */
+	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+	/*
+	 * We initialize the MSS index in TCB to 0xF.
+	 * So that when driver sends cpl_pass_accept_rpl
+	 * TCB picks up the correct value. If this was 0
+	 * TP will ignore any value > 0 for MSS index.
+	 */
+	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+	req->cookie = cpu_to_be64((u64)skb);
+
+	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
+ * messages when a filter is being used instead of server to
+ * redirect a syn packet. When packets hit filter they are redirected
+ * to the offload queue and driver tries to establish the connection
+ * using firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	int stid;
+	unsigned int filter;
+	struct ethhdr *eh = NULL;
+	struct vlan_ethhdr *vlan_eh = NULL;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct rss_header *rss = (void *)skb->data;
+	struct cpl_rx_pkt *cpl = (void *)skb->data;
+	struct cpl_pass_accept_req *req = (void *)(rss + 1);
+	struct l2t_entry *e;
+	struct dst_entry *dst;
+	struct rtable *rt;
+	struct c4iw_ep *lep;
+	u16 window;
+	struct port_info *pi;
+	struct net_device *pdev;
+	u16 rss_qid;
+	int step;
+	u32 tx_chan;
+	struct neighbour *neigh;
+
+	/* Drop all non-SYN packets */
+	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+		goto reject;
+
+	/*
+	 * Drop all packets which did not hit the filter.
+	 * Unlikely to happen.
+	 */
+	if (!(rss->filter_hit && rss->filter_tid))
+		goto reject;
+
+	/*
+	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
+	 */
+	stid = cpu_to_be32(rss->hash_val) - dev->rdev.lldi.tids->sftid_base
+	       + dev->rdev.lldi.tids->nstids;
+
+	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+	if (!lep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
+
+	if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+		eh = (struct ethhdr *)(req + 1);
+		iph = (struct iphdr *)(eh + 1);
+	} else {
+		vlan_eh = (struct vlan_ethhdr *)(req + 1);
+		iph = (struct iphdr *)(vlan_eh + 1);
+		skb->vlan_tci = ntohs(cpl->vlan);
+	}
+
+	if (iph->version != 0x4)
+		goto reject;
+
+	tcph = (struct tcphdr *)(iph + 1);
+	skb_set_network_header(skb, (void *)iph - (void *)rss);
+	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+	skb_get(skb);
+
+	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+	     ntohs(tcph->source), iph->tos);
+
+	rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+			iph->tos);
+	if (!rt) {
+		pr_err("%s - failed to find dst entry!\n",
+		       __func__);
+		goto reject;
+	}
+	dst = &rt->dst;
+	neigh = dst_neigh_lookup_skb(dst, skb);
+
+	if (neigh->dev->flags & IFF_LOOPBACK) {
+		pdev = ip_dev_find(&init_net, iph->daddr);
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+				  pdev, 0);
+		pi = (struct port_info *)netdev_priv(pdev);
+		tx_chan = cxgb4_port_chan(pdev);
+		dev_put(pdev);
+	} else {
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+				  neigh->dev, 0);
+		pi = (struct port_info *)netdev_priv(neigh->dev);
+		tx_chan = cxgb4_port_chan(neigh->dev);
+	}
+	if (!e) {
+		pr_err("%s - failed to allocate l2t entry!\n",
+		       __func__);
+		goto free_dst;
+	}
+
+	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+	window = htons(tcph->window);
+
+	/* Calculate filter portion for LE region. */
+	filter = cpu_to_be32(select_ntuple(dev, dst, e));
+
+	/*
+	 * Synthesize the cpl_pass_accept_req. We have everything except the
+	 * TID. Once firmware sends a reply with TID we update the TID field
+	 * in cpl and pass it through the regular cpl_pass_accept_req path.
+	 */
+	build_cpl_pass_accept_req(skb, stid, iph->tos);
+	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+			      tcph->source, ntohl(tcph->seq), filter, window,
+			      rss_qid, pi->port_id);
+	cxgb4_l2t_release(e);
+free_dst:
+	dst_release(dst);
+reject:
 	return 0;
 }

@@ -2647,7 +2977,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
 	[CPL_RDMA_TERMINATE] = terminate,
 	[CPL_FW4_ACK] = fw4_ack,
-	[CPL_FW6_MSG] = async_event
+	[CPL_FW6_MSG] = deferred_fw6_msg,
+	[CPL_RX_PKT] = rx_pkt
 };

 static void process_timeout(struct c4iw_ep *ep)
@@ -2774,9 +3105,6 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct cpl_fw6_msg *rpl = cplhdr(skb);
 	struct c4iw_wr_wait *wr_waitp;
 	int ret;
-	u8 opcode;
-	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
-	struct c4iw_ep *ep;

 	PDBG("%s type %u\n", __func__, rpl->type);

@@ -2790,23 +3118,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		kfree_skb(skb);
 		break;
 	case FW6_TYPE_CQE:
-		sched(dev, skb);
-		break;
 	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
-		opcode = *(const u8 *)rpl->data;
-		if (opcode == FW_OFLD_CONNECTION_WR) {
-			req =
-			(struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
-			if (req->t_state == TCP_SYN_SENT
-			    && (req->retval == FW_ENOMEM
-				|| req->retval == FW_EADDRINUSE)) {
-				ep = (struct c4iw_ep *)
-				     lookup_atid(dev->rdev.lldi.tids,
-						 req->tid);
-				c4iw_l2t_send(&dev->rdev, skb, ep->l2t);
-				return 0;
-			}
-		}
+		sched(dev, skb);
 		break;
 	default:
 		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
@@ -2868,7 +3181,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 	[CPL_RDMA_TERMINATE] = sched,
 	[CPL_FW4_ACK] = sched,
 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
-	[CPL_FW6_MSG] = fw6_msg
+	[CPL_FW6_MSG] = fw6_msg,
+	[CPL_RX_PKT] = sched
 };

 int __init c4iw_cm_init(void)
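
The peer_mss handling added to pass_accept_req() above clamps the child endpoint's MTU to the peer's advertised MSS plus 40 bytes of headers. As a quick illustration, here is a minimal standalone C sketch of that arithmetic (not driver code; clamp_mtu, HDR_OVERHEAD and the values in main() are invented for this example), assuming 20-byte IPv4 and TCP headers with no options:

/* Standalone sketch (not driver code) of the MSS clamp added in
 * pass_accept_req(): the peer's advertised MSS bounds the MTU the
 * endpoint should assume.  The 40 is an assumed IPv4 header (20) plus
 * TCP header (20) with no options. */
#include <stdio.h>

#define HDR_OVERHEAD 40 /* IPv4 (20) + TCP (20), no options */

static unsigned int clamp_mtu(unsigned int mtu, unsigned int peer_mss)
{
        /* peer_mss == 0 means the SYN carried no MSS option; keep mtu,
         * matching the "if (peer_mss && ...)" guard in the patch */
        if (peer_mss && mtu > peer_mss + HDR_OVERHEAD)
                mtu = peer_mss + HDR_OVERHEAD;
        return mtu;
}

int main(void)
{
        printf("%u\n", clamp_mtu(9000, 1460)); /* -> 1500 */
        printf("%u\n", clamp_mtu(1500, 0));    /* -> 1500 (no MSS option) */
        return 0;
}
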
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd783700..6b5b3d15e48d 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,7 @@ static int stats_show(struct seq_file *seq, void *v)
 	seq_printf(seq, " DB State: %s Transitions %llu\n",
 		   db_state_str[dev->db_state],
 		   dev->rdev.stats.db_state_transitions);
+	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
 	return 0;
 }

@@ -577,14 +578,76 @@ out:
 	return ctx;
 }

+static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
+						 const __be64 *rsp,
+						 u32 pktshift)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * Allocate space for cpl_pass_accept_req which will be synthesized by
+	 * driver. Once the driver synthesizes the request the skb will go
+	 * through the regular cpl_pass_accept_req processing.
+	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
+	 * cpl_rx_pkt.
+	 */
+	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
+	if (unlikely(!skb))
+		return NULL;
+
+	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
+		  sizeof(struct rss_header) - pktshift);
+
+	/*
+	 * This skb will contain:
+	 *   rss_header from the rspq descriptor (1 flit)
+	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
+	 *   space for the difference between the size of an
+	 *     rx_pkt and pass_accept_req cpl (1 flit)
+	 *   the packet data from the gl
+	 */
+	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
+				sizeof(struct rss_header));
+	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
+				       sizeof(struct cpl_pass_accept_req),
+				       gl->va + pktshift,
+				       gl->tot_len - pktshift);
+	return skb;
+}
+
+static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
+			      const __be64 *rsp)
+{
+	unsigned int opcode = *(u8 *)rsp;
+	struct sk_buff *skb;
+
+	if (opcode != CPL_RX_PKT)
+		goto out;
+
+	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
+	if (skb == NULL)
+		goto out;
+
+	if (c4iw_handlers[opcode] == NULL) {
+		pr_info("%s no handler opcode 0x%x...\n", __func__,
+			opcode);
+		kfree_skb(skb);
+		goto out;
+	}
+	c4iw_handlers[opcode](dev, skb);
+	return 1;
+out:
+	return 0;
+}
+
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
 	struct uld_ctx *ctx = handle;
 	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
-	const struct cpl_act_establish *rpl;
-	unsigned int opcode;
+	u8 opcode;

 	if (gl == NULL) {
 		/* omit RSS and rsp_ctrl at end of descriptor */
@@ -601,19 +664,29 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
 		c4iw_ev_handler(dev, qid);
 		return 0;
+	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
+		if (recv_rx_pkt(dev, gl, rsp))
+			return 0;
+
+		pr_info("%s: unexpected FL contents at %p, " \
+			"RSS %#llx, FL %#llx, len %u\n",
+			pci_name(ctx->lldi.pdev), gl->va,
+			(unsigned long long)be64_to_cpu(*rsp),
+			(unsigned long long)be64_to_cpu(*(u64 *)gl->va),
+			gl->tot_len);
+
+		return 0;
 	} else {
 		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
 		if (unlikely(!skb))
 			goto nomem;
 	}

-	rpl = cplhdr(skb);
-	opcode = rpl->ot.opcode;
-
+	opcode = *(u8 *)rsp;
 	if (c4iw_handlers[opcode])
 		c4iw_handlers[opcode](dev, skb);
 	else
-		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
+		pr_info("%s no handler opcode 0x%x...\n", __func__,
 		       opcode);

 	return 0;
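
copy_gl_to_skb_pkt() above sizes the new skb so a synthesized cpl_pass_accept_req plus the RSS header can sit in front of the packet payload, after dropping the pktshift bytes of SGE padding counted in gl->tot_len. A standalone sketch of that length math follows; the struct sizes are illustrative stand-ins, not the real CPL layouts:

/* Sketch of the length math in copy_gl_to_skb_pkt().  The sizes below
 * are hypothetical (1 flit = 8 bytes); the point is the relation
 * skb_len = tot_len + accept_req + rss_hdr - pktshift, which only
 * works if cpl_pass_accept_req >= cpl_rx_pkt. */
#include <assert.h>
#include <stdio.h>

#define RSS_HDR_LEN     8   /* 1 flit  */
#define RX_PKT_LEN      16  /* 2 flits (stand-in) */
#define ACCEPT_REQ_LEN  24  /* 3 flits: rx_pkt + 1 spare flit (stand-in) */

static unsigned int skb_len_needed(unsigned int tot_len, unsigned int pktshift)
{
        /* tot_len counts pktshift bytes of pad that are dropped when
         * the payload is copied from gl->va + pktshift */
        return tot_len + ACCEPT_REQ_LEN + RSS_HDR_LEN - pktshift;
}

int main(void)
{
        unsigned int len = skb_len_needed(128, 2);

        assert(ACCEPT_REQ_LEN >= RX_PKT_LEN); /* assumption the math relies on */
        printf("allocate %u bytes\n", len);   /* 24 + 8 + (128 - 2) = 158 */
        return 0;
}
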
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 888bc7fc7a38..a7830a1d8b89 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3317,6 +3317,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,

 	adap = netdev2adap(dev);

+	/* Adjust stid to correct filter index */
+	stid -= adap->tids.nstids;
+	stid += adap->tids.nftids;
+
 	/* Check to make sure the filter requested is writable ...
 	 */
 	f = &adap->tids.ftid_tab[stid];
@@ -3365,6 +3369,11 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
 	struct adapter *adap;

 	adap = netdev2adap(dev);
+
+	/* Adjust stid to correct filter index */
+	stid -= adap->tids.nstids;
+	stid += adap->tids.nftids;
+
 	f = &adap->tids.ftid_tab[stid];
 	/* Unlock the filter */
 	f->locked = 0;
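
Both helpers above, and their callers in cm.c, rely on a fixed mapping between the server tid handed out by cxgb4_alloc_sftid() and an index into ftid_tab, while rx_pkt() performs the inverse step from the filter hit index reported in the RSS header. A standalone sketch of the round trip, using made-up region sizes and assuming the server-filter region sits directly after the normal filters:

/* Sketch of the tid-space arithmetic the patch relies on, with made-up
 * region sizes.  A server-filter tid is exposed to upper layers as an
 * stid in [nstids, nstids + nsftids); cxgb4 maps it into ftid_tab,
 * where server filters are assumed to follow the nftids normal ones. */
#include <assert.h>

#define NSTIDS     192            /* hypothetical */
#define NFTIDS     496            /* hypothetical */
#define SFTID_BASE (NFTIDS)       /* assumed: server filters follow normal ones */

/* cxgb4_create/remove_server_filter(): stid -> ftid_tab index */
static int stid_to_fidx(int stid)
{
        return stid - NSTIDS + NFTIDS;
}

/* rx_pkt(): filter hit index (rss->hash_val) -> stid */
static int hit_to_stid(int hash_val)
{
        return hash_val - SFTID_BASE + NSTIDS;
}

int main(void)
{
        int stid = NSTIDS + 3;             /* 4th sftid, as allocated */
        int fidx = stid_to_fidx(stid);     /* index into ftid_tab */

        assert(fidx == NFTIDS + 3);
        assert(hit_to_stid(fidx) == stid); /* round-trip via the hit index */
        return 0;
}
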
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index dcf6d61794ea..261d17703adc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -199,6 +199,18 @@ struct work_request_hdr {

 #define WR_HDR struct work_request_hdr wr

+/* option 0 fields */
+#define S_MSS_IDX    60
+#define M_MSS_IDX    0xF
+#define V_MSS_IDX(x) ((__u64)(x) << S_MSS_IDX)
+#define G_MSS_IDX(x) (((x) >> S_MSS_IDX) & M_MSS_IDX)
+
+/* option 2 fields */
+#define S_RSS_QUEUE    0
+#define M_RSS_QUEUE    0x3FF
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+#define G_RSS_QUEUE(x) (((x) >> S_RSS_QUEUE) & M_RSS_QUEUE)
+
 struct cpl_pass_open_req {
 	WR_HDR;
 	union opcode_tid ot;
@@ -300,6 +312,9 @@ struct cpl_pass_establish {
 	union opcode_tid ot;
 	__be32 rsvd;
 	__be32 tos_stid;
+#define PASS_OPEN_TID(x) ((x) << 0)
+#define PASS_OPEN_TOS(x) ((x) << 24)
+#define GET_PASS_OPEN_TID(x) (((x) >> 0) & 0xFFFFFF)
 #define GET_POPEN_TID(x) ((x) & 0xffffff)
 #define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
 	__be16 mac_idx;
@@ -545,6 +560,37 @@ struct cpl_rx_pkt {
 	__be16 err_vec;
 };

+/* rx_pkt.l2info fields */
+#define S_RX_ETHHDR_LEN    0
+#define M_RX_ETHHDR_LEN    0x1F
+#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
+#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
+
+#define S_RX_MACIDX    8
+#define M_RX_MACIDX    0x1FF
+#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
+#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)
+
+#define S_RXF_SYN    21
+#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
+#define F_RXF_SYN    V_RXF_SYN(1U)
+
+#define S_RX_CHAN    28
+#define M_RX_CHAN    0xF
+#define V_RX_CHAN(x) ((x) << S_RX_CHAN)
+#define G_RX_CHAN(x) (((x) >> S_RX_CHAN) & M_RX_CHAN)
+
+/* rx_pkt.hdr_len fields */
+#define S_RX_TCPHDR_LEN    0
+#define M_RX_TCPHDR_LEN    0x3F
+#define V_RX_TCPHDR_LEN(x) ((x) << S_RX_TCPHDR_LEN)
+#define G_RX_TCPHDR_LEN(x) (((x) >> S_RX_TCPHDR_LEN) & M_RX_TCPHDR_LEN)
+
+#define S_RX_IPHDR_LEN    6
+#define M_RX_IPHDR_LEN    0x3FF
+#define V_RX_IPHDR_LEN(x) ((x) << S_RX_IPHDR_LEN)
+#define G_RX_IPHDR_LEN(x) (((x) >> S_RX_IPHDR_LEN) & M_RX_IPHDR_LEN)
+
 struct cpl_trace_pkt {
 	u8 opcode;
 	u8 intf;
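
The definitions added to t4_msg.h follow the S_/M_/V_/G_ convention used throughout these headers: S_* is the field shift, M_* the mask of the field's width, V_* places a value into the field, and G_* extracts it. A host-side sketch that reuses three of the macros verbatim from this patch to round-trip fields of an l2info word (the composed value itself is made up):

#include <assert.h>

/* copied verbatim from the patch */
#define S_RX_ETHHDR_LEN    0
#define M_RX_ETHHDR_LEN    0x1F
#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)

#define S_RX_MACIDX    8
#define M_RX_MACIDX    0x1FF
#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
#define G_RX_MACIDX(x) (((x) >> S_RX_MACIDX) & M_RX_MACIDX)

#define S_RXF_SYN    21
#define V_RXF_SYN(x) ((x) << S_RXF_SYN)
#define F_RXF_SYN    V_RXF_SYN(1U)

int main(void)
{
        /* compose an l2info word the way hardware would, then pick it
         * apart the way rx_pkt() does */
        unsigned int l2info = V_RX_ETHHDR_LEN(14) | V_RX_MACIDX(0x5) |
                              F_RXF_SYN;

        assert(G_RX_ETHHDR_LEN(l2info) == 14);  /* Ethernet header length */
        assert(G_RX_MACIDX(l2info) == 0x5);     /* MAC index */
        assert(l2info & F_RXF_SYN);             /* SYN flag set */
        return 0;
}
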