author		Divy Le Ray <divy@chelsio.com>		2008-05-21 21:56:26 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-22 06:34:13 -0400
commit		b47385bd4f67481a7dbfcf1b4b82e9a67ecb846c (patch)
tree		7357a330b671dc2526fd7c467f1799cc612914ce /drivers/net/cxgb3/sge.c
parent		7385ecf339c504933a98581c2056d83b69b2a82b (diff)
cxgb3 - Add LRO support
Add Large Receive Offload (LRO) support, built on the kernel's inet_lro
library. Eligible TCP/IPv4 frames are fed to a per-queue-set LRO manager
from the NAPI receive path (rx_eth for skb-based free lists, the new
lro_add_page for page-chunk free lists), and open LRO sessions are
flushed at the end of each poll.
Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
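
This patch wires the cxgb3 NAPI receive path into the kernel's inet_lro library rather than implementing aggregation from scratch. For orientation, here is a minimal, hedged sketch of the inet_lro usage pattern the patch instantiates once per queue set; the my_* names and MY_MAX_LRO_SES are illustrative assumptions, not identifiers from the driver (cxgb3 uses T3_MAX_LRO_SES and its own header-parsing callbacks, shown in the diff below).

    /* Hedged sketch of per-queue inet_lro wiring; my_* names are stand-ins. */
    #include <linux/inet_lro.h>
    #include <linux/ip.h>
    #include <linux/skbuff.h>

    #define MY_MAX_LRO_SES 8                /* assumed session count */

    /* Tell inet_lro where the IP/TCP headers live; a non-zero return makes
     * the packet fall back to the regular receive path. Assumes skb->data
     * points at the IP header. */
    static int my_get_skb_header(struct sk_buff *skb, void **iph, void **tcph,
                                 u64 *hdr_flags, void *priv)
    {
            struct iphdr *ih = (struct iphdr *)skb->data;

            if (ih->protocol != IPPROTO_TCP)
                    return -1;
            *iph = ih;
            *tcph = (u8 *)ih + (ih->ihl << 2);
            *hdr_flags = LRO_IPV4 | LRO_TCP;
            return 0;
    }

    static void my_init_lro(struct net_lro_mgr *mgr, struct net_device *dev,
                            struct net_lro_desc *descs)
    {
            mgr->dev = dev;
            mgr->features = LRO_F_NAPI;             /* flushed from NAPI poll */
            mgr->ip_summed = CHECKSUM_UNNECESSARY;
            mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
            mgr->max_desc = MY_MAX_LRO_SES;         /* concurrent sessions */
            mgr->lro_arr = descs;                   /* caller-owned storage */
            mgr->get_skb_header = my_get_skb_header;
            mgr->max_aggr = MAX_SKB_FRAGS;          /* packets per aggregate */
    }

In the poll loop, eligible packets then go to lro_receive_skb(&mgr, skb, priv) in place of netif_receive_skb(), and lro_flush_all(&mgr) completes any open sessions before the poll ends.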
Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--	drivers/net/cxgb3/sge.c | 233
1 file changed, 221 insertions(+), 12 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 3e91be55e19e..a96331c875e6 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -584,6 +584,8 @@ static void t3_reset_qset(struct sge_qset *q)
         memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
         q->txq_stopped = 0;
         memset(&q->tx_reclaim_timer, 0, sizeof(q->tx_reclaim_timer));
+        kfree(q->lro_frag_tbl);
+        q->lro_nfrags = q->lro_frag_len = 0;
 }
 
 
@@ -796,7 +798,7 @@ recycle:
                 goto recycle;
 
         if (!skb)
                 newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
         if (unlikely(!newskb)) {
                 if (!drop_thres)
                         return NULL;
@@ -1868,9 +1870,10 @@ static void restart_tx(struct sge_qset *qs)
  * if it was immediate data in a response.
  */
 static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
-                   struct sk_buff *skb, int pad)
+                   struct sk_buff *skb, int pad, int lro)
 {
         struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
+        struct sge_qset *qs = rspq_to_qset(rq);
         struct port_info *pi;
 
         skb_pull(skb, sizeof(*p) + pad);
@@ -1887,18 +1890,202 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
         if (unlikely(p->vlan_valid)) {
                 struct vlan_group *grp = pi->vlan_grp;
 
-                rspq_to_qset(rq)->port_stats[SGE_PSTAT_VLANEX]++;
+                qs->port_stats[SGE_PSTAT_VLANEX]++;
                 if (likely(grp))
-                        __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
-                                          rq->polling);
+                        if (lro)
+                                lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
+                                                             grp,
+                                                             ntohs(p->vlan),
+                                                             p);
+                        else
+                                __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
+                                                  rq->polling);
                 else
                         dev_kfree_skb_any(skb);
-        } else if (rq->polling)
-                netif_receive_skb(skb);
-        else
+        } else if (rq->polling) {
+                if (lro)
+                        lro_receive_skb(&qs->lro_mgr, skb, p);
+                else
+                        netif_receive_skb(skb);
+        } else
                 netif_rx(skb);
 }
 
+static inline int is_eth_tcp(u32 rss)
+{
+        return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
+}
+
+/**
+ * lro_frame_ok - check if an ingress packet is eligible for LRO
+ * @p: the CPL header of the packet
+ *
+ * Returns true if a received packet is eligible for LRO.
+ * The following conditions must be true:
+ * - packet is TCP/IP Ethernet II (checked elsewhere)
+ * - not an IP fragment
+ * - no IP options
+ * - TCP/IP checksums are correct
+ * - the packet is for this host
+ */
+static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
+{
+        const struct ethhdr *eh = (struct ethhdr *)(p + 1);
+        const struct iphdr *ih = (struct iphdr *)(eh + 1);
+
+        return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
+                eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
+}
+
+#define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\
+                       TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\
+                       TCP_FLAG_SYN | TCP_FLAG_FIN)
+#define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\
+                     (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)
+
+/**
+ * lro_segment_ok - check if a TCP segment is eligible for LRO
+ * @tcph: the TCP header of the packet
+ *
+ * Returns true if a TCP packet is eligible for LRO. This requires that
+ * the packet have only the ACK flag set and no TCP options besides
+ * time stamps.
+ */
+static inline int lro_segment_ok(const struct tcphdr *tcph)
+{
+        int optlen;
+
+        if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK))
+                return 0;
+
+        optlen = (tcph->doff << 2) - sizeof(*tcph);
+        if (optlen) {
+                const u32 *opt = (const u32 *)(tcph + 1);
+
+                if (optlen != TCPOLEN_TSTAMP_ALIGNED ||
+                    *opt != htonl(TSTAMP_WORD) || !opt[2])
+                        return 0;
+        }
+        return 1;
+}
+
+static int t3_get_lro_header(void **eh, void **iph, void **tcph,
+                             u64 *hdr_flags, void *priv)
+{
+        const struct cpl_rx_pkt *cpl = priv;
+
+        if (!lro_frame_ok(cpl))
+                return -1;
+
+        *eh = (struct ethhdr *)(cpl + 1);
+        *iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
+        *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
+
+        if (!lro_segment_ok(*tcph))
+                return -1;
+
+        *hdr_flags = LRO_IPV4 | LRO_TCP;
+        return 0;
+}
+
+static int t3_get_skb_header(struct sk_buff *skb,
+                             void **iph, void **tcph, u64 *hdr_flags,
+                             void *priv)
+{
+        void *eh;
+
+        return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
+}
+
+static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
+                              void **iph, void **tcph, u64 *hdr_flags,
+                              void *priv)
+{
+        return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
+}
+
+/**
+ * lro_add_page - add a page chunk to an LRO session
+ * @adap: the adapter
+ * @qs: the associated queue set
+ * @fl: the free list containing the page chunk to add
+ * @len: packet length
+ * @complete: Indicates the last fragment of a frame
+ *
+ * Add a received packet contained in a page chunk to an existing LRO
+ * session.
+ */
+static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
+                         struct sge_fl *fl, int len, int complete)
+{
+        struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
+        struct cpl_rx_pkt *cpl;
+        struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
+        int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+        int offset = 0;
+
+        if (!nr_frags) {
+                offset = 2 + sizeof(struct cpl_rx_pkt);
+                qs->lro_va = cpl = sd->pg_chunk.va + 2;
+        }
+
+        fl->credits--;
+
+        len -= offset;
+        pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
+                         fl->buf_size, PCI_DMA_FROMDEVICE);
+
+        rx_frag += nr_frags;
+        rx_frag->page = sd->pg_chunk.page;
+        rx_frag->page_offset = sd->pg_chunk.offset + offset;
+        rx_frag->size = len;
+        frag_len += len;
+        qs->lro_nfrags++;
+        qs->lro_frag_len = frag_len;
+
+        if (!complete)
+                return;
+
+        qs->lro_nfrags = qs->lro_frag_len = 0;
+        cpl = qs->lro_va;
+
+        if (unlikely(cpl->vlan_valid)) {
+                struct net_device *dev = qs->netdev;
+                struct port_info *pi = netdev_priv(dev);
+                struct vlan_group *grp = pi->vlan_grp;
+
+                if (likely(grp != NULL)) {
+                        lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
+                                                       qs->lro_frag_tbl,
+                                                       frag_len, frag_len,
+                                                       grp, ntohs(cpl->vlan),
+                                                       cpl, 0);
+                        return;
+                }
+        }
+        lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
+                          frag_len, frag_len, cpl, 0);
+}
+
+/**
+ * init_lro_mgr - initialize a LRO manager object
+ * @lro_mgr: the LRO manager object
+ */
+static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
+{
+        lro_mgr->dev = qs->netdev;
+        lro_mgr->features = LRO_F_NAPI;
+        lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+        lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+        lro_mgr->max_desc = T3_MAX_LRO_SES;
+        lro_mgr->lro_arr = qs->lro_desc;
+        lro_mgr->get_frag_header = t3_get_frag_header;
+        lro_mgr->get_skb_header = t3_get_skb_header;
+        lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
+        if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
+                lro_mgr->max_aggr = MAX_SKB_FRAGS;
+}
+
 /**
  * handle_rsp_cntrl_info - handles control information in a response
  * @qs: the queue set corresponding to the response
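
Two details of the eligibility checks above are worth unpacking. In lro_frame_ok(), the (*((u8 *)p + 1) & 0x90) == 0x10 test selects two flag bits from the second byte of the CPL header; per the conditions listed in the kernel-doc these plausibly correspond to the fragment and packet-type indications, though the exact bit layout comes from the cpl_rx_pkt hardware definition. In lro_segment_ok(), a single 32-bit compare accepts exactly the canonical timestamp-only option block: with TCPOPT_NOP = 1, TCPOPT_TIMESTAMP = 8 and TCPOLEN_TIMESTAMP = 10, TSTAMP_WORD works out to 0x0101080a ("NOP, NOP, kind 8, length 10"), the option area must be TCPOLEN_TSTAMP_ALIGNED (12) bytes, and !opt[2] additionally rejects segments whose timestamp echo reply is zero. A small user-space check of that worked constant:

    /* Verifies the worked value of TSTAMP_WORD used by lro_segment_ok(). */
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            const uint32_t nop = 1, ts_kind = 8, ts_len = 10;
            const uint32_t tstamp_word =
                    (nop << 24) | (nop << 16) | (ts_kind << 8) | ts_len;

            assert(tstamp_word == 0x0101080au); /* NOP, NOP, TS kind, TS len */
            return 0;
    }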
@@ -2027,7 +2214,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
         q->next_holdoff = q->holdoff_tmr;
 
         while (likely(budget_left && is_new_response(r, q))) {
-                int packet_complete, eth, ethpad = 2;
+                int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
                 struct sk_buff *skb = NULL;
                 u32 len, flags = ntohl(r->flags);
                 __be32 rss_hi = *(const __be32 *)r,
@@ -2059,6 +2246,9 @@ no_mem:
                 } else if ((len = ntohl(r->len_cq)) != 0) {
                         struct sge_fl *fl;
 
+                        if (eth)
+                                lro = qs->lro_enabled && is_eth_tcp(rss_hi);
+
                         fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
                         if (fl->use_pages) {
                                 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
@@ -2068,6 +2258,12 @@ no_mem:
                                 prefetch(addr + L1_CACHE_BYTES);
 #endif
                                 __refill_fl(adap, fl);
+                                if (lro > 0) {
+                                        lro_add_page(adap, qs, fl,
+                                                     G_RSPD_LEN(len),
+                                                     flags & F_RSPD_EOP);
+                                        goto next_fl;
+                                }
 
                                 skb = get_packet_pg(adap, fl, q,
                                                     G_RSPD_LEN(len),
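
For free lists backed by page chunks, the branch added above never builds an sk_buff per packet: lro_add_page() (defined earlier in this patch) appends each chunk to qs->lro_frag_tbl and, only on the frame's final fragment (F_RSPD_EOP), hands the whole fragment array to inet_lro. A condensed, hedged sketch of that accumulate-then-flush pattern, with my_* stand-in names and an illustrative subset of the queue-set state:

    /* Hedged sketch of the pattern lro_add_page() implements. */
    #include <linux/inet_lro.h>
    #include <linux/skbuff.h>

    struct my_qset {                        /* illustrative subset of sge_qset */
            struct net_lro_mgr lro_mgr;
            struct skb_frag_struct *lro_frag_tbl;
            int lro_nfrags;
            int lro_frag_len;
    };

    static void my_lro_add_chunk(struct my_qset *qs, struct page *page,
                                 unsigned int offset, unsigned int len,
                                 int eop, void *cpl_hdr)
    {
            struct skb_frag_struct *f = &qs->lro_frag_tbl[qs->lro_nfrags++];

            f->page = page;                 /* DMA page chunk from the free list */
            f->page_offset = offset;        /* frag 0 skips pad + CPL header */
            f->size = len;
            qs->lro_frag_len += len;

            if (!eop)                       /* more chunks of this frame follow */
                    return;

            /* Frame complete: pass the fragment array to the LRO engine. */
            lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
                              qs->lro_frag_len, qs->lro_frag_len, cpl_hdr, 0);
            qs->lro_nfrags = qs->lro_frag_len = 0;
    }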
@@ -2083,7 +2279,7 @@ no_mem:
                                         q->rx_drops++;
                         } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
                                 __skb_pull(skb, 2);
-
+next_fl:
                         if (++fl->cidx == fl->size)
                                 fl->cidx = 0;
                 } else
@@ -2113,7 +2309,7 @@ no_mem:
 
                 if (skb != NULL && packet_complete) {
                         if (eth)
-                                rx_eth(adap, q, skb, ethpad);
+                                rx_eth(adap, q, skb, ethpad, lro);
                         else {
                                 q->offload_pkts++;
                                 /* Preserve the RSS info in csum & priority */
@@ -2125,12 +2321,17 @@ no_mem:
                         }
 
                         if (flags & F_RSPD_EOP)
                                 clear_rspq_bufstate(q);
                 }
                 --budget_left;
         }
 
         deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
+        lro_flush_all(&qs->lro_mgr);
+        qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
+        qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
+        qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
+
         if (sleeping)
                 check_ring_db(adap, qs, sleeping);
 
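
The flush added above is load-bearing: lro_flush_all() runs once per poll so a partially aggregated frame can never be stranded in an LRO descriptor, bounding the added latency to one polling interval, and the library's counters are then mirrored into port_stats. A hedged sketch of where this sits in a poll handler, using my_* stand-in names and current NAPI naming (the driver's actual poll function is not part of this hunk):

    /* Hedged sketch of the end-of-poll flush; assumes my_qset embeds a
     * struct napi_struct member named napi. */
    static int my_process_responses(struct my_qset *qs, int budget);

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
            struct my_qset *qs = container_of(napi, struct my_qset, napi);
            int work = my_process_responses(qs, budget); /* per-packet RX */

            lro_flush_all(&qs->lro_mgr);    /* complete all open LRO sessions */
            if (work < budget)
                    napi_complete(napi);    /* done: re-enable interrupts */
            return work;
    }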
@@ -2674,6 +2875,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 {
         int i, avail, ret = -ENOMEM;
         struct sge_qset *q = &adapter->sge.qs[id];
+        struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
         init_qset_cntxt(q, id);
         init_timer(&q->tx_reclaim_timer);
@@ -2754,6 +2956,10 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         q->fl[0].order = FL0_PG_ORDER;
         q->fl[1].order = FL1_PG_ORDER;
 
+        q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
+                                  sizeof(struct skb_frag_struct),
+                                  GFP_KERNEL);
+        q->lro_nfrags = q->lro_frag_len = 0;
         spin_lock_irq(&adapter->sge.reg_lock);
 
         /* FL threshold comparison uses < */
@@ -2803,6 +3009,9 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
         q->adap = adapter;
         q->netdev = dev;
         t3_update_qset_coalesce(q, p);
+
+        init_lro_mgr(q, lro_mgr);
+
         avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
                           GFP_KERNEL | __GFP_COMP);
         if (!avail) {