Diffstat (limited to 'drivers/net/cxgb3/sge.c')
-rw-r--r--  drivers/net/cxgb3/sge.c | 119
1 file changed, 16 insertions(+), 103 deletions(-)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 379a1324db4..8299fb538f2 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q)
 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
 	q->txq_stopped = 0;
 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
-	kfree(q->lro_frag_tbl);
-	q->lro_nfrags = q->lro_frag_len = 0;
+	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
 }
 
 
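Note on the hunk above: before this patch the queue set owned a kcalloc()'d
array of skb_frag_struct plus separate nr_frags/len counters, torn down here
with kfree(); afterwards the table is embedded in the queue set as the GRO
fragment descriptor, so a reset only has to zero its counters. A minimal
sketch of what the companion header change implies (hedged: the real layout
lives in drivers/net/cxgb3/adapter.h, and only the fields this diff actually
touches are shown):

	struct sge_qset {
		/* ... */
		struct napi_struct napi;	/* pre-existing NAPI context */
		struct napi_gro_fraginfo lro_frag_tbl;
						/* was: struct skb_frag_struct *lro_frag_tbl;
						 *      int lro_nfrags, lro_frag_len; */
		void *lro_va;			/* CPL header of packet being built */
		/* ... */
	};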
@@ -1945,10 +1944,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 		qs->port_stats[SGE_PSTAT_VLANEX]++;
 		if (likely(grp))
 			if (lro)
-				lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
-							     grp,
-							     ntohs(p->vlan),
-							     p);
+				vlan_gro_receive(&qs->napi, grp,
+						 ntohs(p->vlan), skb);
 			else {
 				if (unlikely(pi->iscsi_ipv4addr &&
 					     is_arp(skb))) {
@@ -1965,7 +1962,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 			dev_kfree_skb_any(skb);
 	} else if (rq->polling) {
 		if (lro)
-			lro_receive_skb(&qs->lro_mgr, skb, p);
+			napi_gro_receive(&qs->napi, skb);
 		else {
 			if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
 				cxgb3_arp_process(adap, skb);
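These two hunks convert the skb receive path: GRO parses packet headers
itself, so the driver simply hands the skb to napi_gro_receive() (or
vlan_gro_receive() for VLAN-accelerated frames) instead of driving an LRO
manager with per-packet CPL state. A condensed sketch of the pattern,
assuming the 2.6.29-era GRO API (rx_deliver and the gro_ok flag are
hypothetical names used only for illustration):

	/* gro_ok: frame is TCP/IPv4 with hardware-verified checksums */
	static void rx_deliver(struct napi_struct *napi, struct vlan_group *grp,
			       u16 vlan_tci, struct sk_buff *skb, int gro_ok)
	{
		if (gro_ok) {
			if (grp)
				vlan_gro_receive(napi, grp, vlan_tci, skb);
			else
				napi_gro_receive(napi, skb);
		} else {
			netif_receive_skb(skb);	/* plain NAPI delivery */
		}
	}

This is also why the next hunk can delete lro_frame_ok() and the
t3_get_*_header() callbacks wholesale: inet_lro required the driver to
locate and validate the Ethernet/IP/TCP headers, while GRO performs its own
protocol dissection in the core stack.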
@@ -1981,59 +1978,6 @@ static inline int is_eth_tcp(u32 rss)
 }
 
 /**
- *	lro_frame_ok - check if an ingress packet is eligible for LRO
- *	@p: the CPL header of the packet
- *
- *	Returns true if a received packet is eligible for LRO.
- *	The following conditions must be true:
- *	    - packet is TCP/IP Ethernet II (checked elsewhere)
- *	    - not an IP fragment
- *	    - no IP options
- *	    - TCP/IP checksums are correct
- *	    - the packet is for this host
- */
-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
-{
-	const struct ethhdr *eh = (struct ethhdr *)(p + 1);
-	const struct iphdr *ih = (struct iphdr *)(eh + 1);
-
-	return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
-		eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
-}
-
-static int t3_get_lro_header(void **eh, void **iph, void **tcph,
-			     u64 *hdr_flags, void *priv)
-{
-	const struct cpl_rx_pkt *cpl = priv;
-
-	if (!lro_frame_ok(cpl))
-		return -1;
-
-	*eh = (struct ethhdr *)(cpl + 1);
-	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
-	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	return 0;
-}
-
-static int t3_get_skb_header(struct sk_buff *skb,
-			     void **iph, void **tcph, u64 *hdr_flags,
-			     void *priv)
-{
-	void *eh;
-
-	return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
-}
-
-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
-			      void **iph, void **tcph, u64 *hdr_flags,
-			      void *priv)
-{
-	return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
-}
-
-/**
  *	lro_add_page - add a page chunk to an LRO session
  *	@adap: the adapter
  *	@qs: the associated queue set
@@ -2049,8 +1993,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 {
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 	struct cpl_rx_pkt *cpl;
-	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
-	int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
+	int nr_frags = qs->lro_frag_tbl.nr_frags;
+	int frag_len = qs->lro_frag_tbl.len;
 	int offset = 0;
 
 	if (!nr_frags) {
@@ -2069,13 +2014,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
 	rx_frag->size = len;
 	frag_len += len;
-	qs->lro_nfrags++;
-	qs->lro_frag_len = frag_len;
+	qs->lro_frag_tbl.nr_frags++;
+	qs->lro_frag_tbl.len = frag_len;
 
 	if (!complete)
 		return;
 
-	qs->lro_nfrags = qs->lro_frag_len = 0;
+	qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;
 
 	if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2029,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		struct vlan_group *grp = pi->vlan_grp;
 
 		if (likely(grp != NULL)) {
-			lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
-						       qs->lro_frag_tbl,
-						       frag_len, frag_len,
-						       grp, ntohs(cpl->vlan),
-						       cpl, 0);
-			return;
+			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
+				       &qs->lro_frag_tbl);
+			goto out;
 		}
 	}
-	lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
-			  frag_len, frag_len, cpl, 0);
-}
+	napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
 
-/**
- *	init_lro_mgr - initialize a LRO manager object
- *	@lro_mgr: the LRO manager object
- */
-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
-{
-	lro_mgr->dev = qs->netdev;
-	lro_mgr->features = LRO_F_NAPI;
-	lro_mgr->frag_align_pad = NET_IP_ALIGN;
-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	lro_mgr->max_desc = T3_MAX_LRO_SES;
-	lro_mgr->lro_arr = qs->lro_desc;
-	lro_mgr->get_frag_header = t3_get_frag_header;
-	lro_mgr->get_skb_header = t3_get_skb_header;
-	lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
-	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
-		lro_mgr->max_aggr = MAX_SKB_FRAGS;
-}
+out:
+	qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
+}
 
 /**
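For page-mode free lists the driver accumulates page chunks in the fragment
table and, once the packet is complete, hands the whole table to GRO. A
sketch of that flow, assuming the napi_gro_frags(napi, info) signature this
kernel generation used (later kernels moved to a napi_get_frags()-based
API); everything besides the GRO call mirrors the fields visible in the
hunks above:

	struct napi_gro_fraginfo *info = &qs->lro_frag_tbl;
	skb_frag_t *f = &info->frags[info->nr_frags];

	f->page = sd->pg_chunk.page;		/* received page chunk */
	f->page_offset = sd->pg_chunk.offset;
	f->size = len;
	info->nr_frags++;
	info->len += len;

	if (complete) {				/* last chunk of the packet */
		info->ip_summed = CHECKSUM_UNNECESSARY;	/* HW verified csums */
		napi_gro_frags(&qs->napi, info);	/* builds skb, runs GRO */
		info->nr_frags = info->len = 0;	/* ready for the next packet */
	}

Note that the counter reset now happens after delivery (the out: label)
rather than before it, as the old qs->lro_nfrags code did.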
@@ -2357,10 +2281,6 @@ next_fl:
 	}
 
 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
-	lro_flush_all(&qs->lro_mgr);
-	qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
-	qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
-	qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
 
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
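The explicit flush and the hand-copied aggregation statistics go away
because GRO flushing is part of NAPI completion: packets held for
aggregation during a poll are pushed up the stack when the driver calls
napi_complete(). A sketch of the poll-end pattern this relies on
(poll_sketch and process_responses_budgeted are hypothetical names; cxgb3's
real handler is napi_rx_handler()):

	static int poll_sketch(struct napi_struct *napi, int budget)
	{
		int work = process_responses_budgeted(napi, budget);	/* illustrative */

		if (work < budget) {
			napi_complete(napi);	/* also flushes held GRO packets */
			/* re-enable the queue set's interrupts here */
		}
		return work;
	}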
@@ -2907,7 +2827,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 {
 	int i, avail, ret = -ENOMEM;
 	struct sge_qset *q = &adapter->sge.qs[id];
-	struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
 	init_qset_cntxt(q, id);
 	setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
@@ -2987,10 +2906,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->fl[0].order = FL0_PG_ORDER;
 	q->fl[1].order = FL1_PG_ORDER;
 
-	q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
-				  sizeof(struct skb_frag_struct),
-				  GFP_KERNEL);
-	q->lro_nfrags = q->lro_frag_len = 0;
 	spin_lock_irq(&adapter->sge.reg_lock);
 
 	/* FL threshold comparison uses < */
@@ -3042,8 +2957,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->tx_q = netdevq;
 	t3_update_qset_coalesce(q, p);
 
-	init_lro_mgr(q, lro_mgr);
-
 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
 			  GFP_KERNEL | __GFP_COMP);
 	if (!avail) {