author		Herbert Xu <herbert@gondor.apana.org.au>	2009-01-21 17:39:13 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-21 17:39:13 -0500
commit		7be2df451fa916f93e37763a58d33483feb0909f (patch)
tree		fae9f8e65f92d7b41a5aa13a45bf4c310b84850c /drivers
parent		749c10f931923451a4c59b4435d182aa9ae27a4f (diff)
cxgb3: Replace LRO with GRO
This patch makes cxgb3 invoke the GRO hooks instead of LRO. As GRO has a
compatible external interface to LRO this is a very straightforward
replacement.

I've kept the ioctl controls for per-queue LRO switches. However, we should
not encourage anyone to use these. Because of that, I've also kept the skb
construction code in cxgb3. Hopefully we can phase out those per-queue
switches and then kill this too.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
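The core of the conversion is visible in the sge.c hunks below: where the
driver used to hand completed frames to the inet_lro manager, it now hands
them to the NAPI GRO entry points and lets the core stack aggregate. As a
rough illustration of that calling pattern (a minimal sketch, not code from
this patch; the demo_rx_one() wrapper and its gro_enabled flag are
hypothetical), a driver's per-packet receive path looks like:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/*
	 * Illustrative helper, not part of cxgb3: deliver one fully built skb
	 * from a NAPI poll handler.  Under the old scheme the driver called
	 * lro_receive_skb() on its private lro_mgr; with GRO it simply passes
	 * the skb to its napi_struct and the core merges eligible TCP segments.
	 */
	static void demo_rx_one(struct napi_struct *napi, struct sk_buff *skb,
				bool gro_enabled)
	{
		if (gro_enabled)
			napi_gro_receive(napi, skb);	/* GRO path, as in rx_eth() below */
		else
			netif_receive_skb(skb);		/* plain per-packet delivery */
	}

Page-chunk receive uses the analogous napi_gro_frags()/vlan_gro_frags()
calls, with the fragment list carried in a struct napi_gro_fraginfo, which is
why lro_frag_tbl changes type in adapter.h.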
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/Kconfig               |   1
-rw-r--r--  drivers/net/cxgb3/adapter.h       |  13
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c    |  42
-rw-r--r--  drivers/net/cxgb3/sge.c           | 119
-rw-r--r--  drivers/scsi/cxgb3i/cxgb3i_ddp.h  |   2
5 files changed, 24 insertions, 153 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 805682586c82..c4776a2adf00 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2407,7 +2407,6 @@ config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
 	depends on CHELSIO_T3_DEPENDS
 	select FW_LOADER
-	select INET_LRO
 	help
 	  This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
 	  adapters.
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5fb7c6851eb2..fbe15699584e 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -42,7 +42,6 @@
 #include <linux/cache.h>
 #include <linux/mutex.h>
 #include <linux/bitops.h>
-#include <linux/inet_lro.h>
 #include "t3cdev.h"
 #include <asm/io.h>
 
@@ -178,15 +177,11 @@ enum { /* per port SGE statistics */
 	SGE_PSTAT_TX_CSUM,	/* # of TX checksum offloads */
 	SGE_PSTAT_VLANEX,	/* # of VLAN tag extractions */
 	SGE_PSTAT_VLANINS,	/* # of VLAN tag insertions */
-	SGE_PSTAT_LRO_AGGR,	/* # of page chunks added to LRO sessions */
-	SGE_PSTAT_LRO_FLUSHED,	/* # of flushed LRO sessions */
-	SGE_PSTAT_LRO_NO_DESC,	/* # of overflown LRO sessions */
 
 	SGE_PSTAT_MAX		/* must be last */
 };
 
-#define T3_MAX_LRO_SES 8
-#define T3_MAX_LRO_MAX_PKTS 64
+struct napi_gro_fraginfo;
 
 struct sge_qset {		/* an SGE queue set */
 	struct adapter *adap;
@@ -194,12 +189,8 @@ struct sge_qset { /* an SGE queue set */
 	struct sge_rspq rspq;
 	struct sge_fl fl[SGE_RXQ_PER_SET];
 	struct sge_txq txq[SGE_TXQ_PER_SET];
-	struct net_lro_mgr lro_mgr;
-	struct net_lro_desc lro_desc[T3_MAX_LRO_SES];
-	struct skb_frag_struct *lro_frag_tbl;
-	int lro_nfrags;
+	struct napi_gro_fraginfo lro_frag_tbl;
 	int lro_enabled;
-	int lro_frag_len;
 	void *lro_va;
 	struct net_device *netdev;
 	struct netdev_queue *tx_q;	/* associated netdev TX queue */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 52131bd4cc70..7381f378b4e6 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -508,19 +508,9 @@ static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
 {
 	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adapter = pi->adapter;
-	int i, lro_on = 1;
 
 	adapter->params.sge.qset[qset_idx].lro = !!val;
 	adapter->sge.qs[qset_idx].lro_enabled = !!val;
-
-	/* let ethtool report LRO on only if all queues are LRO enabled */
-	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
-		lro_on &= adapter->params.sge.qset[i].lro;
-
-	if (lro_on)
-		dev->features |= NETIF_F_LRO;
-	else
-		dev->features &= ~NETIF_F_LRO;
 }
 
 /**
@@ -1433,9 +1423,9 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
 	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
-	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
+	*data++ = 0;
+	*data++ = 0;
+	*data++ = 0;
 	*data++ = s->rx_cong_drops;
 
 	*data++ = s->num_toggled;
@@ -1826,28 +1816,6 @@ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	memset(&wol->sopass, 0, sizeof(wol->sopass));
 }
 
-static int cxgb3_set_flags(struct net_device *dev, u32 data)
-{
-	struct port_info *pi = netdev_priv(dev);
-	int i;
-
-	if (data & ETH_FLAG_LRO) {
-		if (!(pi->rx_offload & T3_RX_CSUM))
-			return -EINVAL;
-
-		pi->rx_offload |= T3_LRO;
-		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
-			set_qset_lro(dev, i, 1);
-
-	} else {
-		pi->rx_offload &= ~T3_LRO;
-		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
-			set_qset_lro(dev, i, 0);
-	}
-
-	return 0;
-}
-
 static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_settings = get_settings,
 	.set_settings = set_settings,
@@ -1877,8 +1845,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
 	.get_regs = get_regs,
 	.get_wol = get_wol,
 	.set_tso = ethtool_op_set_tso,
-	.get_flags = ethtool_op_get_flags,
-	.set_flags = cxgb3_set_flags,
 };
 
 static int in_range(int val, int lo, int hi)
@@ -2967,7 +2933,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 		netdev->mem_end = mmio_start + mmio_len - 1;
 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 		netdev->features |= NETIF_F_LLTX;
-		netdev->features |= NETIF_F_LRO;
+		netdev->features |= NETIF_F_GRO;
 		if (pci_using_dac)
 			netdev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 379a1324db4e..8299fb538f25 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -585,8 +585,7 @@ static void t3_reset_qset(struct sge_qset *q)
 	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
 	q->txq_stopped = 0;
 	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
-	kfree(q->lro_frag_tbl);
-	q->lro_nfrags = q->lro_frag_len = 0;
+	q->lro_frag_tbl.nr_frags = q->lro_frag_tbl.len = 0;
 }
 
 
@@ -1945,10 +1944,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 		qs->port_stats[SGE_PSTAT_VLANEX]++;
 		if (likely(grp))
 			if (lro)
-				lro_vlan_hwaccel_receive_skb(&qs->lro_mgr, skb,
-							     grp,
-							     ntohs(p->vlan),
-							     p);
+				vlan_gro_receive(&qs->napi, grp,
+						 ntohs(p->vlan), skb);
 			else {
 				if (unlikely(pi->iscsi_ipv4addr &&
 					     is_arp(skb))) {
@@ -1965,7 +1962,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 			dev_kfree_skb_any(skb);
 	} else if (rq->polling) {
 		if (lro)
-			lro_receive_skb(&qs->lro_mgr, skb, p);
+			napi_gro_receive(&qs->napi, skb);
 		else {
 			if (unlikely(pi->iscsi_ipv4addr && is_arp(skb)))
 				cxgb3_arp_process(adap, skb);
@@ -1981,59 +1978,6 @@ static inline int is_eth_tcp(u32 rss)
 }
 
 /**
- *	lro_frame_ok - check if an ingress packet is eligible for LRO
- *	@p: the CPL header of the packet
- *
- *	Returns true if a received packet is eligible for LRO.
- *	The following conditions must be true:
- *	- packet is TCP/IP Ethernet II (checked elsewhere)
- *	- not an IP fragment
- *	- no IP options
- *	- TCP/IP checksums are correct
- *	- the packet is for this host
- */
-static inline int lro_frame_ok(const struct cpl_rx_pkt *p)
-{
-	const struct ethhdr *eh = (struct ethhdr *)(p + 1);
-	const struct iphdr *ih = (struct iphdr *)(eh + 1);
-
-	return (*((u8 *)p + 1) & 0x90) == 0x10 && p->csum == htons(0xffff) &&
-		eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2);
-}
-
-static int t3_get_lro_header(void **eh, void **iph, void **tcph,
-			     u64 *hdr_flags, void *priv)
-{
-	const struct cpl_rx_pkt *cpl = priv;
-
-	if (!lro_frame_ok(cpl))
-		return -1;
-
-	*eh = (struct ethhdr *)(cpl + 1);
-	*iph = (struct iphdr *)((struct ethhdr *)*eh + 1);
-	*tcph = (struct tcphdr *)((struct iphdr *)*iph + 1);
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	return 0;
-}
-
-static int t3_get_skb_header(struct sk_buff *skb,
-			     void **iph, void **tcph, u64 *hdr_flags,
-			     void *priv)
-{
-	void *eh;
-
-	return t3_get_lro_header(&eh, iph, tcph, hdr_flags, priv);
-}
-
-static int t3_get_frag_header(struct skb_frag_struct *frag, void **eh,
-			      void **iph, void **tcph, u64 *hdr_flags,
-			      void *priv)
-{
-	return t3_get_lro_header(eh, iph, tcph, hdr_flags, priv);
-}
-
-/**
  *	lro_add_page - add a page chunk to an LRO session
  *	@adap: the adapter
  *	@qs: the associated queue set
@@ -2049,8 +1993,9 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 {
 	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
 	struct cpl_rx_pkt *cpl;
-	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl;
-	int nr_frags = qs->lro_nfrags, frag_len = qs->lro_frag_len;
+	struct skb_frag_struct *rx_frag = qs->lro_frag_tbl.frags;
+	int nr_frags = qs->lro_frag_tbl.nr_frags;
+	int frag_len = qs->lro_frag_tbl.len;
 	int offset = 0;
 
 	if (!nr_frags) {
@@ -2069,13 +2014,13 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 	rx_frag->page_offset = sd->pg_chunk.offset + offset;
 	rx_frag->size = len;
 	frag_len += len;
-	qs->lro_nfrags++;
-	qs->lro_frag_len = frag_len;
+	qs->lro_frag_tbl.nr_frags++;
+	qs->lro_frag_tbl.len = frag_len;
 
 	if (!complete)
 		return;
 
-	qs->lro_nfrags = qs->lro_frag_len = 0;
+	qs->lro_frag_tbl.ip_summed = CHECKSUM_UNNECESSARY;
 	cpl = qs->lro_va;
 
 	if (unlikely(cpl->vlan_valid)) {
@@ -2084,36 +2029,15 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 		struct vlan_group *grp = pi->vlan_grp;
 
 		if (likely(grp != NULL)) {
-			lro_vlan_hwaccel_receive_frags(&qs->lro_mgr,
-						       qs->lro_frag_tbl,
-						       frag_len, frag_len,
-						       grp, ntohs(cpl->vlan),
-						       cpl, 0);
-			return;
+			vlan_gro_frags(&qs->napi, grp, ntohs(cpl->vlan),
+				       &qs->lro_frag_tbl);
+			goto out;
 		}
 	}
-	lro_receive_frags(&qs->lro_mgr, qs->lro_frag_tbl,
-			  frag_len, frag_len, cpl, 0);
-}
+	napi_gro_frags(&qs->napi, &qs->lro_frag_tbl);
 
-/**
- *	init_lro_mgr - initialize a LRO manager object
- *	@lro_mgr: the LRO manager object
- */
-static void init_lro_mgr(struct sge_qset *qs, struct net_lro_mgr *lro_mgr)
-{
-	lro_mgr->dev = qs->netdev;
-	lro_mgr->features = LRO_F_NAPI;
-	lro_mgr->frag_align_pad = NET_IP_ALIGN;
-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-	lro_mgr->max_desc = T3_MAX_LRO_SES;
-	lro_mgr->lro_arr = qs->lro_desc;
-	lro_mgr->get_frag_header = t3_get_frag_header;
-	lro_mgr->get_skb_header = t3_get_skb_header;
-	lro_mgr->max_aggr = T3_MAX_LRO_MAX_PKTS;
-	if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
-		lro_mgr->max_aggr = MAX_SKB_FRAGS;
+out:
+	qs->lro_frag_tbl.nr_frags = qs->lro_frag_tbl.len = 0;
 }
 
 /**
@@ -2357,10 +2281,6 @@ next_fl:
 	}
 
 	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
-	lro_flush_all(&qs->lro_mgr);
-	qs->port_stats[SGE_PSTAT_LRO_AGGR] = qs->lro_mgr.stats.aggregated;
-	qs->port_stats[SGE_PSTAT_LRO_FLUSHED] = qs->lro_mgr.stats.flushed;
-	qs->port_stats[SGE_PSTAT_LRO_NO_DESC] = qs->lro_mgr.stats.no_desc;
 
 	if (sleeping)
 		check_ring_db(adap, qs, sleeping);
@@ -2907,7 +2827,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 {
 	int i, avail, ret = -ENOMEM;
 	struct sge_qset *q = &adapter->sge.qs[id];
-	struct net_lro_mgr *lro_mgr = &q->lro_mgr;
 
 	init_qset_cntxt(q, id);
 	setup_timer(&q->tx_reclaim_timer, sge_timer_cb, (unsigned long)q);
@@ -2987,10 +2906,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->fl[0].order = FL0_PG_ORDER;
 	q->fl[1].order = FL1_PG_ORDER;
 
-	q->lro_frag_tbl = kcalloc(MAX_FRAME_SIZE / FL1_PG_CHUNK_SIZE + 1,
-				  sizeof(struct skb_frag_struct),
-				  GFP_KERNEL);
-	q->lro_nfrags = q->lro_frag_len = 0;
 	spin_lock_irq(&adapter->sge.reg_lock);
 
 	/* FL threshold comparison uses < */
@@ -3042,8 +2957,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
 	q->tx_q = netdevq;
 	t3_update_qset_coalesce(q, p);
 
-	init_lro_mgr(q, lro_mgr);
-
 	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
 			  GFP_KERNEL | __GFP_COMP);
 	if (!avail) {
diff --git a/drivers/scsi/cxgb3i/cxgb3i_ddp.h b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
index 5c7c4d95c493..f675807cc48f 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_ddp.h
+++ b/drivers/scsi/cxgb3i/cxgb3i_ddp.h
@@ -13,6 +13,8 @@
 #ifndef __CXGB3I_ULP2_DDP_H__
 #define __CXGB3I_ULP2_DDP_H__
 
+#include <linux/vmalloc.h>
+
 /**
  * struct cxgb3i_tag_format - cxgb3i ulp tag format for an iscsi entity
  *