aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2009-01-19 18:20:57 -0500
committerDavid S. Miller <davem@davemloft.net>2009-01-21 17:34:30 -0500
commit5c0999b72b34541a3734a9138c43d5c024a42d47 (patch)
treed73efcb136aa71a7b82161b25d5e19c72865694e /drivers/net
parent649aa95d75cbadb9f440c1b8d04c666461de326f (diff)
igb: Replace LRO with GRO
This patch makes igb invoke the GRO hooks instead of LRO. As GRO has a compatible external interface to LRO this is a very straightforward replacement.

Three things of note:

1) I've kept the LRO Kconfig option until we decide to enable GRO across the board, at which point it can also be killed.

2) The poll_controller stuff is broken in igb as it tries to do the same work as the normal poll routine. Since poll_controller can be called in the middle of a poll, this can't be good. I noticed this because poll_controller can invoke the GRO hooks without flushing held GRO packets. However, this should be harmless (assuming the poll_controller bug above doesn't kill you first :) since the next ->poll will clear the backlog. The only time when we'll have a problem is if we're already executing the GRO code on the same ring, but that's no worse than what happens now.

3) I kept the ip_summed check before calling GRO so that we're on par with previous behaviour.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/igb/igb.h16
-rw-r--r--drivers/net/igb/igb_ethtool.c17
-rw-r--r--drivers/net/igb/igb_main.c90
4 files changed, 6 insertions, 118 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index eccb89770f56..805682586c82 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2022,7 +2022,6 @@ config IGB
2022config IGB_LRO 2022config IGB_LRO
2023 bool "Use software LRO" 2023 bool "Use software LRO"
2024 depends on IGB && INET 2024 depends on IGB && INET
2025 select INET_LRO
2026 ---help--- 2025 ---help---
2027 Say Y here if you want to use large receive offload. 2026 Say Y here if you want to use large receive offload.
2028 2027
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 5a27825cc48a..7d8c88739154 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -36,12 +36,6 @@
36 36
37struct igb_adapter; 37struct igb_adapter;
38 38
39#ifdef CONFIG_IGB_LRO
40#include <linux/inet_lro.h>
41#define MAX_LRO_AGGR 32
42#define MAX_LRO_DESCRIPTORS 8
43#endif
44
45/* Interrupt defines */ 39/* Interrupt defines */
46#define IGB_MIN_DYN_ITR 3000 40#define IGB_MIN_DYN_ITR 3000
47#define IGB_MAX_DYN_ITR 96000 41#define IGB_MAX_DYN_ITR 96000
@@ -176,10 +170,6 @@ struct igb_ring {
176 struct napi_struct napi; 170 struct napi_struct napi;
177 int set_itr; 171 int set_itr;
178 struct igb_ring *buddy; 172 struct igb_ring *buddy;
179#ifdef CONFIG_IGB_LRO
180 struct net_lro_mgr lro_mgr;
181 bool lro_used;
182#endif
183 }; 173 };
184 }; 174 };
185 175
@@ -288,12 +278,6 @@ struct igb_adapter {
288 int need_ioport; 278 int need_ioport;
289 279
290 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES]; 280 struct igb_ring *multi_tx_table[IGB_MAX_TX_QUEUES];
291#ifdef CONFIG_IGB_LRO
292 unsigned int lro_max_aggr;
293 unsigned int lro_aggregated;
294 unsigned int lro_flushed;
295 unsigned int lro_no_desc;
296#endif
297 unsigned int tx_ring_count; 281 unsigned int tx_ring_count;
298 unsigned int rx_ring_count; 282 unsigned int rx_ring_count;
299}; 283};
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 3c831f1472ad..4606e63fc6f5 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -93,11 +93,6 @@ static const struct igb_stats igb_gstrings_stats[] = {
93 { "tx_smbus", IGB_STAT(stats.mgptc) }, 93 { "tx_smbus", IGB_STAT(stats.mgptc) },
94 { "rx_smbus", IGB_STAT(stats.mgprc) }, 94 { "rx_smbus", IGB_STAT(stats.mgprc) },
95 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 95 { "dropped_smbus", IGB_STAT(stats.mgpdc) },
96#ifdef CONFIG_IGB_LRO
97 { "lro_aggregated", IGB_STAT(lro_aggregated) },
98 { "lro_flushed", IGB_STAT(lro_flushed) },
99 { "lro_no_desc", IGB_STAT(lro_no_desc) },
100#endif
101}; 96};
102 97
103#define IGB_QUEUE_STATS_LEN \ 98#define IGB_QUEUE_STATS_LEN \
@@ -1921,18 +1916,6 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1921 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64); 1916 int stat_count = sizeof(struct igb_queue_stats) / sizeof(u64);
1922 int j; 1917 int j;
1923 int i; 1918 int i;
1924#ifdef CONFIG_IGB_LRO
1925 int aggregated = 0, flushed = 0, no_desc = 0;
1926
1927 for (i = 0; i < adapter->num_rx_queues; i++) {
1928 aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated;
1929 flushed += adapter->rx_ring[i].lro_mgr.stats.flushed;
1930 no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc;
1931 }
1932 adapter->lro_aggregated = aggregated;
1933 adapter->lro_flushed = flushed;
1934 adapter->lro_no_desc = no_desc;
1935#endif
1936 1919
1937 igb_update_stats(adapter); 1920 igb_update_stats(adapter);
1938 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1921 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3806bb9d8bfa..dbe03c2b49c9 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -115,9 +115,6 @@ static bool igb_clean_tx_irq(struct igb_ring *);
115static int igb_poll(struct napi_struct *, int); 115static int igb_poll(struct napi_struct *, int);
116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 116static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int); 117static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
118#ifdef CONFIG_IGB_LRO
119static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
120#endif
121static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
122static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
123static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -1189,7 +1186,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1189 netdev->features |= NETIF_F_TSO6; 1186 netdev->features |= NETIF_F_TSO6;
1190 1187
1191#ifdef CONFIG_IGB_LRO 1188#ifdef CONFIG_IGB_LRO
1192 netdev->features |= NETIF_F_LRO; 1189 netdev->features |= NETIF_F_GRO;
1193#endif 1190#endif
1194 1191
1195 netdev->vlan_features |= NETIF_F_TSO; 1192 netdev->vlan_features |= NETIF_F_TSO;
@@ -1739,14 +1736,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1739 struct pci_dev *pdev = adapter->pdev; 1736 struct pci_dev *pdev = adapter->pdev;
1740 int size, desc_len; 1737 int size, desc_len;
1741 1738
1742#ifdef CONFIG_IGB_LRO
1743 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1744 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1745 if (!rx_ring->lro_mgr.lro_arr)
1746 goto err;
1747 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1748#endif
1749
1750 size = sizeof(struct igb_buffer) * rx_ring->count; 1739 size = sizeof(struct igb_buffer) * rx_ring->count;
1751 rx_ring->buffer_info = vmalloc(size); 1740 rx_ring->buffer_info = vmalloc(size);
1752 if (!rx_ring->buffer_info) 1741 if (!rx_ring->buffer_info)
@@ -1773,10 +1762,6 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
1773 return 0; 1762 return 0;
1774 1763
1775err: 1764err:
1776#ifdef CONFIG_IGB_LRO
1777 vfree(rx_ring->lro_mgr.lro_arr);
1778 rx_ring->lro_mgr.lro_arr = NULL;
1779#endif
1780 vfree(rx_ring->buffer_info); 1765 vfree(rx_ring->buffer_info);
1781 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 1766 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1782 "the receive descriptor ring\n"); 1767 "the receive descriptor ring\n");
@@ -1930,16 +1915,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
1930 rxdctl |= IGB_RX_HTHRESH << 8; 1915 rxdctl |= IGB_RX_HTHRESH << 8;
1931 rxdctl |= IGB_RX_WTHRESH << 16; 1916 rxdctl |= IGB_RX_WTHRESH << 16;
1932 wr32(E1000_RXDCTL(j), rxdctl); 1917 wr32(E1000_RXDCTL(j), rxdctl);
1933#ifdef CONFIG_IGB_LRO
1934 /* Intitial LRO Settings */
1935 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1936 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1937 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1938 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1939 ring->lro_mgr.dev = adapter->netdev;
1940 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1941 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1942#endif
1943 } 1918 }
1944 1919
1945 if (adapter->num_rx_queues > 1) { 1920 if (adapter->num_rx_queues > 1) {
@@ -2128,11 +2103,6 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2128 vfree(rx_ring->buffer_info); 2103 vfree(rx_ring->buffer_info);
2129 rx_ring->buffer_info = NULL; 2104 rx_ring->buffer_info = NULL;
2130 2105
2131#ifdef CONFIG_IGB_LRO
2132 vfree(rx_ring->lro_mgr.lro_arr);
2133 rx_ring->lro_mgr.lro_arr = NULL;
2134#endif
2135
2136 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2106 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2137 2107
2138 rx_ring->desc = NULL; 2108 rx_ring->desc = NULL;
@@ -3768,39 +3738,6 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3768 return (count < tx_ring->count); 3738 return (count < tx_ring->count);
3769} 3739}
3770 3740
3771#ifdef CONFIG_IGB_LRO
3772 /**
3773 * igb_get_skb_hdr - helper function for LRO header processing
3774 * @skb: pointer to sk_buff to be added to LRO packet
3775 * @iphdr: pointer to ip header structure
3776 * @tcph: pointer to tcp header structure
3777 * @hdr_flags: pointer to header flags
3778 * @priv: pointer to the receive descriptor for the current sk_buff
3779 **/
3780static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3781 u64 *hdr_flags, void *priv)
3782{
3783 union e1000_adv_rx_desc *rx_desc = priv;
3784 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3785 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3786
3787 /* Verify that this is a valid IPv4 TCP packet */
3788 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3789 E1000_RXDADV_PKTTYPE_TCP))
3790 return -1;
3791
3792 /* Set network headers */
3793 skb_reset_network_header(skb);
3794 skb_set_transport_header(skb, ip_hdrlen(skb));
3795 *iphdr = ip_hdr(skb);
3796 *tcph = tcp_hdr(skb);
3797 *hdr_flags = LRO_IPV4 | LRO_TCP;
3798
3799 return 0;
3800
3801}
3802#endif /* CONFIG_IGB_LRO */
3803
3804/** 3741/**
3805 * igb_receive_skb - helper function to handle rx indications 3742 * igb_receive_skb - helper function to handle rx indications
3806 * @ring: pointer to receive ring receving this packet 3743 * @ring: pointer to receive ring receving this packet
@@ -3815,28 +3752,20 @@ static void igb_receive_skb(struct igb_ring *ring, u8 status,
3815 struct igb_adapter * adapter = ring->adapter; 3752 struct igb_adapter * adapter = ring->adapter;
3816 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 3753 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3817 3754
3818#ifdef CONFIG_IGB_LRO 3755 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
3819 if (adapter->netdev->features & NETIF_F_LRO &&
3820 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3821 if (vlan_extracted) 3756 if (vlan_extracted)
3822 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb, 3757 vlan_gro_receive(&ring->napi, adapter->vlgrp,
3823 adapter->vlgrp, 3758 le16_to_cpu(rx_desc->wb.upper.vlan),
3824 le16_to_cpu(rx_desc->wb.upper.vlan), 3759 skb);
3825 rx_desc);
3826 else 3760 else
3827 lro_receive_skb(&ring->lro_mgr,skb, rx_desc); 3761 napi_gro_receive(&ring->napi, skb);
3828 ring->lro_used = 1;
3829 } else { 3762 } else {
3830#endif
3831 if (vlan_extracted) 3763 if (vlan_extracted)
3832 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, 3764 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3833 le16_to_cpu(rx_desc->wb.upper.vlan)); 3765 le16_to_cpu(rx_desc->wb.upper.vlan));
3834 else 3766 else
3835
3836 netif_receive_skb(skb); 3767 netif_receive_skb(skb);
3837#ifdef CONFIG_IGB_LRO
3838 } 3768 }
3839#endif
3840} 3769}
3841 3770
3842 3771
@@ -3991,13 +3920,6 @@ next_desc:
3991 rx_ring->next_to_clean = i; 3920 rx_ring->next_to_clean = i;
3992 cleaned_count = IGB_DESC_UNUSED(rx_ring); 3921 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3993 3922
3994#ifdef CONFIG_IGB_LRO
3995 if (rx_ring->lro_used) {
3996 lro_flush_all(&rx_ring->lro_mgr);
3997 rx_ring->lro_used = 0;
3998 }
3999#endif
4000
4001 if (cleaned_count) 3923 if (cleaned_count)
4002 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count); 3924 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
4003 3925