path: root/drivers
author	Herbert Xu <herbert@gondor.apana.org.au>	2009-01-19 00:50:16 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-21 17:34:06 -0500
commit	da3bc07171dff957906cbe2ad5abb443eccf57c4 (patch)
tree	c59b6bec9e78c66c7fc5ceb0ee0288965ee7f47a /drivers
parent	78b6f4ce58d1c85190003840912cc9097cbb8146 (diff)
sfc: Replace LRO with GRO
This patch makes sfc invoke the GRO hooks instead of LRO. As GRO presents an external interface compatible with LRO's, this is a very straightforward replacement.

Everything should appear identical to the user, except that the offload is now controlled by the GRO ethtool option instead of the LRO one. I've kept the lro module parameter as-is, since it exists for compatibility only.

I have eliminated efx_rx_mk_skb, as the GRO layer can take care of all packets regardless of whether GRO is enabled or not. So the only case where we don't call GRO is when the packet checksum is absent. This keeps the behavioural changes of the patch to a minimum.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
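At its core, the conversion swaps the inet_lro entry points (lro_receive_skb()/lro_receive_frags()) for their GRO counterparts (napi_gro_receive()/napi_gro_frags()), as seen in efx_rx_packet_lro() in the diff below. The following sketch shows that receive-completion pattern in isolation; it assumes the 2.6.29-era struct napi_gro_fraginfo interface that this patch uses (later kernels replaced it with napi_get_frags()), and the function and parameter names (example_rx_deliver, rx_page, offset, len) are illustrative rather than part of the sfc driver.

/*
 * Sketch only: hand a completed RX buffer to the stack via GRO,
 * mirroring the pattern this patch introduces in efx_rx_packet_lro().
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			       struct page *rx_page, unsigned int offset,
			       unsigned int len)
{
	if (rx_page) {
		/* Page-based buffer: describe the single fragment and let
		 * the GRO layer build (and possibly aggregate) the skb. */
		struct napi_gro_fraginfo info;

		info.frags[0].page = rx_page;
		info.frags[0].page_offset = offset;
		info.frags[0].size = len;
		info.nr_frags = 1;
		info.ip_summed = CHECKSUM_UNNECESSARY;
		info.len = len;

		napi_gro_frags(napi, &info);
	} else {
		/* skb-based buffer: direct replacement for lro_receive_skb();
		 * with GRO disabled this degenerates to netif_receive_skb(). */
		napi_gro_receive(napi, skb);
	}
}

Because napi_gro_receive() falls back to netif_receive_skb() when GRO is disabled, the driver no longer needs its own non-offload skb path, and the offload itself is toggled with the standard ethtool GRO flag (ethtool -K <iface> gro on|off) rather than an sfc-specific LRO option.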
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/sfc/Kconfig		1
-rw-r--r--	drivers/net/sfc/efx.c		11
-rw-r--r--	drivers/net/sfc/net_driver.h	9
-rw-r--r--	drivers/net/sfc/rx.c		207
-rw-r--r--	drivers/net/sfc/rx.h		3
-rw-r--r--	drivers/net/sfc/sfe4001.c	1
-rw-r--r--	drivers/net/sfc/tenxpress.c	1
7 files changed, 19 insertions(+), 214 deletions(-)
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
index c535408ad6be..12a82966b577 100644
--- a/drivers/net/sfc/Kconfig
+++ b/drivers/net/sfc/Kconfig
@@ -2,7 +2,6 @@ config SFC
 	tristate "Solarflare Solarstorm SFC4000 support"
 	depends on PCI && INET
 	select MII
-	select INET_LRO
 	select CRC32
 	select I2C
 	select I2C_ALGOBIT
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 77aca5d67b57..3ee2a4548cba 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -182,7 +182,6 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
 		channel->rx_pkt = NULL;
 	}
 
-	efx_flush_lro(channel);
 	efx_rx_strategy(channel);
 
 	efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
@@ -1269,18 +1268,11 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
 static int efx_init_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
-	int rc;
 
 	efx_for_each_channel(channel, efx) {
 		channel->napi_dev = efx->net_dev;
-		rc = efx_lro_init(&channel->lro_mgr, efx);
-		if (rc)
-			goto err;
 	}
 	return 0;
- err:
-	efx_fini_napi(efx);
-	return rc;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
@@ -1288,7 +1280,6 @@ static void efx_fini_napi(struct efx_nic *efx)
 	struct efx_channel *channel;
 
 	efx_for_each_channel(channel, efx) {
-		efx_lro_fini(&channel->lro_mgr);
 		channel->napi_dev = NULL;
 	}
 }
@@ -2097,7 +2088,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
 	net_dev->features |= (NETIF_F_IP_CSUM | NETIF_F_SG |
 			      NETIF_F_HIGHDMA | NETIF_F_TSO);
 	if (lro)
-		net_dev->features |= NETIF_F_LRO;
+		net_dev->features |= NETIF_F_GRO;
 	/* Mask for features that also apply to VLAN devices */
 	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
 				   NETIF_F_HIGHDMA | NETIF_F_TSO);
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 5f255f75754e..8643505788cc 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -25,15 +25,11 @@
 #include <linux/device.h>
 #include <linux/highmem.h>
 #include <linux/workqueue.h>
-#include <linux/inet_lro.h>
 #include <linux/i2c.h>
 
 #include "enum.h"
 #include "bitfield.h"
 
-#define EFX_MAX_LRO_DESCRIPTORS 8
-#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
-
 /**************************************************************************
  *
  * Build definitions
@@ -340,13 +336,10 @@ enum efx_rx_alloc_method {
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
  * @eventq_magic: Event queue magic value for driver-generated test events
- * @lro_mgr: LRO state
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
  *	and diagnostic counters
  * @rx_alloc_push_pages: RX allocation method currently in use for pushing
  *	descriptors
- * @rx_alloc_pop_pages: RX allocation method currently in use for popping
- *	descriptors
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_frag_err: Count of RX IP fragment errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
@@ -371,10 +364,8 @@ struct efx_channel {
 	unsigned int last_eventq_read_ptr;
 	unsigned int eventq_magic;
 
-	struct net_lro_mgr lro_mgr;
 	int rx_alloc_level;
 	int rx_alloc_push_pages;
-	int rx_alloc_pop_pages;
 
 	unsigned n_rx_tobe_disc;
 	unsigned n_rx_ip_frag_err;
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index b8ba4bbad889..a0345b380979 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -99,109 +99,6 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 }
 
 
-/**************************************************************************
- *
- * Linux generic LRO handling
- *
- **************************************************************************
- */
-
-static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
-			       void **tcpudp_hdr, u64 *hdr_flags, void *priv)
-{
-	struct efx_channel *channel = priv;
-	struct iphdr *iph;
-	struct tcphdr *th;
-
-	iph = (struct iphdr *)skb->data;
-	if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
-		goto fail;
-
-	th = (struct tcphdr *)(skb->data + iph->ihl * 4);
-
-	*tcpudp_hdr = th;
-	*ip_hdr = iph;
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-
-	channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
-	return 0;
-fail:
-	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-	return -1;
-}
-
-static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
-			    void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
-			    void *priv)
-{
-	struct efx_channel *channel = priv;
-	struct ethhdr *eh;
-	struct iphdr *iph;
-
-	/* We support EtherII and VLAN encapsulated IPv4 */
-	eh = page_address(frag->page) + frag->page_offset;
-	*mac_hdr = eh;
-
-	if (eh->h_proto == htons(ETH_P_IP)) {
-		iph = (struct iphdr *)(eh + 1);
-	} else {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
-		if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
-			goto fail;
-
-		iph = (struct iphdr *)(veh + 1);
-	}
-	*ip_hdr = iph;
-
-	/* We can only do LRO over TCP */
-	if (iph->protocol != IPPROTO_TCP)
-		goto fail;
-
-	*hdr_flags = LRO_IPV4 | LRO_TCP;
-	*tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
-
-	channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
-	return 0;
- fail:
-	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-	return -1;
-}
-
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
-{
-	size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
-	struct net_lro_desc *lro_arr;
-
-	/* Allocate the LRO descriptors structure */
-	lro_arr = kzalloc(s, GFP_KERNEL);
-	if (lro_arr == NULL)
-		return -ENOMEM;
-
-	lro_mgr->lro_arr = lro_arr;
-	lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
-	lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
-	lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
-
-	lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
-	lro_mgr->get_frag_header = efx_get_frag_hdr;
-	lro_mgr->dev = efx->net_dev;
-
-	lro_mgr->features = LRO_F_NAPI;
-
-	/* We can pass packets up with the checksum intact */
-	lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
-
-	lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
-
-	return 0;
-}
-
-void efx_lro_fini(struct net_lro_mgr *lro_mgr)
-{
-	kfree(lro_mgr->lro_arr);
-	lro_mgr->lro_arr = NULL;
-}
-
 /**
  * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
  *
@@ -549,77 +446,31 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 static void efx_rx_packet_lro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf)
 {
-	struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
-	void *priv = channel;
+	struct napi_struct *napi = &channel->napi_str;
 
 	/* Pass the skb/page into the LRO engine */
 	if (rx_buf->page) {
-		struct skb_frag_struct frags;
+		struct napi_gro_fraginfo info;
 
-		frags.page = rx_buf->page;
-		frags.page_offset = efx_rx_buf_offset(rx_buf);
-		frags.size = rx_buf->len;
+		info.frags[0].page = rx_buf->page;
+		info.frags[0].page_offset = efx_rx_buf_offset(rx_buf);
+		info.frags[0].size = rx_buf->len;
+		info.nr_frags = 1;
+		info.ip_summed = CHECKSUM_UNNECESSARY;
+		info.len = rx_buf->len;
 
-		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
-				  rx_buf->len, priv, 0);
+		napi_gro_frags(napi, &info);
 
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		rx_buf->page = NULL;
 	} else {
 		EFX_BUG_ON_PARANOID(!rx_buf->skb);
 
-		lro_receive_skb(lro_mgr, rx_buf->skb, priv);
+		napi_gro_receive(napi, rx_buf->skb);
 		rx_buf->skb = NULL;
 	}
 }
 
-/* Allocate and construct an SKB around a struct page.*/
-static struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
-				     struct efx_nic *efx,
-				     int hdr_len)
-{
-	struct sk_buff *skb;
-
-	/* Allocate an SKB to store the headers */
-	skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
-	if (unlikely(skb == NULL)) {
-		EFX_ERR_RL(efx, "RX out of memory for skb\n");
-		return NULL;
-	}
-
-	EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
-	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
-
-	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
-
-	skb->len = rx_buf->len;
-	skb->truesize = rx_buf->len + sizeof(struct sk_buff);
-	memcpy(skb->data, rx_buf->data, hdr_len);
-	skb->tail += hdr_len;
-
-	/* Append the remaining page onto the frag list */
-	if (unlikely(rx_buf->len > hdr_len)) {
-		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
-		frag->page = rx_buf->page;
-		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
-		frag->size = skb->len - hdr_len;
-		skb_shinfo(skb)->nr_frags = 1;
-		skb->data_len = frag->size;
-	} else {
-		__free_pages(rx_buf->page, efx->rx_buffer_order);
-		skb->data_len = 0;
-	}
-
-	/* Ownership has transferred from the rx_buf to skb */
-	rx_buf->page = NULL;
-
-	/* Move past the ethernet header */
-	skb->protocol = eth_type_trans(skb, efx->net_dev);
-
-	return skb;
-}
-
 void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 		   unsigned int len, bool checksummed, bool discard)
 {
@@ -687,7 +538,6 @@ void __efx_rx_packet(struct efx_channel *channel,
 {
 	struct efx_nic *efx = channel->efx;
 	struct sk_buff *skb;
-	bool lro = !!(efx->net_dev->features & NETIF_F_LRO);
 
 	/* If we're in loopback test, then pass the packet directly to the
 	 * loopback layer, and free the rx_buf here
@@ -709,41 +559,21 @@ void __efx_rx_packet(struct efx_channel *channel,
 			   efx->net_dev);
 	}
 
-	/* Both our generic-LRO and SFC-SSR support skb and page based
-	 * allocation, but neither support switching from one to the
-	 * other on the fly. If we spot that the allocation mode has
-	 * changed, then flush the LRO state.
-	 */
-	if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
-		efx_flush_lro(channel);
-		channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
-	}
-	if (likely(checksummed && lro)) {
+	if (likely(checksummed || rx_buf->page)) {
 		efx_rx_packet_lro(channel, rx_buf);
 		goto done;
 	}
 
-	/* Form an skb if required */
-	if (rx_buf->page) {
-		int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
-		skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
-		if (unlikely(skb == NULL)) {
-			efx_free_rx_buffer(efx, rx_buf);
-			goto done;
-		}
-	} else {
-		/* We now own the SKB */
-		skb = rx_buf->skb;
-		rx_buf->skb = NULL;
-	}
+	/* We now own the SKB */
+	skb = rx_buf->skb;
+	rx_buf->skb = NULL;
 
 	EFX_BUG_ON_PARANOID(rx_buf->page);
 	EFX_BUG_ON_PARANOID(rx_buf->skb);
 	EFX_BUG_ON_PARANOID(!skb);
 
 	/* Set the SKB flags */
-	if (unlikely(!checksummed || !efx->rx_checksum_enabled))
-		skb->ip_summed = CHECKSUM_NONE;
+	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Pass the packet up */
 	netif_receive_skb(skb);
@@ -760,7 +590,7 @@ void efx_rx_strategy(struct efx_channel *channel)
 	enum efx_rx_alloc_method method = rx_alloc_method;
 
 	/* Only makes sense to use page based allocation if LRO is enabled */
-	if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
 		method = RX_ALLOC_METHOD_SKB;
 	} else if (method == RX_ALLOC_METHOD_AUTO) {
 		/* Constrain the rx_alloc_level */
@@ -865,11 +695,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 	rx_queue->buffer = NULL;
 }
 
-void efx_flush_lro(struct efx_channel *channel)
-{
-	lro_flush_all(&channel->lro_mgr);
-}
-
 
 module_param(rx_alloc_method, int, 0644);
 MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
index 0e88a9ddc1c6..42ee7555a80b 100644
--- a/drivers/net/sfc/rx.h
+++ b/drivers/net/sfc/rx.h
@@ -17,9 +17,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 
-int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
-void efx_lro_fini(struct net_lro_mgr *lro_mgr);
-void efx_flush_lro(struct efx_channel *channel);
 void efx_rx_strategy(struct efx_channel *channel);
 void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
 void efx_rx_work(struct work_struct *data);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb9992..d21d014bf0c1 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -24,6 +24,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/rtnetlink.h>
 #include "net_driver.h"
 #include "efx.h"
 #include "phy.h"
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da9545..f1365097b4fd 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/delay.h>
+#include <linux/rtnetlink.h>
 #include <linux/seq_file.h>
 #include "efx.h"
 #include "mdio_10g.h"