author     David Miller <davem@davemloft.net>      2018-06-24 01:13:49 -0400
committer  David S. Miller <davem@davemloft.net>   2018-06-25 22:33:04 -0400
commit     d4546c2509b1e9cd082e3682dcec98472e37ee5a (patch)
tree       eb7393380a20d8ccaf8f94204fe2c15d2639a780
parent     9ff3b40e411c00870d1c29cd6b843fca7c4160ae (diff)
net: Convert GRO SKB handling to list_head.
Manage pending per-NAPI GRO packets via list_head.

Return an SKB pointer from the GRO receive handlers. When GRO receive handlers return non-NULL, it means that this SKB needs to be completed at this time and removed from the NAPI queue.

Several operations are greatly simplified by this transformation, especially timing out the oldest SKB in the list when gro_count exceeds MAX_GRO_SKBS, and napi_gro_flush() which walks the queue in reverse order.

Signed-off-by: David S. Miller <davem@davemloft.net>
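Not part of the patch, but as a quick illustration of the pattern it adopts: the userspace sketch below keeps "packets" on an embedded list_head, adds the newest at the head, evicts the oldest from the tail once a cap is reached, and then walks the pending list. struct pkt, MAX_PKTS and the stripped-down list helpers are hypothetical stand-ins for struct sk_buff, MAX_GRO_SKBS and the kernel's <linux/list.h> primitives (the real NAPI code also times entries out by age); it assumes GCC or Clang for __typeof__.

/* gro_list_sketch.c: minimal stand-in for the list_head-based GRO queue. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)
#define list_last_entry(head, type, member) list_entry((head)->prev, type, member)
#define list_for_each_entry(pos, head, member)                                  \
        for (pos = list_entry((head)->next, __typeof__(*pos), member);          \
             &pos->member != (head);                                            \
             pos = list_entry(pos->member.next, __typeof__(*pos), member))

static void list_add(struct list_head *new, struct list_head *head)
{
        /* Insert right after head: newest entry sits at the front. */
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/* Stand-in for struct sk_buff: only the fields the sketch needs. */
struct pkt {
        int id;
        struct list_head list;  /* mirrors the new skb->list member */
};

#define MAX_PKTS 3              /* plays the role of MAX_GRO_SKBS */

int main(void)
{
        struct list_head gro_list = LIST_HEAD_INIT(gro_list);
        struct pkt *p;
        int count = 0;

        for (int i = 0; i < 5; i++) {
                struct pkt *skb = malloc(sizeof(*skb));

                skb->id = i;
                if (count >= MAX_PKTS) {
                        /* Oldest flow is at the tail; complete and drop it. */
                        struct pkt *oldest = list_last_entry(&gro_list, struct pkt, list);

                        list_del(&oldest->list);
                        printf("complete oldest pkt %d\n", oldest->id);
                        free(oldest);
                } else {
                        count++;
                }
                list_add(&skb->list, &gro_list);        /* newest at the head */
        }

        list_for_each_entry(p, &gro_list, list)
                printf("pending pkt %d\n", p->id);
        return 0;
}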
-rw-r--r--  drivers/net/geneve.c | 11
-rw-r--r--  drivers/net/vxlan.c | 11
-rw-r--r--  include/linux/etherdevice.h | 3
-rw-r--r--  include/linux/netdevice.h | 32
-rw-r--r--  include/linux/skbuff.h | 3
-rw-r--r--  include/linux/udp.h | 4
-rw-r--r--  include/net/inet_common.h | 2
-rw-r--r--  include/net/tcp.h | 2
-rw-r--r--  include/net/udp.h | 4
-rw-r--r--  include/net/udp_tunnel.h | 6
-rw-r--r--  net/8021q/vlan.c | 13
-rw-r--r--  net/core/dev.c | 68
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/ethernet/eth.c | 12
-rw-r--r--  net/ipv4/af_inet.c | 12
-rw-r--r--  net/ipv4/esp4_offload.c | 4
-rw-r--r--  net/ipv4/fou.c | 20
-rw-r--r--  net/ipv4/gre_offload.c | 8
-rw-r--r--  net/ipv4/tcp_offload.c | 14
-rw-r--r--  net/ipv4/udp_offload.c | 13
-rw-r--r--  net/ipv6/esp6_offload.c | 4
-rw-r--r--  net/ipv6/ip6_offload.c | 16
-rw-r--r--  net/ipv6/tcpv6_offload.c | 4
-rw-r--r--  net/ipv6/udp_offload.c | 4
24 files changed, 133 insertions(+), 141 deletions(-)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..3e94375b9b01 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -418,11 +418,12 @@ static int geneve_hlen(struct genevehdr *gh)
         return sizeof(*gh) + gh->opt_len * 4;
 }
 
-static struct sk_buff **geneve_gro_receive(struct sock *sk,
-                struct sk_buff **head,
+static struct sk_buff *geneve_gro_receive(struct sock *sk,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
-        struct sk_buff *p, **pp = NULL;
+        struct sk_buff *pp = NULL;
+        struct sk_buff *p;
         struct genevehdr *gh, *gh2;
         unsigned int hlen, gh_len, off_gnv;
         const struct packet_offload *ptype;
@@ -449,7 +450,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
                 goto out;
         }
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 if (!NAPI_GRO_CB(p)->same_flow)
                         continue;
 
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..cc14e0cd5647 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -568,11 +568,12 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
         return vh;
 }
 
-static struct sk_buff **vxlan_gro_receive(struct sock *sk,
-                struct sk_buff **head,
+static struct sk_buff *vxlan_gro_receive(struct sock *sk,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
-        struct sk_buff *p, **pp = NULL;
+        struct sk_buff *pp = NULL;
+        struct sk_buff *p;
         struct vxlanhdr *vh, *vh2;
         unsigned int hlen, off_vx;
         int flush = 1;
@@ -607,7 +608,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
 
         skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 if (!NAPI_GRO_CB(p)->same_flow)
                         continue;
 
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 79563840c295..572e11bb8696 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
                 unsigned int rxqs);
 #define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-                struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
 int eth_gro_complete(struct sk_buff *skb, int nhoff);
 
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..f176d9873910 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -322,7 +322,7 @@ struct napi_struct {
         int                     poll_owner;
 #endif
         struct net_device       *dev;
-        struct sk_buff          *gro_list;
+        struct list_head        gro_list;
         struct sk_buff          *skb;
         struct hrtimer          timer;
         struct list_head        dev_list;
@@ -2255,10 +2255,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
         return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
 }
 
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
-                struct sk_buff **head,
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
         if (unlikely(gro_recursion_inc_test(skb))) {
                 NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2268,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
         return cb(head, skb);
 }
 
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
                 struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
                 struct sock *sk,
-                struct sk_buff **head,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
         if (unlikely(gro_recursion_inc_test(skb))) {
                 NAPI_GRO_CB(skb)->flush |= 1;
@@ -2299,8 +2299,8 @@ struct packet_type {
 struct offload_callbacks {
         struct sk_buff          *(*gso_segment)(struct sk_buff *skb,
                                                 netdev_features_t features);
-        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
+        struct sk_buff          *(*gro_receive)(struct list_head *head,
                                                 struct sk_buff *skb);
         int                     (*gro_complete)(struct sk_buff *skb, int nhoff);
 };
 
@@ -2568,7 +2568,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 struct net_device *dev_get_by_napi_id(unsigned int napi_id);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
 {
@@ -2784,13 +2784,13 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
 }
 
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
         if (PTR_ERR(pp) != -EINPROGRESS)
                 NAPI_GRO_CB(skb)->flush |= flush;
 }
 #else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
 {
         NAPI_GRO_CB(skb)->flush |= flush;
 }
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c86885954994..7ccc601b55d9 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -677,7 +677,8 @@ struct sk_buff {
                         int                     ip_defrag_offset;
                 };
         };
         struct rb_node          rbnode; /* used in netem & tcp stack */
+        struct list_head        list;
         };
         struct sock             *sk;
 
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ca840345571b..320d49d85484 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -74,8 +74,8 @@ struct udp_sock {
         void (*encap_destroy)(struct sock *sk);
 
         /* GRO functions for UDP socket */
-        struct sk_buff **       (*gro_receive)(struct sock *sk,
-                                               struct sk_buff **head,
+        struct sk_buff *        (*gro_receive)(struct sock *sk,
+                                               struct list_head *head,
                                                struct sk_buff *skb);
         int                     (*gro_complete)(struct sock *sk,
                                                 struct sk_buff *skb,
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 384b90c62c0b..3ca969cbd161 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
                 int *addr_len);
 
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
 int inet_gro_complete(struct sk_buff *skb, int nhoff);
 struct sk_buff *inet_gso_segment(struct sk_buff *skb,
                 netdev_features_t features);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 822ee49ed0f9..402a88b0e8a8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1788,7 +1788,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
 
 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                 netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
 int tcp_gro_complete(struct sk_buff *skb);
 
 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
diff --git a/include/net/udp.h b/include/net/udp.h
index b1ea8b0f5e6a..5723c6128ae4 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
 typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
                 __be16 dport);
 
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                 struct udphdr *uh, udp_lookup_t lookup);
 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
 
 struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b95a6927c718..fe680ab6b15a 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
 
 typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
 typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
-typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
-                struct sk_buff **head,
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+                struct list_head *head,
                 struct sk_buff *skb);
 typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
                 int nhoff);
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 73a65789271b..99141986efa0 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -647,13 +647,14 @@ out:
         return err;
 }
 
-static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
+static struct sk_buff *vlan_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
-        struct sk_buff *p, **pp = NULL;
-        struct vlan_hdr *vhdr;
-        unsigned int hlen, off_vlan;
         const struct packet_offload *ptype;
+        unsigned int hlen, off_vlan;
+        struct sk_buff *pp = NULL;
+        struct vlan_hdr *vhdr;
+        struct sk_buff *p;
         __be16 type;
         int flush = 1;
 
@@ -675,7 +676,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
         flush = 0;
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 struct vlan_hdr *vhdr2;
 
                 if (!NAPI_GRO_CB(p)->same_flow)
diff --git a/net/core/dev.c b/net/core/dev.c
index a5aa1c7444e6..aa61b9344b46 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4881,36 +4881,25 @@ out:
  */
 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
 {
-        struct sk_buff *skb, *prev = NULL;
-
-        /* scan list and build reverse chain */
-        for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
-                skb->prev = prev;
-                prev = skb;
-        }
-
-        for (skb = prev; skb; skb = prev) {
-                skb->next = NULL;
+        struct sk_buff *skb, *p;
 
+        list_for_each_entry_safe_reverse(skb, p, &napi->gro_list, list) {
                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
                         return;
-
-                prev = skb->prev;
+                list_del_init(&skb->list);
                 napi_gro_complete(skb);
                 napi->gro_count--;
         }
-
-        napi->gro_list = NULL;
 }
 EXPORT_SYMBOL(napi_gro_flush);
 
 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 {
-        struct sk_buff *p;
         unsigned int maclen = skb->dev->hard_header_len;
         u32 hash = skb_get_hash_raw(skb);
+        struct sk_buff *p;
 
-        for (p = napi->gro_list; p; p = p->next) {
+        list_for_each_entry(p, &napi->gro_list, list) {
                 unsigned long diffs;
 
                 NAPI_GRO_CB(p)->flush = 0;
@@ -4977,12 +4966,12 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
 
 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
-        struct sk_buff **pp = NULL;
+        struct list_head *head = &offload_base;
         struct packet_offload *ptype;
         __be16 type = skb->protocol;
-        struct list_head *head = &offload_base;
-        int same_flow;
+        struct sk_buff *pp = NULL;
         enum gro_result ret;
+        int same_flow;
         int grow;
 
         if (netif_elide_gro(skb->dev))
@@ -5039,11 +5028,8 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
 
         if (pp) {
-                struct sk_buff *nskb = *pp;
-
-                *pp = nskb->next;
-                nskb->next = NULL;
-                napi_gro_complete(nskb);
+                list_del_init(&pp->list);
+                napi_gro_complete(pp);
                 napi->gro_count--;
         }
 
@@ -5054,15 +5040,10 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                 goto normal;
 
         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
-                struct sk_buff *nskb = napi->gro_list;
+                struct sk_buff *nskb;
 
-                /* locate the end of the list to select the 'oldest' flow */
-                while (nskb->next) {
-                        pp = &nskb->next;
-                        nskb = *pp;
-                }
-                *pp = NULL;
-                nskb->next = NULL;
+                nskb = list_last_entry(&napi->gro_list, struct sk_buff, list);
+                list_del(&nskb->list);
                 napi_gro_complete(nskb);
         } else {
                 napi->gro_count++;
@@ -5071,8 +5052,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
         NAPI_GRO_CB(skb)->age = jiffies;
         NAPI_GRO_CB(skb)->last = skb;
         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
-        skb->next = napi->gro_list;
-        napi->gro_list = skb;
+        list_add(&skb->list, &napi->gro_list);
         ret = GRO_HELD;
 
 pull:
@@ -5478,7 +5458,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                 NAPIF_STATE_IN_BUSY_POLL)))
                 return false;
 
-        if (n->gro_list) {
+        if (!list_empty(&n->gro_list)) {
                 unsigned long timeout = 0;
 
                 if (work_done)
@@ -5687,7 +5667,7 @@ static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
         /* Note : we use a relaxed variant of napi_schedule_prep() not setting
          * NAPI_STATE_MISSED, since we do not react to a device IRQ.
          */
-        if (napi->gro_list && !napi_disable_pending(napi) &&
+        if (!list_empty(&napi->gro_list) && !napi_disable_pending(napi) &&
             !test_and_set_bit(NAPI_STATE_SCHED, &napi->state))
                 __napi_schedule_irqoff(napi);
 
@@ -5701,7 +5681,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
         napi->timer.function = napi_watchdog;
         napi->gro_count = 0;
-        napi->gro_list = NULL;
+        INIT_LIST_HEAD(&napi->gro_list);
         napi->skb = NULL;
         napi->poll = poll;
         if (weight > NAPI_POLL_WEIGHT)
@@ -5734,6 +5714,14 @@ void napi_disable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_disable);
 
+static void gro_list_free(struct list_head *head)
+{
+        struct sk_buff *skb, *p;
+
+        list_for_each_entry_safe(skb, p, head, list)
+                kfree_skb(skb);
+}
+
 /* Must be called in process context */
 void netif_napi_del(struct napi_struct *napi)
 {
@@ -5743,8 +5731,8 @@ void netif_napi_del(struct napi_struct *napi)
         list_del_init(&napi->dev_list);
         napi_free_frags(napi);
 
-        kfree_skb_list(napi->gro_list);
-        napi->gro_list = NULL;
+        gro_list_free(&napi->gro_list);
+        INIT_LIST_HEAD(&napi->gro_list);
         napi->gro_count = 0;
 }
 EXPORT_SYMBOL(netif_napi_del);
@@ -5787,7 +5775,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
                 goto out_unlock;
         }
 
-        if (n->gro_list) {
+        if (!list_empty(&n->gro_list)) {
                 /* flush too old packets
                  * If HZ < 1000, flush all packets.
                  */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c642304f178c..b1f274f22d85 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3815,14 +3815,14 @@ err:
 }
 EXPORT_SYMBOL_GPL(skb_segment);
 
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 {
         struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
         unsigned int offset = skb_gro_offset(skb);
         unsigned int headlen = skb_headlen(skb);
         unsigned int len = skb_gro_len(skb);
-        struct sk_buff *lp, *p = *head;
         unsigned int delta_truesize;
+        struct sk_buff *lp;
 
         if (unlikely(p->len + len >= 65536))
                 return -E2BIG;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index ee28440f57c5..fd8faa0dfa61 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -427,13 +427,13 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
 }
 EXPORT_SYMBOL(sysfs_format_mac);
 
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
-                struct sk_buff *skb)
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
-        struct sk_buff *p, **pp = NULL;
-        struct ethhdr *eh, *eh2;
-        unsigned int hlen, off_eth;
         const struct packet_offload *ptype;
+        unsigned int hlen, off_eth;
+        struct sk_buff *pp = NULL;
+        struct ethhdr *eh, *eh2;
+        struct sk_buff *p;
         __be16 type;
         int flush = 1;
 
@@ -448,7 +448,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
         flush = 0;
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 if (!NAPI_GRO_CB(p)->same_flow)
                         continue;
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 15e125558c76..06b218a2870f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1384,12 +1384,12 @@ out:
 }
 EXPORT_SYMBOL(inet_gso_segment);
 
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
         const struct net_offload *ops;
-        struct sk_buff **pp = NULL;
-        struct sk_buff *p;
+        struct sk_buff *pp = NULL;
         const struct iphdr *iph;
+        struct sk_buff *p;
         unsigned int hlen;
         unsigned int off;
         unsigned int id;
@@ -1425,7 +1425,7 @@ struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
         id >>= 16;
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 struct iphdr *iph2;
                 u16 flush_id;
 
@@ -1505,8 +1505,8 @@ out:
 }
 EXPORT_SYMBOL(inet_gro_receive);
 
-static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
+static struct sk_buff *ipip_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         if (NAPI_GRO_CB(skb)->encap_mark) {
                 NAPI_GRO_CB(skb)->flush = 1;
diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c
index 7cf755ef9efb..bbeecd13e534 100644
--- a/net/ipv4/esp4_offload.c
+++ b/net/ipv4/esp4_offload.c
@@ -28,8 +28,8 @@
 #include <linux/spinlock.h>
 #include <net/udp.h>
 
-static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
+static struct sk_buff *esp4_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         int offset = skb_gro_offset(skb);
         struct xfrm_offload *xo;
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 1540db65241a..efdc9e1f741e 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -224,14 +224,14 @@ drop:
         return 0;
 }
 
-static struct sk_buff **fou_gro_receive(struct sock *sk,
-                struct sk_buff **head,
+static struct sk_buff *fou_gro_receive(struct sock *sk,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
-        const struct net_offload *ops;
-        struct sk_buff **pp = NULL;
         u8 proto = fou_from_sock(sk)->protocol;
         const struct net_offload **offloads;
+        const struct net_offload *ops;
+        struct sk_buff *pp = NULL;
 
         /* We can clear the encap_mark for FOU as we are essentially doing
          * one of two possible things. We are either adding an L4 tunnel
@@ -305,13 +305,13 @@ static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off,
         return guehdr;
 }
 
-static struct sk_buff **gue_gro_receive(struct sock *sk,
-                struct sk_buff **head,
+static struct sk_buff *gue_gro_receive(struct sock *sk,
+                struct list_head *head,
                 struct sk_buff *skb)
 {
         const struct net_offload **offloads;
         const struct net_offload *ops;
-        struct sk_buff **pp = NULL;
+        struct sk_buff *pp = NULL;
         struct sk_buff *p;
         struct guehdr *guehdr;
         size_t len, optlen, hdrlen, off;
@@ -397,7 +397,7 @@ static struct sk_buff **gue_gro_receive(struct sock *sk,
 
         skb_gro_pull(skb, hdrlen);
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 const struct guehdr *guehdr2;
 
                 if (!NAPI_GRO_CB(p)->same_flow)
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 1859c473b21a..b9673c21be45 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -108,10 +108,10 @@ out:
         return segs;
 }
 
-static struct sk_buff **gre_gro_receive(struct sk_buff **head,
+static struct sk_buff *gre_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
-        struct sk_buff **pp = NULL;
+        struct sk_buff *pp = NULL;
         struct sk_buff *p;
         const struct gre_base_hdr *greh;
         unsigned int hlen, grehlen;
@@ -182,7 +182,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
                                 null_compute_pseudo);
         }
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 const struct gre_base_hdr *greh2;
 
                 if (!NAPI_GRO_CB(p)->same_flow)
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 8cc7c3487330..f5aee641f825 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -180,9 +180,9 @@ out:
         return segs;
 }
 
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
-        struct sk_buff **pp = NULL;
+        struct sk_buff *pp = NULL;
         struct sk_buff *p;
         struct tcphdr *th;
         struct tcphdr *th2;
@@ -220,7 +220,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
         len = skb_gro_len(skb);
         flags = tcp_flag_word(th);
 
-        for (; (p = *head); head = &p->next) {
+        list_for_each_entry(p, head, list) {
                 if (!NAPI_GRO_CB(p)->same_flow)
                         continue;
 
@@ -233,7 +233,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
                 goto found;
         }
-
+        p = NULL;
         goto out_check_final;
 
 found:
@@ -263,7 +263,7 @@ found:
         flush |= (len - 1) >= mss;
         flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
 
-        if (flush || skb_gro_receive(head, skb)) {
+        if (flush || skb_gro_receive(p, skb)) {
                 mss = 1;
                 goto out_check_final;
         }
@@ -277,7 +277,7 @@ out_check_final:
                 TCP_FLAG_FIN));
 
         if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
-                pp = head;
+                pp = p;
 
 out:
         NAPI_GRO_CB(skb)->flush |= (flush != 0);
@@ -302,7 +302,7 @@ int tcp_gro_complete(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_gro_complete);
 
-static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
         /* Don't bother verifying checksum if we're going to flush anyway. */
         if (!NAPI_GRO_CB(skb)->flush &&
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 92dc9e5a7ff3..ac46c1c55c99 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -343,10 +343,11 @@ out:
         return segs;
 }
 
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                 struct udphdr *uh, udp_lookup_t lookup)
 {
-        struct sk_buff *p, **pp = NULL;
+        struct sk_buff *pp = NULL;
+        struct sk_buff *p;
         struct udphdr *uh2;
         unsigned int off = skb_gro_offset(skb);
         int flush = 1;
@@ -371,7 +372,7 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
 unflush:
         flush = 0;
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 if (!NAPI_GRO_CB(p)->same_flow)
                         continue;
 
@@ -399,8 +400,8 @@ out:
 }
 EXPORT_SYMBOL(udp_gro_receive);
 
-static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+static struct sk_buff *udp4_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         struct udphdr *uh = udp_gro_udphdr(skb);
 
diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c
index 27f59b61f70f..ddfa533a84e5 100644
--- a/net/ipv6/esp6_offload.c
+++ b/net/ipv6/esp6_offload.c
@@ -49,8 +49,8 @@ static __u16 esp6_nexthdr_esp_offset(struct ipv6hdr *ipv6_hdr, int nhlen)
         return 0;
 }
 
-static struct sk_buff **esp6_gro_receive(struct sk_buff **head,
+static struct sk_buff *esp6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         int offset = skb_gro_offset(skb);
         struct xfrm_offload *xo;
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 5b3f2f89ef41..37ff4805b20c 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -163,11 +163,11 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
         return len;
 }
 
-static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
+static struct sk_buff *ipv6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         const struct net_offload *ops;
-        struct sk_buff **pp = NULL;
+        struct sk_buff *pp = NULL;
         struct sk_buff *p;
         struct ipv6hdr *iph;
         unsigned int nlen;
@@ -214,7 +214,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
         flush--;
         nlen = skb_network_header_len(skb);
 
-        for (p = *head; p; p = p->next) {
+        list_for_each_entry(p, head, list) {
                 const struct ipv6hdr *iph2;
                 __be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */
 
@@ -263,8 +263,8 @@ out:
         return pp;
 }
 
-static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
+static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         /* Common GRO receive for SIT and IP6IP6 */
 
@@ -278,8 +278,8 @@ static struct sk_buff **sit_ip6ip6_gro_receive(struct sk_buff **head,
         return ipv6_gro_receive(head, skb);
 }
 
-static struct sk_buff **ip4ip6_gro_receive(struct sk_buff **head,
+static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         /* Common GRO receive for SIT and IP6IP6 */
 
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index 278e49cd67d4..e72947c99454 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -15,8 +15,8 @@
 #include <net/ip6_checksum.h>
 #include "ip6_offload.h"
 
-static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
+static struct sk_buff *tcp6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         /* Don't bother verifying checksum if we're going to flush anyway. */
         if (!NAPI_GRO_CB(skb)->flush &&
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 03a2ff3fe1e6..95dee9ca8d22 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -114,8 +114,8 @@ out:
         return segs;
 }
 
-static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
+static struct sk_buff *udp6_gro_receive(struct list_head *head,
                 struct sk_buff *skb)
 {
         struct udphdr *uh = udp_gro_udphdr(skb);
 