author     David S. Miller <davem@davemloft.net>  2016-04-16 19:09:14 -0400
committer  David S. Miller <davem@davemloft.net>  2016-04-16 19:09:14 -0400
commit     ac979929eaeaf82c3ba30161320b3f928d136de9 (patch)
tree       197f234f9fc66d923f7e16d959b807d497e43ffe
parent     ec9dcd3507f055f025a7692a1ff90ce105f84c1b (diff)
parent     3a80e1facd3c825c5ac804bc2efe118872832e33 (diff)
Merge branch 'ipv6-gre-offloads'
Alexander Duyck says:

====================
Add support for offloads with IPv6 GRE tunnels

This patch series enables the use of segmentation and checksum offloads
with IPv6 based GRE tunnels.

In order to enable this series I had to make a change to
iptunnel_handle_offloads so that it would no longer free the skb.  This
was necessary as there were multiple paths in the IPv6 GRE code that
required the skb to still be present so it could be freed.  As it turned
out I believe this actually fixes a bug that was present in FOU/GUE
based tunnels anyway.

Below is a quick breakdown of the performance gains seen with a simple
netperf test passing traffic through a ip6gretap tunnel and then an
i40e interface:

Throughput  Throughput  Local  Local    Result
            Units       CPU    Service  Tag
                        Util   Demand
                        %
  3544.93   10^6bits/s  6.30   4.656    "before"
 13081.75   10^6bits/s  3.75   0.752    "after"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
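To make the calling-convention change concrete before the diffs, here is a
small, self-contained userspace sketch of the ownership rules before and
after the series. It is illustration only, not kernel code: struct buf,
buf_free(), handle_offloads() and tunnel_xmit() are invented stand-ins for
the sk_buff, kfree_skb(), iptunnel_handle_offloads() and transmit-path
patterns shown in the diffs that follow.

/*
 * Userspace analogue of the ownership change in this series -- a sketch,
 * not kernel code.  "struct buf" stands in for the sk_buff, and all names
 * here are invented for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	char *data;
};

static void buf_free(struct buf *b)
{
	free(b->data);
	free(b);
}

/*
 * New convention from this series: return 0 / -errno and leave the buffer
 * alive, so the caller owns it on every path.  (The old helper instead
 * returned a pointer-or-ERR_PTR and freed the buffer itself on failure,
 * which forced callers to keep a separate "already freed" exit label.)
 */
static int handle_offloads(struct buf *b, int force_fail)
{
	(void)b;
	return force_fail ? -ENOMEM : 0;
}

/* Mirrors the post-series transmit pattern: one label frees the buffer. */
static int tunnel_xmit(struct buf *b, int force_fail)
{
	if (handle_offloads(b, force_fail))
		goto tx_error;

	printf("transmitted\n");
	buf_free(b);
	return 0;

tx_error:
	buf_free(b);	/* guaranteed still valid: the helper never frees */
	return 1;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (!b)
		return 1;
	b->data = calloc(1, 64);
	if (!b->data) {
		free(b);
		return 1;
	}
	return tunnel_xmit(b, 1);
}

The payoff of the new convention is visible at the call sites below: each
transmit path can funnel every failure through a single tx_error label
that frees the skb exactly once, instead of tracking whether the helper
already freed it.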
-rw-r--r--  drivers/net/geneve.c             32
-rw-r--r--  drivers/net/vxlan.c               6
-rw-r--r--  include/net/ip_tunnels.h          2
-rw-r--r--  include/net/udp_tunnel.h          3
-rw-r--r--  net/ipv4/fou.c                   16
-rw-r--r--  net/ipv4/gre_offload.c           14
-rw-r--r--  net/ipv4/ip_gre.c                20
-rw-r--r--  net/ipv4/ip_tunnel_core.c        13
-rw-r--r--  net/ipv4/ipip.c                   7
-rw-r--r--  net/ipv6/ip6_gre.c               81
-rw-r--r--  net/ipv6/sit.c                   14
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c   6
12 files changed, 116 insertions(+), 98 deletions(-)
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a9fbf17eb256..efbc7ceedc3a 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -696,16 +696,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
+	if (unlikely(err))
 		goto free_rt;
-	}
 
-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
+	err = udp_tunnel_handle_offloads(skb, udp_sum);
+	if (err)
 		goto free_rt;
-	}
 
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -733,16 +729,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
 	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err)) {
-		kfree_skb(skb);
+	if (unlikely(err))
 		goto free_dst;
-	}
 
-	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb)) {
-		err = PTR_ERR(skb);
+	err = udp_tunnel_handle_offloads(skb, udp_sum);
+	if (IS_ERR(skb))
 		goto free_dst;
-	}
 
 	gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
 	geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -937,7 +929,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		err = geneve_build_skb(rt, skb, key->tun_flags, vni,
 				       info->options_len, opts, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
 		ttl = key->ttl;
@@ -946,7 +938,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		err = geneve_build_skb(rt, skb, 0, geneve->vni,
 				       0, NULL, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
 		ttl = geneve->ttl;
@@ -964,7 +956,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
 tx_error:
 	dev_kfree_skb(skb);
-err:
+
 	if (err == -ELOOP)
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
@@ -1026,7 +1018,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 					  info->options_len, opts,
 					  flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
 		ttl = key->ttl;
@@ -1035,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 		err = geneve6_build_skb(dst, skb, 0, geneve->vni,
 					0, NULL, flags, xnet);
 		if (unlikely(err))
-			goto err;
+			goto tx_error;
 
 		prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
 					   iip, skb);
@@ -1054,7 +1046,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
 
 tx_error:
 	dev_kfree_skb(skb);
-err:
+
 	if (err == -ELOOP)
 		dev->stats.collisions++;
 	else if (err == -ENETUNREACH)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a7112b3bc9b4..c2e22c2532a1 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1797,9 +1797,9 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
 	if (WARN_ON(!skb))
 		return -ENOMEM;
 
-	skb = iptunnel_handle_offloads(skb, type);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		goto out_free;
 
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = VXLAN_HF_VNI;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 9ae9fbbccd67..6d790910ebdf 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -309,7 +309,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 					     gfp_t flags);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+int iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
 
 static inline int iptunnel_pull_offloads(struct sk_buff *skb)
 {
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 2dcf1de948ac..4f543262dd81 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -105,8 +105,7 @@ struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
 				    __be16 flags, __be64 tunnel_id,
 				    int md_size);
 
-static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
-							  bool udp_csum)
+static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
 {
 	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
 
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index d039f8fff57f..7ac5ec87b004 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -802,11 +802,11 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 	int type = e->flags & TUNNEL_ENCAP_FLAG_CSUM ? SKB_GSO_UDP_TUNNEL_CSUM :
 						       SKB_GSO_UDP_TUNNEL;
 	__be16 sport;
+	int err;
 
-	skb = iptunnel_handle_offloads(skb, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		return err;
 
 	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
 					       skb, 0, 0, false);
@@ -826,6 +826,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 	__be16 sport;
 	void *data;
 	bool need_priv = false;
+	int err;
 
 	if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) &&
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -836,10 +837,9 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
 
 	optlen += need_priv ? GUE_LEN_PRIV : 0;
 
-	skb = iptunnel_handle_offloads(skb, type);
-
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	err = iptunnel_handle_offloads(skb, type);
+	if (err)
+		return err;
 
 	/* Get source port (based on flow hash) before skb_push */
 	sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev),
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 20557f211408..e88190a8699a 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -292,6 +292,18 @@ static const struct net_offload gre_offload = {
 
 static int __init gre_offload_init(void)
 {
-	return inet_add_offload(&gre_offload, IPPROTO_GRE);
+	int err;
+
+	err = inet_add_offload(&gre_offload, IPPROTO_GRE);
+#if IS_ENABLED(CONFIG_IPV6)
+	if (err)
+		return err;
+
+	err = inet6_add_offload(&gre_offload, IPPROTO_GRE);
+	if (err)
+		inet_del_offload(&gre_offload, IPPROTO_GRE);
+#endif
+
+	return err;
 }
 device_initcall(gre_offload_init);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index af5d1f38217f..eedd829a2f87 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -500,8 +500,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
 	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
 }
 
-static struct sk_buff *gre_handle_offloads(struct sk_buff *skb,
-					   bool csum)
+static int gre_handle_offloads(struct sk_buff *skb, bool csum)
 {
 	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
 }
@@ -568,11 +567,8 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Push Tunnel header. */
-	skb = gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM));
-	if (IS_ERR(skb)) {
-		skb = NULL;
+	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
 		goto err_free_rt;
-	}
 
 	flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
 	build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
@@ -640,16 +636,14 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
 		tnl_params = &tunnel->parms.iph;
 	}
 
-	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		goto out;
+	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+		goto free_skb;
 
 	__gre_xmit(skb, dev, tnl_params, skb->protocol);
 	return NETDEV_TX_OK;
 
 free_skb:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
@@ -664,9 +658,8 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
-	if (IS_ERR(skb))
-		goto out;
+	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
+		goto free_skb;
 
 	if (skb_cow_head(skb, dev->needed_headroom))
 		goto free_skb;
@@ -676,7 +669,6 @@
 
 free_skb:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 43445df61efd..f46c5c873831 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -146,8 +146,8 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 }
 EXPORT_SYMBOL_GPL(iptunnel_metadata_reply);
 
-struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
-					 int gso_type_mask)
+int iptunnel_handle_offloads(struct sk_buff *skb,
+			     int gso_type_mask)
 {
 	int err;
 
@@ -159,9 +159,9 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 	if (skb_is_gso(skb)) {
 		err = skb_unclone(skb, GFP_ATOMIC);
 		if (unlikely(err))
-			goto error;
+			return err;
 		skb_shinfo(skb)->gso_type |= gso_type_mask;
-		return skb;
+		return 0;
 	}
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
@@ -174,10 +174,7 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb,
 		skb->encapsulation = 0;
 	}
 
-	return skb;
-error:
-	kfree_skb(skb);
-	return ERR_PTR(err);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
 
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec51d02166de..92827483ee3d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -219,9 +219,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(skb->protocol != htons(ETH_P_IP)))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-	if (IS_ERR(skb))
-		goto out;
+	if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+		goto tx_error;
 
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
@@ -230,7 +229,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
 tx_error:
 	kfree_skb(skb);
-out:
+
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 4e636e60a360..ca5a2c5675c5 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -598,6 +598,18 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
 	opt->ops.opt_nflen = 8;
 }
 
+static __sum16 gre6_checksum(struct sk_buff *skb)
+{
+	__wsum csum;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		csum = lco_csum(skb);
+	else
+		csum = skb_checksum(skb, sizeof(struct ipv6hdr),
+				    skb->len - sizeof(struct ipv6hdr), 0);
+	return csum_fold(csum);
+}
+
 static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 				struct net_device *dev,
 				__u8 dsfield,
@@ -609,7 +621,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net *net = tunnel->net;
 	struct net_device *tdev;	/* Device to other host */
 	struct ipv6hdr *ipv6h;		/* Our new IP header */
-	unsigned int max_headroom = 0;	/* The extra header space needed */
+	unsigned int min_headroom = 0;	/* The extra header space needed */
 	int gre_hlen;
 	struct ipv6_tel_txoption opt;
 	int mtu;
@@ -617,7 +629,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 	struct net_device_stats *stats = &tunnel->dev->stats;
 	int err = -1;
 	u8 proto;
-	struct sk_buff *new_skb;
 	__be16 protocol;
 
 	if (dev->type == ARPHRD_ETHER)
@@ -660,14 +671,14 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
 	mtu = dst_mtu(dst) - sizeof(*ipv6h);
 	if (encap_limit >= 0) {
-		max_headroom += 8;
+		min_headroom += 8;
 		mtu -= 8;
 	}
 	if (mtu < IPV6_MIN_MTU)
 		mtu = IPV6_MIN_MTU;
 	if (skb_dst(skb))
 		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-	if (skb->len > mtu) {
+	if (skb->len > mtu && !skb_is_gso(skb)) {
 		*pmtu = mtu;
 		err = -EMSGSIZE;
 		goto tx_err_dst_release;
@@ -685,20 +696,19 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 
 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
 
-	max_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
+	min_headroom += LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 
-	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
-	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
-		new_skb = skb_realloc_headroom(skb, max_headroom);
-		if (max_headroom > dev->needed_headroom)
-			dev->needed_headroom = max_headroom;
-		if (!new_skb)
-			goto tx_err_dst_release;
+	if (skb_headroom(skb) < min_headroom || skb_header_cloned(skb)) {
+		int head_delta = SKB_DATA_ALIGN(min_headroom -
+						skb_headroom(skb) +
+						16);
 
-		if (skb->sk)
-			skb_set_owner_w(new_skb, skb->sk);
-		consume_skb(skb);
-		skb = new_skb;
+		err = pskb_expand_head(skb, max_t(int, head_delta, 0),
+				       0, GFP_ATOMIC);
+		if (min_headroom > dev->needed_headroom)
+			dev->needed_headroom = min_headroom;
+		if (unlikely(err))
+			goto tx_err_dst_release;
 	}
 
 	if (!fl6->flowi6_mark && ndst)
@@ -711,10 +721,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
 	}
 
-	if (likely(!skb->encapsulation)) {
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
-	}
+	err = iptunnel_handle_offloads(skb,
+				       (tunnel->parms.o_flags & GRE_CSUM) ?
+				       SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
+	if (err)
+		goto tx_err_dst_release;
 
 	skb_push(skb, gre_hlen);
 	skb_reset_network_header(skb);
@@ -748,10 +759,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
 			*ptr = tunnel->parms.o_key;
 			ptr--;
 		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
+		if ((tunnel->parms.o_flags & GRE_CSUM) &&
+		    !(skb_shinfo(skb)->gso_type &
+		      (SKB_GSO_GRE | SKB_GSO_GRE_CSUM))) {
 			*ptr = 0;
-			*(__sum16 *)ptr = ip_compute_csum((void *)(ipv6h+1),
-				skb->len - sizeof(struct ipv6hdr));
+			*(__sum16 *)ptr = gre6_checksum(skb);
 		}
 	}
 
@@ -987,6 +999,8 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
 			dev->mtu = rt->dst.dev->mtu - addend;
 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 			dev->mtu -= 8;
+		if (dev->type == ARPHRD_ETHER)
+			dev->mtu -= ETH_HLEN;
 
 		if (dev->mtu < IPV6_MIN_MTU)
 			dev->mtu = IPV6_MIN_MTU;
@@ -1505,6 +1519,11 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
 	.ndo_get_iflink = ip6_tnl_get_iflink,
 };
 
+#define GRE6_FEATURES (NETIF_F_SG |		\
+		       NETIF_F_FRAGLIST |	\
+		       NETIF_F_HIGHDMA |	\
+		       NETIF_F_HW_CSUM)
+
 static void ip6gre_tap_setup(struct net_device *dev)
 {
 
@@ -1538,9 +1557,21 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
 	nt->net = dev_net(dev);
 	ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
 
-	/* Can use a lockless transmit, unless we generate output sequences */
-	if (!(nt->parms.o_flags & GRE_SEQ))
+	dev->features |= GRE6_FEATURES;
+	dev->hw_features |= GRE6_FEATURES;
+
+	if (!(nt->parms.o_flags & GRE_SEQ)) {
+		/* TCP segmentation offload is not supported when we
+		 * generate output sequences.
+		 */
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+
+		/* Can use a lockless transmit, unless we generate
+		 * output sequences
+		 */
 		dev->features |= NETIF_F_LLTX;
+	}
 
 	err = register_netdevice(dev);
 	if (err)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 83384308d032..a13d8c114ccb 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -913,10 +913,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		goto tx_error;
 	}
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT);
-	if (IS_ERR(skb)) {
+	if (iptunnel_handle_offloads(skb, SKB_GSO_SIT)) {
 		ip_rt_put(rt);
-		goto out;
+		goto tx_error;
 	}
 
 	if (df) {
@@ -992,7 +991,6 @@ tx_error_icmp:
 	dst_link_failure(skb);
 tx_error:
 	kfree_skb(skb);
-out:
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
@@ -1002,15 +1000,15 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *tiph = &tunnel->parms.iph;
 
-	skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP);
-	if (IS_ERR(skb))
-		goto out;
+	if (iptunnel_handle_offloads(skb, SKB_GSO_IPIP))
+		goto tx_error;
 
 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
 
 	ip_tunnel_xmit(skb, dev, tiph, IPPROTO_IPIP);
 	return NETDEV_TX_OK;
-out:
+tx_error:
+	kfree_skb(skb);
 	dev->stats.tx_errors++;
 	return NETDEV_TX_OK;
 }
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index dc196a0f501d..6d19d2eeaa60 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -1013,8 +1013,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af));
-	if (IS_ERR(skb))
+	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;
@@ -1105,8 +1104,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (IS_ERR(skb))
 		goto tx_error;
 
-	skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af));
-	if (IS_ERR(skb))
+	if (iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)))
 		goto tx_error;
 
 	skb->transport_header = skb->network_header;