author		Pravin B Shelar <pshelar@nicira.com>	2013-02-14 09:02:41 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-15 15:17:11 -0500
commit		68c331631143f5f039baac99a650e0b9e1ea02b6 (patch)
tree		c69d73c5599aab5e92a8c99bc5343c9fc9ffbbd8 /net/ipv4/ip_gre.c
parent		05e8ef4ab2d8087d360e814d14da20b9f7fb2283 (diff)
v4 GRE: Add TCP segmentation offload for GRE
The following patch adds a GRE protocol offload handler so that
skb_gso_segment() can segment GRE packets.
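
The handler registration itself lands outside this file (the diffstat
below is limited to net/ipv4/ip_gre.c), but the shape is worth
sketching. The following is a rough illustration only, assuming a
gre_gso_segment() helper like the one this series introduces
elsewhere; it is not the literal code from the patch:

	static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
					       netdev_features_t features);

	/* Illustrative sketch: hook a GSO segment callback into the
	 * inet offload table so skb_gso_segment() can find it when it
	 * encounters an IPPROTO_GRE packet.
	 */
	static const struct net_offload gre_offload = {
		.callbacks = {
			.gso_segment = gre_gso_segment,
		},
	};

	static int __init gre_offload_init(void)
	{
		return inet_add_offload(&gre_offload, IPPROTO_GRE);
	}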
An SKB GSO control block is added to keep track of the total header
length so that skb_segment() can push the entire header; e.g. in the
case of GRE, skb_segment() needs to push both the inner and outer
headers onto every segment.
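
Conceptually the control block is just a saved offset stashed in
skb->cb while skb_segment() runs. A minimal sketch of the idea (the
merged helper lives in net/core/skbuff.c and may differ in detail):

	/* GSO control block: remember where the outermost (tunnel)
	 * headers start so that each segment produced by skb_segment()
	 * can have the full inner + outer header block copied in front
	 * of its payload.
	 */
	struct skb_gso_cb {
		int mac_offset;
	};
	#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)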
A new NETIF_F_GSO_GRE feature flag is added for devices that support
hardware GRE TSO offload. Currently no device supports it, so GRE GSO
always falls back to software GSO.
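
Opting in is the driver's job: a NIC that could segment GRE in
hardware would advertise the bit, and until one does,
dev_hard_start_xmit() hands GSO skbs to skb_gso_segment() for the
software path. A hypothetical driver hook, purely for illustration:

	/* Hypothetical driver setup: advertise hardware GRE TSO.  At
	 * the time of this patch no real driver sets NETIF_F_GSO_GRE,
	 * so the stack always segments GRE skbs in software.
	 */
	static void example_nic_init_features(struct net_device *dev)
	{
		dev->hw_features |= NETIF_F_GSO_GRE;
		dev->features |= NETIF_F_GSO_GRE;
	}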
[ Compute pkt_len before ip_local_out() invocation. -DaveM ]
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ip_gre.c')
-rw-r--r--	net/ipv4/ip_gre.c	82
1 file changed, 75 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 00a14b9864ea..a56f1182c176 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,8 +735,33 @@ drop:
 	return 0;
 }
 
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+	int err;
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+		return skb;
+	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		err = skb_checksum_help(skb);
+		if (unlikely(err))
+			goto error;
+	}
+	skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	const struct iphdr *old_iph;
 	const struct iphdr *tiph;
@@ -751,10 +776,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	__be32 dst;
 	int mtu;
 	u8 ttl;
+	int err;
+	int pkt_len;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    skb_checksum_help(skb))
-		goto tx_error;
+	skb = handle_offloads(skb);
+	if (IS_ERR(skb)) {
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	if (!skb->encapsulation) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
 
 	old_iph = ip_hdr(skb);
 
@@ -855,7 +889,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	if (skb->protocol == htons(ETH_P_IP)) {
 		df |= (old_iph->frag_off&htons(IP_DF));
 
-		if ((old_iph->frag_off&htons(IP_DF)) &&
+		if (!skb_is_gso(skb) &&
+		    (old_iph->frag_off&htons(IP_DF)) &&
 		    mtu < ntohs(old_iph->tot_len)) {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 			ip_rt_put(rt);
@@ -875,7 +910,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			}
 		}
 
-		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
+		if (!skb_is_gso(skb) &&
+		    mtu >= IPV6_MIN_MTU &&
+		    mtu < skb->len - tunnel->hlen + gre_hlen) {
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 			ip_rt_put(rt);
 			goto tx_error;
@@ -936,6 +973,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->daddr = fl4.daddr;
 	iph->saddr = fl4.saddr;
 	iph->ttl = ttl;
+	iph->id = 0;
 
 	if (ttl == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
@@ -964,9 +1002,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			*ptr = tunnel->parms.o_key;
 			ptr--;
 		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
+		/* Skip GRE checksum if skb is getting offloaded. */
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
+		    (tunnel->parms.o_flags&GRE_CSUM)) {
 			int offset = skb_transport_offset(skb);
 
+			if (skb_has_shared_frag(skb)) {
+				err = __skb_linearize(skb);
+				if (err) {
+					ip_rt_put(rt);
+					goto tx_error;
+				}
+			}
+
 			*ptr = 0;
 			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
 								 skb->len - offset,
@@ -974,7 +1022,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		}
 	}
 
-	iptunnel_xmit(skb, dev);
+	nf_reset(skb);
+
+	pkt_len = skb->len - skb_transport_offset(skb);
+	err = ip_local_out(skb);
+	if (likely(net_xmit_eval(err) == 0)) {
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+	}
 	return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1044,6 +1104,11 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 		mtu = 68;
 
 	tunnel->hlen = addend;
+	/* TCP offload with GRE SEQ is not supported. */
+	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
+		dev->features |= NETIF_F_GSO_SOFTWARE;
+		dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+	}
 
 	return mtu;
 }
@@ -1593,6 +1658,9 @@ static void ipgre_tap_setup(struct net_device *dev)
 
 	dev->iflink = 0;
 	dev->features |= NETIF_F_NETNS_LOCAL;
+
+	dev->features |= GRE_FEATURES;
+	dev->hw_features |= GRE_FEATURES;
 }
 
 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],