Diffstat (limited to 'net/ipv4/ip_gre.c')
-rw-r--r--	net/ipv4/ip_gre.c	91
1 file changed, 81 insertions(+), 10 deletions(-)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e81b1caf2ea2..5ef4da780ac1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -735,10 +735,32 @@ drop:
 	return 0;
 }
 
+static struct sk_buff *handle_offloads(struct sk_buff *skb)
+{
+	int err;
+
+	if (skb_is_gso(skb)) {
+		err = skb_unclone(skb, GFP_ATOMIC);
+		if (unlikely(err))
+			goto error;
+		skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
+		return skb;
+	}
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	return skb;
+
+error:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
 static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
 	struct ip_tunnel *tunnel = netdev_priv(dev);
-	const struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *old_iph;
 	const struct iphdr *tiph;
 	struct flowi4 fl4;
 	u8 tos;
@@ -751,10 +773,21 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	__be32 dst;
 	int mtu;
 	u8 ttl;
+	int err;
+	int pkt_len;
 
-	if (skb->ip_summed == CHECKSUM_PARTIAL &&
-	    skb_checksum_help(skb))
-		goto tx_error;
+	skb = handle_offloads(skb);
+	if (IS_ERR(skb)) {
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	if (!skb->encapsulation) {
+		skb_reset_inner_headers(skb);
+		skb->encapsulation = 1;
+	}
+
+	old_iph = ip_hdr(skb);
 
 	if (dev->type == ARPHRD_ETHER)
 		IPCB(skb)->flags = 0;
@@ -818,8 +851,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	ttl = tiph->ttl;
 	tos = tiph->tos;
-	if (tos == 1) {
-		tos = 0;
+	if (tos & 0x1) {
+		tos &= ~0x1;
 		if (skb->protocol == htons(ETH_P_IP))
 			tos = old_iph->tos;
 		else if (skb->protocol == htons(ETH_P_IPV6))
@@ -853,7 +886,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	if (skb->protocol == htons(ETH_P_IP)) {
 		df |= (old_iph->frag_off&htons(IP_DF));
 
-		if ((old_iph->frag_off&htons(IP_DF)) &&
+		if (!skb_is_gso(skb) &&
+		    (old_iph->frag_off&htons(IP_DF)) &&
 		    mtu < ntohs(old_iph->tot_len)) {
 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
 			ip_rt_put(rt);
@@ -873,7 +907,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			}
 		}
 
-		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
+		if (!skb_is_gso(skb) &&
+		    mtu >= IPV6_MIN_MTU &&
+		    mtu < skb->len - tunnel->hlen + gre_hlen) {
 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 			ip_rt_put(rt);
 			goto tx_error;
@@ -934,6 +970,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->daddr = fl4.daddr;
 	iph->saddr = fl4.saddr;
 	iph->ttl = ttl;
+	iph->id = 0;
 
 	if (ttl == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
@@ -962,9 +999,17 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 			*ptr = tunnel->parms.o_key;
 			ptr--;
 		}
-		if (tunnel->parms.o_flags&GRE_CSUM) {
+		/* Skip GRE checksum if skb is getting offloaded. */
+		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
+		    (tunnel->parms.o_flags&GRE_CSUM)) {
 			int offset = skb_transport_offset(skb);
 
+			if (skb_has_shared_frag(skb)) {
+				err = __skb_linearize(skb);
+				if (err)
+					goto tx_error;
+			}
+
 			*ptr = 0;
 			*(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
 								 skb->len - offset,
@@ -972,7 +1017,19 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		}
 	}
 
-	iptunnel_xmit(skb, dev);
+	nf_reset(skb);
+
+	pkt_len = skb->len - skb_transport_offset(skb);
+	err = ip_local_out(skb);
+	if (likely(net_xmit_eval(err) == 0)) {
+		u64_stats_update_begin(&tstats->syncp);
+		tstats->tx_bytes += pkt_len;
+		tstats->tx_packets++;
+		u64_stats_update_end(&tstats->syncp);
+	} else {
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
+	}
 	return NETDEV_TX_OK;
 
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1042,6 +1099,17 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 		mtu = 68;
 
 	tunnel->hlen = addend;
+	/* TCP offload with GRE SEQ is not supported. */
+	if (!(tunnel->parms.o_flags & GRE_SEQ)) {
+		/* device supports enc gso offload */
+		if (tdev->hw_enc_features & NETIF_F_GRE_GSO) {
+			dev->features |= NETIF_F_TSO;
+			dev->hw_features |= NETIF_F_TSO;
+		} else {
+			dev->features |= NETIF_F_GSO_SOFTWARE;
+			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+		}
+	}
 
 	return mtu;
 }
@@ -1591,6 +1659,9 @@ static void ipgre_tap_setup(struct net_device *dev)
 
 	dev->iflink = 0;
 	dev->features |= NETIF_F_NETNS_LOCAL;
+
+	dev->features |= GRE_FEATURES;
+	dev->hw_features |= GRE_FEATURES;
 }
 
 static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],