Diffstat (limited to 'net/ipv4/udp_offload.c')
-rw-r--r--  net/ipv4/udp_offload.c  66
1 file changed, 50 insertions, 16 deletions
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6480cea7aa53..0a5a70d0e84c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t features,
 	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
 					     netdev_features_t features),
-	__be16 new_protocol)
+	__be16 new_protocol, bool is_ipv6)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -39,7 +39,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t enc_features;
 	int udp_offset, outer_hlen;
 	unsigned int oldlen;
-	bool need_csum;
+	bool need_csum = !!(skb_shinfo(skb)->gso_type &
+			    SKB_GSO_UDP_TUNNEL_CSUM);
+	bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
+	bool offload_csum = false, dont_encap = (need_csum || remcsum);
 
 	oldlen = (u16)~skb->len;
 
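The three new booleans are computed once, up front, from gso_type bits that the encapsulation path set when it built the GSO skb. For orientation, a minimal sketch (not from this patch; the helper name is invented) of how a tunnel transmit path might mark an skb so that need_csum and remcsum come out true here:

#include <linux/skbuff.h>

/* Hypothetical helper: mark a GSO skb as a UDP tunnel, optionally
 * requesting an outer checksum and/or remote checksum offload. */
static void example_mark_udp_tunnel_gso(struct sk_buff *skb,
					bool outer_csum, bool remcsum)
{
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	if (outer_csum)
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
	if (remcsum)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
}

dont_encap then records that either case forces special handling of the segments produced below.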
@@ -52,10 +55,13 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
 	skb->protocol = new_protocol;
+	skb->encap_hdr_csum = need_csum;
+	skb->remcsum_offload = remcsum;
 
-	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-	if (need_csum)
-		skb->encap_hdr_csum = 1;
+	/* Try to offload checksum if possible */
+	offload_csum = !!(need_csum &&
+			  (skb->dev->features &
+			   (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM)));
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & features;
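offload_csum answers a single question: may the outer UDP checksum be left to the NIC? That requires need_csum to hold and the device to advertise checksumming for the outer IP version, since the segments may be handed down without encapsulation markings. The same test in isolation, as a sketch (helper name invented):

#include <linux/netdevice.h>

/* Hypothetical helper: can this device generate a UDP checksum for
 * the outer header of the given IP version in hardware? */
static bool example_can_offload_outer_csum(const struct net_device *dev,
					   bool is_ipv6)
{
	return !!(dev->features & (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM));
}

This is why is_ipv6 has to be threaded into __skb_udp_tunnel_segment at all: the feature bit to test differs between IPv4 and IPv6 outer headers.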
@@ -72,11 +78,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	do {
 		struct udphdr *uh;
 		int len;
+		__be32 delta;
 
-		skb_reset_inner_headers(skb);
-		skb->encapsulation = 1;
+		if (dont_encap) {
+			skb->encapsulation = 0;
+			skb->ip_summed = CHECKSUM_NONE;
+		} else {
+			/* Only set up inner headers if we might be offloading
+			 * inner checksum.
+			 */
+			skb_reset_inner_headers(skb);
+			skb->encapsulation = 1;
+		}
 
 		skb->mac_len = mac_len;
+		skb->protocol = protocol;
 
 		skb_push(skb, outer_hlen);
 		skb_reset_mac_header(skb);
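One subtle relocation in this hunk: skb->protocol is now restored right after skb->mac_len rather than at the bottom of the loop, where the old code had it. The next hunk introduces a continue for segments that need no outer checksum, and that continue would have jumped over an assignment left at the loop's end.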
@@ -86,19 +102,36 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 		uh = udp_hdr(skb);
 		uh->len = htons(len);
 
-		if (need_csum) {
-			__be32 delta = htonl(oldlen + len);
+		if (!need_csum)
+			continue;
 
-			uh->check = ~csum_fold((__force __wsum)
-					       ((__force u32)uh->check +
-						(__force u32)delta));
+		delta = htonl(oldlen + len);
+
+		uh->check = ~csum_fold((__force __wsum)
+				       ((__force u32)uh->check +
+					(__force u32)delta));
+		if (offload_csum) {
+			skb->ip_summed = CHECKSUM_PARTIAL;
+			skb->csum_start = skb_transport_header(skb) - skb->head;
+			skb->csum_offset = offsetof(struct udphdr, check);
+		} else if (remcsum) {
+			/* Need to calculate checksum from scratch,
+			 * inner checksums are never offloaded when doing
+			 * remote_checksum_offload.
+			 */
+
+			skb->csum = skb_checksum(skb, udp_offset,
+						 skb->len - udp_offset,
+						 0);
+			uh->check = csum_fold(skb->csum);
+			if (uh->check == 0)
+				uh->check = CSUM_MANGLED_0;
+		} else {
 			uh->check = gso_make_checksum(skb, ~uh->check);
 
 			if (uh->check == 0)
 				uh->check = CSUM_MANGLED_0;
 		}
-
-		skb->protocol = protocol;
 	} while ((skb = skb->next));
 out:
 	return segs;
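The delta arithmetic is the classic ones-complement incremental update of RFC 1624: oldlen was saved earlier as the 16-bit complement of the pre-segmentation length, so oldlen + len is the ~m + m' adjustment that retargets a checksum from the old length to the per-segment length without touching the rest of the packet. A self-contained user-space sketch of the same update rule (names invented; not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones-complement accumulator down to 16 bits. */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1624: a checksummed 16-bit field changes old_val -> new_val;
 * adjust the stored checksum instead of recomputing everything. */
static uint16_t csum_update16(uint16_t check, uint16_t old_val,
			      uint16_t new_val)
{
	uint32_t delta = (uint16_t)~old_val + new_val;	/* ~m + m' */

	return ~fold16((uint16_t)~check + delta);	/* ~(~HC + ~m + m') */
}

int main(void)
{
	/* Toy packet: two 16-bit words, checksummed. */
	uint16_t w0 = 0x1234, w1 = 0x0040;
	uint16_t check = ~fold16((uint32_t)w0 + w1);
	/* Rewrite w1 (think: a length field shrinking after GSO). */
	uint16_t new_w1 = 0x0010;

	printf("incremental 0x%04x, recomputed 0x%04x\n",
	       (unsigned)csum_update16(check, w1, new_w1),
	       (unsigned)(uint16_t)~fold16((uint32_t)w0 + new_w1));
	return 0;
}

Both lines print the same value, confirming the shortcut. Note the kernel variant only stores an intermediate result here; each branch below then finishes it in its own way (hardware completion, full recomputation for remcsum, or gso_make_checksum()).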
@@ -134,7 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	}
 
 	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
-					protocol);
+					protocol, is_ipv6);
 
 out_unlock:
 	rcu_read_unlock();
@@ -172,6 +205,7 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
 			      SKB_GSO_UDP_TUNNEL |
 			      SKB_GSO_UDP_TUNNEL_CSUM |
+			      SKB_GSO_TUNNEL_REMCSUM |
 			      SKB_GSO_IPIP |
 			      SKB_GSO_GRE | SKB_GSO_GRE_CSUM |
 			      SKB_GSO_MPLS) ||
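Finally, udp4_ufo_fragment() rejects any GSO skb whose gso_type carries a bit outside the supported set, so SKB_GSO_TUNNEL_REMCSUM must join the whitelist before remote-checksum-offload skbs can reach the tunnel segmentation path at all. The check reduces to a mask test, restated as a sketch (helper name invented):

#include <linux/types.h>

/* Hypothetical restatement: accept only if no unsupported bit is set. */
static bool example_gso_type_ok(unsigned int type, unsigned int supported)
{
	return !(type & ~supported);
}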