Diffstat (limited to 'net/ipv4/udp_offload.c')

 net/ipv4/udp_offload.c | 171 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 120 insertions(+), 51 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 59035bc3008d..507310ef4b56 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -25,30 +25,11 @@ struct udp_offload_priv {
 	struct udp_offload_priv __rcu *next;
 };
 
-static int udp4_ufo_send_check(struct sk_buff *skb)
-{
-	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
-		return -EINVAL;
-
-	if (likely(!skb->encapsulation)) {
-		const struct iphdr *iph;
-		struct udphdr *uh;
-
-		iph = ip_hdr(skb);
-		uh = udp_hdr(skb);
-
-		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
-					       IPPROTO_UDP, 0);
-		skb->csum_start = skb_transport_header(skb) - skb->head;
-		skb->csum_offset = offsetof(struct udphdr, check);
-		skb->ip_summed = CHECKSUM_PARTIAL;
-	}
-
-	return 0;
-}
-
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-				       netdev_features_t features)
+static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
+	netdev_features_t features,
+	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+					     netdev_features_t features),
+	__be16 new_protocol)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	u16 mac_offset = skb->mac_header;
@@ -70,7 +51,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 	skb_reset_mac_header(skb);
 	skb_set_network_header(skb, skb_inner_network_offset(skb));
 	skb->mac_len = skb_inner_network_offset(skb);
-	skb->protocol = htons(ETH_P_TEB);
+	skb->protocol = new_protocol;
 
 	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
 	if (need_csum)
@@ -78,7 +59,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
 
 	/* segment inner packet. */
 	enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
-	segs = skb_mac_gso_segment(skb, enc_features);
+	segs = gso_inner_segment(skb, enc_features);
 	if (IS_ERR_OR_NULL(segs)) {
 		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
 				     mac_len);
@@ -123,21 +104,63 @@ out:
 	return segs;
 }
 
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+				       netdev_features_t features,
+				       bool is_ipv6)
+{
+	__be16 protocol = skb->protocol;
+	const struct net_offload **offloads;
+	const struct net_offload *ops;
+	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
+					     netdev_features_t features);
+
+	rcu_read_lock();
+
+	switch (skb->inner_protocol_type) {
+	case ENCAP_TYPE_ETHER:
+		protocol = skb->inner_protocol;
+		gso_inner_segment = skb_mac_gso_segment;
+		break;
+	case ENCAP_TYPE_IPPROTO:
+		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
+		ops = rcu_dereference(offloads[skb->inner_ipproto]);
+		if (!ops || !ops->callbacks.gso_segment)
+			goto out_unlock;
+		gso_inner_segment = ops->callbacks.gso_segment;
+		break;
+	default:
+		goto out_unlock;
+	}
+
+	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
+					protocol);
+
+out_unlock:
+	rcu_read_unlock();
+
+	return segs;
+}
+
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 					 netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
 	unsigned int mss;
-	int offset;
 	__wsum csum;
+	struct udphdr *uh;
+	struct iphdr *iph;
 
 	if (skb->encapsulation &&
 	    (skb_shinfo(skb)->gso_type &
 	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
-		segs = skb_udp_tunnel_segment(skb, features);
+		segs = skb_udp_tunnel_segment(skb, features, false);
 		goto out;
 	}
 
+	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+		goto out;
+
 	mss = skb_shinfo(skb)->gso_size;
 	if (unlikely(skb->len <= mss))
 		goto out;
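
Taken together with the first hunk, this turns the old hard-wired skb_mac_gso_segment()/ETH_P_TEB path into a parameterized core: the new skb_udp_tunnel_segment() wrapper resolves the inner segmentation callback from skb->inner_protocol_type, using the L2 path for ENCAP_TYPE_ETHER and a per-protocol offload lookup for ENCAP_TYPE_IPPROTO. A minimal userspace sketch of that dispatch pattern (all names below are illustrative stand-ins, not kernel API):

#include <stdio.h>

/* Stand-ins for the kernel's encapsulation types and callback signature. */
enum encap { ENCAP_ETHER, ENCAP_IPPROTO };

typedef int (*inner_segment_fn)(const char *payload);

static int segment_mac(const char *payload)
{
	printf("segment as L2 frame: %s\n", payload);
	return 0;
}

static int segment_ipproto(const char *payload)
{
	printf("segment by inner IP protocol: %s\n", payload);
	return 0;
}

/* Mirrors the switch in skb_udp_tunnel_segment(): resolve the inner
 * handler first, fail if none is registered, then run the common path.
 */
static int tunnel_segment(enum encap type, int have_ipproto_handler)
{
	inner_segment_fn fn;

	switch (type) {
	case ENCAP_ETHER:
		fn = segment_mac;
		break;
	case ENCAP_IPPROTO:
		if (!have_ipproto_handler)
			return -1;	/* no ops->callbacks.gso_segment */
		fn = segment_ipproto;
		break;
	default:
		return -1;
	}
	return fn("inner packet");
}

int main(void)
{
	tunnel_segment(ENCAP_ETHER, 0);
	tunnel_segment(ENCAP_IPPROTO, 1);
	return 0;
}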
@@ -165,10 +188,16 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
 	 * HW cannot do checksum of UDP packets sent as multiple
 	 * IP fragments.
 	 */
-	offset = skb_checksum_start_offset(skb);
-	csum = skb_checksum(skb, offset, skb->len - offset, 0);
-	offset += skb->csum_offset;
-	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
+
+	uh = udp_hdr(skb);
+	iph = ip_hdr(skb);
+
+	uh->check = 0;
+	csum = skb_checksum(skb, 0, skb->len, 0);
+	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+	if (uh->check == 0)
+		uh->check = CSUM_MANGLED_0;
+
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Fragment the skb. IP headers of the fragments are updated in
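
The replacement code computes the full UDP checksum in software before fragmenting, and applies the CSUM_MANGLED_0 rule: for UDP over IPv4 a transmitted checksum of zero means "no checksum computed" (RFC 768), so a sum that folds to zero must go on the wire as 0xFFFF. A minimal self-contained sketch of the same arithmetic (helper names are illustrative; addresses are taken as host-order integers here):

#include <stdint.h>
#include <stddef.h>

/* One's-complement accumulate over big-endian 16-bit words. */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t acc)
{
	while (len > 1) {
		acc += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte, zero-padded */
		acc += (uint32_t)p[0] << 8;
	return acc;
}

/* What udp_v4_check() amounts to: checksum of pseudo-header + datagram.
 * 'udp' points at the UDP header with its check field already zeroed.
 */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *udp, size_t len)
{
	uint32_t acc = 0;

	/* pseudo-header: src addr, dst addr, zero+proto (17), UDP length */
	acc += (saddr >> 16) + (saddr & 0xffff);
	acc += (daddr >> 16) + (daddr & 0xffff);
	acc += 17 + (uint32_t)len;

	acc = sum16(udp, len, acc);
	while (acc >> 16)		/* fold carries into 16 bits */
		acc = (acc & 0xffff) + (acc >> 16);

	uint16_t check = (uint16_t)~acc;
	return check ? check : 0xffff;	/* the CSUM_MANGLED_0 rule */
}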
@@ -228,30 +257,24 @@ unlock:
 }
 EXPORT_SYMBOL(udp_del_offload);
 
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+				 struct udphdr *uh)
 {
 	struct udp_offload_priv *uo_priv;
 	struct sk_buff *p, **pp = NULL;
-	struct udphdr *uh, *uh2;
-	unsigned int hlen, off;
+	struct udphdr *uh2;
+	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
 	if (NAPI_GRO_CB(skb)->udp_mark ||
-	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+	    (skb->ip_summed != CHECKSUM_PARTIAL &&
+	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
+	     !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 
 	/* mark that this skb passed once through the udp gro layer */
 	NAPI_GRO_CB(skb)->udp_mark = 1;
 
-	off = skb_gro_offset(skb);
-	hlen = off + sizeof(*uh);
-	uh = skb_gro_header_fast(skb, off);
-	if (skb_gro_header_hard(skb, hlen)) {
-		uh = skb_gro_header_slow(skb, hlen, off);
-		if (unlikely(!uh))
-			goto out;
-	}
-
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
 	for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
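
The entry check is also relaxed: instead of requiring an encapsulated skb or CHECKSUM_COMPLETE, UDP GRO now proceeds whenever the datagram's checksum is already known to be good, whether because the packet is locally generated (CHECKSUM_PARTIAL), the NIC verified it (csum_cnt), or software already validated it (csum_valid). A toy model of the predicate (field names mirror NAPI_GRO_CB, but this is illustrative only):

/* Stand-in for the NAPI_GRO_CB bookkeeping consulted by udp_gro_receive(). */
struct gro_cb_model {
	unsigned int udp_mark:1;	/* already went through UDP GRO once */
	unsigned int csum_valid:1;	/* checksum verified in software */
	int csum_cnt;			/* checksums verified by the NIC */
	int ip_summed_partial;		/* skb->ip_summed == CHECKSUM_PARTIAL */
};

static int udp_gro_acceptable(const struct gro_cb_model *cb)
{
	if (cb->udp_mark)
		return 0;	/* never run the UDP layer twice on one skb */
	/* proceed only if the checksum is already known to be good */
	return cb->ip_summed_partial || cb->csum_cnt > 0 || cb->csum_valid;
}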
@@ -269,7 +292,12 @@ unflush:
 			continue;
 
 		uh2 = (struct udphdr *)(p->data + off);
-		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+		/* Match ports, and require that the checksums are either
+		 * both zero or both nonzero.
+		 */
+		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+		    (!uh->check ^ !uh2->check)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
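
The added `(!uh->check ^ !uh2->check)` clause keeps datagrams that carry a checksum and datagrams that do not in separate GRO flows; the expression is true exactly when one check field is zero and the other is nonzero. A quick self-contained demonstration:

#include <assert.h>

static int mixed_checksum_usage(unsigned short a, unsigned short b)
{
	return !a ^ !b;		/* 1 iff exactly one of a, b is zero */
}

int main(void)
{
	assert(mixed_checksum_usage(0, 0) == 0);	   /* both unused: may merge */
	assert(mixed_checksum_usage(0x1234, 0xbeef) == 0); /* both in use: may merge */
	assert(mixed_checksum_usage(0, 0x1234) == 1);	   /* mixed: keep flows apart */
	return 0;
}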
@@ -277,6 +305,7 @@ unflush:
 
 	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
 	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
+	NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
 	pp = uo_priv->offload->callbacks.gro_receive(head, skb);
 
 out_unlock:
@@ -286,7 +315,34 @@ out:
 	return pp;
 }
 
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_gro_udphdr(skb);
+
+	if (unlikely(!uh))
+		goto flush;
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (NAPI_GRO_CB(skb)->flush)
+		goto skip;
+
+	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+						 inet_gro_compute_pseudo))
+		goto flush;
+	else if (uh->check)
+		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+					     inet_gro_compute_pseudo);
+skip:
+	NAPI_GRO_CB(skb)->is_ipv6 = 0;
+	return udp_gro_receive(head, skb, uh);
+
+flush:
+	NAPI_GRO_CB(skb)->flush = 1;
+	return NULL;
+}
+
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	struct udp_offload_priv *uo_priv;
 	__be16 newlen = htons(skb->len - nhoff);
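
The new udp4_gro_receive() front end applies the IPv4 zero-checksum rule before calling into the shared udp_gro_receive(): a zero uh->check needs no verification, a nonzero one is validated against the pseudo-header, and a validated nonzero checksum may then be converted into a full-packet sum for reuse further up the stack. A toy model of that decision (all names are stand-ins):

/* Stand-in model of udp4_gro_receive()'s checksum handling. */
enum rx_csum_verdict { RX_OK, RX_OK_TRY_CONVERT, RX_FLUSH };

static enum rx_csum_verdict udp4_rx_csum(unsigned short check,
					 int already_flushing,
					 int validates_ok)
{
	if (already_flushing)
		return RX_OK;		/* skip the work, packet leaves GRO anyway */
	if (check == 0)
		return RX_OK;		/* zero checksum: nothing to verify on IPv4 */
	if (!validates_ok)
		return RX_FLUSH;	/* bad checksum: flush instead of merging */
	return RX_OK_TRY_CONVERT;	/* good nonzero checksum: attempt conversion */
}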
@@ -304,19 +360,32 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff)
 			break;
 	}
 
-	if (uo_priv != NULL)
+	if (uo_priv != NULL) {
+		NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
 		err = uo_priv->offload->callbacks.gro_complete(skb, nhoff + sizeof(struct udphdr));
+	}
 
 	rcu_read_unlock();
 	return err;
 }
 
+static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+	if (uh->check)
+		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+					  iph->daddr, 0);
+
+	return udp_gro_complete(skb, nhoff);
+}
+
 static const struct net_offload udpv4_offload = {
 	.callbacks = {
-		.gso_send_check = udp4_ufo_send_check,
 		.gso_segment = udp4_ufo_fragment,
-		.gro_receive = udp_gro_receive,
-		.gro_complete = udp_gro_complete,
+		.gro_receive = udp4_gro_receive,
+		.gro_complete = udp4_gro_complete,
 	},
 };
 
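
When the merged super-packet still uses a checksum, udp4_gro_complete() stores the folded pseudo-header sum for the new total length in uh->check (which is what ~udp_v4_check(len, saddr, daddr, 0) evaluates to), leaving the final complement over header and payload to be filled in later, e.g. on a CHECKSUM_PARTIAL transmit path. A sketch of that seed value (illustrative helper; addresses and length as host-order integers):

#include <stdint.h>

/* Folded pseudo-header sum for UDP over IPv4; matches the value that
 * ~udp_v4_check(udp_len, saddr, daddr, 0) produces in the kernel.
 */
static uint16_t udp4_pseudo_seed(uint32_t saddr, uint32_t daddr,
				 uint32_t udp_len)
{
	uint32_t acc = 0;

	acc += (saddr >> 16) + (saddr & 0xffff);	/* source address */
	acc += (daddr >> 16) + (daddr & 0xffff);	/* destination address */
	acc += 17 + udp_len;				/* proto (UDP) + length */
	while (acc >> 16)				/* fold carries */
		acc = (acc & 0xffff) + (acc >> 16);

	return (uint16_t)acc;	/* not complemented: a later full sum completes it */
}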