author	Tom Herbert <therbert@google.com>	2014-08-22 16:34:44 -0400
committer	David S. Miller <davem@davemloft.net>	2014-08-24 21:09:24 -0400
commit	57c67ff4bd92af634f7c91c40eb02a96dd785dda
tree	f10aac8b764b6254075ebcba255285140d1cabea
parent	149d0774a729497c6a876260d3884826088724b6
udp: additional GRO support
Implement GRO for UDPv6. Add UDP checksum verification in gro_receive
for both UDPv4 and UDPv6 by calling skb_gro_checksum_validate_zero_check.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 include/net/udp.h      | 18
 net/ipv4/udp.c         |  1
 net/ipv4/udp_offload.c | 61
 net/ipv6/udp_offload.c | 33
 4 files changed, 96 insertions(+), 17 deletions(-)
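The gro_receive changes below hinge on UDP's checksum rules: on IPv4 a checksum field of zero means the sender computed no checksum at all and the datagram is accepted as-is, while a nonzero field must verify against a pseudo-header built from the source and destination addresses, the protocol number and the UDP length. That is the behaviour skb_gro_checksum_validate_zero_check() provides when given inet_gro_compute_pseudo (or ip6_gro_compute_pseudo). The following standalone userspace sketch, not kernel code, illustrates the rule; the helper names (csum_add, csum_fold, udp4_pseudo_sum, udp4_csum_ok) and the test addresses are made up for this example.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* 16-bit one's-complement sum over a byte buffer (network byte order). */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* pad an odd trailing byte with zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold the 32-bit accumulator down to 16 bits (no complement here). */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* One's-complement sum of the IPv4 pseudo-header: saddr, daddr, 0, proto, len. */
static uint32_t udp4_pseudo_sum(const uint8_t saddr[4], const uint8_t daddr[4],
				size_t udp_len)
{
	uint8_t pseudo[12];

	memcpy(pseudo, saddr, 4);
	memcpy(pseudo + 4, daddr, 4);
	pseudo[8]  = 0;
	pseudo[9]  = 17;		/* IPPROTO_UDP */
	pseudo[10] = (uint8_t)(udp_len >> 8);
	pseudo[11] = (uint8_t)udp_len;
	return csum_add(0, pseudo, sizeof(pseudo));
}

/* Zero-check rule: a zero checksum field means "no checksum" and passes on
 * IPv4; otherwise pseudo-header + whole datagram must sum to all-ones.
 */
static int udp4_csum_ok(const uint8_t saddr[4], const uint8_t daddr[4],
			const uint8_t *udp, size_t udp_len)
{
	uint32_t sum;

	if (udp[6] == 0 && udp[7] == 0)
		return 1;
	sum = udp4_pseudo_sum(saddr, daddr, udp_len);
	sum = csum_add(sum, udp, udp_len);
	return csum_fold(sum) == 0xffff;
}

int main(void)
{
	uint8_t saddr[4] = { 10, 0, 0, 1 };
	uint8_t daddr[4] = { 10, 0, 0, 2 };
	uint8_t dgram[12] = {
		0x04, 0xd2,		/* source port 1234 */
		0x10, 0xe1,		/* destination port 4321 */
		0x00, 0x0c,		/* UDP length 12 */
		0x00, 0x00,		/* checksum, filled in below */
		'p', 'i', 'n', 'g',	/* payload */
	};
	uint32_t sum;
	uint16_t check;

	/* Fill in a correct checksum over pseudo-header + datagram. */
	sum = udp4_pseudo_sum(saddr, daddr, sizeof(dgram));
	sum = csum_add(sum, dgram, sizeof(dgram));
	check = (uint16_t)~csum_fold(sum);
	dgram[6] = check >> 8;
	dgram[7] = check & 0xff;
	printf("checksummed:   %s\n",
	       udp4_csum_ok(saddr, daddr, dgram, sizeof(dgram)) ? "ok" : "bad");

	dgram[6] = dgram[7] = 0;	/* IPv4 allows omitting the checksum */
	printf("zero checksum: %s\n",
	       udp4_csum_ok(saddr, daddr, dgram, sizeof(dgram)) ? "ok" : "bad");
	return 0;
}

Built with any C compiler, the program reports "ok" for both the checksummed and the zero-checksum datagram, matching the two cases the GRO validation accepts.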
diff --git a/include/net/udp.h b/include/net/udp.h
index 70f941368ace..16f4e80f0519 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -158,6 +158,24 @@ static inline __sum16 udp_v4_check(int len, __be32 saddr,
 void udp_set_csum(bool nocheck, struct sk_buff *skb,
 		  __be32 saddr, __be32 daddr, int len);
 
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+				 struct udphdr *uh);
+int udp_gro_complete(struct sk_buff *skb, int nhoff);
+
+static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
+{
+	struct udphdr *uh;
+	unsigned int hlen, off;
+
+	off  = skb_gro_offset(skb);
+	hlen = off + sizeof(*uh);
+	uh   = skb_gro_header_fast(skb, off);
+	if (skb_gro_header_hard(skb, hlen))
+		uh = skb_gro_header_slow(skb, hlen, off);
+
+	return uh;
+}
+
 /* hash routines shared between UDPv4/6 and UDP-Litev4/6 */
 static inline void udp_lib_hash(struct sock *sk)
 {
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32f9571e776b..3549c21fe5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -99,6 +99,7 @@
 #include <linux/slab.h>
 #include <net/tcp_states.h>
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <net/net_namespace.h>
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 59035bc3008d..8ed460e3753c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -228,29 +228,22 @@ unlock:
 }
 EXPORT_SYMBOL(udp_del_offload);
 
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+				 struct udphdr *uh)
 {
 	struct udp_offload_priv *uo_priv;
 	struct sk_buff *p, **pp = NULL;
-	struct udphdr *uh, *uh2;
-	unsigned int hlen, off;
+	struct udphdr *uh2;
+	unsigned int off = skb_gro_offset(skb);
 	int flush = 1;
 
 	if (NAPI_GRO_CB(skb)->udp_mark ||
-	    (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+	    (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid))
 		goto out;
 
 	/* mark that this skb passed once through the udp gro layer */
 	NAPI_GRO_CB(skb)->udp_mark = 1;
-
-	off  = skb_gro_offset(skb);
-	hlen = off + sizeof(*uh);
-	uh   = skb_gro_header_fast(skb, off);
-	if (skb_gro_header_hard(skb, hlen)) {
-		uh = skb_gro_header_slow(skb, hlen, off);
-		if (unlikely(!uh))
-			goto out;
-	}
+	NAPI_GRO_CB(skb)->encapsulation++;
 
 	rcu_read_lock();
 	uo_priv = rcu_dereference(udp_offload_base);
@@ -269,7 +262,12 @@ unflush:
 			continue;
 
 		uh2 = (struct udphdr   *)(p->data + off);
-		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+		/* Match ports and either checksums are either both zero
+		 * or nonzero.
+		 */
+		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+		    (!uh->check ^ !uh2->check)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
 			continue;
 		}
@@ -286,7 +284,24 @@ out:
 	return pp;
 }
 
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_gro_udphdr(skb);
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (unlikely(!uh) ||
+	    (!NAPI_GRO_CB(skb)->flush &&
+	     skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+						  inet_gro_compute_pseudo))) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return udp_gro_receive(head, skb, uh);
+}
+
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	struct udp_offload_priv *uo_priv;
 	__be16 newlen = htons(skb->len - nhoff);
@@ -311,12 +326,24 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff)
 	return err;
 }
 
+int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	const struct iphdr *iph = ip_hdr(skb);
+	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+	if (uh->check)
+		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+					  iph->daddr, 0);
+
+	return udp_gro_complete(skb, nhoff);
+}
+
 static const struct net_offload udpv4_offload = {
 	.callbacks = {
 		.gso_send_check = udp4_ufo_send_check,
 		.gso_segment = udp4_ufo_fragment,
-		.gro_receive = udp_gro_receive,
-		.gro_complete = udp_gro_complete,
+		.gro_receive = udp4_gro_receive,
+		.gro_complete = udp4_gro_complete,
 	},
 };
 
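One detail worth noting from udp_gro_receive() above: a held packet only stays in the same GRO flow as the new one if the source/destination port pair matches (the kernel compares both 16-bit ports as a single 32-bit word) and the two checksum fields agree on being zero or nonzero, since a checksummed and a checksum-less datagram cannot share one coalesced checksum. Below is a minimal userspace sketch of that predicate; the names udp_ports and udp_same_flow are made up for this example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* First four bytes of a UDP header: source and destination port. */
struct udp_ports {
	uint16_t source;
	uint16_t dest;
};

/* Same-flow test: ports must match exactly, and the two checksum fields
 * must be either both zero or both nonzero.
 */
static int udp_same_flow(const struct udp_ports *a, uint16_t a_check,
			 const struct udp_ports *b, uint16_t b_check)
{
	uint32_t pa, pb;

	memcpy(&pa, a, sizeof(pa));	/* compare both ports in one 32-bit load */
	memcpy(&pb, b, sizeof(pb));

	if (pa != pb)
		return 0;
	if (!a_check ^ !b_check)	/* exactly one of them lacks a checksum */
		return 0;
	return 1;
}

int main(void)
{
	struct udp_ports flow = { 0x04d2, 0xe110 };	/* arbitrary test values */

	printf("same ports, both checksummed:  %d\n",
	       udp_same_flow(&flow, 0xbeef, &flow, 0x1234));
	printf("same ports, one zero checksum: %d\n",
	       udp_same_flow(&flow, 0xbeef, &flow, 0));
	return 0;
}

The program prints 1 then 0: identical ports merge only while both datagrams carry, or both omit, a checksum.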
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 0ae3d98f83e0..b13e377e9c53 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -10,6 +10,7 @@
  *      UDPv6 GSO support
  */
 #include <linux/skbuff.h>
+#include <linux/netdevice.h>
 #include <net/protocol.h>
 #include <net/ipv6.h>
 #include <net/udp.h>
@@ -127,10 +128,42 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
 out:
 	return segs;
 }
+
+static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
+					 struct sk_buff *skb)
+{
+	struct udphdr *uh = udp_gro_udphdr(skb);
+
+	/* Don't bother verifying checksum if we're going to flush anyway. */
+	if (unlikely(!uh) ||
+	    (!NAPI_GRO_CB(skb)->flush &&
+	     skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+						  ip6_gro_compute_pseudo))) {
+		NAPI_GRO_CB(skb)->flush = 1;
+		return NULL;
+	}
+
+	return udp_gro_receive(head, skb, uh);
+}
+
+int udp6_gro_complete(struct sk_buff *skb, int nhoff)
+{
+	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+	if (uh->check)
+		uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
+					  &ipv6h->daddr, 0);
+
+	return udp_gro_complete(skb, nhoff);
+}
+
 static const struct net_offload udpv6_offload = {
 	.callbacks = {
 		.gso_send_check = udp6_ufo_send_check,
 		.gso_segment = udp6_ufo_fragment,
+		.gro_receive = udp6_gro_receive,
+		.gro_complete = udp6_gro_complete,
 	},
 };
 
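Finally, the udp4_gro_complete()/udp6_gro_complete() additions rewrite uh->check of a datagram that carried a checksum with ~udp_v4_check()/~udp_v6_check() over the new, coalesced length, i.e. with the folded (uncomplemented) pseudo-header sum, so the final checksum can later be produced by summing only from the UDP header onward, as with CHECKSUM_PARTIAL-style offloads. Below is a small userspace sketch of why that seeding works; it repeats the hypothetical csum_add/csum_fold helpers from the first example so it stays self-contained, and all test values are made up.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Same hypothetical helpers as in the first sketch, repeated for completeness. */
static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* IPv4 pseudo-header for 10.0.0.1 -> 10.0.0.2, protocol 17, length 12. */
	uint8_t pseudo[12] = { 10, 0, 0, 1, 10, 0, 0, 2, 0, 17, 0, 12 };
	/* UDP header plus "ping" payload, checksum field still zero. */
	uint8_t dgram[12]  = { 0x04, 0xd2, 0x10, 0xe1, 0x00, 0x0c, 0x00, 0x00,
			       'p', 'i', 'n', 'g' };
	uint16_t full, seed, partial;

	/* Reference: checksum computed over pseudo-header + whole datagram. */
	full = (uint16_t)~csum_fold(csum_add(csum_add(0, pseudo, 12), dgram, 12));

	/* Seed the checksum field with the folded pseudo-header sum (the value
	 * ~udp_v4_check(len, saddr, daddr, 0) stores in the patch), then
	 * checksum only the datagram itself.
	 */
	seed = csum_fold(csum_add(0, pseudo, 12));
	dgram[6] = seed >> 8;
	dgram[7] = seed & 0xff;
	partial = (uint16_t)~csum_fold(csum_add(0, dgram, 12));

	printf("full 0x%04x, seeded 0x%04x -> %s\n", full, partial,
	       full == partial ? "match" : "mismatch");
	return 0;
}

Both methods produce the same checksum, which is what lets the merged super-packet be handed off with only the pseudo-header seed filled into the UDP header.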