Diffstat (limited to 'net/openvswitch/actions.c')
-rw-r--r--  net/openvswitch/actions.c | 373
1 file changed, 235 insertions(+), 138 deletions(-)
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index b4cffe686126..b491c1c296fe 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -185,10 +185,15 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	return 0;
 }
 
-static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
-		    const __be32 *mpls_lse)
+/* 'KEY' must not have any bits set outside of the 'MASK' */
+#define MASKED(OLD, KEY, MASK) ((KEY) | ((OLD) & ~(MASK)))
+#define SET_MASKED(OLD, KEY, MASK) ((OLD) = MASKED(OLD, KEY, MASK))
+
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		    const __be32 *mpls_lse, const __be32 *mask)
 {
 	__be32 *stack;
+	__be32 lse;
 	int err;
 
 	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
@@ -196,14 +201,16 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 		return err;
 
 	stack = (__be32 *)skb_mpls_header(skb);
+	lse = MASKED(*stack, *mpls_lse, *mask);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
-		__be32 diff[] = { ~(*stack), *mpls_lse };
+		__be32 diff[] = { ~(*stack), lse };
+
 		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
 					  ~skb->csum);
 	}
 
-	*stack = *mpls_lse;
-	key->mpls.top_lse = *mpls_lse;
+	*stack = lse;
+	flow_key->mpls.top_lse = lse;
 	return 0;
 }
 
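Note: the new MASKED()/SET_MASKED() helpers carry the masked-set semantics used throughout this patch: bits covered by the mask come from the key, all other bits keep their old value, under the stated invariant that the key has no bits set outside the mask. A minimal userspace sketch of that semantics (the MPLS values below are made up for illustration, not taken from the patch):

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same definition as the kernel macro above. */
#define MASKED(old, key, mask) ((key) | ((old) & ~(mask)))

int main(void)
{
	/* An MPLS label-stack entry: rewrite only the 20-bit label field
	 * (bits 12-31), leaving TC, S and TTL untouched. */
	uint32_t lse  = 0x0001e140;	/* label 30, TC 0, S 1, TTL 0x40 */
	uint32_t key  = 0x0002a000;	/* new label 42, no bits outside mask */
	uint32_t mask = 0xfffff000;	/* label bits only */

	uint32_t out = MASKED(lse, key, mask);

	/* The label changed; the low 12 bits (TC/S/TTL) survived. */
	assert(out == 0x0002a140);
	printf("0x%08" PRIx32 "\n", out);
	return 0;
}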
@@ -230,23 +237,39 @@ static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
 			      ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 }
 
-static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
-			const struct ovs_key_ethernet *eth_key)
+/* 'src' is already properly masked. */
+static void ether_addr_copy_masked(u8 *dst_, const u8 *src_, const u8 *mask_)
+{
+	u16 *dst = (u16 *)dst_;
+	const u16 *src = (const u16 *)src_;
+	const u16 *mask = (const u16 *)mask_;
+
+	SET_MASKED(dst[0], src[0], mask[0]);
+	SET_MASKED(dst[1], src[1], mask[1]);
+	SET_MASKED(dst[2], src[2], mask[2]);
+}
+
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
+			const struct ovs_key_ethernet *key,
+			const struct ovs_key_ethernet *mask)
 {
 	int err;
+
 	err = skb_ensure_writable(skb, ETH_HLEN);
 	if (unlikely(err))
 		return err;
 
 	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-	ether_addr_copy(eth_hdr(skb)->h_source, eth_key->eth_src);
-	ether_addr_copy(eth_hdr(skb)->h_dest, eth_key->eth_dst);
+	ether_addr_copy_masked(eth_hdr(skb)->h_source, key->eth_src,
+			       mask->eth_src);
+	ether_addr_copy_masked(eth_hdr(skb)->h_dest, key->eth_dst,
+			       mask->eth_dst);
 
 	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
-	ether_addr_copy(key->eth.src, eth_key->eth_src);
-	ether_addr_copy(key->eth.dst, eth_key->eth_dst);
+	ether_addr_copy(flow_key->eth.src, eth_hdr(skb)->h_source);
+	ether_addr_copy(flow_key->eth.dst, eth_hdr(skb)->h_dest);
 	return 0;
 }
 
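Note: ether_addr_copy_masked() above applies the same masked-set rule to a 6-byte MAC address, treating it as three 16-bit words to halve the number of operations. The byte-wise userspace sketch below is functionally equivalent; the addresses are made up:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Byte-wise equivalent of the kernel's u16-based helper (illustrative). */
static void ether_addr_copy_masked(uint8_t *dst, const uint8_t *src,
				   const uint8_t *mask)
{
	int i;

	for (i = 0; i < 6; i++)
		dst[i] = src[i] | (dst[i] & ~mask[i]);
}

int main(void)
{
	uint8_t dst[6]  = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	/* Rewrite only the OUI (first three bytes); keep the NIC part. */
	uint8_t src[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
	uint8_t mask[6] = { 0xff, 0xff, 0xff, 0x00, 0x00, 0x00 };
	uint8_t want[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };

	ether_addr_copy_masked(dst, src, mask);
	assert(memcmp(dst, want, 6) == 0);
	puts("ok");
	return 0;
}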
@@ -304,6 +327,15 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
 	}
 }
 
+static void mask_ipv6_addr(const __be32 old[4], const __be32 addr[4],
+			   const __be32 mask[4], __be32 masked[4])
+{
+	masked[0] = MASKED(old[0], addr[0], mask[0]);
+	masked[1] = MASKED(old[1], addr[1], mask[1]);
+	masked[2] = MASKED(old[2], addr[2], mask[2]);
+	masked[3] = MASKED(old[3], addr[3], mask[3]);
+}
+
 static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 			  __be32 addr[4], const __be32 new_addr[4],
 			  bool recalculate_csum)
@@ -315,29 +347,29 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
 	memcpy(addr, new_addr, sizeof(__be32[4]));
 }
 
-static void set_ipv6_tc(struct ipv6hdr *nh, u8 tc)
+static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl, u32 mask)
 {
-	nh->priority = tc >> 4;
-	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0x0F) | ((tc & 0x0F) << 4);
+	/* Bits 21-24 are always unmasked, so this retains their values. */
+	SET_MASKED(nh->flow_lbl[0], (u8)(fl >> 16), (u8)(mask >> 16));
+	SET_MASKED(nh->flow_lbl[1], (u8)(fl >> 8), (u8)(mask >> 8));
+	SET_MASKED(nh->flow_lbl[2], (u8)fl, (u8)mask);
 }
 
-static void set_ipv6_fl(struct ipv6hdr *nh, u32 fl)
+static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl,
+		       u8 mask)
 {
-	nh->flow_lbl[0] = (nh->flow_lbl[0] & 0xF0) | (fl & 0x000F0000) >> 16;
-	nh->flow_lbl[1] = (fl & 0x0000FF00) >> 8;
-	nh->flow_lbl[2] = fl & 0x000000FF;
-}
+	new_ttl = MASKED(nh->ttl, new_ttl, mask);
 
-static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
-{
 	csum_replace2(&nh->check, htons(nh->ttl << 8), htons(new_ttl << 8));
 	nh->ttl = new_ttl;
 }
 
-static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
-		    const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		    const struct ovs_key_ipv4 *key,
+		    const struct ovs_key_ipv4 *mask)
 {
 	struct iphdr *nh;
+	__be32 new_addr;
 	int err;
 
 	err = skb_ensure_writable(skb, skb_network_offset(skb) +
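Note on the "Bits 21-24 are always unmasked" comment in set_ipv6_fl(): flow_lbl[] covers the last 24 bits of the first IPv6 header word, so the top nibble of flow_lbl[0] belongs to the traffic class rather than the 20-bit flow label, and a label mask (at most 20 bits wide) can never disturb it. A userspace model of the three SET_MASKED() lines, with made-up values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same definitions as the kernel macros earlier in this patch. */
#define MASKED(old, key, mask) ((key) | ((old) & ~(mask)))
#define SET_MASKED(old, key, mask) ((old) = MASKED(old, key, mask))

int main(void)
{
	/* flow_lbl[0]'s high nibble (0xA here) is traffic-class bits. */
	uint8_t flow_lbl[3] = { 0xA5, 0x11, 0x22 };
	uint32_t fl = 0x12345, mask = 0xfffff;	/* full 20-bit label */

	SET_MASKED(flow_lbl[0], (uint8_t)(fl >> 16), (uint8_t)(mask >> 16));
	SET_MASKED(flow_lbl[1], (uint8_t)(fl >> 8), (uint8_t)(mask >> 8));
	SET_MASKED(flow_lbl[2], (uint8_t)fl, (uint8_t)mask);

	/* Label replaced, traffic-class nibble preserved. */
	assert(flow_lbl[0] == 0xA1 && flow_lbl[1] == 0x23 &&
	       flow_lbl[2] == 0x45);
	printf("%02x %02x %02x\n", flow_lbl[0], flow_lbl[1], flow_lbl[2]);
	return 0;
}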
@@ -347,36 +379,49 @@ static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
 
 	nh = ip_hdr(skb);
 
-	if (ipv4_key->ipv4_src != nh->saddr) {
-		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
-		key->ipv4.addr.src = ipv4_key->ipv4_src;
-	}
+	/* Setting an IP addresses is typically only a side effect of
+	 * matching on them in the current userspace implementation, so it
+	 * makes sense to check if the value actually changed.
+	 */
+	if (mask->ipv4_src) {
+		new_addr = MASKED(nh->saddr, key->ipv4_src, mask->ipv4_src);
 
-	if (ipv4_key->ipv4_dst != nh->daddr) {
-		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
-		key->ipv4.addr.dst = ipv4_key->ipv4_dst;
+		if (unlikely(new_addr != nh->saddr)) {
+			set_ip_addr(skb, nh, &nh->saddr, new_addr);
+			flow_key->ipv4.addr.src = new_addr;
+		}
 	}
+	if (mask->ipv4_dst) {
+		new_addr = MASKED(nh->daddr, key->ipv4_dst, mask->ipv4_dst);
 
-	if (ipv4_key->ipv4_tos != nh->tos) {
-		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
-		key->ip.tos = nh->tos;
+		if (unlikely(new_addr != nh->daddr)) {
+			set_ip_addr(skb, nh, &nh->daddr, new_addr);
+			flow_key->ipv4.addr.dst = new_addr;
+		}
 	}
-
-	if (ipv4_key->ipv4_ttl != nh->ttl) {
-		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
-		key->ip.ttl = ipv4_key->ipv4_ttl;
+	if (mask->ipv4_tos) {
+		ipv4_change_dsfield(nh, ~mask->ipv4_tos, key->ipv4_tos);
+		flow_key->ip.tos = nh->tos;
+	}
+	if (mask->ipv4_ttl) {
+		set_ip_ttl(skb, nh, key->ipv4_ttl, mask->ipv4_ttl);
+		flow_key->ip.ttl = nh->ttl;
 	}
 
 	return 0;
 }
 
-static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
-		    const struct ovs_key_ipv6 *ipv6_key)
+static bool is_ipv6_mask_nonzero(const __be32 addr[4])
+{
+	return !!(addr[0] | addr[1] | addr[2] | addr[3]);
+}
+
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		    const struct ovs_key_ipv6 *key,
+		    const struct ovs_key_ipv6 *mask)
 {
 	struct ipv6hdr *nh;
 	int err;
-	__be32 *saddr;
-	__be32 *daddr;
 
 	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 				  sizeof(struct ipv6hdr));
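Note: set_ipv4() now treats an all-zero field mask as "leave this field alone", rewrites only fields whose mask is non-zero, and touches the packet only when the masked result actually differs from what is already in the header. A userspace sketch of the address case, with made-up addresses and a /24 prefix mask:

#include <arpa/inet.h>
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MASKED(old, key, mask) ((key) | ((old) & ~(mask)))

int main(void)
{
	/* Move a destination into 10.0.1.0/24 while keeping the host part:
	 * the same masked set that set_ipv4() performs on nh->daddr.
	 * (Host byte order here for readability; the kernel works on
	 * network-order __be32 values.) */
	uint32_t daddr = ntohl(inet_addr("192.168.7.42"));
	uint32_t key   = ntohl(inet_addr("10.0.1.0"));
	uint32_t mask  = 0xffffff00;	/* /24 prefix only */

	uint32_t out = MASKED(daddr, key, mask);
	assert(out == ntohl(inet_addr("10.0.1.42")));

	/* An all-zero mask is skipped entirely in set_ipv4(), so TOS,
	 * TTL and the other address stay untouched. */
	printf("0x%08" PRIx32 "\n", out);	/* 10.0.1.42 */
	return 0;
}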
@@ -384,71 +429,77 @@ static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
 		return err;
 
 	nh = ipv6_hdr(skb);
-	saddr = (__be32 *)&nh->saddr;
-	daddr = (__be32 *)&nh->daddr;
-
-	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
-		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
-			      ipv6_key->ipv6_src, true);
-		memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
-		       sizeof(ipv6_key->ipv6_src));
-	}
 
-	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
+	/* Setting an IP addresses is typically only a side effect of
+	 * matching on them in the current userspace implementation, so it
+	 * makes sense to check if the value actually changed.
+	 */
+	if (is_ipv6_mask_nonzero(mask->ipv6_src)) {
+		__be32 *saddr = (__be32 *)&nh->saddr;
+		__be32 masked[4];
+
+		mask_ipv6_addr(saddr, key->ipv6_src, mask->ipv6_src, masked);
+
+		if (unlikely(memcmp(saddr, masked, sizeof(masked)))) {
+			set_ipv6_addr(skb, key->ipv6_proto, saddr, masked,
+				      true);
+			memcpy(&flow_key->ipv6.addr.src, masked,
+			       sizeof(flow_key->ipv6.addr.src));
+		}
+	}
+	if (is_ipv6_mask_nonzero(mask->ipv6_dst)) {
 		unsigned int offset = 0;
 		int flags = IP6_FH_F_SKIP_RH;
 		bool recalc_csum = true;
-
-		if (ipv6_ext_hdr(nh->nexthdr))
-			recalc_csum = ipv6_find_hdr(skb, &offset,
-						    NEXTHDR_ROUTING, NULL,
-						    &flags) != NEXTHDR_ROUTING;
-
-		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
-			      ipv6_key->ipv6_dst, recalc_csum);
-		memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
-		       sizeof(ipv6_key->ipv6_dst));
+		__be32 *daddr = (__be32 *)&nh->daddr;
+		__be32 masked[4];
+
+		mask_ipv6_addr(daddr, key->ipv6_dst, mask->ipv6_dst, masked);
+
+		if (unlikely(memcmp(daddr, masked, sizeof(masked)))) {
+			if (ipv6_ext_hdr(nh->nexthdr))
+				recalc_csum = (ipv6_find_hdr(skb, &offset,
+							     NEXTHDR_ROUTING,
+							     NULL, &flags)
+					       != NEXTHDR_ROUTING);
+
+			set_ipv6_addr(skb, key->ipv6_proto, daddr, masked,
+				      recalc_csum);
+			memcpy(&flow_key->ipv6.addr.dst, masked,
+			       sizeof(flow_key->ipv6.addr.dst));
+		}
+	}
+	if (mask->ipv6_tclass) {
+		ipv6_change_dsfield(nh, ~mask->ipv6_tclass, key->ipv6_tclass);
+		flow_key->ip.tos = ipv6_get_dsfield(nh);
+	}
+	if (mask->ipv6_label) {
+		set_ipv6_fl(nh, ntohl(key->ipv6_label),
+			    ntohl(mask->ipv6_label));
+		flow_key->ipv6.label =
+		    *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
+	}
+	if (mask->ipv6_hlimit) {
+		SET_MASKED(nh->hop_limit, key->ipv6_hlimit, mask->ipv6_hlimit);
+		flow_key->ip.ttl = nh->hop_limit;
 	}
-
-	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
-	key->ip.tos = ipv6_get_dsfield(nh);
-
-	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
-	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
-
-	nh->hop_limit = ipv6_key->ipv6_hlimit;
-	key->ip.ttl = ipv6_key->ipv6_hlimit;
 	return 0;
 }
 
 /* Must follow skb_ensure_writable() since that can move the skb data. */
 static void set_tp_port(struct sk_buff *skb, __be16 *port,
 			__be16 new_port, __sum16 *check)
 {
 	inet_proto_csum_replace2(check, skb, *port, new_port, 0);
 	*port = new_port;
-	skb_clear_hash(skb);
-}
-
-static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
-{
-	struct udphdr *uh = udp_hdr(skb);
-
-	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
-		set_tp_port(skb, port, new_port, &uh->check);
-
-		if (!uh->check)
-			uh->check = CSUM_MANGLED_0;
-	} else {
-		*port = new_port;
-		skb_clear_hash(skb);
-	}
 }
 
-static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
-		   const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		   const struct ovs_key_udp *key,
+		   const struct ovs_key_udp *mask)
 {
 	struct udphdr *uh;
+	__be16 src, dst;
 	int err;
 
 	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
@@ -457,23 +508,40 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
 		return err;
 
 	uh = udp_hdr(skb);
-	if (udp_port_key->udp_src != uh->source) {
-		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
-		key->tp.src = udp_port_key->udp_src;
-	}
+	/* Either of the masks is non-zero, so do not bother checking them. */
+	src = MASKED(uh->source, key->udp_src, mask->udp_src);
+	dst = MASKED(uh->dest, key->udp_dst, mask->udp_dst);
 
-	if (udp_port_key->udp_dst != uh->dest) {
-		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
-		key->tp.dst = udp_port_key->udp_dst;
+	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+		if (likely(src != uh->source)) {
+			set_tp_port(skb, &uh->source, src, &uh->check);
+			flow_key->tp.src = src;
+		}
+		if (likely(dst != uh->dest)) {
+			set_tp_port(skb, &uh->dest, dst, &uh->check);
+			flow_key->tp.dst = dst;
+		}
+
+		if (unlikely(!uh->check))
+			uh->check = CSUM_MANGLED_0;
+	} else {
+		uh->source = src;
+		uh->dest = dst;
+		flow_key->tp.src = src;
+		flow_key->tp.dst = dst;
 	}
 
+	skb_clear_hash(skb);
+
 	return 0;
 }
 
-static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
-		   const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		   const struct ovs_key_tcp *key,
+		   const struct ovs_key_tcp *mask)
 {
 	struct tcphdr *th;
+	__be16 src, dst;
 	int err;
 
 	err = skb_ensure_writable(skb, skb_transport_offset(skb) +
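Note: the rewritten set_udp() folds the old set_udp_port() helper into one body and keeps its UDP special case: a transmitted checksum of 0 means "no checksum" (RFC 768), so a checksum that computes to 0 must be sent as all-ones, which is what the kernel's CSUM_MANGLED_0 constant encodes. A tiny userspace model of just that corner (the helper name is made up for the demo):

#include <stdint.h>
#include <stdio.h>

/* Same value as the kernel constant. */
#define CSUM_MANGLED_0 ((uint16_t)0xffff)

/* Illustrative: fold a computed UDP checksum into its on-wire form. */
static uint16_t udp_finalize_csum(uint16_t csum)
{
	return csum == 0 ? CSUM_MANGLED_0 : csum;
}

int main(void)
{
	printf("0x%04x\n", udp_finalize_csum(0));	/* 0xffff */
	printf("0x%04x\n", udp_finalize_csum(0x1234));	/* unchanged */
	return 0;
}

If the received datagram carried no checksum at all (uh->check == 0), the else branch above just stores the new ports without any checksum work, matching the old set_udp_port() behavior.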
@@ -482,50 +550,49 @@ static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
 		return err;
 
 	th = tcp_hdr(skb);
-	if (tcp_port_key->tcp_src != th->source) {
-		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
-		key->tp.src = tcp_port_key->tcp_src;
+	src = MASKED(th->source, key->tcp_src, mask->tcp_src);
+	if (likely(src != th->source)) {
+		set_tp_port(skb, &th->source, src, &th->check);
+		flow_key->tp.src = src;
 	}
-
-	if (tcp_port_key->tcp_dst != th->dest) {
-		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
-		key->tp.dst = tcp_port_key->tcp_dst;
+	dst = MASKED(th->dest, key->tcp_dst, mask->tcp_dst);
+	if (likely(dst != th->dest)) {
+		set_tp_port(skb, &th->dest, dst, &th->check);
+		flow_key->tp.dst = dst;
 	}
+	skb_clear_hash(skb);
 
 	return 0;
 }
 
-static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
-		    const struct ovs_key_sctp *sctp_port_key)
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
+		    const struct ovs_key_sctp *key,
+		    const struct ovs_key_sctp *mask)
 {
+	unsigned int sctphoff = skb_transport_offset(skb);
 	struct sctphdr *sh;
+	__le32 old_correct_csum, new_csum, old_csum;
 	int err;
-	unsigned int sctphoff = skb_transport_offset(skb);
 
 	err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr));
 	if (unlikely(err))
 		return err;
 
 	sh = sctp_hdr(skb);
-	if (sctp_port_key->sctp_src != sh->source ||
-	    sctp_port_key->sctp_dst != sh->dest) {
-		__le32 old_correct_csum, new_csum, old_csum;
+	old_csum = sh->checksum;
+	old_correct_csum = sctp_compute_cksum(skb, sctphoff);
 
-		old_csum = sh->checksum;
-		old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+	sh->source = MASKED(sh->source, key->sctp_src, mask->sctp_src);
+	sh->dest = MASKED(sh->dest, key->sctp_dst, mask->sctp_dst);
 
-		sh->source = sctp_port_key->sctp_src;
-		sh->dest = sctp_port_key->sctp_dst;
+	new_csum = sctp_compute_cksum(skb, sctphoff);
 
-		new_csum = sctp_compute_cksum(skb, sctphoff);
+	/* Carry any checksum errors through. */
+	sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 
-		/* Carry any checksum errors through. */
-		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
-
-		skb_clear_hash(skb);
-		key->tp.src = sctp_port_key->sctp_src;
-		key->tp.dst = sctp_port_key->sctp_dst;
-	}
+	skb_clear_hash(skb);
+	flow_key->tp.src = sh->source;
+	flow_key->tp.dst = sh->dest;
 
 	return 0;
 }
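Note: set_sctp() keeps the "carry any checksum errors through" behavior: the stored CRC becomes old_csum ^ old_correct_csum ^ new_csum, so a packet that arrived with a corrupt CRC stays exactly as corrupt after the port rewrite instead of being silently repaired. A userspace model of the XOR identity, with made-up CRC values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t old_correct = 0xdeadbeef; /* CRC over the packet as received */
	uint32_t new_correct = 0x12345678; /* CRC after rewriting the ports */

	/* Case 1: packet arrived with a correct checksum; the result is
	 * the correct new checksum. */
	uint32_t old_csum = old_correct;
	assert((old_csum ^ old_correct ^ new_correct) == new_correct);

	/* Case 2: packet arrived corrupted; the same XOR delta survives
	 * the rewrite, so downstream validation still fails. */
	old_csum = old_correct ^ 0x00ff0000;
	assert((old_csum ^ old_correct ^ new_correct) ==
	       (new_correct ^ 0x00ff0000));

	puts("ok");
	return 0;
}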
@@ -653,52 +720,77 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
 	key->ovs_flow_hash = hash;
 }
 
-static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
-			      const struct nlattr *nested_attr)
+static int execute_set_action(struct sk_buff *skb,
+			      struct sw_flow_key *flow_key,
+			      const struct nlattr *a)
+{
+	/* Only tunnel set execution is supported without a mask. */
+	if (nla_type(a) == OVS_KEY_ATTR_TUNNEL_INFO) {
+		OVS_CB(skb)->egress_tun_info = nla_data(a);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Mask is at the midpoint of the data. */
+#define get_mask(a, type) ((const type)nla_data(a) + 1)
+
+static int execute_masked_set_action(struct sk_buff *skb,
+				     struct sw_flow_key *flow_key,
+				     const struct nlattr *a)
 {
 	int err = 0;
 
-	switch (nla_type(nested_attr)) {
+	switch (nla_type(a)) {
 	case OVS_KEY_ATTR_PRIORITY:
-		skb->priority = nla_get_u32(nested_attr);
-		key->phy.priority = skb->priority;
+		SET_MASKED(skb->priority, nla_get_u32(a), *get_mask(a, u32 *));
+		flow_key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
-		skb->mark = nla_get_u32(nested_attr);
-		key->phy.skb_mark = skb->mark;
+		SET_MASKED(skb->mark, nla_get_u32(a), *get_mask(a, u32 *));
+		flow_key->phy.skb_mark = skb->mark;
 		break;
 
 	case OVS_KEY_ATTR_TUNNEL_INFO:
-		OVS_CB(skb)->egress_tun_info = nla_data(nested_attr);
+		/* Masked data not supported for tunnel. */
+		err = -EINVAL;
 		break;
 
 	case OVS_KEY_ATTR_ETHERNET:
-		err = set_eth_addr(skb, key, nla_data(nested_attr));
+		err = set_eth_addr(skb, flow_key, nla_data(a),
+				   get_mask(a, struct ovs_key_ethernet *));
 		break;
 
 	case OVS_KEY_ATTR_IPV4:
-		err = set_ipv4(skb, key, nla_data(nested_attr));
+		err = set_ipv4(skb, flow_key, nla_data(a),
+			       get_mask(a, struct ovs_key_ipv4 *));
 		break;
 
 	case OVS_KEY_ATTR_IPV6:
-		err = set_ipv6(skb, key, nla_data(nested_attr));
+		err = set_ipv6(skb, flow_key, nla_data(a),
+			       get_mask(a, struct ovs_key_ipv6 *));
 		break;
 
 	case OVS_KEY_ATTR_TCP:
-		err = set_tcp(skb, key, nla_data(nested_attr));
+		err = set_tcp(skb, flow_key, nla_data(a),
+			      get_mask(a, struct ovs_key_tcp *));
 		break;
 
 	case OVS_KEY_ATTR_UDP:
-		err = set_udp(skb, key, nla_data(nested_attr));
+		err = set_udp(skb, flow_key, nla_data(a),
+			      get_mask(a, struct ovs_key_udp *));
 		break;
 
 	case OVS_KEY_ATTR_SCTP:
-		err = set_sctp(skb, key, nla_data(nested_attr));
+		err = set_sctp(skb, flow_key, nla_data(a),
+			       get_mask(a, struct ovs_key_sctp *));
 		break;
 
 	case OVS_KEY_ATTR_MPLS:
-		err = set_mpls(skb, key, nla_data(nested_attr));
+		err = set_mpls(skb, flow_key, nla_data(a), get_mask(a,
+								    __be32 *));
 		break;
 	}
 
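Note: get_mask() relies on the layout of the masked-set netlink attribute: the payload is the key struct immediately followed by an equal-sized mask struct, so casting nla_data() to the key's pointer type and adding 1 lands on the mask. A userspace sketch of that layout (the field order mirrors the uapi struct ovs_key_ipv4; the values are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the uapi struct; abbreviated types for a standalone demo. */
struct ovs_key_ipv4 {
	uint32_t ipv4_src;
	uint32_t ipv4_dst;
	uint8_t  ipv4_proto;
	uint8_t  ipv4_tos;
	uint8_t  ipv4_ttl;
	uint8_t  ipv4_frag;
};

/* Same pointer arithmetic as the kernel macro, minus nla_data(). */
#define get_mask(data, type) ((const type)(data) + 1)

int main(void)
{
	/* Userspace would build this pair as the body of a single
	 * OVS_ACTION_ATTR_SET_MASKED / OVS_KEY_ATTR_IPV4 attribute:
	 * key first, mask in the second half. */
	struct ovs_key_ipv4 payload[2] = {
		{ .ipv4_ttl = 64 },	/* key: set TTL to 64 */
		{ .ipv4_ttl = 0xff },	/* mask: TTL field only */
	};

	const struct ovs_key_ipv4 *m =
		get_mask(payload, struct ovs_key_ipv4 *);
	assert(m->ipv4_ttl == 0xff && m->ipv4_src == 0);
	puts("ok");
	return 0;
}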
@@ -818,6 +910,11 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			err = execute_set_action(skb, key, nla_data(a));
 			break;
 
+		case OVS_ACTION_ATTR_SET_MASKED:
+		case OVS_ACTION_ATTR_SET_TO_MASKED:
+			err = execute_masked_set_action(skb, key, nla_data(a));
+			break;
+
 		case OVS_ACTION_ATTR_SAMPLE:
 			err = sample(dp, skb, key, a);
 			break;