author		Pravin B Shelar <pshelar@nicira.com>	2014-11-06 09:55:14 -0500
committer	Pravin B Shelar <pshelar@nicira.com>	2014-11-09 21:58:44 -0500
commit		fff06c36a2563214073707f6e6aea152713274d1
tree		d8bcca7ad36e26a911d81e06d4275c9eb4db20b8 /net/openvswitch
parent		8f0aad6f35f7e8b3118b7b8a65e8e76b135cc4cb
openvswitch: Optimize recirc action.
OVS needs the flow key for the flow lookup done by the recirc action,
so it currently re-extracts the key before every recirculation.  In
most cases the packet key already stored in OVS_CB can be used
directly, avoiding that extra flow-key extraction.  For SET actions
the flow key can be updated along with the packet so the two stay
consistent.  Some actions, such as MPLS pop, force OVS to re-extract;
in those cases the flow key is marked invalid so that a subsequent
recirc action performs a full flow key extract.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Jarno Rajahalme <jrajahalme@nicira.com>
Acked-by: Andy Zhou <azhou@nicira.com>
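
In short, the patch adds a cheap validity flag to the flow key: set actions update the key in place, actions that change the packet beyond what the key tracks invalidate it, and recirculation re-extracts only when the key has gone stale. The following self-contained userspace sketch illustrates that pattern; the types and helpers (struct packet, flow_key_extract(), destructive_action(), etc.) are made-up stand-ins for illustration only, not the kernel API. The authoritative change is the diff below.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures; only the fields needed
 * to show the validity-tracking idea are included (hypothetical types). */
struct sw_flow_key {
	uint16_t eth_type;	/* 0 means "key no longer matches the packet" */
	uint8_t  ip_ttl;
};

struct packet {
	uint16_t eth_type;
	uint8_t  ip_ttl;
};

/* A zero EtherType never occurs in a freshly extracted key, so it doubles
 * as the "stale key" marker. */
static void invalidate_flow_key(struct sw_flow_key *key)
{
	key->eth_type = 0;
}

static bool is_flow_key_valid(const struct sw_flow_key *key)
{
	return key->eth_type != 0;
}

/* Stand-in for ovs_flow_key_update(): re-derive the key from the packet. */
static void flow_key_extract(const struct packet *pkt, struct sw_flow_key *key)
{
	key->eth_type = pkt->eth_type;
	key->ip_ttl = pkt->ip_ttl;
}

/* A SET-style action: the key is updated alongside the packet, so no
 * re-extraction is needed later. */
static void set_ip_ttl_action(struct packet *pkt, struct sw_flow_key *key,
			      uint8_t ttl)
{
	pkt->ip_ttl = ttl;
	key->ip_ttl = ttl;
}

/* An MPLS-pop-style action: the packet changes in a way the key does not
 * track, so the key is marked invalid instead of being patched up. */
static void destructive_action(struct packet *pkt, struct sw_flow_key *key)
{
	pkt->eth_type = 0x0800;	/* pretend the outer header changed */
	invalidate_flow_key(key);
}

/* Recirculation only pays for a full extract when the key went stale. */
static void execute_recirc(const struct packet *pkt, struct sw_flow_key *key)
{
	if (!is_flow_key_valid(key))
		flow_key_extract(pkt, key);
	printf("recirc lookup with eth_type=0x%04x ttl=%u\n",
	       (unsigned)key->eth_type, (unsigned)key->ip_ttl);
}

int main(void)
{
	struct packet pkt = { .eth_type = 0x8847, .ip_ttl = 64 };
	struct sw_flow_key key;

	flow_key_extract(&pkt, &key);

	set_ip_ttl_action(&pkt, &key, 63);	/* key kept consistent, no re-extract */
	execute_recirc(&pkt, &key);

	destructive_action(&pkt, &key);		/* key invalidated */
	execute_recirc(&pkt, &key);		/* full extract happens here */
	return 0;
}

The kernel change below follows the same shape: invalidate_flow_key()/is_flow_key_valid() overload key->eth.type as the staleness marker, and execute_recirc() calls ovs_flow_key_update() only when the key has been invalidated.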
Diffstat (limited to 'net/openvswitch')
-rw-r--r--	net/openvswitch/actions.c | 151
1 file changed, 106 insertions, 45 deletions
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index ceb618cf1292..d4c2f735d999 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -109,6 +109,16 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb,
 	return da;
 }
 
+static void invalidate_flow_key(struct sw_flow_key *key)
+{
+	key->eth.type = htons(0);
+}
+
+static bool is_flow_key_valid(const struct sw_flow_key *key)
+{
+	return !!key->eth.type;
+}
+
 static int make_writable(struct sk_buff *skb, int write_len)
 {
 	if (!pskb_may_pull(skb, write_len))
@@ -120,7 +130,7 @@ static int make_writable(struct sk_buff *skb, int write_len)
 	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 }
 
-static int push_mpls(struct sk_buff *skb,
+static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 		     const struct ovs_action_push_mpls *mpls)
 {
 	__be32 *new_mpls_lse;
@@ -151,10 +161,12 @@ static int push_mpls(struct sk_buff *skb,
 	skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
 
+	invalidate_flow_key(key);
 	return 0;
 }
 
-static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
+static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+		    const __be16 ethertype)
 {
 	struct ethhdr *hdr;
 	int err;
@@ -181,10 +193,13 @@ static int pop_mpls(struct sk_buff *skb, const __be16 ethertype)
 	hdr->h_proto = ethertype;
 	if (eth_p_mpls(skb->protocol))
 		skb->protocol = ethertype;
+
+	invalidate_flow_key(key);
 	return 0;
 }
 
-static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
+static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key,
+		    const __be32 *mpls_lse)
 {
 	__be32 *stack;
 	int err;
@@ -196,13 +211,12 @@ static int set_mpls(struct sk_buff *skb, const __be32 *mpls_lse)
 	stack = (__be32 *)skb_mpls_header(skb);
 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
 		__be32 diff[] = { ~(*stack), *mpls_lse };
-
 		skb->csum = ~csum_partial((char *)diff, sizeof(diff),
 					  ~skb->csum);
 	}
 
 	*stack = *mpls_lse;
-
+	key->mpls.top_lse = *mpls_lse;
 	return 0;
 }
 
@@ -237,7 +251,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)
 	return 0;
 }
 
-static int pop_vlan(struct sk_buff *skb)
+static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key)
 {
 	__be16 tci;
 	int err;
@@ -255,9 +269,12 @@ static int pop_vlan(struct sk_buff *skb)
 	}
 	/* move next vlan tag to hw accel tag */
 	if (likely(skb->protocol != htons(ETH_P_8021Q) ||
-		   skb->len < VLAN_ETH_HLEN))
+		   skb->len < VLAN_ETH_HLEN)) {
+		key->eth.tci = 0;
 		return 0;
+	}
 
+	invalidate_flow_key(key);
 	err = __pop_vlan_tci(skb, &tci);
 	if (unlikely(err))
 		return err;
@@ -266,7 +283,8 @@ static int pop_vlan(struct sk_buff *skb)
 	return 0;
 }
 
-static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan)
+static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key,
+		     const struct ovs_action_push_vlan *vlan)
 {
 	if (unlikely(vlan_tx_tag_present(skb))) {
 		u16 current_tag;
@@ -283,12 +301,15 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vla
 		skb->csum = csum_add(skb->csum, csum_partial(skb->data
 					+ (2 * ETH_ALEN), VLAN_HLEN, 0));
 
+		invalidate_flow_key(key);
+	} else {
+		key->eth.tci = vlan->vlan_tci;
 	}
 	__vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT);
 	return 0;
 }
 
-static int set_eth_addr(struct sk_buff *skb,
+static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key,
 			const struct ovs_key_ethernet *eth_key)
 {
 	int err;
@@ -303,11 +324,13 @@ static int set_eth_addr(struct sk_buff *skb,
 
 	ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2);
 
+	ether_addr_copy(key->eth.src, eth_key->eth_src);
+	ether_addr_copy(key->eth.dst, eth_key->eth_dst);
 	return 0;
 }
 
 static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
 			__be32 *addr, __be32 new_addr)
 {
 	int transport_len = skb->len - skb_transport_offset(skb);
 
@@ -386,7 +409,8 @@ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl)
 	nh->ttl = new_ttl;
 }
 
-static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
+static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key,
+		    const struct ovs_key_ipv4 *ipv4_key)
 {
 	struct iphdr *nh;
 	int err;
@@ -398,22 +422,31 @@ static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key)
 
 	nh = ip_hdr(skb);
 
-	if (ipv4_key->ipv4_src != nh->saddr)
+	if (ipv4_key->ipv4_src != nh->saddr) {
 		set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src);
+		key->ipv4.addr.src = ipv4_key->ipv4_src;
+	}
 
-	if (ipv4_key->ipv4_dst != nh->daddr)
+	if (ipv4_key->ipv4_dst != nh->daddr) {
 		set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst);
+		key->ipv4.addr.dst = ipv4_key->ipv4_dst;
+	}
 
-	if (ipv4_key->ipv4_tos != nh->tos)
+	if (ipv4_key->ipv4_tos != nh->tos) {
 		ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos);
+		key->ip.tos = nh->tos;
+	}
 
-	if (ipv4_key->ipv4_ttl != nh->ttl)
+	if (ipv4_key->ipv4_ttl != nh->ttl) {
 		set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl);
+		key->ip.ttl = ipv4_key->ipv4_ttl;
+	}
 
 	return 0;
 }
 
-static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
+static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key,
+		    const struct ovs_key_ipv6 *ipv6_key)
 {
 	struct ipv6hdr *nh;
 	int err;
@@ -429,9 +462,12 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
 	saddr = (__be32 *)&nh->saddr;
 	daddr = (__be32 *)&nh->daddr;
 
-	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src)))
+	if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) {
 		set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr,
 			      ipv6_key->ipv6_src, true);
+		memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src,
+		       sizeof(ipv6_key->ipv6_src));
+	}
 
 	if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) {
 		unsigned int offset = 0;
@@ -445,12 +481,18 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key)
 
 		set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr,
 			      ipv6_key->ipv6_dst, recalc_csum);
+		memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst,
+		       sizeof(ipv6_key->ipv6_dst));
 	}
 
 	set_ipv6_tc(nh, ipv6_key->ipv6_tclass);
+	key->ip.tos = ipv6_get_dsfield(nh);
+
 	set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label));
-	nh->hop_limit = ipv6_key->ipv6_hlimit;
+	key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL);
 
+	nh->hop_limit = ipv6_key->ipv6_hlimit;
+	key->ip.ttl = ipv6_key->ipv6_hlimit;
 	return 0;
 }
 
@@ -478,7 +520,8 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
 	}
 }
 
-static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
+static int set_udp(struct sk_buff *skb, struct sw_flow_key *key,
+		   const struct ovs_key_udp *udp_port_key)
 {
 	struct udphdr *uh;
 	int err;
@@ -489,16 +532,21 @@ static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
 		return err;
 
 	uh = udp_hdr(skb);
-	if (udp_port_key->udp_src != uh->source)
+	if (udp_port_key->udp_src != uh->source) {
 		set_udp_port(skb, &uh->source, udp_port_key->udp_src);
+		key->tp.src = udp_port_key->udp_src;
+	}
 
-	if (udp_port_key->udp_dst != uh->dest)
+	if (udp_port_key->udp_dst != uh->dest) {
 		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
+		key->tp.dst = udp_port_key->udp_dst;
+	}
 
 	return 0;
 }
 
-static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key,
+		   const struct ovs_key_tcp *tcp_port_key)
 {
 	struct tcphdr *th;
 	int err;
@@ -509,17 +557,21 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 		return err;
 
 	th = tcp_hdr(skb);
-	if (tcp_port_key->tcp_src != th->source)
+	if (tcp_port_key->tcp_src != th->source) {
 		set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check);
+		key->tp.src = tcp_port_key->tcp_src;
+	}
 
-	if (tcp_port_key->tcp_dst != th->dest)
+	if (tcp_port_key->tcp_dst != th->dest) {
 		set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check);
+		key->tp.dst = tcp_port_key->tcp_dst;
+	}
 
 	return 0;
 }
 
-static int set_sctp(struct sk_buff *skb,
+static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key,
 		    const struct ovs_key_sctp *sctp_port_key)
 {
 	struct sctphdr *sh;
 	int err;
@@ -546,6 +598,8 @@ static int set_sctp(struct sk_buff *skb,
 		sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
 
 		skb_clear_hash(skb);
+		key->tp.src = sctp_port_key->sctp_src;
+		key->tp.dst = sctp_port_key->sctp_dst;
 	}
 
 	return 0;
@@ -675,18 +729,20 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key,
 	key->ovs_flow_hash = hash;
 }
 
-static int execute_set_action(struct sk_buff *skb,
+static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key,
 			      const struct nlattr *nested_attr)
 {
 	int err = 0;
 
 	switch (nla_type(nested_attr)) {
 	case OVS_KEY_ATTR_PRIORITY:
 		skb->priority = nla_get_u32(nested_attr);
+		key->phy.priority = skb->priority;
 		break;
 
 	case OVS_KEY_ATTR_SKB_MARK:
 		skb->mark = nla_get_u32(nested_attr);
+		key->phy.skb_mark = skb->mark;
 		break;
 
 	case OVS_KEY_ATTR_TUNNEL_INFO:
@@ -694,31 +750,31 @@ static int execute_set_action(struct sk_buff *skb,
 		break;
 
 	case OVS_KEY_ATTR_ETHERNET:
-		err = set_eth_addr(skb, nla_data(nested_attr));
+		err = set_eth_addr(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_IPV4:
-		err = set_ipv4(skb, nla_data(nested_attr));
+		err = set_ipv4(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_IPV6:
-		err = set_ipv6(skb, nla_data(nested_attr));
+		err = set_ipv6(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_TCP:
-		err = set_tcp(skb, nla_data(nested_attr));
+		err = set_tcp(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_UDP:
-		err = set_udp(skb, nla_data(nested_attr));
+		err = set_udp(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_SCTP:
-		err = set_sctp(skb, nla_data(nested_attr));
+		err = set_sctp(skb, key, nla_data(nested_attr));
 		break;
 
 	case OVS_KEY_ATTR_MPLS:
-		err = set_mpls(skb, nla_data(nested_attr));
+		err = set_mpls(skb, key, nla_data(nested_attr));
 		break;
 	}
 
@@ -730,11 +786,15 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb,
 			  const struct nlattr *a, int rem)
 {
 	struct deferred_action *da;
-	int err;
 
-	err = ovs_flow_key_update(skb, key);
-	if (err)
-		return err;
+	if (!is_flow_key_valid(key)) {
+		int err;
+
+		err = ovs_flow_key_update(skb, key);
+		if (err)
+			return err;
+	}
+	BUG_ON(!is_flow_key_valid(key));
 
 	if (!nla_is_last(a, rem)) {
 		/* Recirc action is the not the last action
@@ -771,7 +831,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 	/* Every output action needs a separate clone of 'skb', but the common
 	 * case is just a single output action, so that doing a clone and
 	 * then freeing the original skbuff is wasteful. So the following code
-	 * is slightly obscure just to avoid that. */
+	 * is slightly obscure just to avoid that.
+	 */
 	int prev_port = -1;
 	const struct nlattr *a;
 	int rem;
@@ -803,21 +864,21 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			break;
 
 		case OVS_ACTION_ATTR_PUSH_MPLS:
-			err = push_mpls(skb, nla_data(a));
+			err = push_mpls(skb, key, nla_data(a));
 			break;
 
 		case OVS_ACTION_ATTR_POP_MPLS:
-			err = pop_mpls(skb, nla_get_be16(a));
+			err = pop_mpls(skb, key, nla_get_be16(a));
 			break;
 
 		case OVS_ACTION_ATTR_PUSH_VLAN:
-			err = push_vlan(skb, nla_data(a));
+			err = push_vlan(skb, key, nla_data(a));
 			if (unlikely(err)) /* skb already freed. */
 				return err;
 			break;
 
 		case OVS_ACTION_ATTR_POP_VLAN:
-			err = pop_vlan(skb);
+			err = pop_vlan(skb, key);
 			break;
 
 		case OVS_ACTION_ATTR_RECIRC:
@@ -832,7 +893,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 			break;
 
 		case OVS_ACTION_ATTR_SET:
-			err = execute_set_action(skb, nla_data(a));
+			err = execute_set_action(skb, key, nla_data(a));
 			break;
 
 		case OVS_ACTION_ATTR_SAMPLE: