diff options
Diffstat (limited to 'net/openvswitch')
| -rw-r--r-- | net/openvswitch/Makefile | 2 | ||||
| -rw-r--r-- | net/openvswitch/datapath.c | 668 | ||||
| -rw-r--r-- | net/openvswitch/datapath.h | 9 | ||||
| -rw-r--r-- | net/openvswitch/flow.c | 1605 | ||||
| -rw-r--r-- | net/openvswitch/flow.h | 132 | ||||
| -rw-r--r-- | net/openvswitch/flow_netlink.c | 1630 | ||||
| -rw-r--r-- | net/openvswitch/flow_netlink.h | 60 | ||||
| -rw-r--r-- | net/openvswitch/flow_table.c | 592 | ||||
| -rw-r--r-- | net/openvswitch/flow_table.h | 81 | ||||
| -rw-r--r-- | net/openvswitch/vport-gre.c | 2 | ||||
| -rw-r--r-- | net/openvswitch/vport-internal_dev.c | 2 | ||||
| -rw-r--r-- | net/openvswitch/vport-vxlan.c | 1 |
12 files changed, 2496 insertions, 2288 deletions
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index ea36e99089af..3591cb5dae91 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile | |||
| @@ -9,6 +9,8 @@ openvswitch-y := \ | |||
| 9 | datapath.o \ | 9 | datapath.o \ |
| 10 | dp_notify.o \ | 10 | dp_notify.o \ |
| 11 | flow.o \ | 11 | flow.o \ |
| 12 | flow_netlink.o \ | ||
| 13 | flow_table.o \ | ||
| 12 | vport.o \ | 14 | vport.o \ |
| 13 | vport-internal_dev.o \ | 15 | vport-internal_dev.o \ |
| 14 | vport-netdev.o | 16 | vport-netdev.o |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2aa13bd7f2b2..1408adc2a2a7 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
| @@ -55,14 +55,10 @@ | |||
| 55 | 55 | ||
| 56 | #include "datapath.h" | 56 | #include "datapath.h" |
| 57 | #include "flow.h" | 57 | #include "flow.h" |
| 58 | #include "flow_netlink.h" | ||
| 58 | #include "vport-internal_dev.h" | 59 | #include "vport-internal_dev.h" |
| 59 | #include "vport-netdev.h" | 60 | #include "vport-netdev.h" |
| 60 | 61 | ||
| 61 | |||
| 62 | #define REHASH_FLOW_INTERVAL (10 * 60 * HZ) | ||
| 63 | static void rehash_flow_table(struct work_struct *work); | ||
| 64 | static DECLARE_DELAYED_WORK(rehash_flow_wq, rehash_flow_table); | ||
| 65 | |||
| 66 | int ovs_net_id __read_mostly; | 62 | int ovs_net_id __read_mostly; |
| 67 | 63 | ||
| 68 | static void ovs_notify(struct sk_buff *skb, struct genl_info *info, | 64 | static void ovs_notify(struct sk_buff *skb, struct genl_info *info, |
| @@ -165,7 +161,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu) | |||
| 165 | { | 161 | { |
| 166 | struct datapath *dp = container_of(rcu, struct datapath, rcu); | 162 | struct datapath *dp = container_of(rcu, struct datapath, rcu); |
| 167 | 163 | ||
| 168 | ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false); | 164 | ovs_flow_tbl_destroy(&dp->table); |
| 169 | free_percpu(dp->stats_percpu); | 165 | free_percpu(dp->stats_percpu); |
| 170 | release_net(ovs_dp_get_net(dp)); | 166 | release_net(ovs_dp_get_net(dp)); |
| 171 | kfree(dp->ports); | 167 | kfree(dp->ports); |
| @@ -225,6 +221,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) | |||
| 225 | struct dp_stats_percpu *stats; | 221 | struct dp_stats_percpu *stats; |
| 226 | struct sw_flow_key key; | 222 | struct sw_flow_key key; |
| 227 | u64 *stats_counter; | 223 | u64 *stats_counter; |
| 224 | u32 n_mask_hit; | ||
| 228 | int error; | 225 | int error; |
| 229 | 226 | ||
| 230 | stats = this_cpu_ptr(dp->stats_percpu); | 227 | stats = this_cpu_ptr(dp->stats_percpu); |
| @@ -237,7 +234,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) | |||
| 237 | } | 234 | } |
| 238 | 235 | ||
| 239 | /* Look up flow. */ | 236 | /* Look up flow. */ |
| 240 | flow = ovs_flow_lookup(rcu_dereference(dp->table), &key); | 237 | flow = ovs_flow_tbl_lookup(&dp->table, &key, &n_mask_hit); |
| 241 | if (unlikely(!flow)) { | 238 | if (unlikely(!flow)) { |
| 242 | struct dp_upcall_info upcall; | 239 | struct dp_upcall_info upcall; |
| 243 | 240 | ||
| @@ -262,6 +259,7 @@ out: | |||
| 262 | /* Update datapath statistics. */ | 259 | /* Update datapath statistics. */ |
| 263 | u64_stats_update_begin(&stats->sync); | 260 | u64_stats_update_begin(&stats->sync); |
| 264 | (*stats_counter)++; | 261 | (*stats_counter)++; |
| 262 | stats->n_mask_hit += n_mask_hit; | ||
| 265 | u64_stats_update_end(&stats->sync); | 263 | u64_stats_update_end(&stats->sync); |
| 266 | } | 264 | } |
| 267 | 265 | ||
| @@ -435,7 +433,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex, | |||
| 435 | upcall->dp_ifindex = dp_ifindex; | 433 | upcall->dp_ifindex = dp_ifindex; |
| 436 | 434 | ||
| 437 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); | 435 | nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); |
| 438 | ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb); | 436 | ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb); |
| 439 | nla_nest_end(user_skb, nla); | 437 | nla_nest_end(user_skb, nla); |
| 440 | 438 | ||
| 441 | if (upcall_info->userdata) | 439 | if (upcall_info->userdata) |
| @@ -455,398 +453,6 @@ out: | |||
| 455 | return err; | 453 | return err; |
| 456 | } | 454 | } |
| 457 | 455 | ||
| 458 | /* Called with ovs_mutex. */ | ||
| 459 | static int flush_flows(struct datapath *dp) | ||
| 460 | { | ||
| 461 | struct flow_table *old_table; | ||
| 462 | struct flow_table *new_table; | ||
| 463 | |||
| 464 | old_table = ovsl_dereference(dp->table); | ||
| 465 | new_table = ovs_flow_tbl_alloc(TBL_MIN_BUCKETS); | ||
| 466 | if (!new_table) | ||
| 467 | return -ENOMEM; | ||
| 468 | |||
| 469 | rcu_assign_pointer(dp->table, new_table); | ||
| 470 | |||
| 471 | ovs_flow_tbl_destroy(old_table, true); | ||
| 472 | return 0; | ||
| 473 | } | ||
| 474 | |||
| 475 | static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, int attr_len) | ||
| 476 | { | ||
| 477 | |||
| 478 | struct sw_flow_actions *acts; | ||
| 479 | int new_acts_size; | ||
| 480 | int req_size = NLA_ALIGN(attr_len); | ||
| 481 | int next_offset = offsetof(struct sw_flow_actions, actions) + | ||
| 482 | (*sfa)->actions_len; | ||
| 483 | |||
| 484 | if (req_size <= (ksize(*sfa) - next_offset)) | ||
| 485 | goto out; | ||
| 486 | |||
| 487 | new_acts_size = ksize(*sfa) * 2; | ||
| 488 | |||
| 489 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | ||
| 490 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) | ||
| 491 | return ERR_PTR(-EMSGSIZE); | ||
| 492 | new_acts_size = MAX_ACTIONS_BUFSIZE; | ||
| 493 | } | ||
| 494 | |||
| 495 | acts = ovs_flow_actions_alloc(new_acts_size); | ||
| 496 | if (IS_ERR(acts)) | ||
| 497 | return (void *)acts; | ||
| 498 | |||
| 499 | memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len); | ||
| 500 | acts->actions_len = (*sfa)->actions_len; | ||
| 501 | kfree(*sfa); | ||
| 502 | *sfa = acts; | ||
| 503 | |||
| 504 | out: | ||
| 505 | (*sfa)->actions_len += req_size; | ||
| 506 | return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset); | ||
| 507 | } | ||
| 508 | |||
| 509 | static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len) | ||
| 510 | { | ||
| 511 | struct nlattr *a; | ||
| 512 | |||
| 513 | a = reserve_sfa_size(sfa, nla_attr_size(len)); | ||
| 514 | if (IS_ERR(a)) | ||
| 515 | return PTR_ERR(a); | ||
| 516 | |||
| 517 | a->nla_type = attrtype; | ||
| 518 | a->nla_len = nla_attr_size(len); | ||
| 519 | |||
| 520 | if (data) | ||
| 521 | memcpy(nla_data(a), data, len); | ||
| 522 | memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); | ||
| 523 | |||
| 524 | return 0; | ||
| 525 | } | ||
| 526 | |||
| 527 | static inline int add_nested_action_start(struct sw_flow_actions **sfa, int attrtype) | ||
| 528 | { | ||
| 529 | int used = (*sfa)->actions_len; | ||
| 530 | int err; | ||
| 531 | |||
| 532 | err = add_action(sfa, attrtype, NULL, 0); | ||
| 533 | if (err) | ||
| 534 | return err; | ||
| 535 | |||
| 536 | return used; | ||
| 537 | } | ||
| 538 | |||
| 539 | static inline void add_nested_action_end(struct sw_flow_actions *sfa, int st_offset) | ||
| 540 | { | ||
| 541 | struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + st_offset); | ||
| 542 | |||
| 543 | a->nla_len = sfa->actions_len - st_offset; | ||
| 544 | } | ||
| 545 | |||
| 546 | static int validate_and_copy_actions(const struct nlattr *attr, | ||
| 547 | const struct sw_flow_key *key, int depth, | ||
| 548 | struct sw_flow_actions **sfa); | ||
| 549 | |||
| 550 | static int validate_and_copy_sample(const struct nlattr *attr, | ||
| 551 | const struct sw_flow_key *key, int depth, | ||
| 552 | struct sw_flow_actions **sfa) | ||
| 553 | { | ||
| 554 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; | ||
| 555 | const struct nlattr *probability, *actions; | ||
| 556 | const struct nlattr *a; | ||
| 557 | int rem, start, err, st_acts; | ||
| 558 | |||
| 559 | memset(attrs, 0, sizeof(attrs)); | ||
| 560 | nla_for_each_nested(a, attr, rem) { | ||
| 561 | int type = nla_type(a); | ||
| 562 | if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) | ||
| 563 | return -EINVAL; | ||
| 564 | attrs[type] = a; | ||
| 565 | } | ||
| 566 | if (rem) | ||
| 567 | return -EINVAL; | ||
| 568 | |||
| 569 | probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; | ||
| 570 | if (!probability || nla_len(probability) != sizeof(u32)) | ||
| 571 | return -EINVAL; | ||
| 572 | |||
| 573 | actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; | ||
| 574 | if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) | ||
| 575 | return -EINVAL; | ||
| 576 | |||
| 577 | /* validation done, copy sample action. */ | ||
| 578 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE); | ||
| 579 | if (start < 0) | ||
| 580 | return start; | ||
| 581 | err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, nla_data(probability), sizeof(u32)); | ||
| 582 | if (err) | ||
| 583 | return err; | ||
| 584 | st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS); | ||
| 585 | if (st_acts < 0) | ||
| 586 | return st_acts; | ||
| 587 | |||
| 588 | err = validate_and_copy_actions(actions, key, depth + 1, sfa); | ||
| 589 | if (err) | ||
| 590 | return err; | ||
| 591 | |||
| 592 | add_nested_action_end(*sfa, st_acts); | ||
| 593 | add_nested_action_end(*sfa, start); | ||
| 594 | |||
| 595 | return 0; | ||
| 596 | } | ||
| 597 | |||
| 598 | static int validate_tp_port(const struct sw_flow_key *flow_key) | ||
| 599 | { | ||
| 600 | if (flow_key->eth.type == htons(ETH_P_IP)) { | ||
| 601 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) | ||
| 602 | return 0; | ||
| 603 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | ||
| 604 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) | ||
| 605 | return 0; | ||
| 606 | } | ||
| 607 | |||
| 608 | return -EINVAL; | ||
| 609 | } | ||
| 610 | |||
| 611 | static int validate_and_copy_set_tun(const struct nlattr *attr, | ||
| 612 | struct sw_flow_actions **sfa) | ||
| 613 | { | ||
| 614 | struct sw_flow_match match; | ||
| 615 | struct sw_flow_key key; | ||
| 616 | int err, start; | ||
| 617 | |||
| 618 | ovs_match_init(&match, &key, NULL); | ||
| 619 | err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false); | ||
| 620 | if (err) | ||
| 621 | return err; | ||
| 622 | |||
| 623 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); | ||
| 624 | if (start < 0) | ||
| 625 | return start; | ||
| 626 | |||
| 627 | err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, | ||
| 628 | sizeof(match.key->tun_key)); | ||
| 629 | add_nested_action_end(*sfa, start); | ||
| 630 | |||
| 631 | return err; | ||
| 632 | } | ||
| 633 | |||
| 634 | static int validate_set(const struct nlattr *a, | ||
| 635 | const struct sw_flow_key *flow_key, | ||
| 636 | struct sw_flow_actions **sfa, | ||
| 637 | bool *set_tun) | ||
| 638 | { | ||
| 639 | const struct nlattr *ovs_key = nla_data(a); | ||
| 640 | int key_type = nla_type(ovs_key); | ||
| 641 | |||
| 642 | /* There can be only one key in a action */ | ||
| 643 | if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) | ||
| 644 | return -EINVAL; | ||
| 645 | |||
| 646 | if (key_type > OVS_KEY_ATTR_MAX || | ||
| 647 | (ovs_key_lens[key_type] != nla_len(ovs_key) && | ||
| 648 | ovs_key_lens[key_type] != -1)) | ||
| 649 | return -EINVAL; | ||
| 650 | |||
| 651 | switch (key_type) { | ||
| 652 | const struct ovs_key_ipv4 *ipv4_key; | ||
| 653 | const struct ovs_key_ipv6 *ipv6_key; | ||
| 654 | int err; | ||
| 655 | |||
| 656 | case OVS_KEY_ATTR_PRIORITY: | ||
| 657 | case OVS_KEY_ATTR_SKB_MARK: | ||
| 658 | case OVS_KEY_ATTR_ETHERNET: | ||
| 659 | break; | ||
| 660 | |||
| 661 | case OVS_KEY_ATTR_TUNNEL: | ||
| 662 | *set_tun = true; | ||
| 663 | err = validate_and_copy_set_tun(a, sfa); | ||
| 664 | if (err) | ||
| 665 | return err; | ||
| 666 | break; | ||
| 667 | |||
| 668 | case OVS_KEY_ATTR_IPV4: | ||
| 669 | if (flow_key->eth.type != htons(ETH_P_IP)) | ||
| 670 | return -EINVAL; | ||
| 671 | |||
| 672 | if (!flow_key->ip.proto) | ||
| 673 | return -EINVAL; | ||
| 674 | |||
| 675 | ipv4_key = nla_data(ovs_key); | ||
| 676 | if (ipv4_key->ipv4_proto != flow_key->ip.proto) | ||
| 677 | return -EINVAL; | ||
| 678 | |||
| 679 | if (ipv4_key->ipv4_frag != flow_key->ip.frag) | ||
| 680 | return -EINVAL; | ||
| 681 | |||
| 682 | break; | ||
| 683 | |||
| 684 | case OVS_KEY_ATTR_IPV6: | ||
| 685 | if (flow_key->eth.type != htons(ETH_P_IPV6)) | ||
| 686 | return -EINVAL; | ||
| 687 | |||
| 688 | if (!flow_key->ip.proto) | ||
| 689 | return -EINVAL; | ||
| 690 | |||
| 691 | ipv6_key = nla_data(ovs_key); | ||
| 692 | if (ipv6_key->ipv6_proto != flow_key->ip.proto) | ||
| 693 | return -EINVAL; | ||
| 694 | |||
| 695 | if (ipv6_key->ipv6_frag != flow_key->ip.frag) | ||
| 696 | return -EINVAL; | ||
| 697 | |||
| 698 | if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) | ||
| 699 | return -EINVAL; | ||
| 700 | |||
| 701 | break; | ||
| 702 | |||
| 703 | case OVS_KEY_ATTR_TCP: | ||
| 704 | if (flow_key->ip.proto != IPPROTO_TCP) | ||
| 705 | return -EINVAL; | ||
| 706 | |||
| 707 | return validate_tp_port(flow_key); | ||
| 708 | |||
| 709 | case OVS_KEY_ATTR_UDP: | ||
| 710 | if (flow_key->ip.proto != IPPROTO_UDP) | ||
| 711 | return -EINVAL; | ||
| 712 | |||
| 713 | return validate_tp_port(flow_key); | ||
| 714 | |||
| 715 | case OVS_KEY_ATTR_SCTP: | ||
| 716 | if (flow_key->ip.proto != IPPROTO_SCTP) | ||
| 717 | return -EINVAL; | ||
| 718 | |||
| 719 | return validate_tp_port(flow_key); | ||
| 720 | |||
| 721 | default: | ||
| 722 | return -EINVAL; | ||
| 723 | } | ||
| 724 | |||
| 725 | return 0; | ||
| 726 | } | ||
| 727 | |||
| 728 | static int validate_userspace(const struct nlattr *attr) | ||
| 729 | { | ||
| 730 | static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { | ||
| 731 | [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, | ||
| 732 | [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, | ||
| 733 | }; | ||
| 734 | struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; | ||
| 735 | int error; | ||
| 736 | |||
| 737 | error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, | ||
| 738 | attr, userspace_policy); | ||
| 739 | if (error) | ||
| 740 | return error; | ||
| 741 | |||
| 742 | if (!a[OVS_USERSPACE_ATTR_PID] || | ||
| 743 | !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) | ||
| 744 | return -EINVAL; | ||
| 745 | |||
| 746 | return 0; | ||
| 747 | } | ||
| 748 | |||
| 749 | static int copy_action(const struct nlattr *from, | ||
| 750 | struct sw_flow_actions **sfa) | ||
| 751 | { | ||
| 752 | int totlen = NLA_ALIGN(from->nla_len); | ||
| 753 | struct nlattr *to; | ||
| 754 | |||
| 755 | to = reserve_sfa_size(sfa, from->nla_len); | ||
| 756 | if (IS_ERR(to)) | ||
| 757 | return PTR_ERR(to); | ||
| 758 | |||
| 759 | memcpy(to, from, totlen); | ||
| 760 | return 0; | ||
| 761 | } | ||
| 762 | |||
| 763 | static int validate_and_copy_actions(const struct nlattr *attr, | ||
| 764 | const struct sw_flow_key *key, | ||
| 765 | int depth, | ||
| 766 | struct sw_flow_actions **sfa) | ||
| 767 | { | ||
| 768 | const struct nlattr *a; | ||
| 769 | int rem, err; | ||
| 770 | |||
| 771 | if (depth >= SAMPLE_ACTION_DEPTH) | ||
| 772 | return -EOVERFLOW; | ||
| 773 | |||
| 774 | nla_for_each_nested(a, attr, rem) { | ||
| 775 | /* Expected argument lengths, (u32)-1 for variable length. */ | ||
| 776 | static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { | ||
| 777 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), | ||
| 778 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, | ||
| 779 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), | ||
| 780 | [OVS_ACTION_ATTR_POP_VLAN] = 0, | ||
| 781 | [OVS_ACTION_ATTR_SET] = (u32)-1, | ||
| 782 | [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 | ||
| 783 | }; | ||
| 784 | const struct ovs_action_push_vlan *vlan; | ||
| 785 | int type = nla_type(a); | ||
| 786 | bool skip_copy; | ||
| 787 | |||
| 788 | if (type > OVS_ACTION_ATTR_MAX || | ||
| 789 | (action_lens[type] != nla_len(a) && | ||
| 790 | action_lens[type] != (u32)-1)) | ||
| 791 | return -EINVAL; | ||
| 792 | |||
| 793 | skip_copy = false; | ||
| 794 | switch (type) { | ||
| 795 | case OVS_ACTION_ATTR_UNSPEC: | ||
| 796 | return -EINVAL; | ||
| 797 | |||
| 798 | case OVS_ACTION_ATTR_USERSPACE: | ||
| 799 | err = validate_userspace(a); | ||
| 800 | if (err) | ||
| 801 | return err; | ||
| 802 | break; | ||
| 803 | |||
| 804 | case OVS_ACTION_ATTR_OUTPUT: | ||
| 805 | if (nla_get_u32(a) >= DP_MAX_PORTS) | ||
| 806 | return -EINVAL; | ||
| 807 | break; | ||
| 808 | |||
| 809 | |||
| 810 | case OVS_ACTION_ATTR_POP_VLAN: | ||
| 811 | break; | ||
| 812 | |||
| 813 | case OVS_ACTION_ATTR_PUSH_VLAN: | ||
| 814 | vlan = nla_data(a); | ||
| 815 | if (vlan->vlan_tpid != htons(ETH_P_8021Q)) | ||
| 816 | return -EINVAL; | ||
| 817 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) | ||
| 818 | return -EINVAL; | ||
| 819 | break; | ||
| 820 | |||
| 821 | case OVS_ACTION_ATTR_SET: | ||
| 822 | err = validate_set(a, key, sfa, &skip_copy); | ||
| 823 | if (err) | ||
| 824 | return err; | ||
| 825 | break; | ||
| 826 | |||
| 827 | case OVS_ACTION_ATTR_SAMPLE: | ||
| 828 | err = validate_and_copy_sample(a, key, depth, sfa); | ||
| 829 | if (err) | ||
| 830 | return err; | ||
| 831 | skip_copy = true; | ||
| 832 | break; | ||
| 833 | |||
| 834 | default: | ||
| 835 | return -EINVAL; | ||
| 836 | } | ||
| 837 | if (!skip_copy) { | ||
| 838 | err = copy_action(a, sfa); | ||
| 839 | if (err) | ||
| 840 | return err; | ||
| 841 | } | ||
| 842 | } | ||
| 843 | |||
| 844 | if (rem > 0) | ||
| 845 | return -EINVAL; | ||
| 846 | |||
| 847 | return 0; | ||
| 848 | } | ||
| 849 | |||
| 850 | static void clear_stats(struct sw_flow *flow) | 456 | static void clear_stats(struct sw_flow *flow) |
| 851 | { | 457 | { |
| 852 | flow->used = 0; | 458 | flow->used = 0; |
| @@ -902,15 +508,16 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) | |||
| 902 | if (err) | 508 | if (err) |
| 903 | goto err_flow_free; | 509 | goto err_flow_free; |
| 904 | 510 | ||
| 905 | err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]); | 511 | err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]); |
| 906 | if (err) | 512 | if (err) |
| 907 | goto err_flow_free; | 513 | goto err_flow_free; |
| 908 | acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); | 514 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); |
| 909 | err = PTR_ERR(acts); | 515 | err = PTR_ERR(acts); |
| 910 | if (IS_ERR(acts)) | 516 | if (IS_ERR(acts)) |
| 911 | goto err_flow_free; | 517 | goto err_flow_free; |
| 912 | 518 | ||
| 913 | err = validate_and_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], &flow->key, 0, &acts); | 519 | err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], |
| 520 | &flow->key, 0, &acts); | ||
| 914 | rcu_assign_pointer(flow->sf_acts, acts); | 521 | rcu_assign_pointer(flow->sf_acts, acts); |
| 915 | if (err) | 522 | if (err) |
| 916 | goto err_flow_free; | 523 | goto err_flow_free; |
| @@ -958,15 +565,18 @@ static struct genl_ops dp_packet_genl_ops[] = { | |||
| 958 | } | 565 | } |
| 959 | }; | 566 | }; |
| 960 | 567 | ||
| 961 | static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) | 568 | static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, |
| 569 | struct ovs_dp_megaflow_stats *mega_stats) | ||
| 962 | { | 570 | { |
| 963 | struct flow_table *table; | ||
| 964 | int i; | 571 | int i; |
| 965 | 572 | ||
| 966 | table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held()); | 573 | memset(mega_stats, 0, sizeof(*mega_stats)); |
| 967 | stats->n_flows = ovs_flow_tbl_count(table); | 574 | |
| 575 | stats->n_flows = ovs_flow_tbl_count(&dp->table); | ||
| 576 | mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table); | ||
| 968 | 577 | ||
| 969 | stats->n_hit = stats->n_missed = stats->n_lost = 0; | 578 | stats->n_hit = stats->n_missed = stats->n_lost = 0; |
| 579 | |||
| 970 | for_each_possible_cpu(i) { | 580 | for_each_possible_cpu(i) { |
| 971 | const struct dp_stats_percpu *percpu_stats; | 581 | const struct dp_stats_percpu *percpu_stats; |
| 972 | struct dp_stats_percpu local_stats; | 582 | struct dp_stats_percpu local_stats; |
| @@ -982,6 +592,7 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats) | |||
| 982 | stats->n_hit += local_stats.n_hit; | 592 | stats->n_hit += local_stats.n_hit; |
| 983 | stats->n_missed += local_stats.n_missed; | 593 | stats->n_missed += local_stats.n_missed; |
| 984 | stats->n_lost += local_stats.n_lost; | 594 | stats->n_lost += local_stats.n_lost; |
| 595 | mega_stats->n_mask_hit += local_stats.n_mask_hit; | ||
| 985 | } | 596 | } |
| 986 | } | 597 | } |
| 987 | 598 | ||
| @@ -1005,100 +616,6 @@ static struct genl_multicast_group ovs_dp_flow_multicast_group = { | |||
| 1005 | .name = OVS_FLOW_MCGROUP | 616 | .name = OVS_FLOW_MCGROUP |
| 1006 | }; | 617 | }; |
| 1007 | 618 | ||
| 1008 | static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb); | ||
| 1009 | static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) | ||
| 1010 | { | ||
| 1011 | const struct nlattr *a; | ||
| 1012 | struct nlattr *start; | ||
| 1013 | int err = 0, rem; | ||
| 1014 | |||
| 1015 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE); | ||
| 1016 | if (!start) | ||
| 1017 | return -EMSGSIZE; | ||
| 1018 | |||
| 1019 | nla_for_each_nested(a, attr, rem) { | ||
| 1020 | int type = nla_type(a); | ||
| 1021 | struct nlattr *st_sample; | ||
| 1022 | |||
| 1023 | switch (type) { | ||
| 1024 | case OVS_SAMPLE_ATTR_PROBABILITY: | ||
| 1025 | if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, sizeof(u32), nla_data(a))) | ||
| 1026 | return -EMSGSIZE; | ||
| 1027 | break; | ||
| 1028 | case OVS_SAMPLE_ATTR_ACTIONS: | ||
| 1029 | st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); | ||
| 1030 | if (!st_sample) | ||
| 1031 | return -EMSGSIZE; | ||
| 1032 | err = actions_to_attr(nla_data(a), nla_len(a), skb); | ||
| 1033 | if (err) | ||
| 1034 | return err; | ||
| 1035 | nla_nest_end(skb, st_sample); | ||
| 1036 | break; | ||
| 1037 | } | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | nla_nest_end(skb, start); | ||
| 1041 | return err; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) | ||
| 1045 | { | ||
| 1046 | const struct nlattr *ovs_key = nla_data(a); | ||
| 1047 | int key_type = nla_type(ovs_key); | ||
| 1048 | struct nlattr *start; | ||
| 1049 | int err; | ||
| 1050 | |||
| 1051 | switch (key_type) { | ||
| 1052 | case OVS_KEY_ATTR_IPV4_TUNNEL: | ||
| 1053 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); | ||
| 1054 | if (!start) | ||
| 1055 | return -EMSGSIZE; | ||
| 1056 | |||
| 1057 | err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key), | ||
| 1058 | nla_data(ovs_key)); | ||
| 1059 | if (err) | ||
| 1060 | return err; | ||
| 1061 | nla_nest_end(skb, start); | ||
| 1062 | break; | ||
| 1063 | default: | ||
| 1064 | if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) | ||
| 1065 | return -EMSGSIZE; | ||
| 1066 | break; | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | return 0; | ||
| 1070 | } | ||
| 1071 | |||
| 1072 | static int actions_to_attr(const struct nlattr *attr, int len, struct sk_buff *skb) | ||
| 1073 | { | ||
| 1074 | const struct nlattr *a; | ||
| 1075 | int rem, err; | ||
| 1076 | |||
| 1077 | nla_for_each_attr(a, attr, len, rem) { | ||
| 1078 | int type = nla_type(a); | ||
| 1079 | |||
| 1080 | switch (type) { | ||
| 1081 | case OVS_ACTION_ATTR_SET: | ||
| 1082 | err = set_action_to_attr(a, skb); | ||
| 1083 | if (err) | ||
| 1084 | return err; | ||
| 1085 | break; | ||
| 1086 | |||
| 1087 | case OVS_ACTION_ATTR_SAMPLE: | ||
| 1088 | err = sample_action_to_attr(a, skb); | ||
| 1089 | if (err) | ||
| 1090 | return err; | ||
| 1091 | break; | ||
| 1092 | default: | ||
| 1093 | if (nla_put(skb, type, nla_len(a), nla_data(a))) | ||
| 1094 | return -EMSGSIZE; | ||
| 1095 | break; | ||
| 1096 | } | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | return 0; | ||
| 1100 | } | ||
| 1101 | |||
| 1102 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) | 619 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) |
| 1103 | { | 620 | { |
| 1104 | return NLMSG_ALIGN(sizeof(struct ovs_header)) | 621 | return NLMSG_ALIGN(sizeof(struct ovs_header)) |
| @@ -1135,8 +652,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
| 1135 | if (!nla) | 652 | if (!nla) |
| 1136 | goto nla_put_failure; | 653 | goto nla_put_failure; |
| 1137 | 654 | ||
| 1138 | err = ovs_flow_to_nlattrs(&flow->unmasked_key, | 655 | err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); |
| 1139 | &flow->unmasked_key, skb); | ||
| 1140 | if (err) | 656 | if (err) |
| 1141 | goto error; | 657 | goto error; |
| 1142 | nla_nest_end(skb, nla); | 658 | nla_nest_end(skb, nla); |
| @@ -1145,7 +661,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
| 1145 | if (!nla) | 661 | if (!nla) |
| 1146 | goto nla_put_failure; | 662 | goto nla_put_failure; |
| 1147 | 663 | ||
| 1148 | err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb); | 664 | err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); |
| 1149 | if (err) | 665 | if (err) |
| 1150 | goto error; | 666 | goto error; |
| 1151 | 667 | ||
| @@ -1155,7 +671,7 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
| 1155 | used = flow->used; | 671 | used = flow->used; |
| 1156 | stats.n_packets = flow->packet_count; | 672 | stats.n_packets = flow->packet_count; |
| 1157 | stats.n_bytes = flow->byte_count; | 673 | stats.n_bytes = flow->byte_count; |
| 1158 | tcp_flags = flow->tcp_flags; | 674 | tcp_flags = (u8)ntohs(flow->tcp_flags); |
| 1159 | spin_unlock_bh(&flow->lock); | 675 | spin_unlock_bh(&flow->lock); |
| 1160 | 676 | ||
| 1161 | if (used && | 677 | if (used && |
| @@ -1188,7 +704,8 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp, | |||
| 1188 | sf_acts = rcu_dereference_check(flow->sf_acts, | 704 | sf_acts = rcu_dereference_check(flow->sf_acts, |
| 1189 | lockdep_ovsl_is_held()); | 705 | lockdep_ovsl_is_held()); |
| 1190 | 706 | ||
| 1191 | err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb); | 707 | err = ovs_nla_put_actions(sf_acts->actions, |
| 708 | sf_acts->actions_len, skb); | ||
| 1192 | if (!err) | 709 | if (!err) |
| 1193 | nla_nest_end(skb, start); | 710 | nla_nest_end(skb, start); |
| 1194 | else { | 711 | else { |
| @@ -1234,6 +751,14 @@ static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, | |||
| 1234 | return skb; | 751 | return skb; |
| 1235 | } | 752 | } |
| 1236 | 753 | ||
| 754 | static struct sw_flow *__ovs_flow_tbl_lookup(struct flow_table *tbl, | ||
| 755 | const struct sw_flow_key *key) | ||
| 756 | { | ||
| 757 | u32 __always_unused n_mask_hit; | ||
| 758 | |||
| 759 | return ovs_flow_tbl_lookup(tbl, key, &n_mask_hit); | ||
| 760 | } | ||
| 761 | |||
| 1237 | static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | 762 | static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) |
| 1238 | { | 763 | { |
| 1239 | struct nlattr **a = info->attrs; | 764 | struct nlattr **a = info->attrs; |
| @@ -1243,7 +768,6 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1243 | struct sw_flow_mask mask; | 768 | struct sw_flow_mask mask; |
| 1244 | struct sk_buff *reply; | 769 | struct sk_buff *reply; |
| 1245 | struct datapath *dp; | 770 | struct datapath *dp; |
| 1246 | struct flow_table *table; | ||
| 1247 | struct sw_flow_actions *acts = NULL; | 771 | struct sw_flow_actions *acts = NULL; |
| 1248 | struct sw_flow_match match; | 772 | struct sw_flow_match match; |
| 1249 | int error; | 773 | int error; |
| @@ -1254,21 +778,21 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1254 | goto error; | 778 | goto error; |
| 1255 | 779 | ||
| 1256 | ovs_match_init(&match, &key, &mask); | 780 | ovs_match_init(&match, &key, &mask); |
| 1257 | error = ovs_match_from_nlattrs(&match, | 781 | error = ovs_nla_get_match(&match, |
| 1258 | a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); | 782 | a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); |
| 1259 | if (error) | 783 | if (error) |
| 1260 | goto error; | 784 | goto error; |
| 1261 | 785 | ||
| 1262 | /* Validate actions. */ | 786 | /* Validate actions. */ |
| 1263 | if (a[OVS_FLOW_ATTR_ACTIONS]) { | 787 | if (a[OVS_FLOW_ATTR_ACTIONS]) { |
| 1264 | acts = ovs_flow_actions_alloc(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); | 788 | acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); |
| 1265 | error = PTR_ERR(acts); | 789 | error = PTR_ERR(acts); |
| 1266 | if (IS_ERR(acts)) | 790 | if (IS_ERR(acts)) |
| 1267 | goto error; | 791 | goto error; |
| 1268 | 792 | ||
| 1269 | ovs_flow_key_mask(&masked_key, &key, &mask); | 793 | ovs_flow_mask_key(&masked_key, &key, &mask); |
| 1270 | error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], | 794 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], |
| 1271 | &masked_key, 0, &acts); | 795 | &masked_key, 0, &acts); |
| 1272 | if (error) { | 796 | if (error) { |
| 1273 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); | 797 | OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); |
| 1274 | goto err_kfree; | 798 | goto err_kfree; |
| @@ -1284,29 +808,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1284 | if (!dp) | 808 | if (!dp) |
| 1285 | goto err_unlock_ovs; | 809 | goto err_unlock_ovs; |
| 1286 | 810 | ||
| 1287 | table = ovsl_dereference(dp->table); | ||
| 1288 | |||
| 1289 | /* Check if this is a duplicate flow */ | 811 | /* Check if this is a duplicate flow */ |
| 1290 | flow = ovs_flow_lookup(table, &key); | 812 | flow = __ovs_flow_tbl_lookup(&dp->table, &key); |
| 1291 | if (!flow) { | 813 | if (!flow) { |
| 1292 | struct sw_flow_mask *mask_p; | ||
| 1293 | /* Bail out if we're not allowed to create a new flow. */ | 814 | /* Bail out if we're not allowed to create a new flow. */ |
| 1294 | error = -ENOENT; | 815 | error = -ENOENT; |
| 1295 | if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) | 816 | if (info->genlhdr->cmd == OVS_FLOW_CMD_SET) |
| 1296 | goto err_unlock_ovs; | 817 | goto err_unlock_ovs; |
| 1297 | 818 | ||
| 1298 | /* Expand table, if necessary, to make room. */ | ||
| 1299 | if (ovs_flow_tbl_need_to_expand(table)) { | ||
| 1300 | struct flow_table *new_table; | ||
| 1301 | |||
| 1302 | new_table = ovs_flow_tbl_expand(table); | ||
| 1303 | if (!IS_ERR(new_table)) { | ||
| 1304 | rcu_assign_pointer(dp->table, new_table); | ||
| 1305 | ovs_flow_tbl_destroy(table, true); | ||
| 1306 | table = ovsl_dereference(dp->table); | ||
| 1307 | } | ||
| 1308 | } | ||
| 1309 | |||
| 1310 | /* Allocate flow. */ | 819 | /* Allocate flow. */ |
| 1311 | flow = ovs_flow_alloc(); | 820 | flow = ovs_flow_alloc(); |
| 1312 | if (IS_ERR(flow)) { | 821 | if (IS_ERR(flow)) { |
| @@ -1317,25 +826,14 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1317 | 826 | ||
| 1318 | flow->key = masked_key; | 827 | flow->key = masked_key; |
| 1319 | flow->unmasked_key = key; | 828 | flow->unmasked_key = key; |
| 1320 | |||
| 1321 | /* Make sure mask is unique in the system */ | ||
| 1322 | mask_p = ovs_sw_flow_mask_find(table, &mask); | ||
| 1323 | if (!mask_p) { | ||
| 1324 | /* Allocate a new mask if none exsits. */ | ||
| 1325 | mask_p = ovs_sw_flow_mask_alloc(); | ||
| 1326 | if (!mask_p) | ||
| 1327 | goto err_flow_free; | ||
| 1328 | mask_p->key = mask.key; | ||
| 1329 | mask_p->range = mask.range; | ||
| 1330 | ovs_sw_flow_mask_insert(table, mask_p); | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | ovs_sw_flow_mask_add_ref(mask_p); | ||
| 1334 | flow->mask = mask_p; | ||
| 1335 | rcu_assign_pointer(flow->sf_acts, acts); | 829 | rcu_assign_pointer(flow->sf_acts, acts); |
| 1336 | 830 | ||
| 1337 | /* Put flow in bucket. */ | 831 | /* Put flow in bucket. */ |
| 1338 | ovs_flow_insert(table, flow); | 832 | error = ovs_flow_tbl_insert(&dp->table, flow, &mask); |
| 833 | if (error) { | ||
| 834 | acts = NULL; | ||
| 835 | goto err_flow_free; | ||
| 836 | } | ||
| 1339 | 837 | ||
| 1340 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, | 838 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, |
| 1341 | info->snd_seq, OVS_FLOW_CMD_NEW); | 839 | info->snd_seq, OVS_FLOW_CMD_NEW); |
| @@ -1356,7 +854,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1356 | 854 | ||
| 1357 | /* The unmasked key has to be the same for flow updates. */ | 855 | /* The unmasked key has to be the same for flow updates. */ |
| 1358 | error = -EINVAL; | 856 | error = -EINVAL; |
| 1359 | if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) { | 857 | if (!ovs_flow_cmp_unmasked_key(flow, &match)) { |
| 1360 | OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); | 858 | OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n"); |
| 1361 | goto err_unlock_ovs; | 859 | goto err_unlock_ovs; |
| 1362 | } | 860 | } |
| @@ -1364,7 +862,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info) | |||
| 1364 | /* Update actions. */ | 862 | /* Update actions. */ |
| 1365 | old_acts = ovsl_dereference(flow->sf_acts); | 863 | old_acts = ovsl_dereference(flow->sf_acts); |
| 1366 | rcu_assign_pointer(flow->sf_acts, acts); | 864 | rcu_assign_pointer(flow->sf_acts, acts); |
| 1367 | ovs_flow_deferred_free_acts(old_acts); | 865 | ovs_nla_free_flow_actions(old_acts); |
| 1368 | 866 | ||
| 1369 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, | 867 | reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid, |
| 1370 | info->snd_seq, OVS_FLOW_CMD_NEW); | 868 | info->snd_seq, OVS_FLOW_CMD_NEW); |
| @@ -1403,7 +901,6 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
| 1403 | struct sk_buff *reply; | 901 | struct sk_buff *reply; |
| 1404 | struct sw_flow *flow; | 902 | struct sw_flow *flow; |
| 1405 | struct datapath *dp; | 903 | struct datapath *dp; |
| 1406 | struct flow_table *table; | ||
| 1407 | struct sw_flow_match match; | 904 | struct sw_flow_match match; |
| 1408 | int err; | 905 | int err; |
| 1409 | 906 | ||
| @@ -1413,7 +910,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
| 1413 | } | 910 | } |
| 1414 | 911 | ||
| 1415 | ovs_match_init(&match, &key, NULL); | 912 | ovs_match_init(&match, &key, NULL); |
| 1416 | err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); | 913 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); |
| 1417 | if (err) | 914 | if (err) |
| 1418 | return err; | 915 | return err; |
| 1419 | 916 | ||
| @@ -1424,9 +921,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
| 1424 | goto unlock; | 921 | goto unlock; |
| 1425 | } | 922 | } |
| 1426 | 923 | ||
| 1427 | table = ovsl_dereference(dp->table); | 924 | flow = __ovs_flow_tbl_lookup(&dp->table, &key); |
| 1428 | flow = ovs_flow_lookup_unmasked_key(table, &match); | 925 | if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { |
| 1429 | if (!flow) { | ||
| 1430 | err = -ENOENT; | 926 | err = -ENOENT; |
| 1431 | goto unlock; | 927 | goto unlock; |
| 1432 | } | 928 | } |
| @@ -1453,7 +949,6 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
| 1453 | struct sk_buff *reply; | 949 | struct sk_buff *reply; |
| 1454 | struct sw_flow *flow; | 950 | struct sw_flow *flow; |
| 1455 | struct datapath *dp; | 951 | struct datapath *dp; |
| 1456 | struct flow_table *table; | ||
| 1457 | struct sw_flow_match match; | 952 | struct sw_flow_match match; |
| 1458 | int err; | 953 | int err; |
| 1459 | 954 | ||
| @@ -1465,18 +960,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
| 1465 | } | 960 | } |
| 1466 | 961 | ||
| 1467 | if (!a[OVS_FLOW_ATTR_KEY]) { | 962 | if (!a[OVS_FLOW_ATTR_KEY]) { |
| 1468 | err = flush_flows(dp); | 963 | err = ovs_flow_tbl_flush(&dp->table); |
| 1469 | goto unlock; | 964 | goto unlock; |
| 1470 | } | 965 | } |
| 1471 | 966 | ||
| 1472 | ovs_match_init(&match, &key, NULL); | 967 | ovs_match_init(&match, &key, NULL); |
| 1473 | err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL); | 968 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); |
| 1474 | if (err) | 969 | if (err) |
| 1475 | goto unlock; | 970 | goto unlock; |
| 1476 | 971 | ||
| 1477 | table = ovsl_dereference(dp->table); | 972 | flow = __ovs_flow_tbl_lookup(&dp->table, &key); |
| 1478 | flow = ovs_flow_lookup_unmasked_key(table, &match); | 973 | if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { |
| 1479 | if (!flow) { | ||
| 1480 | err = -ENOENT; | 974 | err = -ENOENT; |
| 1481 | goto unlock; | 975 | goto unlock; |
| 1482 | } | 976 | } |
| @@ -1487,7 +981,7 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
| 1487 | goto unlock; | 981 | goto unlock; |
| 1488 | } | 982 | } |
| 1489 | 983 | ||
| 1490 | ovs_flow_remove(table, flow); | 984 | ovs_flow_tbl_remove(&dp->table, flow); |
| 1491 | 985 | ||
| 1492 | err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, | 986 | err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid, |
| 1493 | info->snd_seq, 0, OVS_FLOW_CMD_DEL); | 987 | info->snd_seq, 0, OVS_FLOW_CMD_DEL); |
| @@ -1506,8 +1000,8 @@ unlock: | |||
| 1506 | static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | 1000 | static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) |
| 1507 | { | 1001 | { |
| 1508 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); | 1002 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); |
| 1003 | struct table_instance *ti; | ||
| 1509 | struct datapath *dp; | 1004 | struct datapath *dp; |
| 1510 | struct flow_table *table; | ||
| 1511 | 1005 | ||
| 1512 | rcu_read_lock(); | 1006 | rcu_read_lock(); |
| 1513 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); | 1007 | dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); |
| @@ -1516,14 +1010,14 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 1516 | return -ENODEV; | 1010 | return -ENODEV; |
| 1517 | } | 1011 | } |
| 1518 | 1012 | ||
| 1519 | table = rcu_dereference(dp->table); | 1013 | ti = rcu_dereference(dp->table.ti); |
| 1520 | for (;;) { | 1014 | for (;;) { |
| 1521 | struct sw_flow *flow; | 1015 | struct sw_flow *flow; |
| 1522 | u32 bucket, obj; | 1016 | u32 bucket, obj; |
| 1523 | 1017 | ||
| 1524 | bucket = cb->args[0]; | 1018 | bucket = cb->args[0]; |
| 1525 | obj = cb->args[1]; | 1019 | obj = cb->args[1]; |
| 1526 | flow = ovs_flow_dump_next(table, &bucket, &obj); | 1020 | flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj); |
| 1527 | if (!flow) | 1021 | if (!flow) |
| 1528 | break; | 1022 | break; |
| 1529 | 1023 | ||
| @@ -1589,6 +1083,7 @@ static size_t ovs_dp_cmd_msg_size(void) | |||
| 1589 | 1083 | ||
| 1590 | msgsize += nla_total_size(IFNAMSIZ); | 1084 | msgsize += nla_total_size(IFNAMSIZ); |
| 1591 | msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); | 1085 | msgsize += nla_total_size(sizeof(struct ovs_dp_stats)); |
| 1086 | msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats)); | ||
| 1592 | 1087 | ||
| 1593 | return msgsize; | 1088 | return msgsize; |
| 1594 | } | 1089 | } |
| @@ -1598,6 +1093,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, | |||
| 1598 | { | 1093 | { |
| 1599 | struct ovs_header *ovs_header; | 1094 | struct ovs_header *ovs_header; |
| 1600 | struct ovs_dp_stats dp_stats; | 1095 | struct ovs_dp_stats dp_stats; |
| 1096 | struct ovs_dp_megaflow_stats dp_megaflow_stats; | ||
| 1601 | int err; | 1097 | int err; |
| 1602 | 1098 | ||
| 1603 | ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family, | 1099 | ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family, |
| @@ -1613,8 +1109,14 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, | |||
| 1613 | if (err) | 1109 | if (err) |
| 1614 | goto nla_put_failure; | 1110 | goto nla_put_failure; |
| 1615 | 1111 | ||
| 1616 | get_dp_stats(dp, &dp_stats); | 1112 | get_dp_stats(dp, &dp_stats, &dp_megaflow_stats); |
| 1617 | if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats)) | 1113 | if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), |
| 1114 | &dp_stats)) | ||
| 1115 | goto nla_put_failure; | ||
| 1116 | |||
| 1117 | if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS, | ||
| 1118 | sizeof(struct ovs_dp_megaflow_stats), | ||
| 1119 | &dp_megaflow_stats)) | ||
| 1618 | goto nla_put_failure; | 1120 | goto nla_put_failure; |
| 1619 | 1121 | ||
| 1620 | return genlmsg_end(skb, ovs_header); | 1122 | return genlmsg_end(skb, ovs_header); |
| @@ -1687,9 +1189,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
| 1687 | ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); | 1189 | ovs_dp_set_net(dp, hold_net(sock_net(skb->sk))); |
| 1688 | 1190 | ||
| 1689 | /* Allocate table. */ | 1191 | /* Allocate table. */ |
| 1690 | err = -ENOMEM; | 1192 | err = ovs_flow_tbl_init(&dp->table); |
| 1691 | rcu_assign_pointer(dp->table, ovs_flow_tbl_alloc(TBL_MIN_BUCKETS)); | 1193 | if (err) |
| 1692 | if (!dp->table) | ||
| 1693 | goto err_free_dp; | 1194 | goto err_free_dp; |
| 1694 | 1195 | ||
| 1695 | dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); | 1196 | dp->stats_percpu = alloc_percpu(struct dp_stats_percpu); |
| @@ -1699,7 +1200,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
| 1699 | } | 1200 | } |
| 1700 | 1201 | ||
| 1701 | dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), | 1202 | dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head), |
| 1702 | GFP_KERNEL); | 1203 | GFP_KERNEL); |
| 1703 | if (!dp->ports) { | 1204 | if (!dp->ports) { |
| 1704 | err = -ENOMEM; | 1205 | err = -ENOMEM; |
| 1705 | goto err_destroy_percpu; | 1206 | goto err_destroy_percpu; |
| @@ -1746,7 +1247,7 @@ err_destroy_ports_array: | |||
| 1746 | err_destroy_percpu: | 1247 | err_destroy_percpu: |
| 1747 | free_percpu(dp->stats_percpu); | 1248 | free_percpu(dp->stats_percpu); |
| 1748 | err_destroy_table: | 1249 | err_destroy_table: |
| 1749 | ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false); | 1250 | ovs_flow_tbl_destroy(&dp->table); |
| 1750 | err_free_dp: | 1251 | err_free_dp: |
| 1751 | release_net(ovs_dp_get_net(dp)); | 1252 | release_net(ovs_dp_get_net(dp)); |
| 1752 | kfree(dp); | 1253 | kfree(dp); |
| @@ -2336,32 +1837,6 @@ error: | |||
| 2336 | return err; | 1837 | return err; |
| 2337 | } | 1838 | } |
| 2338 | 1839 | ||
| 2339 | static void rehash_flow_table(struct work_struct *work) | ||
| 2340 | { | ||
| 2341 | struct datapath *dp; | ||
| 2342 | struct net *net; | ||
| 2343 | |||
| 2344 | ovs_lock(); | ||
| 2345 | rtnl_lock(); | ||
| 2346 | for_each_net(net) { | ||
| 2347 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | ||
| 2348 | |||
| 2349 | list_for_each_entry(dp, &ovs_net->dps, list_node) { | ||
| 2350 | struct flow_table *old_table = ovsl_dereference(dp->table); | ||
| 2351 | struct flow_table *new_table; | ||
| 2352 | |||
| 2353 | new_table = ovs_flow_tbl_rehash(old_table); | ||
| 2354 | if (!IS_ERR(new_table)) { | ||
| 2355 | rcu_assign_pointer(dp->table, new_table); | ||
| 2356 | ovs_flow_tbl_destroy(old_table, true); | ||
| 2357 | } | ||
| 2358 | } | ||
| 2359 | } | ||
| 2360 | rtnl_unlock(); | ||
| 2361 | ovs_unlock(); | ||
| 2362 | schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); | ||
| 2363 | } | ||
| 2364 | |||
| 2365 | static int __net_init ovs_init_net(struct net *net) | 1840 | static int __net_init ovs_init_net(struct net *net) |
| 2366 | { | 1841 | { |
| 2367 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); | 1842 | struct ovs_net *ovs_net = net_generic(net, ovs_net_id); |
| @@ -2419,8 +1894,6 @@ static int __init dp_init(void) | |||
| 2419 | if (err < 0) | 1894 | if (err < 0) |
| 2420 | goto error_unreg_notifier; | 1895 | goto error_unreg_notifier; |
| 2421 | 1896 | ||
| 2422 | schedule_delayed_work(&rehash_flow_wq, REHASH_FLOW_INTERVAL); | ||
| 2423 | |||
| 2424 | return 0; | 1897 | return 0; |
| 2425 | 1898 | ||
| 2426 | error_unreg_notifier: | 1899 | error_unreg_notifier: |
| @@ -2437,7 +1910,6 @@ error: | |||
| 2437 | 1910 | ||
| 2438 | static void dp_cleanup(void) | 1911 | static void dp_cleanup(void) |
| 2439 | { | 1912 | { |
| 2440 | cancel_delayed_work_sync(&rehash_flow_wq); | ||
| 2441 | dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); | 1913 | dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); |
| 2442 | unregister_netdevice_notifier(&ovs_dp_device_notifier); | 1914 | unregister_netdevice_notifier(&ovs_dp_device_notifier); |
| 2443 | unregister_pernet_device(&ovs_net_ops); | 1915 | unregister_pernet_device(&ovs_net_ops); |
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 4d109c176ef3..d3d14a58aa91 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/u64_stats_sync.h> | 27 | #include <linux/u64_stats_sync.h> |
| 28 | 28 | ||
| 29 | #include "flow.h" | 29 | #include "flow.h" |
| 30 | #include "flow_table.h" | ||
| 30 | #include "vport.h" | 31 | #include "vport.h" |
| 31 | 32 | ||
| 32 | #define DP_MAX_PORTS USHRT_MAX | 33 | #define DP_MAX_PORTS USHRT_MAX |
| @@ -45,11 +46,15 @@ | |||
| 45 | * @n_lost: Number of received packets that had no matching flow in the flow | 46 | * @n_lost: Number of received packets that had no matching flow in the flow |
| 46 | * table that could not be sent to userspace (normally due to an overflow in | 47 | * table that could not be sent to userspace (normally due to an overflow in |
| 47 | * one of the datapath's queues). | 48 | * one of the datapath's queues). |
| 49 | * @n_mask_hit: Number of masks looked up for flow match. | ||
| 50 | * @n_mask_hit / (@n_hit + @n_missed) will be the average masks looked | ||
| 51 | * up per packet. | ||
| 48 | */ | 52 | */ |
| 49 | struct dp_stats_percpu { | 53 | struct dp_stats_percpu { |
| 50 | u64 n_hit; | 54 | u64 n_hit; |
| 51 | u64 n_missed; | 55 | u64 n_missed; |
| 52 | u64 n_lost; | 56 | u64 n_lost; |
| 57 | u64 n_mask_hit; | ||
| 53 | struct u64_stats_sync sync; | 58 | struct u64_stats_sync sync; |
| 54 | }; | 59 | }; |
| 55 | 60 | ||
| @@ -57,7 +62,7 @@ struct dp_stats_percpu { | |||
| 57 | * struct datapath - datapath for flow-based packet switching | 62 | * struct datapath - datapath for flow-based packet switching |
| 58 | * @rcu: RCU callback head for deferred destruction. | 63 | * @rcu: RCU callback head for deferred destruction. |
| 59 | * @list_node: Element in global 'dps' list. | 64 | * @list_node: Element in global 'dps' list. |
| 60 | * @table: Current flow table. Protected by ovs_mutex and RCU. | 65 | * @table: flow table. |
| 61 | * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by | 66 | * @ports: Hash table for ports. %OVSP_LOCAL port always exists. Protected by |
| 62 | * ovs_mutex and RCU. | 67 | * ovs_mutex and RCU. |
| 63 | * @stats_percpu: Per-CPU datapath statistics. | 68 | * @stats_percpu: Per-CPU datapath statistics. |
| @@ -71,7 +76,7 @@ struct datapath { | |||
| 71 | struct list_head list_node; | 76 | struct list_head list_node; |
| 72 | 77 | ||
| 73 | /* Flow table. */ | 78 | /* Flow table. */ |
| 74 | struct flow_table __rcu *table; | 79 | struct flow_table table; |
| 75 | 80 | ||
| 76 | /* Switch ports. */ | 81 | /* Switch ports. */ |
| 77 | struct hlist_head *ports; | 82 | struct hlist_head *ports; |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 410db90db73d..b409f5279601 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
| @@ -45,202 +45,38 @@ | |||
| 45 | #include <net/ipv6.h> | 45 | #include <net/ipv6.h> |
| 46 | #include <net/ndisc.h> | 46 | #include <net/ndisc.h> |
| 47 | 47 | ||
| 48 | static struct kmem_cache *flow_cache; | 48 | u64 ovs_flow_used_time(unsigned long flow_jiffies) |
| 49 | |||
| 50 | static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, | ||
| 51 | struct sw_flow_key_range *range, u8 val); | ||
| 52 | |||
| 53 | static void update_range__(struct sw_flow_match *match, | ||
| 54 | size_t offset, size_t size, bool is_mask) | ||
| 55 | { | 49 | { |
| 56 | struct sw_flow_key_range *range = NULL; | 50 | struct timespec cur_ts; |
| 57 | size_t start = rounddown(offset, sizeof(long)); | 51 | u64 cur_ms, idle_ms; |
| 58 | size_t end = roundup(offset + size, sizeof(long)); | ||
| 59 | |||
| 60 | if (!is_mask) | ||
| 61 | range = &match->range; | ||
| 62 | else if (match->mask) | ||
| 63 | range = &match->mask->range; | ||
| 64 | |||
| 65 | if (!range) | ||
| 66 | return; | ||
| 67 | |||
| 68 | if (range->start == range->end) { | ||
| 69 | range->start = start; | ||
| 70 | range->end = end; | ||
| 71 | return; | ||
| 72 | } | ||
| 73 | |||
| 74 | if (range->start > start) | ||
| 75 | range->start = start; | ||
| 76 | 52 | ||
| 77 | if (range->end < end) | 53 | ktime_get_ts(&cur_ts); |
| 78 | range->end = end; | 54 | idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); |
| 79 | } | 55 | cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + |
| 56 | cur_ts.tv_nsec / NSEC_PER_MSEC; | ||
| 80 | 57 | ||
| 81 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | 58 | return cur_ms - idle_ms; |
| 82 | do { \ | ||
| 83 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 84 | sizeof((match)->key->field), is_mask); \ | ||
| 85 | if (is_mask) { \ | ||
| 86 | if ((match)->mask) \ | ||
| 87 | (match)->mask->key.field = value; \ | ||
| 88 | } else { \ | ||
| 89 | (match)->key->field = value; \ | ||
| 90 | } \ | ||
| 91 | } while (0) | ||
| 92 | |||
| 93 | #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ | ||
| 94 | do { \ | ||
| 95 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 96 | len, is_mask); \ | ||
| 97 | if (is_mask) { \ | ||
| 98 | if ((match)->mask) \ | ||
| 99 | memcpy(&(match)->mask->key.field, value_p, len);\ | ||
| 100 | } else { \ | ||
| 101 | memcpy(&(match)->key->field, value_p, len); \ | ||
| 102 | } \ | ||
| 103 | } while (0) | ||
| 104 | |||
| 105 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
| 106 | { | ||
| 107 | return range->end - range->start; | ||
| 108 | } | 59 | } |
| 109 | 60 | ||
| 110 | void ovs_match_init(struct sw_flow_match *match, | 61 | #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) |
| 111 | struct sw_flow_key *key, | ||
| 112 | struct sw_flow_mask *mask) | ||
| 113 | { | ||
| 114 | memset(match, 0, sizeof(*match)); | ||
| 115 | match->key = key; | ||
| 116 | match->mask = mask; | ||
| 117 | 62 | ||
| 118 | memset(key, 0, sizeof(*key)); | 63 | void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) |
| 119 | |||
| 120 | if (mask) { | ||
| 121 | memset(&mask->key, 0, sizeof(mask->key)); | ||
| 122 | mask->range.start = mask->range.end = 0; | ||
| 123 | } | ||
| 124 | } | ||
| 125 | |||
| 126 | static bool ovs_match_validate(const struct sw_flow_match *match, | ||
| 127 | u64 key_attrs, u64 mask_attrs) | ||
| 128 | { | 64 | { |
| 129 | u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; | 65 | __be16 tcp_flags = 0; |
| 130 | u64 mask_allowed = key_attrs; /* At most allow all key attributes */ | ||
| 131 | |||
| 132 | /* The following mask attributes allowed only if they | ||
| 133 | * pass the validation tests. */ | ||
| 134 | mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4) | ||
| 135 | | (1 << OVS_KEY_ATTR_IPV6) | ||
| 136 | | (1 << OVS_KEY_ATTR_TCP) | ||
| 137 | | (1 << OVS_KEY_ATTR_UDP) | ||
| 138 | | (1 << OVS_KEY_ATTR_SCTP) | ||
| 139 | | (1 << OVS_KEY_ATTR_ICMP) | ||
| 140 | | (1 << OVS_KEY_ATTR_ICMPV6) | ||
| 141 | | (1 << OVS_KEY_ATTR_ARP) | ||
| 142 | | (1 << OVS_KEY_ATTR_ND)); | ||
| 143 | |||
| 144 | /* Always allowed mask fields. */ | ||
| 145 | mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) | ||
| 146 | | (1 << OVS_KEY_ATTR_IN_PORT) | ||
| 147 | | (1 << OVS_KEY_ATTR_ETHERTYPE)); | ||
| 148 | |||
| 149 | /* Check key attributes. */ | ||
| 150 | if (match->key->eth.type == htons(ETH_P_ARP) | ||
| 151 | || match->key->eth.type == htons(ETH_P_RARP)) { | ||
| 152 | key_expected |= 1 << OVS_KEY_ATTR_ARP; | ||
| 153 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
| 154 | mask_allowed |= 1 << OVS_KEY_ATTR_ARP; | ||
| 155 | } | ||
| 156 | 66 | ||
| 157 | if (match->key->eth.type == htons(ETH_P_IP)) { | 67 | if ((flow->key.eth.type == htons(ETH_P_IP) || |
| 158 | key_expected |= 1 << OVS_KEY_ATTR_IPV4; | 68 | flow->key.eth.type == htons(ETH_P_IPV6)) && |
| 159 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | 69 | flow->key.ip.proto == IPPROTO_TCP && |
| 160 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV4; | 70 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { |
| 161 | 71 | tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb)); | |
| 162 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 163 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
| 164 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
| 165 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 166 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
| 167 | } | ||
| 168 | |||
| 169 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
| 170 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 171 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 172 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
| 176 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
| 177 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 178 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
| 179 | } | ||
| 180 | |||
| 181 | if (match->key->ip.proto == IPPROTO_ICMP) { | ||
| 182 | key_expected |= 1 << OVS_KEY_ATTR_ICMP; | ||
| 183 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 184 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMP; | ||
| 185 | } | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | if (match->key->eth.type == htons(ETH_P_IPV6)) { | ||
| 190 | key_expected |= 1 << OVS_KEY_ATTR_IPV6; | ||
| 191 | if (match->mask && (match->mask->key.eth.type == htons(0xffff))) | ||
| 192 | mask_allowed |= 1 << OVS_KEY_ATTR_IPV6; | ||
| 193 | |||
| 194 | if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 195 | if (match->key->ip.proto == IPPROTO_UDP) { | ||
| 196 | key_expected |= 1 << OVS_KEY_ATTR_UDP; | ||
| 197 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 198 | mask_allowed |= 1 << OVS_KEY_ATTR_UDP; | ||
| 199 | } | ||
| 200 | |||
| 201 | if (match->key->ip.proto == IPPROTO_SCTP) { | ||
| 202 | key_expected |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 203 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 204 | mask_allowed |= 1 << OVS_KEY_ATTR_SCTP; | ||
| 205 | } | ||
| 206 | |||
| 207 | if (match->key->ip.proto == IPPROTO_TCP) { | ||
| 208 | key_expected |= 1 << OVS_KEY_ATTR_TCP; | ||
| 209 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 210 | mask_allowed |= 1 << OVS_KEY_ATTR_TCP; | ||
| 211 | } | ||
| 212 | |||
| 213 | if (match->key->ip.proto == IPPROTO_ICMPV6) { | ||
| 214 | key_expected |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
| 215 | if (match->mask && (match->mask->key.ip.proto == 0xff)) | ||
| 216 | mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6; | ||
| 217 | |||
| 218 | if (match->key->ipv6.tp.src == | ||
| 219 | htons(NDISC_NEIGHBOUR_SOLICITATION) || | ||
| 220 | match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) { | ||
| 221 | key_expected |= 1 << OVS_KEY_ATTR_ND; | ||
| 222 | if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff))) | ||
| 223 | mask_allowed |= 1 << OVS_KEY_ATTR_ND; | ||
| 224 | } | ||
| 225 | } | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 229 | if ((key_attrs & key_expected) != key_expected) { | ||
| 230 | /* Key attributes check failed. */ | ||
| 231 | OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", | ||
| 232 | key_attrs, key_expected); | ||
| 233 | return false; | ||
| 234 | } | ||
| 235 | |||
| 236 | if ((mask_attrs & mask_allowed) != mask_attrs) { | ||
| 237 | /* Mask attributes check failed. */ | ||
| 238 | OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", | ||
| 239 | mask_attrs, mask_allowed); | ||
| 240 | return false; | ||
| 241 | } | 72 | } |
| 242 | 73 | ||
| 243 | return true; | 74 | spin_lock(&flow->lock); |
| 75 | flow->used = jiffies; | ||
| 76 | flow->packet_count++; | ||
| 77 | flow->byte_count += skb->len; | ||
| 78 | flow->tcp_flags |= tcp_flags; | ||
| 79 | spin_unlock(&flow->lock); | ||
| 244 | } | 80 | } |
| 245 | 81 | ||
| 246 | static int check_header(struct sk_buff *skb, int len) | 82 | static int check_header(struct sk_buff *skb, int len) |
| @@ -311,19 +147,6 @@ static bool icmphdr_ok(struct sk_buff *skb) | |||
| 311 | sizeof(struct icmphdr)); | 147 | sizeof(struct icmphdr)); |
| 312 | } | 148 | } |
| 313 | 149 | ||
| 314 | u64 ovs_flow_used_time(unsigned long flow_jiffies) | ||
| 315 | { | ||
| 316 | struct timespec cur_ts; | ||
| 317 | u64 cur_ms, idle_ms; | ||
| 318 | |||
| 319 | ktime_get_ts(&cur_ts); | ||
| 320 | idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); | ||
| 321 | cur_ms = (u64)cur_ts.tv_sec * MSEC_PER_SEC + | ||
| 322 | cur_ts.tv_nsec / NSEC_PER_MSEC; | ||
| 323 | |||
| 324 | return cur_ms - idle_ms; | ||
| 325 | } | ||
| 326 | |||
| 327 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) | 150 | static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) |
| 328 | { | 151 | { |
| 329 | unsigned int nh_ofs = skb_network_offset(skb); | 152 | unsigned int nh_ofs = skb_network_offset(skb); |
| @@ -372,311 +195,6 @@ static bool icmp6hdr_ok(struct sk_buff *skb) | |||
| 372 | sizeof(struct icmp6hdr)); | 195 | sizeof(struct icmp6hdr)); |
| 373 | } | 196 | } |
| 374 | 197 | ||
| 375 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 376 | const struct sw_flow_mask *mask) | ||
| 377 | { | ||
| 378 | const long *m = (long *)((u8 *)&mask->key + mask->range.start); | ||
| 379 | const long *s = (long *)((u8 *)src + mask->range.start); | ||
| 380 | long *d = (long *)((u8 *)dst + mask->range.start); | ||
| 381 | int i; | ||
| 382 | |||
| 383 | /* The memory outside of the 'mask->range' are not set since | ||
| 384 | * further operations on 'dst' only uses contents within | ||
| 385 | * 'mask->range'. | ||
| 386 | */ | ||
| 387 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | ||
| 388 | *d++ = *s++ & *m++; | ||
| 389 | } | ||
| 390 | |||
| 391 | #define TCP_FLAGS_OFFSET 13 | ||
| 392 | #define TCP_FLAG_MASK 0x3f | ||
| 393 | |||
| 394 | void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) | ||
| 395 | { | ||
| 396 | u8 tcp_flags = 0; | ||
| 397 | |||
| 398 | if ((flow->key.eth.type == htons(ETH_P_IP) || | ||
| 399 | flow->key.eth.type == htons(ETH_P_IPV6)) && | ||
| 400 | flow->key.ip.proto == IPPROTO_TCP && | ||
| 401 | likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { | ||
| 402 | u8 *tcp = (u8 *)tcp_hdr(skb); | ||
| 403 | tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; | ||
| 404 | } | ||
| 405 | |||
| 406 | spin_lock(&flow->lock); | ||
| 407 | flow->used = jiffies; | ||
| 408 | flow->packet_count++; | ||
| 409 | flow->byte_count += skb->len; | ||
| 410 | flow->tcp_flags |= tcp_flags; | ||
| 411 | spin_unlock(&flow->lock); | ||
| 412 | } | ||
| 413 | |||
| 414 | struct sw_flow_actions *ovs_flow_actions_alloc(int size) | ||
| 415 | { | ||
| 416 | struct sw_flow_actions *sfa; | ||
| 417 | |||
| 418 | if (size > MAX_ACTIONS_BUFSIZE) | ||
| 419 | return ERR_PTR(-EINVAL); | ||
| 420 | |||
| 421 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | ||
| 422 | if (!sfa) | ||
| 423 | return ERR_PTR(-ENOMEM); | ||
| 424 | |||
| 425 | sfa->actions_len = 0; | ||
| 426 | return sfa; | ||
| 427 | } | ||
| 428 | |||
| 429 | struct sw_flow *ovs_flow_alloc(void) | ||
| 430 | { | ||
| 431 | struct sw_flow *flow; | ||
| 432 | |||
| 433 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); | ||
| 434 | if (!flow) | ||
| 435 | return ERR_PTR(-ENOMEM); | ||
| 436 | |||
| 437 | spin_lock_init(&flow->lock); | ||
| 438 | flow->sf_acts = NULL; | ||
| 439 | flow->mask = NULL; | ||
| 440 | |||
| 441 | return flow; | ||
| 442 | } | ||
| 443 | |||
| 444 | static struct hlist_head *find_bucket(struct flow_table *table, u32 hash) | ||
| 445 | { | ||
| 446 | hash = jhash_1word(hash, table->hash_seed); | ||
| 447 | return flex_array_get(table->buckets, | ||
| 448 | (hash & (table->n_buckets - 1))); | ||
| 449 | } | ||
| 450 | |||
| 451 | static struct flex_array *alloc_buckets(unsigned int n_buckets) | ||
| 452 | { | ||
| 453 | struct flex_array *buckets; | ||
| 454 | int i, err; | ||
| 455 | |||
| 456 | buckets = flex_array_alloc(sizeof(struct hlist_head), | ||
| 457 | n_buckets, GFP_KERNEL); | ||
| 458 | if (!buckets) | ||
| 459 | return NULL; | ||
| 460 | |||
| 461 | err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); | ||
| 462 | if (err) { | ||
| 463 | flex_array_free(buckets); | ||
| 464 | return NULL; | ||
| 465 | } | ||
| 466 | |||
| 467 | for (i = 0; i < n_buckets; i++) | ||
| 468 | INIT_HLIST_HEAD((struct hlist_head *) | ||
| 469 | flex_array_get(buckets, i)); | ||
| 470 | |||
| 471 | return buckets; | ||
| 472 | } | ||
| 473 | |||
| 474 | static void free_buckets(struct flex_array *buckets) | ||
| 475 | { | ||
| 476 | flex_array_free(buckets); | ||
| 477 | } | ||
| 478 | |||
| 479 | static struct flow_table *__flow_tbl_alloc(int new_size) | ||
| 480 | { | ||
| 481 | struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL); | ||
| 482 | |||
| 483 | if (!table) | ||
| 484 | return NULL; | ||
| 485 | |||
| 486 | table->buckets = alloc_buckets(new_size); | ||
| 487 | |||
| 488 | if (!table->buckets) { | ||
| 489 | kfree(table); | ||
| 490 | return NULL; | ||
| 491 | } | ||
| 492 | table->n_buckets = new_size; | ||
| 493 | table->count = 0; | ||
| 494 | table->node_ver = 0; | ||
| 495 | table->keep_flows = false; | ||
| 496 | get_random_bytes(&table->hash_seed, sizeof(u32)); | ||
| 497 | table->mask_list = NULL; | ||
| 498 | |||
| 499 | return table; | ||
| 500 | } | ||
| 501 | |||
/* Free 'table' and, unless table->keep_flows is set, every flow in it.
 *
 * keep_flows is set when the flows were transferred to a replacement
 * table (see flow_table_copy_flows()); then only the bucket array and
 * the table struct are released and the shared mask list is left alone.
 * Must not run while readers may still traverse the table — callers use
 * RCU deferral via ovs_flow_tbl_destroy(). */
static void __flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del(&flow->hash_node[ver]);
			ovs_flow_free(flow, false);
		}
	}

	/* Every flow dropped its mask reference above, so the list must
	 * now be empty. */
	BUG_ON(!list_empty(table->mask_list));
	kfree(table->mask_list);

skip_flows:
	free_buckets(table->buckets);
	kfree(table);
}
| 528 | |||
/* Allocate a flow table with 'new_size' buckets and an empty mask list.
 * Returns NULL on allocation failure. */
struct flow_table *ovs_flow_tbl_alloc(int new_size)
{
	struct flow_table *table = __flow_tbl_alloc(new_size);

	if (!table)
		return NULL;

	table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
	if (!table->mask_list) {
		/* Setting keep_flows makes __flow_tbl_destroy() skip both
		 * the (empty) flow walk and the mask-list teardown, which
		 * would otherwise dereference the NULL mask_list we just
		 * failed to allocate. */
		table->keep_flows = true;
		__flow_tbl_destroy(table);
		return NULL;
	}
	INIT_LIST_HEAD(table->mask_list);

	return table;
}
| 546 | |||
| 547 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | ||
| 548 | { | ||
| 549 | struct flow_table *table = container_of(rcu, struct flow_table, rcu); | ||
| 550 | |||
| 551 | __flow_tbl_destroy(table); | ||
| 552 | } | ||
| 553 | |||
| 554 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) | ||
| 555 | { | ||
| 556 | if (!table) | ||
| 557 | return; | ||
| 558 | |||
| 559 | if (deferred) | ||
| 560 | call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb); | ||
| 561 | else | ||
| 562 | __flow_tbl_destroy(table); | ||
| 563 | } | ||
| 564 | |||
/* Return the next flow after position (*bucket, *last), updating both
 * so a subsequent call resumes where this one left off.  Used by the
 * flow-dump path to iterate the table across multiple netlink messages.
 * Returns NULL once the table is exhausted.  Caller must hold
 * rcu_read_lock (hlist_for_each_entry_rcu). */
struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = table->node_ver;
	while (*bucket < table->n_buckets) {
		i = 0;
		head = flex_array_get(table->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				/* Skip entries already reported in a
				 * previous call. */
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
| 590 | |||
| 591 | static void __tbl_insert(struct flow_table *table, struct sw_flow *flow) | ||
| 592 | { | ||
| 593 | struct hlist_head *head; | ||
| 594 | |||
| 595 | head = find_bucket(table, flow->hash); | ||
| 596 | hlist_add_head_rcu(&flow->hash_node[table->node_ver], head); | ||
| 597 | |||
| 598 | table->count++; | ||
| 599 | } | ||
| 600 | |||
/* Move every flow from 'old' into 'new'.
 *
 * The new table uses the opposite node_ver, so each flow's currently
 * unused hash_node slot is linked into the new table while the old
 * links stay intact for concurrent RCU readers.  The mask list is
 * shared between the tables, and old->keep_flows is set so destroying
 * the old table will not free the flows it still references. */
static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			__tbl_insert(new, flow);
	}

	new->mask_list = old->mask_list;
	old->keep_flows = true;
}
| 623 | |||
| 624 | static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buckets) | ||
| 625 | { | ||
| 626 | struct flow_table *new_table; | ||
| 627 | |||
| 628 | new_table = __flow_tbl_alloc(n_buckets); | ||
| 629 | if (!new_table) | ||
| 630 | return ERR_PTR(-ENOMEM); | ||
| 631 | |||
| 632 | flow_table_copy_flows(table, new_table); | ||
| 633 | |||
| 634 | return new_table; | ||
| 635 | } | ||
| 636 | |||
/* Rehash 'table' into a fresh table of the same size; the new table
 * gets a new random hash seed from __flow_tbl_alloc(). */
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets);
}
| 641 | |||
/* Rehash 'table' into a new table with twice as many buckets. */
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
{
	return __flow_tbl_rehash(table, table->n_buckets * 2);
}
| 646 | |||
| 647 | static void __flow_free(struct sw_flow *flow) | ||
| 648 | { | ||
| 649 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | ||
| 650 | kmem_cache_free(flow_cache, flow); | ||
| 651 | } | ||
| 652 | |||
| 653 | static void rcu_free_flow_callback(struct rcu_head *rcu) | ||
| 654 | { | ||
| 655 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); | ||
| 656 | |||
| 657 | __flow_free(flow); | ||
| 658 | } | ||
| 659 | |||
/* Free 'flow' and drop its reference on the associated mask.  NULL is
 * tolerated.  With 'deferred' the actual free waits for an RCU grace
 * period; the mask reference is dropped with the same deferral so the
 * mask cannot vanish while readers may still follow flow->mask. */
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	ovs_sw_flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		__flow_free(flow);
}
| 672 | |||
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible.
 * kfree_rcu() needs no callback; it frees the object via its embedded
 * rcu head once the grace period expires. */
void ovs_flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	kfree_rcu(sf_acts, rcu);
}
| 679 | |||
| 680 | static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) | 198 | static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) |
| 681 | { | 199 | { |
| 682 | struct qtag_prefix { | 200 | struct qtag_prefix { |
| @@ -910,6 +428,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) | |||
| 910 | struct tcphdr *tcp = tcp_hdr(skb); | 428 | struct tcphdr *tcp = tcp_hdr(skb); |
| 911 | key->ipv4.tp.src = tcp->source; | 429 | key->ipv4.tp.src = tcp->source; |
| 912 | key->ipv4.tp.dst = tcp->dest; | 430 | key->ipv4.tp.dst = tcp->dest; |
| 431 | key->ipv4.tp.flags = TCP_FLAGS_BE16(tcp); | ||
| 913 | } | 432 | } |
| 914 | } else if (key->ip.proto == IPPROTO_UDP) { | 433 | } else if (key->ip.proto == IPPROTO_UDP) { |
| 915 | if (udphdr_ok(skb)) { | 434 | if (udphdr_ok(skb)) { |
| @@ -978,6 +497,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) | |||
| 978 | struct tcphdr *tcp = tcp_hdr(skb); | 497 | struct tcphdr *tcp = tcp_hdr(skb); |
| 979 | key->ipv6.tp.src = tcp->source; | 498 | key->ipv6.tp.src = tcp->source; |
| 980 | key->ipv6.tp.dst = tcp->dest; | 499 | key->ipv6.tp.dst = tcp->dest; |
| 500 | key->ipv6.tp.flags = TCP_FLAGS_BE16(tcp); | ||
| 981 | } | 501 | } |
| 982 | } else if (key->ip.proto == NEXTHDR_UDP) { | 502 | } else if (key->ip.proto == NEXTHDR_UDP) { |
| 983 | if (udphdr_ok(skb)) { | 503 | if (udphdr_ok(skb)) { |
| @@ -1002,1080 +522,3 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key) | |||
| 1002 | 522 | ||
| 1003 | return 0; | 523 | return 0; |
| 1004 | } | 524 | } |
| 1005 | |||
| 1006 | static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, | ||
| 1007 | int key_end) | ||
| 1008 | { | ||
| 1009 | u32 *hash_key = (u32 *)((u8 *)key + key_start); | ||
| 1010 | int hash_u32s = (key_end - key_start) >> 2; | ||
| 1011 | |||
| 1012 | /* Make sure number of hash bytes are multiple of u32. */ | ||
| 1013 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | ||
| 1014 | |||
| 1015 | return jhash2(hash_key, hash_u32s, 0); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | static int flow_key_start(const struct sw_flow_key *key) | ||
| 1019 | { | ||
| 1020 | if (key->tun_key.ipv4_dst) | ||
| 1021 | return 0; | ||
| 1022 | else | ||
| 1023 | return rounddown(offsetof(struct sw_flow_key, phy), | ||
| 1024 | sizeof(long)); | ||
| 1025 | } | ||
| 1026 | |||
/* Compare the [key_start, key_end) byte ranges of two flow keys one
 * long-word at a time.  Offsets must be long-aligned (guaranteed by
 * flow_key_start() and the mask ranges).  Differences are OR-ed into an
 * accumulator instead of early-exiting, keeping the loop branch-free. */
static bool __cmp_key(const struct sw_flow_key *key1,
		const struct sw_flow_key *key2, int key_start, int key_end)
{
	const long *cp1 = (long *)((u8 *)key1 + key_start);
	const long *cp2 = (long *)((u8 *)key2 + key_start);
	long diffs = 0;
	int i;

	for (i = key_start; i < key_end; i += sizeof(long))
		diffs |= *cp1++ ^ *cp2++;

	return diffs == 0;
}
| 1040 | |||
/* Compare 'flow's masked key against 'key' over the given byte range. */
static bool __flow_cmp_masked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_end)
{
	return __cmp_key(&flow->key, key, key_start, key_end);
}
| 1046 | |||
/* Compare 'flow's original (unmasked) key against 'key' over the given
 * byte range. */
static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
		const struct sw_flow_key *key, int key_start, int key_end)
{
	return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
}
| 1052 | |||
| 1053 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 1054 | const struct sw_flow_key *key, int key_end) | ||
| 1055 | { | ||
| 1056 | int key_start; | ||
| 1057 | key_start = flow_key_start(key); | ||
| 1058 | |||
| 1059 | return __flow_cmp_unmasked_key(flow, key, key_start, key_end); | ||
| 1060 | |||
| 1061 | } | ||
| 1062 | |||
| 1063 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, | ||
| 1064 | struct sw_flow_match *match) | ||
| 1065 | { | ||
| 1066 | struct sw_flow_key *unmasked = match->key; | ||
| 1067 | int key_end = match->range.end; | ||
| 1068 | struct sw_flow *flow; | ||
| 1069 | |||
| 1070 | flow = ovs_flow_lookup(table, unmasked); | ||
| 1071 | if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end))) | ||
| 1072 | flow = NULL; | ||
| 1073 | |||
| 1074 | return flow; | ||
| 1075 | } | ||
| 1076 | |||
/* Look up 'unmasked' in 'table' under one particular 'mask': apply the
 * mask to the key, hash the significant range, then scan the matching
 * bucket for a flow installed with the same mask and masked key.
 * Returns the flow or NULL.  Caller must hold rcu_read_lock or
 * ovs_mutex (hlist_for_each_entry_rcu). */
static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
				    const struct sw_flow_key *unmasked,
				    struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;

	ovs_flow_key_mask(&masked_key, unmasked, mask);
	hash = ovs_flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(table, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
		/* Pointer compare on the mask is sufficient: masks are
		 * shared objects, one instance per distinct mask. */
		if (flow->mask == mask &&
		    __flow_cmp_masked_key(flow, &masked_key,
					  key_start, key_end))
			return flow;
	}
	return NULL;
}
| 1099 | |||
| 1100 | struct sw_flow *ovs_flow_lookup(struct flow_table *tbl, | ||
| 1101 | const struct sw_flow_key *key) | ||
| 1102 | { | ||
| 1103 | struct sw_flow *flow = NULL; | ||
| 1104 | struct sw_flow_mask *mask; | ||
| 1105 | |||
| 1106 | list_for_each_entry_rcu(mask, tbl->mask_list, list) { | ||
| 1107 | flow = ovs_masked_flow_lookup(tbl, key, mask); | ||
| 1108 | if (flow) /* Found */ | ||
| 1109 | break; | ||
| 1110 | } | ||
| 1111 | |||
| 1112 | return flow; | ||
| 1113 | } | ||
| 1114 | |||
| 1115 | |||
| 1116 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow) | ||
| 1117 | { | ||
| 1118 | flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start, | ||
| 1119 | flow->mask->range.end); | ||
| 1120 | __tbl_insert(table, flow); | ||
| 1121 | } | ||
| 1122 | |||
/* Unlink 'flow' from 'table'.  RCU readers may still find the flow
 * until a grace period passes; the caller is responsible for freeing it
 * afterwards (typically ovs_flow_free(flow, true)). */
void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[table->node_ver]);
	table->count--;
}
| 1129 | |||
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.
 * An entry of -1 marks a nested/variable-length attribute whose payload
 * is validated by its own parser rather than by length here. */
const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_TUNNEL] = -1,
};
| 1150 | |||
| 1151 | static bool is_all_zero(const u8 *fp, size_t size) | ||
| 1152 | { | ||
| 1153 | int i; | ||
| 1154 | |||
| 1155 | if (!fp) | ||
| 1156 | return false; | ||
| 1157 | |||
| 1158 | for (i = 0; i < size; i++) | ||
| 1159 | if (fp[i]) | ||
| 1160 | return false; | ||
| 1161 | |||
| 1162 | return true; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | static int __parse_flow_nlattrs(const struct nlattr *attr, | ||
| 1166 | const struct nlattr *a[], | ||
| 1167 | u64 *attrsp, bool nz) | ||
| 1168 | { | ||
| 1169 | const struct nlattr *nla; | ||
| 1170 | u32 attrs; | ||
| 1171 | int rem; | ||
| 1172 | |||
| 1173 | attrs = *attrsp; | ||
| 1174 | nla_for_each_nested(nla, attr, rem) { | ||
| 1175 | u16 type = nla_type(nla); | ||
| 1176 | int expected_len; | ||
| 1177 | |||
| 1178 | if (type > OVS_KEY_ATTR_MAX) { | ||
| 1179 | OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", | ||
| 1180 | type, OVS_KEY_ATTR_MAX); | ||
| 1181 | return -EINVAL; | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | if (attrs & (1 << type)) { | ||
| 1185 | OVS_NLERR("Duplicate key attribute (type %d).\n", type); | ||
| 1186 | return -EINVAL; | ||
| 1187 | } | ||
| 1188 | |||
| 1189 | expected_len = ovs_key_lens[type]; | ||
| 1190 | if (nla_len(nla) != expected_len && expected_len != -1) { | ||
| 1191 | OVS_NLERR("Key attribute has unexpected length (type=%d" | ||
| 1192 | ", length=%d, expected=%d).\n", type, | ||
| 1193 | nla_len(nla), expected_len); | ||
| 1194 | return -EINVAL; | ||
| 1195 | } | ||
| 1196 | |||
| 1197 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | ||
| 1198 | attrs |= 1 << type; | ||
| 1199 | a[type] = nla; | ||
| 1200 | } | ||
| 1201 | } | ||
| 1202 | if (rem) { | ||
| 1203 | OVS_NLERR("Message has %d unknown bytes.\n", rem); | ||
| 1204 | return -EINVAL; | ||
| 1205 | } | ||
| 1206 | |||
| 1207 | *attrsp = attrs; | ||
| 1208 | return 0; | ||
| 1209 | } | ||
| 1210 | |||
/* Parse mask attributes: all-zero payloads are dropped ("don't care"). */
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, true);
}
| 1216 | |||
/* Parse key attributes: every well-formed attribute is kept, including
 * all-zero payloads. */
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, false);
}
| 1222 | |||
/* Parse a nested OVS_KEY_ATTR_TUNNEL attribute into match->key (or
 * match->mask when 'is_mask'), accumulating tunnel flags as the
 * corresponding attributes are seen.  For a key (not a mask) the
 * destination address and TTL are mandatory.  Returns 0 or -EINVAL. */
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
			     struct sw_flow_match *match, bool is_mask)
{
	struct nlattr *a;
	int rem;
	bool ttl = false;
	__be16 tun_flags = 0;

	nla_for_each_nested(a, attr, rem) {
		int type = nla_type(a);
		/* Expected payload length per tunnel sub-attribute;
		 * flag attributes carry no payload (length 0). */
		static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = {
			[OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64),
			[OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32),
			[OVS_TUNNEL_KEY_ATTR_TOS] = 1,
			[OVS_TUNNEL_KEY_ATTR_TTL] = 1,
			[OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0,
			[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
		};

		if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
			OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
			type, OVS_TUNNEL_KEY_ATTR_MAX);
			return -EINVAL;
		}

		if (ovs_tunnel_key_lens[type] != nla_len(a)) {
			OVS_NLERR("IPv4 tunnel attribute type has unexpected "
				  " length (type=%d, length=%d, expected=%d).\n",
				  type, nla_len(a), ovs_tunnel_key_lens[type]);
			return -EINVAL;
		}

		switch (type) {
		case OVS_TUNNEL_KEY_ATTR_ID:
			SW_FLOW_KEY_PUT(match, tun_key.tun_id,
					nla_get_be64(a), is_mask);
			tun_flags |= TUNNEL_KEY;
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
					nla_get_be32(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TOS:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
					nla_get_u8(a), is_mask);
			break;
		case OVS_TUNNEL_KEY_ATTR_TTL:
			SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
					nla_get_u8(a), is_mask);
			ttl = true;
			break;
		case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
			tun_flags |= TUNNEL_DONT_FRAGMENT;
			break;
		case OVS_TUNNEL_KEY_ATTR_CSUM:
			tun_flags |= TUNNEL_CSUM;
			break;
		default:
			return -EINVAL;
		}
	}

	SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);

	if (rem > 0) {
		OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
		return -EINVAL;
	}

	/* Only a key must be complete; a mask may wildcard anything. */
	if (!is_mask) {
		if (!match->key->tun_key.ipv4_dst) {
			OVS_NLERR("IPv4 tunnel destination address is zero.\n");
			return -EINVAL;
		}

		if (!ttl) {
			OVS_NLERR("IPv4 tunnel TTL not specified.\n");
			return -EINVAL;
		}
	}

	return 0;
}
| 1311 | |||
/* Emit a nested OVS_KEY_ATTR_TUNNEL attribute describing 'output' into
 * 'skb'.  'tun_key' is the flow's key and 'output' the values actually
 * serialized (key or mask).  Optional fields are emitted only when
 * nonzero / flagged; TTL is always emitted.  Returns 0 or -EMSGSIZE.
 * NOTE(review): error paths return without nla_nest_cancel(); this
 * appears to rely on callers discarding the whole message on
 * -EMSGSIZE — confirm against the callers. */
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
			   const struct ovs_key_ipv4_tunnel *tun_key,
			   const struct ovs_key_ipv4_tunnel *output)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (output->ipv4_dst &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (output->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
| 1346 | |||
/* Consume the metadata attributes (priority, in_port, skb_mark, tunnel)
 * from a[] into 'match', clearing each handled bit in *attrs so the
 * caller can detect leftovers.  Returns 0 or -EINVAL. */
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask)
			in_port = 0xffffffff; /* Always exact match in_port. */
		else if (in_port >= DP_MAX_PORTS)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		/* No in_port attribute: record "no port" in the key. */
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					     is_mask))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}
| 1384 | |||
/* Translate the parsed %OVS_KEY_ATTR_* attributes in a[] into 'match'
 * (key fields when !is_mask, mask fields otherwise).  Each handled
 * attribute clears its bit in the local copy of 'attrs'; any bit left
 * set at the end means an attribute this function cannot place, so
 * -EINVAL is returned.  'orig_attrs' is kept because metadata and L2
 * handling clear bits, but L4 placement (ipv4.tp vs ipv6.tp) must be
 * decided by whether an IPv4 attribute was *originally* present. */
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask)
{
	int err;
	u64 orig_attrs = attrs;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		/* The TAG_PRESENT bit must always be set/matched exactly. */
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
			else
				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	} else if (!is_mask)
		/* No VLAN attribute in a key: mark as "no tag". */
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
					ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		/* No EtherType attribute: treat as 802.2 (non-Ethernet II). */
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				ipv6_key->ipv6_src,
				sizeof(match->key->ipv6.addr.src),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				ipv6_key->ipv6_dst,
				sizeof(match->key->ipv6.addr.dst),
				is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
				  arp_key->arp_op);
			return -EINVAL;
		}

		/* ARP reuses the ipv4 address fields and ip.proto for
		 * the opcode. */
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
			arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					tcp_key->tcp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					tcp_key->tcp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					udp_key->udp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					udp_key->udp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					sctp_key->sctp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					sctp_key->sctp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		/* ICMP type/code are stored in the transport port slots. */
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
			nd_key->nd_target,
			sizeof(match->key->ipv6.nd.target),
			is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
			nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	/* Any bit still set is an attribute we do not know how to place. */
	if (attrs != 0)
		return -EINVAL;

	return 0;
}
| 1618 | |||
| 1619 | /** | ||
| 1620 | * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and | ||
| 1621 | * mask. In case the 'mask' is NULL, the flow is treated as exact match | ||
| 1622 | * flow. Otherwise, it is treated as a wildcarded flow, except the mask | ||
| 1623 | * does not include any don't care bit. | ||
| 1624 | * @match: receives the extracted flow match information. | ||
| 1625 | * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
| 1626 | * sequence. The fields should of the packet that triggered the creation | ||
| 1627 | * of this flow. | ||
| 1628 | * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink | ||
| 1629 | * attribute specifies the mask field of the wildcarded flow. | ||
| 1630 | */ | ||
| 1631 | int ovs_match_from_nlattrs(struct sw_flow_match *match, | ||
| 1632 | const struct nlattr *key, | ||
| 1633 | const struct nlattr *mask) | ||
| 1634 | { | ||
| 1635 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
| 1636 | const struct nlattr *encap; | ||
| 1637 | u64 key_attrs = 0; | ||
| 1638 | u64 mask_attrs = 0; | ||
| 1639 | bool encap_valid = false; | ||
| 1640 | int err; | ||
| 1641 | |||
| 1642 | err = parse_flow_nlattrs(key, a, &key_attrs); | ||
| 1643 | if (err) | ||
| 1644 | return err; | ||
| 1645 | |||
| 1646 | if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) && | ||
| 1647 | (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) && | ||
| 1648 | (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) { | ||
| 1649 | __be16 tci; | ||
| 1650 | |||
| 1651 | if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && | ||
| 1652 | (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { | ||
| 1653 | OVS_NLERR("Invalid Vlan frame.\n"); | ||
| 1654 | return -EINVAL; | ||
| 1655 | } | ||
| 1656 | |||
| 1657 | key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
| 1658 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
| 1659 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
| 1660 | key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
| 1661 | encap_valid = true; | ||
| 1662 | |||
| 1663 | if (tci & htons(VLAN_TAG_PRESENT)) { | ||
| 1664 | err = parse_flow_nlattrs(encap, a, &key_attrs); | ||
| 1665 | if (err) | ||
| 1666 | return err; | ||
| 1667 | } else if (!tci) { | ||
| 1668 | /* Corner case for truncated 802.1Q header. */ | ||
| 1669 | if (nla_len(encap)) { | ||
| 1670 | OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); | ||
| 1671 | return -EINVAL; | ||
| 1672 | } | ||
| 1673 | } else { | ||
| 1674 | OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); | ||
| 1675 | return -EINVAL; | ||
| 1676 | } | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | err = ovs_key_from_nlattrs(match, key_attrs, a, false); | ||
| 1680 | if (err) | ||
| 1681 | return err; | ||
| 1682 | |||
| 1683 | if (mask) { | ||
| 1684 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
| 1685 | if (err) | ||
| 1686 | return err; | ||
| 1687 | |||
| 1688 | if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) { | ||
| 1689 | __be16 eth_type = 0; | ||
| 1690 | __be16 tci = 0; | ||
| 1691 | |||
| 1692 | if (!encap_valid) { | ||
| 1693 | OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); | ||
| 1694 | return -EINVAL; | ||
| 1695 | } | ||
| 1696 | |||
| 1697 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); | ||
| 1698 | if (a[OVS_KEY_ATTR_ETHERTYPE]) | ||
| 1699 | eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]); | ||
| 1700 | |||
| 1701 | if (eth_type == htons(0xffff)) { | ||
| 1702 | mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); | ||
| 1703 | encap = a[OVS_KEY_ATTR_ENCAP]; | ||
| 1704 | err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); | ||
| 1705 | } else { | ||
| 1706 | OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", | ||
| 1707 | ntohs(eth_type)); | ||
| 1708 | return -EINVAL; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | if (a[OVS_KEY_ATTR_VLAN]) | ||
| 1712 | tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); | ||
| 1713 | |||
| 1714 | if (!(tci & htons(VLAN_TAG_PRESENT))) { | ||
| 1715 | OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); | ||
| 1716 | return -EINVAL; | ||
| 1717 | } | ||
| 1718 | } | ||
| 1719 | |||
| 1720 | err = ovs_key_from_nlattrs(match, mask_attrs, a, true); | ||
| 1721 | if (err) | ||
| 1722 | return err; | ||
| 1723 | } else { | ||
| 1724 | /* Populate exact match flow's key mask. */ | ||
| 1725 | if (match->mask) | ||
| 1726 | ovs_sw_flow_mask_set(match->mask, &match->range, 0xff); | ||
| 1727 | } | ||
| 1728 | |||
| 1729 | if (!ovs_match_validate(match, key_attrs, mask_attrs)) | ||
| 1730 | return -EINVAL; | ||
| 1731 | |||
| 1732 | return 0; | ||
| 1733 | } | ||
| 1734 | |||
| 1735 | /** | ||
| 1736 | * ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key. | ||
| 1737 | * @flow: Receives extracted in_port, priority, tun_key and skb_mark. | ||
| 1738 | * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute | ||
| 1739 | * sequence. | ||
| 1740 | * | ||
| 1741 | * This parses a series of Netlink attributes that form a flow key, which must | ||
| 1742 | * take the same form accepted by flow_from_nlattrs(), but only enough of it to | ||
| 1743 | * get the metadata, that is, the parts of the flow key that cannot be | ||
| 1744 | * extracted from the packet itself. | ||
| 1745 | */ | ||
| 1746 | |||
| 1747 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, | ||
| 1748 | const struct nlattr *attr) | ||
| 1749 | { | ||
| 1750 | struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key; | ||
| 1751 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | ||
| 1752 | u64 attrs = 0; | ||
| 1753 | int err; | ||
| 1754 | struct sw_flow_match match; | ||
| 1755 | |||
| 1756 | flow->key.phy.in_port = DP_MAX_PORTS; | ||
| 1757 | flow->key.phy.priority = 0; | ||
| 1758 | flow->key.phy.skb_mark = 0; | ||
| 1759 | memset(tun_key, 0, sizeof(flow->key.tun_key)); | ||
| 1760 | |||
| 1761 | err = parse_flow_nlattrs(attr, a, &attrs); | ||
| 1762 | if (err) | ||
| 1763 | return -EINVAL; | ||
| 1764 | |||
| 1765 | memset(&match, 0, sizeof(match)); | ||
| 1766 | match.key = &flow->key; | ||
| 1767 | |||
| 1768 | err = metadata_from_nlattrs(&match, &attrs, a, false); | ||
| 1769 | if (err) | ||
| 1770 | return err; | ||
| 1771 | |||
| 1772 | return 0; | ||
| 1773 | } | ||
| 1774 | |||
| 1775 | int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, | ||
| 1776 | const struct sw_flow_key *output, struct sk_buff *skb) | ||
| 1777 | { | ||
| 1778 | struct ovs_key_ethernet *eth_key; | ||
| 1779 | struct nlattr *nla, *encap; | ||
| 1780 | bool is_mask = (swkey != output); | ||
| 1781 | |||
| 1782 | if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) | ||
| 1783 | goto nla_put_failure; | ||
| 1784 | |||
| 1785 | if ((swkey->tun_key.ipv4_dst || is_mask) && | ||
| 1786 | ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) | ||
| 1787 | goto nla_put_failure; | ||
| 1788 | |||
| 1789 | if (swkey->phy.in_port == DP_MAX_PORTS) { | ||
| 1790 | if (is_mask && (output->phy.in_port == 0xffff)) | ||
| 1791 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) | ||
| 1792 | goto nla_put_failure; | ||
| 1793 | } else { | ||
| 1794 | u16 upper_u16; | ||
| 1795 | upper_u16 = !is_mask ? 0 : 0xffff; | ||
| 1796 | |||
| 1797 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, | ||
| 1798 | (upper_u16 << 16) | output->phy.in_port)) | ||
| 1799 | goto nla_put_failure; | ||
| 1800 | } | ||
| 1801 | |||
| 1802 | if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) | ||
| 1803 | goto nla_put_failure; | ||
| 1804 | |||
| 1805 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); | ||
| 1806 | if (!nla) | ||
| 1807 | goto nla_put_failure; | ||
| 1808 | |||
| 1809 | eth_key = nla_data(nla); | ||
| 1810 | memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); | ||
| 1811 | memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); | ||
| 1812 | |||
| 1813 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { | ||
| 1814 | __be16 eth_type; | ||
| 1815 | eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); | ||
| 1816 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || | ||
| 1817 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) | ||
| 1818 | goto nla_put_failure; | ||
| 1819 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); | ||
| 1820 | if (!swkey->eth.tci) | ||
| 1821 | goto unencap; | ||
| 1822 | } else | ||
| 1823 | encap = NULL; | ||
| 1824 | |||
| 1825 | if (swkey->eth.type == htons(ETH_P_802_2)) { | ||
| 1826 | /* | ||
| 1827 | * Ethertype 802.2 is represented in the netlink with omitted | ||
| 1828 | * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and | ||
| 1829 | * 0xffff in the mask attribute. Ethertype can also | ||
| 1830 | * be wildcarded. | ||
| 1831 | */ | ||
| 1832 | if (is_mask && output->eth.type) | ||
| 1833 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, | ||
| 1834 | output->eth.type)) | ||
| 1835 | goto nla_put_failure; | ||
| 1836 | goto unencap; | ||
| 1837 | } | ||
| 1838 | |||
| 1839 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) | ||
| 1840 | goto nla_put_failure; | ||
| 1841 | |||
| 1842 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1843 | struct ovs_key_ipv4 *ipv4_key; | ||
| 1844 | |||
| 1845 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key)); | ||
| 1846 | if (!nla) | ||
| 1847 | goto nla_put_failure; | ||
| 1848 | ipv4_key = nla_data(nla); | ||
| 1849 | ipv4_key->ipv4_src = output->ipv4.addr.src; | ||
| 1850 | ipv4_key->ipv4_dst = output->ipv4.addr.dst; | ||
| 1851 | ipv4_key->ipv4_proto = output->ip.proto; | ||
| 1852 | ipv4_key->ipv4_tos = output->ip.tos; | ||
| 1853 | ipv4_key->ipv4_ttl = output->ip.ttl; | ||
| 1854 | ipv4_key->ipv4_frag = output->ip.frag; | ||
| 1855 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1856 | struct ovs_key_ipv6 *ipv6_key; | ||
| 1857 | |||
| 1858 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); | ||
| 1859 | if (!nla) | ||
| 1860 | goto nla_put_failure; | ||
| 1861 | ipv6_key = nla_data(nla); | ||
| 1862 | memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, | ||
| 1863 | sizeof(ipv6_key->ipv6_src)); | ||
| 1864 | memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, | ||
| 1865 | sizeof(ipv6_key->ipv6_dst)); | ||
| 1866 | ipv6_key->ipv6_label = output->ipv6.label; | ||
| 1867 | ipv6_key->ipv6_proto = output->ip.proto; | ||
| 1868 | ipv6_key->ipv6_tclass = output->ip.tos; | ||
| 1869 | ipv6_key->ipv6_hlimit = output->ip.ttl; | ||
| 1870 | ipv6_key->ipv6_frag = output->ip.frag; | ||
| 1871 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | ||
| 1872 | swkey->eth.type == htons(ETH_P_RARP)) { | ||
| 1873 | struct ovs_key_arp *arp_key; | ||
| 1874 | |||
| 1875 | nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); | ||
| 1876 | if (!nla) | ||
| 1877 | goto nla_put_failure; | ||
| 1878 | arp_key = nla_data(nla); | ||
| 1879 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); | ||
| 1880 | arp_key->arp_sip = output->ipv4.addr.src; | ||
| 1881 | arp_key->arp_tip = output->ipv4.addr.dst; | ||
| 1882 | arp_key->arp_op = htons(output->ip.proto); | ||
| 1883 | memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); | ||
| 1884 | memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); | ||
| 1885 | } | ||
| 1886 | |||
| 1887 | if ((swkey->eth.type == htons(ETH_P_IP) || | ||
| 1888 | swkey->eth.type == htons(ETH_P_IPV6)) && | ||
| 1889 | swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 1890 | |||
| 1891 | if (swkey->ip.proto == IPPROTO_TCP) { | ||
| 1892 | struct ovs_key_tcp *tcp_key; | ||
| 1893 | |||
| 1894 | nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key)); | ||
| 1895 | if (!nla) | ||
| 1896 | goto nla_put_failure; | ||
| 1897 | tcp_key = nla_data(nla); | ||
| 1898 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1899 | tcp_key->tcp_src = output->ipv4.tp.src; | ||
| 1900 | tcp_key->tcp_dst = output->ipv4.tp.dst; | ||
| 1901 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1902 | tcp_key->tcp_src = output->ipv6.tp.src; | ||
| 1903 | tcp_key->tcp_dst = output->ipv6.tp.dst; | ||
| 1904 | } | ||
| 1905 | } else if (swkey->ip.proto == IPPROTO_UDP) { | ||
| 1906 | struct ovs_key_udp *udp_key; | ||
| 1907 | |||
| 1908 | nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key)); | ||
| 1909 | if (!nla) | ||
| 1910 | goto nla_put_failure; | ||
| 1911 | udp_key = nla_data(nla); | ||
| 1912 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1913 | udp_key->udp_src = output->ipv4.tp.src; | ||
| 1914 | udp_key->udp_dst = output->ipv4.tp.dst; | ||
| 1915 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1916 | udp_key->udp_src = output->ipv6.tp.src; | ||
| 1917 | udp_key->udp_dst = output->ipv6.tp.dst; | ||
| 1918 | } | ||
| 1919 | } else if (swkey->ip.proto == IPPROTO_SCTP) { | ||
| 1920 | struct ovs_key_sctp *sctp_key; | ||
| 1921 | |||
| 1922 | nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); | ||
| 1923 | if (!nla) | ||
| 1924 | goto nla_put_failure; | ||
| 1925 | sctp_key = nla_data(nla); | ||
| 1926 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1927 | sctp_key->sctp_src = swkey->ipv4.tp.src; | ||
| 1928 | sctp_key->sctp_dst = swkey->ipv4.tp.dst; | ||
| 1929 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1930 | sctp_key->sctp_src = swkey->ipv6.tp.src; | ||
| 1931 | sctp_key->sctp_dst = swkey->ipv6.tp.dst; | ||
| 1932 | } | ||
| 1933 | } else if (swkey->eth.type == htons(ETH_P_IP) && | ||
| 1934 | swkey->ip.proto == IPPROTO_ICMP) { | ||
| 1935 | struct ovs_key_icmp *icmp_key; | ||
| 1936 | |||
| 1937 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key)); | ||
| 1938 | if (!nla) | ||
| 1939 | goto nla_put_failure; | ||
| 1940 | icmp_key = nla_data(nla); | ||
| 1941 | icmp_key->icmp_type = ntohs(output->ipv4.tp.src); | ||
| 1942 | icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); | ||
| 1943 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && | ||
| 1944 | swkey->ip.proto == IPPROTO_ICMPV6) { | ||
| 1945 | struct ovs_key_icmpv6 *icmpv6_key; | ||
| 1946 | |||
| 1947 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, | ||
| 1948 | sizeof(*icmpv6_key)); | ||
| 1949 | if (!nla) | ||
| 1950 | goto nla_put_failure; | ||
| 1951 | icmpv6_key = nla_data(nla); | ||
| 1952 | icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); | ||
| 1953 | icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); | ||
| 1954 | |||
| 1955 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || | ||
| 1956 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { | ||
| 1957 | struct ovs_key_nd *nd_key; | ||
| 1958 | |||
| 1959 | nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); | ||
| 1960 | if (!nla) | ||
| 1961 | goto nla_put_failure; | ||
| 1962 | nd_key = nla_data(nla); | ||
| 1963 | memcpy(nd_key->nd_target, &output->ipv6.nd.target, | ||
| 1964 | sizeof(nd_key->nd_target)); | ||
| 1965 | memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); | ||
| 1966 | memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); | ||
| 1967 | } | ||
| 1968 | } | ||
| 1969 | } | ||
| 1970 | |||
| 1971 | unencap: | ||
| 1972 | if (encap) | ||
| 1973 | nla_nest_end(skb, encap); | ||
| 1974 | |||
| 1975 | return 0; | ||
| 1976 | |||
| 1977 | nla_put_failure: | ||
| 1978 | return -EMSGSIZE; | ||
| 1979 | } | ||
| 1980 | |||
| 1981 | /* Initializes the flow module. | ||
| 1982 | * Returns zero if successful or a negative error code. */ | ||
| 1983 | int ovs_flow_init(void) | ||
| 1984 | { | ||
| 1985 | BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); | ||
| 1986 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | ||
| 1987 | |||
| 1988 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, | ||
| 1989 | 0, NULL); | ||
| 1990 | if (flow_cache == NULL) | ||
| 1991 | return -ENOMEM; | ||
| 1992 | |||
| 1993 | return 0; | ||
| 1994 | } | ||
| 1995 | |||
| 1996 | /* Uninitializes the flow module. */ | ||
| 1997 | void ovs_flow_exit(void) | ||
| 1998 | { | ||
| 1999 | kmem_cache_destroy(flow_cache); | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void) | ||
| 2003 | { | ||
| 2004 | struct sw_flow_mask *mask; | ||
| 2005 | |||
| 2006 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
| 2007 | if (mask) | ||
| 2008 | mask->ref_count = 0; | ||
| 2009 | |||
| 2010 | return mask; | ||
| 2011 | } | ||
| 2012 | |||
| 2013 | void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask) | ||
| 2014 | { | ||
| 2015 | mask->ref_count++; | ||
| 2016 | } | ||
| 2017 | |||
| 2018 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred) | ||
| 2019 | { | ||
| 2020 | if (!mask) | ||
| 2021 | return; | ||
| 2022 | |||
| 2023 | BUG_ON(!mask->ref_count); | ||
| 2024 | mask->ref_count--; | ||
| 2025 | |||
| 2026 | if (!mask->ref_count) { | ||
| 2027 | list_del_rcu(&mask->list); | ||
| 2028 | if (deferred) | ||
| 2029 | kfree_rcu(mask, rcu); | ||
| 2030 | else | ||
| 2031 | kfree(mask); | ||
| 2032 | } | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a, | ||
| 2036 | const struct sw_flow_mask *b) | ||
| 2037 | { | ||
| 2038 | u8 *a_ = (u8 *)&a->key + a->range.start; | ||
| 2039 | u8 *b_ = (u8 *)&b->key + b->range.start; | ||
| 2040 | |||
| 2041 | return (a->range.end == b->range.end) | ||
| 2042 | && (a->range.start == b->range.start) | ||
| 2043 | && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); | ||
| 2044 | } | ||
| 2045 | |||
| 2046 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl, | ||
| 2047 | const struct sw_flow_mask *mask) | ||
| 2048 | { | ||
| 2049 | struct list_head *ml; | ||
| 2050 | |||
| 2051 | list_for_each(ml, tbl->mask_list) { | ||
| 2052 | struct sw_flow_mask *m; | ||
| 2053 | m = container_of(ml, struct sw_flow_mask, list); | ||
| 2054 | if (ovs_sw_flow_mask_equal(mask, m)) | ||
| 2055 | return m; | ||
| 2056 | } | ||
| 2057 | |||
| 2058 | return NULL; | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | /** | ||
| 2062 | * add a new mask into the mask list. | ||
| 2063 | * The caller needs to make sure that 'mask' is not the same | ||
| 2064 | * as any masks that are already on the list. | ||
| 2065 | */ | ||
| 2066 | void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask) | ||
| 2067 | { | ||
| 2068 | list_add_rcu(&mask->list, tbl->mask_list); | ||
| 2069 | } | ||
| 2070 | |||
| 2071 | /** | ||
| 2072 | * Set 'range' fields in the mask to the value of 'val'. | ||
| 2073 | */ | ||
| 2074 | static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask, | ||
| 2075 | struct sw_flow_key_range *range, u8 val) | ||
| 2076 | { | ||
| 2077 | u8 *m = (u8 *)&mask->key + range->start; | ||
| 2078 | |||
| 2079 | mask->range = *range; | ||
| 2080 | memset(m, val, range_n_bytes(range)); | ||
| 2081 | } | ||
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 212fbf7510c4..1510f51dbf74 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h | |||
| @@ -33,14 +33,6 @@ | |||
| 33 | #include <net/inet_ecn.h> | 33 | #include <net/inet_ecn.h> |
| 34 | 34 | ||
| 35 | struct sk_buff; | 35 | struct sk_buff; |
| 36 | struct sw_flow_mask; | ||
| 37 | struct flow_table; | ||
| 38 | |||
| 39 | struct sw_flow_actions { | ||
| 40 | struct rcu_head rcu; | ||
| 41 | u32 actions_len; | ||
| 42 | struct nlattr actions[]; | ||
| 43 | }; | ||
| 44 | 36 | ||
| 45 | /* Used to memset ovs_key_ipv4_tunnel padding. */ | 37 | /* Used to memset ovs_key_ipv4_tunnel padding. */ |
| 46 | #define OVS_TUNNEL_KEY_SIZE \ | 38 | #define OVS_TUNNEL_KEY_SIZE \ |
| @@ -101,6 +93,7 @@ struct sw_flow_key { | |||
| 101 | struct { | 93 | struct { |
| 102 | __be16 src; /* TCP/UDP/SCTP source port. */ | 94 | __be16 src; /* TCP/UDP/SCTP source port. */ |
| 103 | __be16 dst; /* TCP/UDP/SCTP destination port. */ | 95 | __be16 dst; /* TCP/UDP/SCTP destination port. */ |
| 96 | __be16 flags; /* TCP flags. */ | ||
| 104 | } tp; | 97 | } tp; |
| 105 | struct { | 98 | struct { |
| 106 | u8 sha[ETH_ALEN]; /* ARP source hardware address. */ | 99 | u8 sha[ETH_ALEN]; /* ARP source hardware address. */ |
| @@ -117,6 +110,7 @@ struct sw_flow_key { | |||
| 117 | struct { | 110 | struct { |
| 118 | __be16 src; /* TCP/UDP/SCTP source port. */ | 111 | __be16 src; /* TCP/UDP/SCTP source port. */ |
| 119 | __be16 dst; /* TCP/UDP/SCTP destination port. */ | 112 | __be16 dst; /* TCP/UDP/SCTP destination port. */ |
| 113 | __be16 flags; /* TCP flags. */ | ||
| 120 | } tp; | 114 | } tp; |
| 121 | struct { | 115 | struct { |
| 122 | struct in6_addr target; /* ND target address. */ | 116 | struct in6_addr target; /* ND target address. */ |
| @@ -127,6 +121,31 @@ struct sw_flow_key { | |||
| 127 | }; | 121 | }; |
| 128 | } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ | 122 | } __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */ |
| 129 | 123 | ||
| 124 | struct sw_flow_key_range { | ||
| 125 | size_t start; | ||
| 126 | size_t end; | ||
| 127 | }; | ||
| 128 | |||
| 129 | struct sw_flow_mask { | ||
| 130 | int ref_count; | ||
| 131 | struct rcu_head rcu; | ||
| 132 | struct list_head list; | ||
| 133 | struct sw_flow_key_range range; | ||
| 134 | struct sw_flow_key key; | ||
| 135 | }; | ||
| 136 | |||
| 137 | struct sw_flow_match { | ||
| 138 | struct sw_flow_key *key; | ||
| 139 | struct sw_flow_key_range range; | ||
| 140 | struct sw_flow_mask *mask; | ||
| 141 | }; | ||
| 142 | |||
| 143 | struct sw_flow_actions { | ||
| 144 | struct rcu_head rcu; | ||
| 145 | u32 actions_len; | ||
| 146 | struct nlattr actions[]; | ||
| 147 | }; | ||
| 148 | |||
| 130 | struct sw_flow { | 149 | struct sw_flow { |
| 131 | struct rcu_head rcu; | 150 | struct rcu_head rcu; |
| 132 | struct hlist_node hash_node[2]; | 151 | struct hlist_node hash_node[2]; |
| @@ -141,23 +160,9 @@ struct sw_flow { | |||
| 141 | unsigned long used; /* Last used time (in jiffies). */ | 160 | unsigned long used; /* Last used time (in jiffies). */ |
| 142 | u64 packet_count; /* Number of packets matched. */ | 161 | u64 packet_count; /* Number of packets matched. */ |
| 143 | u64 byte_count; /* Number of bytes matched. */ | 162 | u64 byte_count; /* Number of bytes matched. */ |
| 144 | u8 tcp_flags; /* Union of seen TCP flags. */ | 163 | __be16 tcp_flags; /* Union of seen TCP flags. */ |
| 145 | }; | ||
| 146 | |||
| 147 | struct sw_flow_key_range { | ||
| 148 | size_t start; | ||
| 149 | size_t end; | ||
| 150 | }; | 164 | }; |
| 151 | 165 | ||
| 152 | struct sw_flow_match { | ||
| 153 | struct sw_flow_key *key; | ||
| 154 | struct sw_flow_key_range range; | ||
| 155 | struct sw_flow_mask *mask; | ||
| 156 | }; | ||
| 157 | |||
| 158 | void ovs_match_init(struct sw_flow_match *match, | ||
| 159 | struct sw_flow_key *key, struct sw_flow_mask *mask); | ||
| 160 | |||
| 161 | struct arp_eth_header { | 166 | struct arp_eth_header { |
| 162 | __be16 ar_hrd; /* format of hardware address */ | 167 | __be16 ar_hrd; /* format of hardware address */ |
| 163 | __be16 ar_pro; /* format of protocol address */ | 168 | __be16 ar_pro; /* format of protocol address */ |
| @@ -172,88 +177,9 @@ struct arp_eth_header { | |||
| 172 | unsigned char ar_tip[4]; /* target IP address */ | 177 | unsigned char ar_tip[4]; /* target IP address */ |
| 173 | } __packed; | 178 | } __packed; |
| 174 | 179 | ||
| 175 | int ovs_flow_init(void); | ||
| 176 | void ovs_flow_exit(void); | ||
| 177 | |||
| 178 | struct sw_flow *ovs_flow_alloc(void); | ||
| 179 | void ovs_flow_deferred_free(struct sw_flow *); | ||
| 180 | void ovs_flow_free(struct sw_flow *, bool deferred); | ||
| 181 | |||
| 182 | struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len); | ||
| 183 | void ovs_flow_deferred_free_acts(struct sw_flow_actions *); | ||
| 184 | |||
| 185 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); | ||
| 186 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); | 180 | void ovs_flow_used(struct sw_flow *, struct sk_buff *); |
| 187 | u64 ovs_flow_used_time(unsigned long flow_jiffies); | 181 | u64 ovs_flow_used_time(unsigned long flow_jiffies); |
| 188 | int ovs_flow_to_nlattrs(const struct sw_flow_key *, | ||
| 189 | const struct sw_flow_key *, struct sk_buff *); | ||
| 190 | int ovs_match_from_nlattrs(struct sw_flow_match *match, | ||
| 191 | const struct nlattr *, | ||
| 192 | const struct nlattr *); | ||
| 193 | int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, | ||
| 194 | const struct nlattr *attr); | ||
| 195 | 182 | ||
| 196 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | 183 | int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *); |
| 197 | #define TBL_MIN_BUCKETS 1024 | ||
| 198 | |||
| 199 | struct flow_table { | ||
| 200 | struct flex_array *buckets; | ||
| 201 | unsigned int count, n_buckets; | ||
| 202 | struct rcu_head rcu; | ||
| 203 | struct list_head *mask_list; | ||
| 204 | int node_ver; | ||
| 205 | u32 hash_seed; | ||
| 206 | bool keep_flows; | ||
| 207 | }; | ||
| 208 | |||
| 209 | static inline int ovs_flow_tbl_count(struct flow_table *table) | ||
| 210 | { | ||
| 211 | return table->count; | ||
| 212 | } | ||
| 213 | |||
| 214 | static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table) | ||
| 215 | { | ||
| 216 | return (table->count > table->n_buckets); | ||
| 217 | } | ||
| 218 | |||
| 219 | struct sw_flow *ovs_flow_lookup(struct flow_table *, | ||
| 220 | const struct sw_flow_key *); | ||
| 221 | struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table, | ||
| 222 | struct sw_flow_match *match); | ||
| 223 | |||
| 224 | void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); | ||
| 225 | struct flow_table *ovs_flow_tbl_alloc(int new_size); | ||
| 226 | struct flow_table *ovs_flow_tbl_expand(struct flow_table *table); | ||
| 227 | struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table); | ||
| 228 | |||
| 229 | void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow); | ||
| 230 | void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow); | ||
| 231 | |||
| 232 | struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx); | ||
| 233 | extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1]; | ||
| 234 | int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr, | ||
| 235 | struct sw_flow_match *match, bool is_mask); | ||
| 236 | int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb, | ||
| 237 | const struct ovs_key_ipv4_tunnel *tun_key, | ||
| 238 | const struct ovs_key_ipv4_tunnel *output); | ||
| 239 | |||
| 240 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 241 | const struct sw_flow_key *key, int key_end); | ||
| 242 | |||
| 243 | struct sw_flow_mask { | ||
| 244 | int ref_count; | ||
| 245 | struct rcu_head rcu; | ||
| 246 | struct list_head list; | ||
| 247 | struct sw_flow_key_range range; | ||
| 248 | struct sw_flow_key key; | ||
| 249 | }; | ||
| 250 | 184 | ||
| 251 | struct sw_flow_mask *ovs_sw_flow_mask_alloc(void); | ||
| 252 | void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *); | ||
| 253 | void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred); | ||
| 254 | void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *); | ||
| 255 | struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *, | ||
| 256 | const struct sw_flow_mask *); | ||
| 257 | void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 258 | const struct sw_flow_mask *mask); | ||
| 259 | #endif /* flow.h */ | 185 | #endif /* flow.h */ |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c new file mode 100644 index 000000000000..2bc1bc1aca3b --- /dev/null +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -0,0 +1,1630 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of version 2 of the GNU General Public | ||
| 6 | * License as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 16 | * 02110-1301, USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include "flow.h" | ||
| 20 | #include "datapath.h" | ||
| 21 | #include <linux/uaccess.h> | ||
| 22 | #include <linux/netdevice.h> | ||
| 23 | #include <linux/etherdevice.h> | ||
| 24 | #include <linux/if_ether.h> | ||
| 25 | #include <linux/if_vlan.h> | ||
| 26 | #include <net/llc_pdu.h> | ||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/jhash.h> | ||
| 29 | #include <linux/jiffies.h> | ||
| 30 | #include <linux/llc.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/in.h> | ||
| 33 | #include <linux/rcupdate.h> | ||
| 34 | #include <linux/if_arp.h> | ||
| 35 | #include <linux/ip.h> | ||
| 36 | #include <linux/ipv6.h> | ||
| 37 | #include <linux/sctp.h> | ||
| 38 | #include <linux/tcp.h> | ||
| 39 | #include <linux/udp.h> | ||
| 40 | #include <linux/icmp.h> | ||
| 41 | #include <linux/icmpv6.h> | ||
| 42 | #include <linux/rculist.h> | ||
| 43 | #include <net/ip.h> | ||
| 44 | #include <net/ipv6.h> | ||
| 45 | #include <net/ndisc.h> | ||
| 46 | |||
| 47 | #include "flow_netlink.h" | ||
| 48 | |||
| 49 | static void update_range__(struct sw_flow_match *match, | ||
| 50 | size_t offset, size_t size, bool is_mask) | ||
| 51 | { | ||
| 52 | struct sw_flow_key_range *range = NULL; | ||
| 53 | size_t start = rounddown(offset, sizeof(long)); | ||
| 54 | size_t end = roundup(offset + size, sizeof(long)); | ||
| 55 | |||
| 56 | if (!is_mask) | ||
| 57 | range = &match->range; | ||
| 58 | else if (match->mask) | ||
| 59 | range = &match->mask->range; | ||
| 60 | |||
| 61 | if (!range) | ||
| 62 | return; | ||
| 63 | |||
| 64 | if (range->start == range->end) { | ||
| 65 | range->start = start; | ||
| 66 | range->end = end; | ||
| 67 | return; | ||
| 68 | } | ||
| 69 | |||
| 70 | if (range->start > start) | ||
| 71 | range->start = start; | ||
| 72 | |||
| 73 | if (range->end < end) | ||
| 74 | range->end = end; | ||
| 75 | } | ||
| 76 | |||
| 77 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | ||
| 78 | do { \ | ||
| 79 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 80 | sizeof((match)->key->field), is_mask); \ | ||
| 81 | if (is_mask) { \ | ||
| 82 | if ((match)->mask) \ | ||
| 83 | (match)->mask->key.field = value; \ | ||
| 84 | } else { \ | ||
| 85 | (match)->key->field = value; \ | ||
| 86 | } \ | ||
| 87 | } while (0) | ||
| 88 | |||
| 89 | #define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \ | ||
| 90 | do { \ | ||
| 91 | update_range__(match, offsetof(struct sw_flow_key, field), \ | ||
| 92 | len, is_mask); \ | ||
| 93 | if (is_mask) { \ | ||
| 94 | if ((match)->mask) \ | ||
| 95 | memcpy(&(match)->mask->key.field, value_p, len);\ | ||
| 96 | } else { \ | ||
| 97 | memcpy(&(match)->key->field, value_p, len); \ | ||
| 98 | } \ | ||
| 99 | } while (0) | ||
| 100 | |||
| 101 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
| 102 | { | ||
| 103 | return range->end - range->start; | ||
| 104 | } | ||
| 105 | |||
/* Validate a parsed flow match against the attribute bitmaps supplied by
 * userspace.  @key_attrs / @mask_attrs are bitmaps of OVS_KEY_ATTR_* types
 * seen in the key and mask netlink blobs.  Two checks are made:
 *  - every attribute the key's own contents imply (key_expected) must be
 *    present in @key_attrs;
 *  - @mask_attrs may only contain attributes that are either always
 *    maskable or whose protocol-selecting field is exact-matched by the
 *    mask (e.g. masking TCP fields requires ip.proto mask == 0xff).
 * Returns true when both checks pass.
 */
static bool match_validate(const struct sw_flow_match *match,
			   u64 key_attrs, u64 mask_attrs)
{
	u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
	u64 mask_allowed = key_attrs;	/* At most allow all key attributes */

	/* The following mask attributes allowed only if they
	 * pass the validation tests. */
	mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
			| (1 << OVS_KEY_ATTR_IPV6)
			| (1 << OVS_KEY_ATTR_TCP)
			| (1 << OVS_KEY_ATTR_TCP_FLAGS)
			| (1 << OVS_KEY_ATTR_UDP)
			| (1 << OVS_KEY_ATTR_SCTP)
			| (1 << OVS_KEY_ATTR_ICMP)
			| (1 << OVS_KEY_ATTR_ICMPV6)
			| (1 << OVS_KEY_ATTR_ARP)
			| (1 << OVS_KEY_ATTR_ND));

	/* Always allowed mask fields. */
	mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
		       | (1 << OVS_KEY_ATTR_IN_PORT)
		       | (1 << OVS_KEY_ATTR_ETHERTYPE));

	/* Check key attributes. */
	if (match->key->eth.type == htons(ETH_P_ARP)
			|| match->key->eth.type == htons(ETH_P_RARP)) {
		key_expected |= 1 << OVS_KEY_ATTR_ARP;
		/* ARP fields are maskable only with an exact EtherType match. */
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
	}

	if (match->key->eth.type == htons(ETH_P_IP)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV4;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;

		/* L4 attributes only apply to non-later fragments. */
		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMP) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
			}
		}
	}

	if (match->key->eth.type == htons(ETH_P_IPV6)) {
		key_expected |= 1 << OVS_KEY_ATTR_IPV6;
		if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
			mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;

		/* L4 attributes only apply to non-later fragments. */
		if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
			if (match->key->ip.proto == IPPROTO_UDP) {
				key_expected |= 1 << OVS_KEY_ATTR_UDP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
			}

			if (match->key->ip.proto == IPPROTO_SCTP) {
				key_expected |= 1 << OVS_KEY_ATTR_SCTP;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
			}

			if (match->key->ip.proto == IPPROTO_TCP) {
				key_expected |= 1 << OVS_KEY_ATTR_TCP;
				key_expected |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				if (match->mask && (match->mask->key.ip.proto == 0xff)) {
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
					mask_allowed |= 1 << OVS_KEY_ATTR_TCP_FLAGS;
				}
			}

			if (match->key->ip.proto == IPPROTO_ICMPV6) {
				key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
				if (match->mask && (match->mask->key.ip.proto == 0xff))
					mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;

				/* ND key expected only for NS/NA message types;
				 * ND mask requires an exact ICMPv6-type match. */
				if (match->key->ipv6.tp.src ==
						htons(NDISC_NEIGHBOUR_SOLICITATION) ||
				    match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
					key_expected |= 1 << OVS_KEY_ATTR_ND;
					if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
						mask_allowed |= 1 << OVS_KEY_ATTR_ND;
				}
			}
		}
	}

	if ((key_attrs & key_expected) != key_expected) {
		/* Key attributes check failed. */
		OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
				key_attrs, key_expected);
		return false;
	}

	if ((mask_attrs & mask_allowed) != mask_attrs) {
		/* Mask attributes check failed. */
		OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
				mask_attrs, mask_allowed);
		return false;
	}

	return true;
}
| 232 | |||
/* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.
 * A value of -1 marks attributes whose payload length is variable
 * (nested attributes); __parse_flow_nlattrs() skips the length check
 * for those.
 */
static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
	[OVS_KEY_ATTR_ENCAP] = -1,
	[OVS_KEY_ATTR_PRIORITY] = sizeof(u32),
	[OVS_KEY_ATTR_IN_PORT] = sizeof(u32),
	[OVS_KEY_ATTR_SKB_MARK] = sizeof(u32),
	[OVS_KEY_ATTR_ETHERNET] = sizeof(struct ovs_key_ethernet),
	[OVS_KEY_ATTR_VLAN] = sizeof(__be16),
	[OVS_KEY_ATTR_ETHERTYPE] = sizeof(__be16),
	[OVS_KEY_ATTR_IPV4] = sizeof(struct ovs_key_ipv4),
	[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
	[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
	[OVS_KEY_ATTR_TCP_FLAGS] = sizeof(__be16),
	[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
	[OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
	[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
	[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
	[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
	[OVS_KEY_ATTR_ND] = sizeof(struct ovs_key_nd),
	[OVS_KEY_ATTR_TUNNEL] = -1,
};
| 254 | |||
| 255 | static bool is_all_zero(const u8 *fp, size_t size) | ||
| 256 | { | ||
| 257 | int i; | ||
| 258 | |||
| 259 | if (!fp) | ||
| 260 | return false; | ||
| 261 | |||
| 262 | for (i = 0; i < size; i++) | ||
| 263 | if (fp[i]) | ||
| 264 | return false; | ||
| 265 | |||
| 266 | return true; | ||
| 267 | } | ||
| 268 | |||
| 269 | static int __parse_flow_nlattrs(const struct nlattr *attr, | ||
| 270 | const struct nlattr *a[], | ||
| 271 | u64 *attrsp, bool nz) | ||
| 272 | { | ||
| 273 | const struct nlattr *nla; | ||
| 274 | u64 attrs; | ||
| 275 | int rem; | ||
| 276 | |||
| 277 | attrs = *attrsp; | ||
| 278 | nla_for_each_nested(nla, attr, rem) { | ||
| 279 | u16 type = nla_type(nla); | ||
| 280 | int expected_len; | ||
| 281 | |||
| 282 | if (type > OVS_KEY_ATTR_MAX) { | ||
| 283 | OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", | ||
| 284 | type, OVS_KEY_ATTR_MAX); | ||
| 285 | return -EINVAL; | ||
| 286 | } | ||
| 287 | |||
| 288 | if (attrs & (1 << type)) { | ||
| 289 | OVS_NLERR("Duplicate key attribute (type %d).\n", type); | ||
| 290 | return -EINVAL; | ||
| 291 | } | ||
| 292 | |||
| 293 | expected_len = ovs_key_lens[type]; | ||
| 294 | if (nla_len(nla) != expected_len && expected_len != -1) { | ||
| 295 | OVS_NLERR("Key attribute has unexpected length (type=%d" | ||
| 296 | ", length=%d, expected=%d).\n", type, | ||
| 297 | nla_len(nla), expected_len); | ||
| 298 | return -EINVAL; | ||
| 299 | } | ||
| 300 | |||
| 301 | if (!nz || !is_all_zero(nla_data(nla), expected_len)) { | ||
| 302 | attrs |= 1 << type; | ||
| 303 | a[type] = nla; | ||
| 304 | } | ||
| 305 | } | ||
| 306 | if (rem) { | ||
| 307 | OVS_NLERR("Message has %d unknown bytes.\n", rem); | ||
| 308 | return -EINVAL; | ||
| 309 | } | ||
| 310 | |||
| 311 | *attrsp = attrs; | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
/* Parse a flow-mask attribute blob: all-zero payloads are dropped
 * (treated as wildcard) via the nz=true path of __parse_flow_nlattrs().
 */
static int parse_flow_mask_nlattrs(const struct nlattr *attr,
				   const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, true);
}
| 320 | |||
/* Parse a flow-key attribute blob; zero payloads are kept (nz=false). */
static int parse_flow_nlattrs(const struct nlattr *attr,
			      const struct nlattr *a[], u64 *attrsp)
{
	return __parse_flow_nlattrs(attr, a, attrsp, false);
}
| 326 | |||
| 327 | static int ipv4_tun_from_nlattr(const struct nlattr *attr, | ||
| 328 | struct sw_flow_match *match, bool is_mask) | ||
| 329 | { | ||
| 330 | struct nlattr *a; | ||
| 331 | int rem; | ||
| 332 | bool ttl = false; | ||
| 333 | __be16 tun_flags = 0; | ||
| 334 | |||
| 335 | nla_for_each_nested(a, attr, rem) { | ||
| 336 | int type = nla_type(a); | ||
| 337 | static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { | ||
| 338 | [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), | ||
| 339 | [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), | ||
| 340 | [OVS_TUNNEL_KEY_ATTR_IPV4_DST] = sizeof(u32), | ||
| 341 | [OVS_TUNNEL_KEY_ATTR_TOS] = 1, | ||
| 342 | [OVS_TUNNEL_KEY_ATTR_TTL] = 1, | ||
| 343 | [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, | ||
| 344 | [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, | ||
| 345 | }; | ||
| 346 | |||
| 347 | if (type > OVS_TUNNEL_KEY_ATTR_MAX) { | ||
| 348 | OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", | ||
| 349 | type, OVS_TUNNEL_KEY_ATTR_MAX); | ||
| 350 | return -EINVAL; | ||
| 351 | } | ||
| 352 | |||
| 353 | if (ovs_tunnel_key_lens[type] != nla_len(a)) { | ||
| 354 | OVS_NLERR("IPv4 tunnel attribute type has unexpected " | ||
| 355 | " length (type=%d, length=%d, expected=%d).\n", | ||
| 356 | type, nla_len(a), ovs_tunnel_key_lens[type]); | ||
| 357 | return -EINVAL; | ||
| 358 | } | ||
| 359 | |||
| 360 | switch (type) { | ||
| 361 | case OVS_TUNNEL_KEY_ATTR_ID: | ||
| 362 | SW_FLOW_KEY_PUT(match, tun_key.tun_id, | ||
| 363 | nla_get_be64(a), is_mask); | ||
| 364 | tun_flags |= TUNNEL_KEY; | ||
| 365 | break; | ||
| 366 | case OVS_TUNNEL_KEY_ATTR_IPV4_SRC: | ||
| 367 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_src, | ||
| 368 | nla_get_be32(a), is_mask); | ||
| 369 | break; | ||
| 370 | case OVS_TUNNEL_KEY_ATTR_IPV4_DST: | ||
| 371 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst, | ||
| 372 | nla_get_be32(a), is_mask); | ||
| 373 | break; | ||
| 374 | case OVS_TUNNEL_KEY_ATTR_TOS: | ||
| 375 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos, | ||
| 376 | nla_get_u8(a), is_mask); | ||
| 377 | break; | ||
| 378 | case OVS_TUNNEL_KEY_ATTR_TTL: | ||
| 379 | SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl, | ||
| 380 | nla_get_u8(a), is_mask); | ||
| 381 | ttl = true; | ||
| 382 | break; | ||
| 383 | case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT: | ||
| 384 | tun_flags |= TUNNEL_DONT_FRAGMENT; | ||
| 385 | break; | ||
| 386 | case OVS_TUNNEL_KEY_ATTR_CSUM: | ||
| 387 | tun_flags |= TUNNEL_CSUM; | ||
| 388 | break; | ||
| 389 | default: | ||
| 390 | return -EINVAL; | ||
| 391 | } | ||
| 392 | } | ||
| 393 | |||
| 394 | SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); | ||
| 395 | |||
| 396 | if (rem > 0) { | ||
| 397 | OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); | ||
| 398 | return -EINVAL; | ||
| 399 | } | ||
| 400 | |||
| 401 | if (!is_mask) { | ||
| 402 | if (!match->key->tun_key.ipv4_dst) { | ||
| 403 | OVS_NLERR("IPv4 tunnel destination address is zero.\n"); | ||
| 404 | return -EINVAL; | ||
| 405 | } | ||
| 406 | |||
| 407 | if (!ttl) { | ||
| 408 | OVS_NLERR("IPv4 tunnel TTL not specified.\n"); | ||
| 409 | return -EINVAL; | ||
| 410 | } | ||
| 411 | } | ||
| 412 | |||
| 413 | return 0; | ||
| 414 | } | ||
| 415 | |||
/* Emit a nested OVS_KEY_ATTR_TUNNEL attribute for @output into @skb.
 * @tun_key carries the flags that select which sub-attributes to emit
 * (when emitting a mask, @tun_key is the key and @output the mask values).
 * TTL is always emitted; ID/SRC/DST/TOS only when flagged or non-zero.
 * Returns 0 or -EMSGSIZE if @skb runs out of tailroom.
 * NOTE(review): the opened nest is not cancelled on the -EMSGSIZE paths;
 * presumably callers discard the whole skb on error — verify at call sites.
 */
static int ipv4_tun_to_nlattr(struct sk_buff *skb,
			      const struct ovs_key_ipv4_tunnel *tun_key,
			      const struct ovs_key_ipv4_tunnel *output)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, OVS_KEY_ATTR_TUNNEL);
	if (!nla)
		return -EMSGSIZE;

	if (output->tun_flags & TUNNEL_KEY &&
	    nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
		return -EMSGSIZE;
	if (output->ipv4_src &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
		return -EMSGSIZE;
	if (output->ipv4_dst &&
	    nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
		return -EMSGSIZE;
	if (output->ipv4_tos &&
	    nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
		return -EMSGSIZE;
	if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
		return -EMSGSIZE;
	if ((output->tun_flags & TUNNEL_CSUM) &&
	    nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
		return -EMSGSIZE;

	nla_nest_end(skb, nla);
	return 0;
}
| 450 | |||
| 451 | |||
/* Consume the metadata attributes (priority, in_port, skb_mark, tunnel)
 * from @a into @match, clearing each consumed bit from *@attrs so the
 * caller can verify nothing unexpected remains.  Returns 0 or -EINVAL.
 */
static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
				 const struct nlattr **a, bool is_mask)
{
	if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
		SW_FLOW_KEY_PUT(match, phy.priority,
			  nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
		u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);

		if (is_mask)
			in_port = 0xffffffff; /* Always exact match in_port. */
		else if (in_port >= DP_MAX_PORTS)
			return -EINVAL;

		SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
	} else if (!is_mask) {
		/* No in_port given: mark the key's port as "none". */
		SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
	}

	if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
		uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);

		SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
		*attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
	}
	if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
		if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
					 is_mask))
			return -EINVAL;
		*attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
	}
	return 0;
}
| 489 | |||
/* Fill @match from the parsed attribute array @a.  Metadata attributes
 * are handled first, then each protocol layer in turn; every consumed
 * attribute's bit is cleared from @attrs, and any bit left over at the
 * end is an unexpected attribute (-EINVAL).  @orig_attrs is kept so that
 * L4 attributes (TCP/UDP/SCTP) can be routed to the ipv4.tp or ipv6.tp
 * union member according to which L3 attribute was supplied.
 */
static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
				const struct nlattr **a, bool is_mask)
{
	int err;
	u64 orig_attrs = attrs;

	err = metadata_from_nlattrs(match, &attrs, a, is_mask);
	if (err)
		return err;

	if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
		const struct ovs_key_ethernet *eth_key;

		eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
		SW_FLOW_KEY_MEMCPY(match, eth.src,
				eth_key->eth_src, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, eth.dst,
				eth_key->eth_dst, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
	}

	if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
		__be16 tci;

		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		/* VLAN_TAG_PRESENT must be set in both key and mask. */
		if (!(tci & htons(VLAN_TAG_PRESENT))) {
			if (is_mask)
				OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
			else
				OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");

			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
	} else if (!is_mask)
		/* No VLAN attr on a key: exact-match TCI as all-ones. */
		SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);

	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
		__be16 eth_type;

		eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
		if (is_mask) {
			/* Always exact match EtherType. */
			eth_type = htons(0xffff);
		} else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
			OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
					ntohs(eth_type), ETH_P_802_3_MIN);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
	} else if (!is_mask) {
		/* Absent EtherType on a key means a non-Ethernet-II frame. */
		SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
		const struct ovs_key_ipv4 *ipv4_key;

		ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
		if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
				ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv4_key->ipv4_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv4_key->ipv4_tos, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv4_key->ipv4_ttl, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv4_key->ipv4_frag, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				ipv4_key->ipv4_src, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
				ipv4_key->ipv4_dst, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
	}

	if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
		const struct ovs_key_ipv6 *ipv6_key;

		ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
		if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
			OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
				ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
			return -EINVAL;
		}
		SW_FLOW_KEY_PUT(match, ipv6.label,
				ipv6_key->ipv6_label, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ipv6_key->ipv6_proto, is_mask);
		SW_FLOW_KEY_PUT(match, ip.tos,
				ipv6_key->ipv6_tclass, is_mask);
		SW_FLOW_KEY_PUT(match, ip.ttl,
				ipv6_key->ipv6_hlimit, is_mask);
		SW_FLOW_KEY_PUT(match, ip.frag,
				ipv6_key->ipv6_frag, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
				ipv6_key->ipv6_src,
				sizeof(match->key->ipv6.addr.src),
				is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
				ipv6_key->ipv6_dst,
				sizeof(match->key->ipv6.addr.dst),
				is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
		const struct ovs_key_arp *arp_key;

		arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
		/* Only 8-bit ARP opcodes fit in ip.proto below. */
		if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
			OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
				  arp_key->arp_op);
			return -EINVAL;
		}

		SW_FLOW_KEY_PUT(match, ipv4.addr.src,
				arp_key->arp_sip, is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
			arp_key->arp_tip, is_mask);
		SW_FLOW_KEY_PUT(match, ip.proto,
				ntohs(arp_key->arp_op), is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
				arp_key->arp_sha, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
				arp_key->arp_tha, ETH_ALEN, is_mask);

		attrs &= ~(1 << OVS_KEY_ATTR_ARP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
		const struct ovs_key_tcp *tcp_key;

		tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
		/* Route ports to the union member matching the L3 attr. */
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					tcp_key->tcp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					tcp_key->tcp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					tcp_key->tcp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) {
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.flags,
					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
					is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.flags,
					nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]),
					is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS);
	}

	if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
		const struct ovs_key_udp *udp_key;

		udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					udp_key->udp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					udp_key->udp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					udp_key->udp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_UDP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
		const struct ovs_key_sctp *sctp_key;

		sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
		if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
			SW_FLOW_KEY_PUT(match, ipv4.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
					sctp_key->sctp_dst, is_mask);
		} else {
			SW_FLOW_KEY_PUT(match, ipv6.tp.src,
					sctp_key->sctp_src, is_mask);
			SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
					sctp_key->sctp_dst, is_mask);
		}
		attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
		const struct ovs_key_icmp *icmp_key;

		/* ICMP type/code are stored in the transport-port slots. */
		icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
		SW_FLOW_KEY_PUT(match, ipv4.tp.src,
				htons(icmp_key->icmp_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
				htons(icmp_key->icmp_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
		const struct ovs_key_icmpv6 *icmpv6_key;

		icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
		SW_FLOW_KEY_PUT(match, ipv6.tp.src,
				htons(icmpv6_key->icmpv6_type), is_mask);
		SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
				htons(icmpv6_key->icmpv6_code), is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
	}

	if (attrs & (1 << OVS_KEY_ATTR_ND)) {
		const struct ovs_key_nd *nd_key;

		nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
			nd_key->nd_target,
			sizeof(match->key->ipv6.nd.target),
			is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
			nd_key->nd_sll, ETH_ALEN, is_mask);
		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
				nd_key->nd_tll, ETH_ALEN, is_mask);
		attrs &= ~(1 << OVS_KEY_ATTR_ND);
	}

	/* Any attribute not consumed above is unexpected here. */
	if (attrs != 0)
		return -EINVAL;

	return 0;
}
| 736 | |||
| 737 | static void sw_flow_mask_set(struct sw_flow_mask *mask, | ||
| 738 | struct sw_flow_key_range *range, u8 val) | ||
| 739 | { | ||
| 740 | u8 *m = (u8 *)&mask->key + range->start; | ||
| 741 | |||
| 742 | mask->range = *range; | ||
| 743 | memset(m, val, range_n_bytes(range)); | ||
| 744 | } | ||
| 745 | |||
/**
 * ovs_nla_get_match - parses Netlink attributes into a flow key and
 * mask. In case the 'mask' is NULL, the flow is treated as exact match
 * flow. Otherwise, it is treated as a wildcarded flow, except the mask
 * does not include any don't care bit.
 * @match: receives the extracted flow match information.
 * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence. The fields should be those of the packet that triggered the
 * creation of this flow.
 * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
 * attribute specifies the mask field of the wildcarded flow.
 *
 * Returns 0 on success or -EINVAL on malformed or inconsistent attributes.
 */
int ovs_nla_get_match(struct sw_flow_match *match,
		      const struct nlattr *key,
		      const struct nlattr *mask)
{
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	const struct nlattr *encap;
	u64 key_attrs = 0;
	u64 mask_attrs = 0;
	bool encap_valid = false;
	int err;

	err = parse_flow_nlattrs(key, a, &key_attrs);
	if (err)
		return err;

	/* An 802.1Q EtherType means the real key is nested inside
	 * OVS_KEY_ATTR_ENCAP; unwrap it and re-parse. */
	if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
	    (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
	    (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
		__be16 tci;

		if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
		      (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
			OVS_NLERR("Invalid Vlan frame.\n");
			return -EINVAL;
		}

		key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
		tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
		encap = a[OVS_KEY_ATTR_ENCAP];
		key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
		encap_valid = true;

		if (tci & htons(VLAN_TAG_PRESENT)) {
			err = parse_flow_nlattrs(encap, a, &key_attrs);
			if (err)
				return err;
		} else if (!tci) {
			/* Corner case for truncated 802.1Q header. */
			if (nla_len(encap)) {
				OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
				return -EINVAL;
			}
		} else {
			OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
			return -EINVAL;
		}
	}

	err = ovs_key_from_nlattrs(match, key_attrs, a, false);
	if (err)
		return err;

	if (mask) {
		err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
		if (err)
			return err;

		/* Unwrap a VLAN-encapsulated mask; only legal when the key
		 * itself was VLAN-encapsulated. */
		if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) {
			__be16 eth_type = 0;
			__be16 tci = 0;

			if (!encap_valid) {
				OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
				return -EINVAL;
			}

			mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
			if (a[OVS_KEY_ATTR_ETHERTYPE])
				eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);

			if (eth_type == htons(0xffff)) {
				mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
				encap = a[OVS_KEY_ATTR_ENCAP];
				err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
			} else {
				OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
						ntohs(eth_type));
				return -EINVAL;
			}

			if (a[OVS_KEY_ATTR_VLAN])
				tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);

			if (!(tci & htons(VLAN_TAG_PRESENT))) {
				OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
				return -EINVAL;
			}
		}

		err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
		if (err)
			return err;
	} else {
		/* Populate exact match flow's key mask. */
		if (match->mask)
			sw_flow_mask_set(match->mask, &match->range, 0xff);
	}

	if (!match_validate(match, key_attrs, mask_attrs))
		return -EINVAL;

	return 0;
}
| 861 | |||
/**
 * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key.
 * @flow: Receives extracted in_port, priority, tun_key and skb_mark.
 * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
 * sequence.
 *
 * This parses a series of Netlink attributes that form a flow key, which must
 * take the same form accepted by flow_from_nlattrs(), but only enough of it to
 * get the metadata, that is, the parts of the flow key that cannot be
 * extracted from the packet itself.
 *
 * Returns 0 on success or a negative errno.
 */

int ovs_nla_get_flow_metadata(struct sw_flow *flow,
			      const struct nlattr *attr)
{
	struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
	const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
	u64 attrs = 0;
	int err;
	struct sw_flow_match match;

	/* Default metadata before parsing: no port, priority or mark. */
	flow->key.phy.in_port = DP_MAX_PORTS;
	flow->key.phy.priority = 0;
	flow->key.phy.skb_mark = 0;
	memset(tun_key, 0, sizeof(flow->key.tun_key));

	err = parse_flow_nlattrs(attr, a, &attrs);
	if (err)
		return -EINVAL;

	memset(&match, 0, sizeof(match));
	match.key = &flow->key;	/* no mask: only the key side is filled */

	err = metadata_from_nlattrs(&match, &attrs, a, false);
	if (err)
		return err;

	return 0;
}
| 901 | |||
| 902 | int ovs_nla_put_flow(const struct sw_flow_key *swkey, | ||
| 903 | const struct sw_flow_key *output, struct sk_buff *skb) | ||
| 904 | { | ||
| 905 | struct ovs_key_ethernet *eth_key; | ||
| 906 | struct nlattr *nla, *encap; | ||
| 907 | bool is_mask = (swkey != output); | ||
| 908 | |||
| 909 | if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority)) | ||
| 910 | goto nla_put_failure; | ||
| 911 | |||
| 912 | if ((swkey->tun_key.ipv4_dst || is_mask) && | ||
| 913 | ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key)) | ||
| 914 | goto nla_put_failure; | ||
| 915 | |||
| 916 | if (swkey->phy.in_port == DP_MAX_PORTS) { | ||
| 917 | if (is_mask && (output->phy.in_port == 0xffff)) | ||
| 918 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff)) | ||
| 919 | goto nla_put_failure; | ||
| 920 | } else { | ||
| 921 | u16 upper_u16; | ||
| 922 | upper_u16 = !is_mask ? 0 : 0xffff; | ||
| 923 | |||
| 924 | if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, | ||
| 925 | (upper_u16 << 16) | output->phy.in_port)) | ||
| 926 | goto nla_put_failure; | ||
| 927 | } | ||
| 928 | |||
| 929 | if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark)) | ||
| 930 | goto nla_put_failure; | ||
| 931 | |||
| 932 | nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); | ||
| 933 | if (!nla) | ||
| 934 | goto nla_put_failure; | ||
| 935 | |||
| 936 | eth_key = nla_data(nla); | ||
| 937 | memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN); | ||
| 938 | memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN); | ||
| 939 | |||
| 940 | if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { | ||
| 941 | __be16 eth_type; | ||
| 942 | eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff); | ||
| 943 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) || | ||
| 944 | nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci)) | ||
| 945 | goto nla_put_failure; | ||
| 946 | encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); | ||
| 947 | if (!swkey->eth.tci) | ||
| 948 | goto unencap; | ||
| 949 | } else | ||
| 950 | encap = NULL; | ||
| 951 | |||
| 952 | if (swkey->eth.type == htons(ETH_P_802_2)) { | ||
| 953 | /* | ||
| 954 | * Ethertype 802.2 is represented in the netlink with omitted | ||
| 955 | * OVS_KEY_ATTR_ETHERTYPE in the flow key attribute, and | ||
| 956 | * 0xffff in the mask attribute. Ethertype can also | ||
| 957 | * be wildcarded. | ||
| 958 | */ | ||
| 959 | if (is_mask && output->eth.type) | ||
| 960 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, | ||
| 961 | output->eth.type)) | ||
| 962 | goto nla_put_failure; | ||
| 963 | goto unencap; | ||
| 964 | } | ||
| 965 | |||
| 966 | if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type)) | ||
| 967 | goto nla_put_failure; | ||
| 968 | |||
| 969 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 970 | struct ovs_key_ipv4 *ipv4_key; | ||
| 971 | |||
| 972 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV4, sizeof(*ipv4_key)); | ||
| 973 | if (!nla) | ||
| 974 | goto nla_put_failure; | ||
| 975 | ipv4_key = nla_data(nla); | ||
| 976 | ipv4_key->ipv4_src = output->ipv4.addr.src; | ||
| 977 | ipv4_key->ipv4_dst = output->ipv4.addr.dst; | ||
| 978 | ipv4_key->ipv4_proto = output->ip.proto; | ||
| 979 | ipv4_key->ipv4_tos = output->ip.tos; | ||
| 980 | ipv4_key->ipv4_ttl = output->ip.ttl; | ||
| 981 | ipv4_key->ipv4_frag = output->ip.frag; | ||
| 982 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 983 | struct ovs_key_ipv6 *ipv6_key; | ||
| 984 | |||
| 985 | nla = nla_reserve(skb, OVS_KEY_ATTR_IPV6, sizeof(*ipv6_key)); | ||
| 986 | if (!nla) | ||
| 987 | goto nla_put_failure; | ||
| 988 | ipv6_key = nla_data(nla); | ||
| 989 | memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src, | ||
| 990 | sizeof(ipv6_key->ipv6_src)); | ||
| 991 | memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst, | ||
| 992 | sizeof(ipv6_key->ipv6_dst)); | ||
| 993 | ipv6_key->ipv6_label = output->ipv6.label; | ||
| 994 | ipv6_key->ipv6_proto = output->ip.proto; | ||
| 995 | ipv6_key->ipv6_tclass = output->ip.tos; | ||
| 996 | ipv6_key->ipv6_hlimit = output->ip.ttl; | ||
| 997 | ipv6_key->ipv6_frag = output->ip.frag; | ||
| 998 | } else if (swkey->eth.type == htons(ETH_P_ARP) || | ||
| 999 | swkey->eth.type == htons(ETH_P_RARP)) { | ||
| 1000 | struct ovs_key_arp *arp_key; | ||
| 1001 | |||
| 1002 | nla = nla_reserve(skb, OVS_KEY_ATTR_ARP, sizeof(*arp_key)); | ||
| 1003 | if (!nla) | ||
| 1004 | goto nla_put_failure; | ||
| 1005 | arp_key = nla_data(nla); | ||
| 1006 | memset(arp_key, 0, sizeof(struct ovs_key_arp)); | ||
| 1007 | arp_key->arp_sip = output->ipv4.addr.src; | ||
| 1008 | arp_key->arp_tip = output->ipv4.addr.dst; | ||
| 1009 | arp_key->arp_op = htons(output->ip.proto); | ||
| 1010 | memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN); | ||
| 1011 | memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN); | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | if ((swkey->eth.type == htons(ETH_P_IP) || | ||
| 1015 | swkey->eth.type == htons(ETH_P_IPV6)) && | ||
| 1016 | swkey->ip.frag != OVS_FRAG_TYPE_LATER) { | ||
| 1017 | |||
| 1018 | if (swkey->ip.proto == IPPROTO_TCP) { | ||
| 1019 | struct ovs_key_tcp *tcp_key; | ||
| 1020 | |||
| 1021 | nla = nla_reserve(skb, OVS_KEY_ATTR_TCP, sizeof(*tcp_key)); | ||
| 1022 | if (!nla) | ||
| 1023 | goto nla_put_failure; | ||
| 1024 | tcp_key = nla_data(nla); | ||
| 1025 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1026 | tcp_key->tcp_src = output->ipv4.tp.src; | ||
| 1027 | tcp_key->tcp_dst = output->ipv4.tp.dst; | ||
| 1028 | if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS, | ||
| 1029 | output->ipv4.tp.flags)) | ||
| 1030 | goto nla_put_failure; | ||
| 1031 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1032 | tcp_key->tcp_src = output->ipv6.tp.src; | ||
| 1033 | tcp_key->tcp_dst = output->ipv6.tp.dst; | ||
| 1034 | if (nla_put_be16(skb, OVS_KEY_ATTR_TCP_FLAGS, | ||
| 1035 | output->ipv6.tp.flags)) | ||
| 1036 | goto nla_put_failure; | ||
| 1037 | } | ||
| 1038 | } else if (swkey->ip.proto == IPPROTO_UDP) { | ||
| 1039 | struct ovs_key_udp *udp_key; | ||
| 1040 | |||
| 1041 | nla = nla_reserve(skb, OVS_KEY_ATTR_UDP, sizeof(*udp_key)); | ||
| 1042 | if (!nla) | ||
| 1043 | goto nla_put_failure; | ||
| 1044 | udp_key = nla_data(nla); | ||
| 1045 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1046 | udp_key->udp_src = output->ipv4.tp.src; | ||
| 1047 | udp_key->udp_dst = output->ipv4.tp.dst; | ||
| 1048 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1049 | udp_key->udp_src = output->ipv6.tp.src; | ||
| 1050 | udp_key->udp_dst = output->ipv6.tp.dst; | ||
| 1051 | } | ||
| 1052 | } else if (swkey->ip.proto == IPPROTO_SCTP) { | ||
| 1053 | struct ovs_key_sctp *sctp_key; | ||
| 1054 | |||
| 1055 | nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key)); | ||
| 1056 | if (!nla) | ||
| 1057 | goto nla_put_failure; | ||
| 1058 | sctp_key = nla_data(nla); | ||
| 1059 | if (swkey->eth.type == htons(ETH_P_IP)) { | ||
| 1060 | sctp_key->sctp_src = swkey->ipv4.tp.src; | ||
| 1061 | sctp_key->sctp_dst = swkey->ipv4.tp.dst; | ||
| 1062 | } else if (swkey->eth.type == htons(ETH_P_IPV6)) { | ||
| 1063 | sctp_key->sctp_src = swkey->ipv6.tp.src; | ||
| 1064 | sctp_key->sctp_dst = swkey->ipv6.tp.dst; | ||
| 1065 | } | ||
| 1066 | } else if (swkey->eth.type == htons(ETH_P_IP) && | ||
| 1067 | swkey->ip.proto == IPPROTO_ICMP) { | ||
| 1068 | struct ovs_key_icmp *icmp_key; | ||
| 1069 | |||
| 1070 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMP, sizeof(*icmp_key)); | ||
| 1071 | if (!nla) | ||
| 1072 | goto nla_put_failure; | ||
| 1073 | icmp_key = nla_data(nla); | ||
| 1074 | icmp_key->icmp_type = ntohs(output->ipv4.tp.src); | ||
| 1075 | icmp_key->icmp_code = ntohs(output->ipv4.tp.dst); | ||
| 1076 | } else if (swkey->eth.type == htons(ETH_P_IPV6) && | ||
| 1077 | swkey->ip.proto == IPPROTO_ICMPV6) { | ||
| 1078 | struct ovs_key_icmpv6 *icmpv6_key; | ||
| 1079 | |||
| 1080 | nla = nla_reserve(skb, OVS_KEY_ATTR_ICMPV6, | ||
| 1081 | sizeof(*icmpv6_key)); | ||
| 1082 | if (!nla) | ||
| 1083 | goto nla_put_failure; | ||
| 1084 | icmpv6_key = nla_data(nla); | ||
| 1085 | icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src); | ||
| 1086 | icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst); | ||
| 1087 | |||
| 1088 | if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION || | ||
| 1089 | icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) { | ||
| 1090 | struct ovs_key_nd *nd_key; | ||
| 1091 | |||
| 1092 | nla = nla_reserve(skb, OVS_KEY_ATTR_ND, sizeof(*nd_key)); | ||
| 1093 | if (!nla) | ||
| 1094 | goto nla_put_failure; | ||
| 1095 | nd_key = nla_data(nla); | ||
| 1096 | memcpy(nd_key->nd_target, &output->ipv6.nd.target, | ||
| 1097 | sizeof(nd_key->nd_target)); | ||
| 1098 | memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN); | ||
| 1099 | memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN); | ||
| 1100 | } | ||
| 1101 | } | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | unencap: | ||
| 1105 | if (encap) | ||
| 1106 | nla_nest_end(skb, encap); | ||
| 1107 | |||
| 1108 | return 0; | ||
| 1109 | |||
| 1110 | nla_put_failure: | ||
| 1111 | return -EMSGSIZE; | ||
| 1112 | } | ||
| 1113 | |||
| 1114 | #define MAX_ACTIONS_BUFSIZE (32 * 1024) | ||
| 1115 | |||
| 1116 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size) | ||
| 1117 | { | ||
| 1118 | struct sw_flow_actions *sfa; | ||
| 1119 | |||
| 1120 | if (size > MAX_ACTIONS_BUFSIZE) | ||
| 1121 | return ERR_PTR(-EINVAL); | ||
| 1122 | |||
| 1123 | sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); | ||
| 1124 | if (!sfa) | ||
| 1125 | return ERR_PTR(-ENOMEM); | ||
| 1126 | |||
| 1127 | sfa->actions_len = 0; | ||
| 1128 | return sfa; | ||
| 1129 | } | ||
| 1130 | |||
| 1131 | /* RCU callback used by ovs_nla_free_flow_actions. */ | ||
| 1132 | static void rcu_free_acts_callback(struct rcu_head *rcu) | ||
| 1133 | { | ||
| 1134 | struct sw_flow_actions *sf_acts = container_of(rcu, | ||
| 1135 | struct sw_flow_actions, rcu); | ||
| 1136 | kfree(sf_acts); | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | /* Schedules 'sf_acts' to be freed after the next RCU grace period. | ||
| 1140 | * The caller must hold rcu_read_lock for this to be sensible. */ | ||
| 1141 | void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) | ||
| 1142 | { | ||
| 1143 | call_rcu(&sf_acts->rcu, rcu_free_acts_callback); | ||
| 1144 | } | ||
| 1145 | |||
| 1146 | static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, | ||
| 1147 | int attr_len) | ||
| 1148 | { | ||
| 1149 | |||
| 1150 | struct sw_flow_actions *acts; | ||
| 1151 | int new_acts_size; | ||
| 1152 | int req_size = NLA_ALIGN(attr_len); | ||
| 1153 | int next_offset = offsetof(struct sw_flow_actions, actions) + | ||
| 1154 | (*sfa)->actions_len; | ||
| 1155 | |||
| 1156 | if (req_size <= (ksize(*sfa) - next_offset)) | ||
| 1157 | goto out; | ||
| 1158 | |||
| 1159 | new_acts_size = ksize(*sfa) * 2; | ||
| 1160 | |||
| 1161 | if (new_acts_size > MAX_ACTIONS_BUFSIZE) { | ||
| 1162 | if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) | ||
| 1163 | return ERR_PTR(-EMSGSIZE); | ||
| 1164 | new_acts_size = MAX_ACTIONS_BUFSIZE; | ||
| 1165 | } | ||
| 1166 | |||
| 1167 | acts = ovs_nla_alloc_flow_actions(new_acts_size); | ||
| 1168 | if (IS_ERR(acts)) | ||
| 1169 | return (void *)acts; | ||
| 1170 | |||
| 1171 | memcpy(acts->actions, (*sfa)->actions, (*sfa)->actions_len); | ||
| 1172 | acts->actions_len = (*sfa)->actions_len; | ||
| 1173 | kfree(*sfa); | ||
| 1174 | *sfa = acts; | ||
| 1175 | |||
| 1176 | out: | ||
| 1177 | (*sfa)->actions_len += req_size; | ||
| 1178 | return (struct nlattr *) ((unsigned char *)(*sfa) + next_offset); | ||
| 1179 | } | ||
| 1180 | |||
| 1181 | static int add_action(struct sw_flow_actions **sfa, int attrtype, void *data, int len) | ||
| 1182 | { | ||
| 1183 | struct nlattr *a; | ||
| 1184 | |||
| 1185 | a = reserve_sfa_size(sfa, nla_attr_size(len)); | ||
| 1186 | if (IS_ERR(a)) | ||
| 1187 | return PTR_ERR(a); | ||
| 1188 | |||
| 1189 | a->nla_type = attrtype; | ||
| 1190 | a->nla_len = nla_attr_size(len); | ||
| 1191 | |||
| 1192 | if (data) | ||
| 1193 | memcpy(nla_data(a), data, len); | ||
| 1194 | memset((unsigned char *) a + a->nla_len, 0, nla_padlen(len)); | ||
| 1195 | |||
| 1196 | return 0; | ||
| 1197 | } | ||
| 1198 | |||
| 1199 | static inline int add_nested_action_start(struct sw_flow_actions **sfa, | ||
| 1200 | int attrtype) | ||
| 1201 | { | ||
| 1202 | int used = (*sfa)->actions_len; | ||
| 1203 | int err; | ||
| 1204 | |||
| 1205 | err = add_action(sfa, attrtype, NULL, 0); | ||
| 1206 | if (err) | ||
| 1207 | return err; | ||
| 1208 | |||
| 1209 | return used; | ||
| 1210 | } | ||
| 1211 | |||
| 1212 | static inline void add_nested_action_end(struct sw_flow_actions *sfa, | ||
| 1213 | int st_offset) | ||
| 1214 | { | ||
| 1215 | struct nlattr *a = (struct nlattr *) ((unsigned char *)sfa->actions + | ||
| 1216 | st_offset); | ||
| 1217 | |||
| 1218 | a->nla_len = sfa->actions_len - st_offset; | ||
| 1219 | } | ||
| 1220 | |||
| 1221 | static int validate_and_copy_sample(const struct nlattr *attr, | ||
| 1222 | const struct sw_flow_key *key, int depth, | ||
| 1223 | struct sw_flow_actions **sfa) | ||
| 1224 | { | ||
| 1225 | const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; | ||
| 1226 | const struct nlattr *probability, *actions; | ||
| 1227 | const struct nlattr *a; | ||
| 1228 | int rem, start, err, st_acts; | ||
| 1229 | |||
| 1230 | memset(attrs, 0, sizeof(attrs)); | ||
| 1231 | nla_for_each_nested(a, attr, rem) { | ||
| 1232 | int type = nla_type(a); | ||
| 1233 | if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type]) | ||
| 1234 | return -EINVAL; | ||
| 1235 | attrs[type] = a; | ||
| 1236 | } | ||
| 1237 | if (rem) | ||
| 1238 | return -EINVAL; | ||
| 1239 | |||
| 1240 | probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY]; | ||
| 1241 | if (!probability || nla_len(probability) != sizeof(u32)) | ||
| 1242 | return -EINVAL; | ||
| 1243 | |||
| 1244 | actions = attrs[OVS_SAMPLE_ATTR_ACTIONS]; | ||
| 1245 | if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN)) | ||
| 1246 | return -EINVAL; | ||
| 1247 | |||
| 1248 | /* validation done, copy sample action. */ | ||
| 1249 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE); | ||
| 1250 | if (start < 0) | ||
| 1251 | return start; | ||
| 1252 | err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, | ||
| 1253 | nla_data(probability), sizeof(u32)); | ||
| 1254 | if (err) | ||
| 1255 | return err; | ||
| 1256 | st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS); | ||
| 1257 | if (st_acts < 0) | ||
| 1258 | return st_acts; | ||
| 1259 | |||
| 1260 | err = ovs_nla_copy_actions(actions, key, depth + 1, sfa); | ||
| 1261 | if (err) | ||
| 1262 | return err; | ||
| 1263 | |||
| 1264 | add_nested_action_end(*sfa, st_acts); | ||
| 1265 | add_nested_action_end(*sfa, start); | ||
| 1266 | |||
| 1267 | return 0; | ||
| 1268 | } | ||
| 1269 | |||
| 1270 | static int validate_tp_port(const struct sw_flow_key *flow_key) | ||
| 1271 | { | ||
| 1272 | if (flow_key->eth.type == htons(ETH_P_IP)) { | ||
| 1273 | if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) | ||
| 1274 | return 0; | ||
| 1275 | } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { | ||
| 1276 | if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) | ||
| 1277 | return 0; | ||
| 1278 | } | ||
| 1279 | |||
| 1280 | return -EINVAL; | ||
| 1281 | } | ||
| 1282 | |||
| 1283 | void ovs_match_init(struct sw_flow_match *match, | ||
| 1284 | struct sw_flow_key *key, | ||
| 1285 | struct sw_flow_mask *mask) | ||
| 1286 | { | ||
| 1287 | memset(match, 0, sizeof(*match)); | ||
| 1288 | match->key = key; | ||
| 1289 | match->mask = mask; | ||
| 1290 | |||
| 1291 | memset(key, 0, sizeof(*key)); | ||
| 1292 | |||
| 1293 | if (mask) { | ||
| 1294 | memset(&mask->key, 0, sizeof(mask->key)); | ||
| 1295 | mask->range.start = mask->range.end = 0; | ||
| 1296 | } | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | static int validate_and_copy_set_tun(const struct nlattr *attr, | ||
| 1300 | struct sw_flow_actions **sfa) | ||
| 1301 | { | ||
| 1302 | struct sw_flow_match match; | ||
| 1303 | struct sw_flow_key key; | ||
| 1304 | int err, start; | ||
| 1305 | |||
| 1306 | ovs_match_init(&match, &key, NULL); | ||
| 1307 | err = ipv4_tun_from_nlattr(nla_data(attr), &match, false); | ||
| 1308 | if (err) | ||
| 1309 | return err; | ||
| 1310 | |||
| 1311 | start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); | ||
| 1312 | if (start < 0) | ||
| 1313 | return start; | ||
| 1314 | |||
| 1315 | err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key, | ||
| 1316 | sizeof(match.key->tun_key)); | ||
| 1317 | add_nested_action_end(*sfa, start); | ||
| 1318 | |||
| 1319 | return err; | ||
| 1320 | } | ||
| 1321 | |||
| 1322 | static int validate_set(const struct nlattr *a, | ||
| 1323 | const struct sw_flow_key *flow_key, | ||
| 1324 | struct sw_flow_actions **sfa, | ||
| 1325 | bool *set_tun) | ||
| 1326 | { | ||
| 1327 | const struct nlattr *ovs_key = nla_data(a); | ||
| 1328 | int key_type = nla_type(ovs_key); | ||
| 1329 | |||
| 1330 | /* There can be only one key in a action */ | ||
| 1331 | if (nla_total_size(nla_len(ovs_key)) != nla_len(a)) | ||
| 1332 | return -EINVAL; | ||
| 1333 | |||
| 1334 | if (key_type > OVS_KEY_ATTR_MAX || | ||
| 1335 | (ovs_key_lens[key_type] != nla_len(ovs_key) && | ||
| 1336 | ovs_key_lens[key_type] != -1)) | ||
| 1337 | return -EINVAL; | ||
| 1338 | |||
| 1339 | switch (key_type) { | ||
| 1340 | const struct ovs_key_ipv4 *ipv4_key; | ||
| 1341 | const struct ovs_key_ipv6 *ipv6_key; | ||
| 1342 | int err; | ||
| 1343 | |||
| 1344 | case OVS_KEY_ATTR_PRIORITY: | ||
| 1345 | case OVS_KEY_ATTR_SKB_MARK: | ||
| 1346 | case OVS_KEY_ATTR_ETHERNET: | ||
| 1347 | break; | ||
| 1348 | |||
| 1349 | case OVS_KEY_ATTR_TUNNEL: | ||
| 1350 | *set_tun = true; | ||
| 1351 | err = validate_and_copy_set_tun(a, sfa); | ||
| 1352 | if (err) | ||
| 1353 | return err; | ||
| 1354 | break; | ||
| 1355 | |||
| 1356 | case OVS_KEY_ATTR_IPV4: | ||
| 1357 | if (flow_key->eth.type != htons(ETH_P_IP)) | ||
| 1358 | return -EINVAL; | ||
| 1359 | |||
| 1360 | if (!flow_key->ip.proto) | ||
| 1361 | return -EINVAL; | ||
| 1362 | |||
| 1363 | ipv4_key = nla_data(ovs_key); | ||
| 1364 | if (ipv4_key->ipv4_proto != flow_key->ip.proto) | ||
| 1365 | return -EINVAL; | ||
| 1366 | |||
| 1367 | if (ipv4_key->ipv4_frag != flow_key->ip.frag) | ||
| 1368 | return -EINVAL; | ||
| 1369 | |||
| 1370 | break; | ||
| 1371 | |||
| 1372 | case OVS_KEY_ATTR_IPV6: | ||
| 1373 | if (flow_key->eth.type != htons(ETH_P_IPV6)) | ||
| 1374 | return -EINVAL; | ||
| 1375 | |||
| 1376 | if (!flow_key->ip.proto) | ||
| 1377 | return -EINVAL; | ||
| 1378 | |||
| 1379 | ipv6_key = nla_data(ovs_key); | ||
| 1380 | if (ipv6_key->ipv6_proto != flow_key->ip.proto) | ||
| 1381 | return -EINVAL; | ||
| 1382 | |||
| 1383 | if (ipv6_key->ipv6_frag != flow_key->ip.frag) | ||
| 1384 | return -EINVAL; | ||
| 1385 | |||
| 1386 | if (ntohl(ipv6_key->ipv6_label) & 0xFFF00000) | ||
| 1387 | return -EINVAL; | ||
| 1388 | |||
| 1389 | break; | ||
| 1390 | |||
| 1391 | case OVS_KEY_ATTR_TCP: | ||
| 1392 | if (flow_key->ip.proto != IPPROTO_TCP) | ||
| 1393 | return -EINVAL; | ||
| 1394 | |||
| 1395 | return validate_tp_port(flow_key); | ||
| 1396 | |||
| 1397 | case OVS_KEY_ATTR_UDP: | ||
| 1398 | if (flow_key->ip.proto != IPPROTO_UDP) | ||
| 1399 | return -EINVAL; | ||
| 1400 | |||
| 1401 | return validate_tp_port(flow_key); | ||
| 1402 | |||
| 1403 | case OVS_KEY_ATTR_SCTP: | ||
| 1404 | if (flow_key->ip.proto != IPPROTO_SCTP) | ||
| 1405 | return -EINVAL; | ||
| 1406 | |||
| 1407 | return validate_tp_port(flow_key); | ||
| 1408 | |||
| 1409 | default: | ||
| 1410 | return -EINVAL; | ||
| 1411 | } | ||
| 1412 | |||
| 1413 | return 0; | ||
| 1414 | } | ||
| 1415 | |||
| 1416 | static int validate_userspace(const struct nlattr *attr) | ||
| 1417 | { | ||
| 1418 | static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { | ||
| 1419 | [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, | ||
| 1420 | [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, | ||
| 1421 | }; | ||
| 1422 | struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; | ||
| 1423 | int error; | ||
| 1424 | |||
| 1425 | error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, | ||
| 1426 | attr, userspace_policy); | ||
| 1427 | if (error) | ||
| 1428 | return error; | ||
| 1429 | |||
| 1430 | if (!a[OVS_USERSPACE_ATTR_PID] || | ||
| 1431 | !nla_get_u32(a[OVS_USERSPACE_ATTR_PID])) | ||
| 1432 | return -EINVAL; | ||
| 1433 | |||
| 1434 | return 0; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | static int copy_action(const struct nlattr *from, | ||
| 1438 | struct sw_flow_actions **sfa) | ||
| 1439 | { | ||
| 1440 | int totlen = NLA_ALIGN(from->nla_len); | ||
| 1441 | struct nlattr *to; | ||
| 1442 | |||
| 1443 | to = reserve_sfa_size(sfa, from->nla_len); | ||
| 1444 | if (IS_ERR(to)) | ||
| 1445 | return PTR_ERR(to); | ||
| 1446 | |||
| 1447 | memcpy(to, from, totlen); | ||
| 1448 | return 0; | ||
| 1449 | } | ||
| 1450 | |||
| 1451 | int ovs_nla_copy_actions(const struct nlattr *attr, | ||
| 1452 | const struct sw_flow_key *key, | ||
| 1453 | int depth, | ||
| 1454 | struct sw_flow_actions **sfa) | ||
| 1455 | { | ||
| 1456 | const struct nlattr *a; | ||
| 1457 | int rem, err; | ||
| 1458 | |||
| 1459 | if (depth >= SAMPLE_ACTION_DEPTH) | ||
| 1460 | return -EOVERFLOW; | ||
| 1461 | |||
| 1462 | nla_for_each_nested(a, attr, rem) { | ||
| 1463 | /* Expected argument lengths, (u32)-1 for variable length. */ | ||
| 1464 | static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { | ||
| 1465 | [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), | ||
| 1466 | [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, | ||
| 1467 | [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), | ||
| 1468 | [OVS_ACTION_ATTR_POP_VLAN] = 0, | ||
| 1469 | [OVS_ACTION_ATTR_SET] = (u32)-1, | ||
| 1470 | [OVS_ACTION_ATTR_SAMPLE] = (u32)-1 | ||
| 1471 | }; | ||
| 1472 | const struct ovs_action_push_vlan *vlan; | ||
| 1473 | int type = nla_type(a); | ||
| 1474 | bool skip_copy; | ||
| 1475 | |||
| 1476 | if (type > OVS_ACTION_ATTR_MAX || | ||
| 1477 | (action_lens[type] != nla_len(a) && | ||
| 1478 | action_lens[type] != (u32)-1)) | ||
| 1479 | return -EINVAL; | ||
| 1480 | |||
| 1481 | skip_copy = false; | ||
| 1482 | switch (type) { | ||
| 1483 | case OVS_ACTION_ATTR_UNSPEC: | ||
| 1484 | return -EINVAL; | ||
| 1485 | |||
| 1486 | case OVS_ACTION_ATTR_USERSPACE: | ||
| 1487 | err = validate_userspace(a); | ||
| 1488 | if (err) | ||
| 1489 | return err; | ||
| 1490 | break; | ||
| 1491 | |||
| 1492 | case OVS_ACTION_ATTR_OUTPUT: | ||
| 1493 | if (nla_get_u32(a) >= DP_MAX_PORTS) | ||
| 1494 | return -EINVAL; | ||
| 1495 | break; | ||
| 1496 | |||
| 1497 | |||
| 1498 | case OVS_ACTION_ATTR_POP_VLAN: | ||
| 1499 | break; | ||
| 1500 | |||
| 1501 | case OVS_ACTION_ATTR_PUSH_VLAN: | ||
| 1502 | vlan = nla_data(a); | ||
| 1503 | if (vlan->vlan_tpid != htons(ETH_P_8021Q)) | ||
| 1504 | return -EINVAL; | ||
| 1505 | if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) | ||
| 1506 | return -EINVAL; | ||
| 1507 | break; | ||
| 1508 | |||
| 1509 | case OVS_ACTION_ATTR_SET: | ||
| 1510 | err = validate_set(a, key, sfa, &skip_copy); | ||
| 1511 | if (err) | ||
| 1512 | return err; | ||
| 1513 | break; | ||
| 1514 | |||
| 1515 | case OVS_ACTION_ATTR_SAMPLE: | ||
| 1516 | err = validate_and_copy_sample(a, key, depth, sfa); | ||
| 1517 | if (err) | ||
| 1518 | return err; | ||
| 1519 | skip_copy = true; | ||
| 1520 | break; | ||
| 1521 | |||
| 1522 | default: | ||
| 1523 | return -EINVAL; | ||
| 1524 | } | ||
| 1525 | if (!skip_copy) { | ||
| 1526 | err = copy_action(a, sfa); | ||
| 1527 | if (err) | ||
| 1528 | return err; | ||
| 1529 | } | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | if (rem > 0) | ||
| 1533 | return -EINVAL; | ||
| 1534 | |||
| 1535 | return 0; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) | ||
| 1539 | { | ||
| 1540 | const struct nlattr *a; | ||
| 1541 | struct nlattr *start; | ||
| 1542 | int err = 0, rem; | ||
| 1543 | |||
| 1544 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SAMPLE); | ||
| 1545 | if (!start) | ||
| 1546 | return -EMSGSIZE; | ||
| 1547 | |||
| 1548 | nla_for_each_nested(a, attr, rem) { | ||
| 1549 | int type = nla_type(a); | ||
| 1550 | struct nlattr *st_sample; | ||
| 1551 | |||
| 1552 | switch (type) { | ||
| 1553 | case OVS_SAMPLE_ATTR_PROBABILITY: | ||
| 1554 | if (nla_put(skb, OVS_SAMPLE_ATTR_PROBABILITY, | ||
| 1555 | sizeof(u32), nla_data(a))) | ||
| 1556 | return -EMSGSIZE; | ||
| 1557 | break; | ||
| 1558 | case OVS_SAMPLE_ATTR_ACTIONS: | ||
| 1559 | st_sample = nla_nest_start(skb, OVS_SAMPLE_ATTR_ACTIONS); | ||
| 1560 | if (!st_sample) | ||
| 1561 | return -EMSGSIZE; | ||
| 1562 | err = ovs_nla_put_actions(nla_data(a), nla_len(a), skb); | ||
| 1563 | if (err) | ||
| 1564 | return err; | ||
| 1565 | nla_nest_end(skb, st_sample); | ||
| 1566 | break; | ||
| 1567 | } | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | nla_nest_end(skb, start); | ||
| 1571 | return err; | ||
| 1572 | } | ||
| 1573 | |||
| 1574 | static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb) | ||
| 1575 | { | ||
| 1576 | const struct nlattr *ovs_key = nla_data(a); | ||
| 1577 | int key_type = nla_type(ovs_key); | ||
| 1578 | struct nlattr *start; | ||
| 1579 | int err; | ||
| 1580 | |||
| 1581 | switch (key_type) { | ||
| 1582 | case OVS_KEY_ATTR_IPV4_TUNNEL: | ||
| 1583 | start = nla_nest_start(skb, OVS_ACTION_ATTR_SET); | ||
| 1584 | if (!start) | ||
| 1585 | return -EMSGSIZE; | ||
| 1586 | |||
| 1587 | err = ipv4_tun_to_nlattr(skb, nla_data(ovs_key), | ||
| 1588 | nla_data(ovs_key)); | ||
| 1589 | if (err) | ||
| 1590 | return err; | ||
| 1591 | nla_nest_end(skb, start); | ||
| 1592 | break; | ||
| 1593 | default: | ||
| 1594 | if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a), ovs_key)) | ||
| 1595 | return -EMSGSIZE; | ||
| 1596 | break; | ||
| 1597 | } | ||
| 1598 | |||
| 1599 | return 0; | ||
| 1600 | } | ||
| 1601 | |||
| 1602 | int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb) | ||
| 1603 | { | ||
| 1604 | const struct nlattr *a; | ||
| 1605 | int rem, err; | ||
| 1606 | |||
| 1607 | nla_for_each_attr(a, attr, len, rem) { | ||
| 1608 | int type = nla_type(a); | ||
| 1609 | |||
| 1610 | switch (type) { | ||
| 1611 | case OVS_ACTION_ATTR_SET: | ||
| 1612 | err = set_action_to_attr(a, skb); | ||
| 1613 | if (err) | ||
| 1614 | return err; | ||
| 1615 | break; | ||
| 1616 | |||
| 1617 | case OVS_ACTION_ATTR_SAMPLE: | ||
| 1618 | err = sample_action_to_attr(a, skb); | ||
| 1619 | if (err) | ||
| 1620 | return err; | ||
| 1621 | break; | ||
| 1622 | default: | ||
| 1623 | if (nla_put(skb, type, nla_len(a), nla_data(a))) | ||
| 1624 | return -EMSGSIZE; | ||
| 1625 | break; | ||
| 1626 | } | ||
| 1627 | } | ||
| 1628 | |||
| 1629 | return 0; | ||
| 1630 | } | ||
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h new file mode 100644 index 000000000000..440151045d39 --- /dev/null +++ b/net/openvswitch/flow_netlink.h | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of version 2 of the GNU General Public | ||
| 6 | * License as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 16 | * 02110-1301, USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | |||
| 20 | #ifndef FLOW_NETLINK_H | ||
| 21 | #define FLOW_NETLINK_H 1 | ||
| 22 | |||
| 23 | #include <linux/kernel.h> | ||
| 24 | #include <linux/netlink.h> | ||
| 25 | #include <linux/openvswitch.h> | ||
| 26 | #include <linux/spinlock.h> | ||
| 27 | #include <linux/types.h> | ||
| 28 | #include <linux/rcupdate.h> | ||
| 29 | #include <linux/if_ether.h> | ||
| 30 | #include <linux/in6.h> | ||
| 31 | #include <linux/jiffies.h> | ||
| 32 | #include <linux/time.h> | ||
| 33 | #include <linux/flex_array.h> | ||
| 34 | |||
| 35 | #include <net/inet_ecn.h> | ||
| 36 | #include <net/ip_tunnels.h> | ||
| 37 | |||
| 38 | #include "flow.h" | ||
| 39 | |||
| 40 | void ovs_match_init(struct sw_flow_match *match, | ||
| 41 | struct sw_flow_key *key, struct sw_flow_mask *mask); | ||
| 42 | |||
| 43 | int ovs_nla_put_flow(const struct sw_flow_key *, | ||
| 44 | const struct sw_flow_key *, struct sk_buff *); | ||
| 45 | int ovs_nla_get_flow_metadata(struct sw_flow *flow, | ||
| 46 | const struct nlattr *attr); | ||
| 47 | int ovs_nla_get_match(struct sw_flow_match *match, | ||
| 48 | const struct nlattr *, | ||
| 49 | const struct nlattr *); | ||
| 50 | |||
| 51 | int ovs_nla_copy_actions(const struct nlattr *attr, | ||
| 52 | const struct sw_flow_key *key, int depth, | ||
| 53 | struct sw_flow_actions **sfa); | ||
| 54 | int ovs_nla_put_actions(const struct nlattr *attr, | ||
| 55 | int len, struct sk_buff *skb); | ||
| 56 | |||
| 57 | struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len); | ||
| 58 | void ovs_nla_free_flow_actions(struct sw_flow_actions *); | ||
| 59 | |||
| 60 | #endif /* flow_netlink.h */ | ||
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c new file mode 100644 index 000000000000..e42542706087 --- /dev/null +++ b/net/openvswitch/flow_table.c | |||
| @@ -0,0 +1,592 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of version 2 of the GNU General Public | ||
| 6 | * License as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 16 | * 02110-1301, USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #include "flow.h" | ||
| 20 | #include "datapath.h" | ||
| 21 | #include <linux/uaccess.h> | ||
| 22 | #include <linux/netdevice.h> | ||
| 23 | #include <linux/etherdevice.h> | ||
| 24 | #include <linux/if_ether.h> | ||
| 25 | #include <linux/if_vlan.h> | ||
| 26 | #include <net/llc_pdu.h> | ||
| 27 | #include <linux/kernel.h> | ||
| 28 | #include <linux/jhash.h> | ||
| 29 | #include <linux/jiffies.h> | ||
| 30 | #include <linux/llc.h> | ||
| 31 | #include <linux/module.h> | ||
| 32 | #include <linux/in.h> | ||
| 33 | #include <linux/rcupdate.h> | ||
| 34 | #include <linux/if_arp.h> | ||
| 35 | #include <linux/ip.h> | ||
| 36 | #include <linux/ipv6.h> | ||
| 37 | #include <linux/sctp.h> | ||
| 38 | #include <linux/tcp.h> | ||
| 39 | #include <linux/udp.h> | ||
| 40 | #include <linux/icmp.h> | ||
| 41 | #include <linux/icmpv6.h> | ||
| 42 | #include <linux/rculist.h> | ||
| 43 | #include <net/ip.h> | ||
| 44 | #include <net/ipv6.h> | ||
| 45 | #include <net/ndisc.h> | ||
| 46 | |||
| 47 | #include "datapath.h" | ||
| 48 | |||
| 49 | #define TBL_MIN_BUCKETS 1024 | ||
| 50 | #define REHASH_INTERVAL (10 * 60 * HZ) | ||
| 51 | |||
| 52 | static struct kmem_cache *flow_cache; | ||
| 53 | |||
| 54 | static u16 range_n_bytes(const struct sw_flow_key_range *range) | ||
| 55 | { | ||
| 56 | return range->end - range->start; | ||
| 57 | } | ||
| 58 | |||
| 59 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 60 | const struct sw_flow_mask *mask) | ||
| 61 | { | ||
| 62 | const long *m = (long *)((u8 *)&mask->key + mask->range.start); | ||
| 63 | const long *s = (long *)((u8 *)src + mask->range.start); | ||
| 64 | long *d = (long *)((u8 *)dst + mask->range.start); | ||
| 65 | int i; | ||
| 66 | |||
| 67 | /* The memory outside of the 'mask->range' are not set since | ||
| 68 | * further operations on 'dst' only uses contents within | ||
| 69 | * 'mask->range'. | ||
| 70 | */ | ||
| 71 | for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) | ||
| 72 | *d++ = *s++ & *m++; | ||
| 73 | } | ||
| 74 | |||
| 75 | struct sw_flow *ovs_flow_alloc(void) | ||
| 76 | { | ||
| 77 | struct sw_flow *flow; | ||
| 78 | |||
| 79 | flow = kmem_cache_alloc(flow_cache, GFP_KERNEL); | ||
| 80 | if (!flow) | ||
| 81 | return ERR_PTR(-ENOMEM); | ||
| 82 | |||
| 83 | spin_lock_init(&flow->lock); | ||
| 84 | flow->sf_acts = NULL; | ||
| 85 | flow->mask = NULL; | ||
| 86 | |||
| 87 | return flow; | ||
| 88 | } | ||
| 89 | |||
| 90 | int ovs_flow_tbl_count(struct flow_table *table) | ||
| 91 | { | ||
| 92 | return table->count; | ||
| 93 | } | ||
| 94 | |||
| 95 | static struct flex_array *alloc_buckets(unsigned int n_buckets) | ||
| 96 | { | ||
| 97 | struct flex_array *buckets; | ||
| 98 | int i, err; | ||
| 99 | |||
| 100 | buckets = flex_array_alloc(sizeof(struct hlist_head), | ||
| 101 | n_buckets, GFP_KERNEL); | ||
| 102 | if (!buckets) | ||
| 103 | return NULL; | ||
| 104 | |||
| 105 | err = flex_array_prealloc(buckets, 0, n_buckets, GFP_KERNEL); | ||
| 106 | if (err) { | ||
| 107 | flex_array_free(buckets); | ||
| 108 | return NULL; | ||
| 109 | } | ||
| 110 | |||
| 111 | for (i = 0; i < n_buckets; i++) | ||
| 112 | INIT_HLIST_HEAD((struct hlist_head *) | ||
| 113 | flex_array_get(buckets, i)); | ||
| 114 | |||
| 115 | return buckets; | ||
| 116 | } | ||
| 117 | |||
| 118 | static void flow_free(struct sw_flow *flow) | ||
| 119 | { | ||
| 120 | kfree((struct sf_flow_acts __force *)flow->sf_acts); | ||
| 121 | kmem_cache_free(flow_cache, flow); | ||
| 122 | } | ||
| 123 | |||
| 124 | static void rcu_free_flow_callback(struct rcu_head *rcu) | ||
| 125 | { | ||
| 126 | struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu); | ||
| 127 | |||
| 128 | flow_free(flow); | ||
| 129 | } | ||
| 130 | |||
| 131 | static void rcu_free_sw_flow_mask_cb(struct rcu_head *rcu) | ||
| 132 | { | ||
| 133 | struct sw_flow_mask *mask = container_of(rcu, struct sw_flow_mask, rcu); | ||
| 134 | |||
| 135 | kfree(mask); | ||
| 136 | } | ||
| 137 | |||
/* Drop one reference on 'mask'.  On the last reference the mask is removed
 * from its table's mask list and freed -- via RCU callback when 'deferred'
 * is set, so concurrent readers walking the list can finish first.
 * NOTE(review): ref_count is a plain counter and list_del_rcu requires
 * writer serialization; callers presumably hold ovs_mutex -- confirm.
 */
static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
{
	if (!mask)
		return;

	BUG_ON(!mask->ref_count);
	mask->ref_count--;

	if (!mask->ref_count) {
		list_del_rcu(&mask->list);
		if (deferred)
			call_rcu(&mask->rcu, rcu_free_sw_flow_mask_cb);
		else
			kfree(mask);
	}
}
| 154 | |||
/* Release 'flow' and the reference it holds on its mask.  With 'deferred'
 * the flow is reclaimed only after an RCU grace period (use this while
 * readers may still hold RCU references); otherwise it is freed at once.
 * A NULL 'flow' is a no-op.
 */
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
	if (!flow)
		return;

	flow_mask_del_ref(flow->mask, deferred);

	if (deferred)
		call_rcu(&flow->rcu, rcu_free_flow_callback);
	else
		flow_free(flow);
}
| 167 | |||
/* Counterpart of alloc_buckets(): releases the bucket array only. */
static void free_buckets(struct flex_array *buckets)
{
	flex_array_free(buckets);
}
| 172 | |||
| 173 | static void __table_instance_destroy(struct table_instance *ti) | ||
| 174 | { | ||
| 175 | int i; | ||
| 176 | |||
| 177 | if (ti->keep_flows) | ||
| 178 | goto skip_flows; | ||
| 179 | |||
| 180 | for (i = 0; i < ti->n_buckets; i++) { | ||
| 181 | struct sw_flow *flow; | ||
| 182 | struct hlist_head *head = flex_array_get(ti->buckets, i); | ||
| 183 | struct hlist_node *n; | ||
| 184 | int ver = ti->node_ver; | ||
| 185 | |||
| 186 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | ||
| 187 | hlist_del(&flow->hash_node[ver]); | ||
| 188 | ovs_flow_free(flow, false); | ||
| 189 | } | ||
| 190 | } | ||
| 191 | |||
| 192 | skip_flows: | ||
| 193 | free_buckets(ti->buckets); | ||
| 194 | kfree(ti); | ||
| 195 | } | ||
| 196 | |||
| 197 | static struct table_instance *table_instance_alloc(int new_size) | ||
| 198 | { | ||
| 199 | struct table_instance *ti = kmalloc(sizeof(*ti), GFP_KERNEL); | ||
| 200 | |||
| 201 | if (!ti) | ||
| 202 | return NULL; | ||
| 203 | |||
| 204 | ti->buckets = alloc_buckets(new_size); | ||
| 205 | |||
| 206 | if (!ti->buckets) { | ||
| 207 | kfree(ti); | ||
| 208 | return NULL; | ||
| 209 | } | ||
| 210 | ti->n_buckets = new_size; | ||
| 211 | ti->node_ver = 0; | ||
| 212 | ti->keep_flows = false; | ||
| 213 | get_random_bytes(&ti->hash_seed, sizeof(u32)); | ||
| 214 | |||
| 215 | return ti; | ||
| 216 | } | ||
| 217 | |||
| 218 | int ovs_flow_tbl_init(struct flow_table *table) | ||
| 219 | { | ||
| 220 | struct table_instance *ti; | ||
| 221 | |||
| 222 | ti = table_instance_alloc(TBL_MIN_BUCKETS); | ||
| 223 | |||
| 224 | if (!ti) | ||
| 225 | return -ENOMEM; | ||
| 226 | |||
| 227 | rcu_assign_pointer(table->ti, ti); | ||
| 228 | INIT_LIST_HEAD(&table->mask_list); | ||
| 229 | table->last_rehash = jiffies; | ||
| 230 | table->count = 0; | ||
| 231 | return 0; | ||
| 232 | } | ||
| 233 | |||
| 234 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | ||
| 235 | { | ||
| 236 | struct table_instance *ti = container_of(rcu, struct table_instance, rcu); | ||
| 237 | |||
| 238 | __table_instance_destroy(ti); | ||
| 239 | } | ||
| 240 | |||
| 241 | static void table_instance_destroy(struct table_instance *ti, bool deferred) | ||
| 242 | { | ||
| 243 | if (!ti) | ||
| 244 | return; | ||
| 245 | |||
| 246 | if (deferred) | ||
| 247 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | ||
| 248 | else | ||
| 249 | __table_instance_destroy(ti); | ||
| 250 | } | ||
| 251 | |||
| 252 | void ovs_flow_tbl_destroy(struct flow_table *table) | ||
| 253 | { | ||
| 254 | struct table_instance *ti = ovsl_dereference(table->ti); | ||
| 255 | |||
| 256 | table_instance_destroy(ti, false); | ||
| 257 | } | ||
| 258 | |||
/* Resumable iterator for flow dumps.  '*bucket' and '*last' encode the
 * position (bucket index, 1-based entry index within that bucket); both
 * start at 0 and are advanced here so a later call continues where this
 * one stopped.  Returns the next flow, or NULL when the table is
 * exhausted.  Traversal uses the RCU hlist walker, so entries changed
 * between calls may be skipped or revisited.
 */
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
				       u32 *bucket, u32 *last)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int ver;
	int i;

	ver = ti->node_ver;
	while (*bucket < ti->n_buckets) {
		i = 0;
		head = flex_array_get(ti->buckets, *bucket);
		hlist_for_each_entry_rcu(flow, head, hash_node[ver]) {
			if (i < *last) {
				/* Already returned by an earlier call. */
				i++;
				continue;
			}
			*last = i + 1;
			return flow;
		}
		(*bucket)++;
		*last = 0;
	}

	return NULL;
}
| 285 | |||
| 286 | static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash) | ||
| 287 | { | ||
| 288 | hash = jhash_1word(hash, ti->hash_seed); | ||
| 289 | return flex_array_get(ti->buckets, | ||
| 290 | (hash & (ti->n_buckets - 1))); | ||
| 291 | } | ||
| 292 | |||
| 293 | static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow) | ||
| 294 | { | ||
| 295 | struct hlist_head *head; | ||
| 296 | |||
| 297 | head = find_bucket(ti, flow->hash); | ||
| 298 | hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head); | ||
| 299 | } | ||
| 300 | |||
/* Link every flow of 'old' into 'new' during a rehash/expand.  'new' takes
 * the opposite hash_node[] slot, so each flow is simultaneously a member
 * of both instances while readers still traverse 'old'.  Setting
 * old->keep_flows last makes destroying 'old' skip freeing the flows,
 * which now belong to 'new'.
 */
static void flow_table_copy_flows(struct table_instance *old,
				  struct table_instance *new)
{
	int old_ver;
	int i;

	old_ver = old->node_ver;
	new->node_ver = !old_ver;

	/* Insert in new table. */
	for (i = 0; i < old->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head;

		head = flex_array_get(old->buckets, i);

		hlist_for_each_entry(flow, head, hash_node[old_ver])
			table_instance_insert(new, flow);
	}

	old->keep_flows = true;
}
| 323 | |||
/* Build a new instance with 'n_buckets' buckets (and a fresh seed) and
 * migrate all flows into it.  Returns NULL on allocation failure, leaving
 * 'ti' untouched.
 */
static struct table_instance *table_instance_rehash(struct table_instance *ti,
						    int n_buckets)
{
	struct table_instance *new_ti = table_instance_alloc(n_buckets);

	if (new_ti)
		flow_table_copy_flows(ti, new_ti);

	return new_ti;
}
| 337 | |||
/* Replace the table's instance with a fresh, minimum-size empty one and
 * destroy the old instance after an RCU grace period; since keep_flows is
 * not set on it, all its flows are freed with it.  On -ENOMEM the table
 * is left unchanged.
 */
int ovs_flow_tbl_flush(struct flow_table *flow_table)
{
	struct table_instance *old_ti;
	struct table_instance *new_ti;

	old_ti = ovsl_dereference(flow_table->ti);
	new_ti = table_instance_alloc(TBL_MIN_BUCKETS);
	if (!new_ti)
		return -ENOMEM;

	/* Publish the empty instance before tearing down the old one so
	 * concurrent readers always see a valid table.
	 */
	rcu_assign_pointer(flow_table->ti, new_ti);
	flow_table->last_rehash = jiffies;
	flow_table->count = 0;

	table_instance_destroy(old_ti, true);
	return 0;
}
| 355 | |||
| 356 | static u32 flow_hash(const struct sw_flow_key *key, int key_start, | ||
| 357 | int key_end) | ||
| 358 | { | ||
| 359 | u32 *hash_key = (u32 *)((u8 *)key + key_start); | ||
| 360 | int hash_u32s = (key_end - key_start) >> 2; | ||
| 361 | |||
| 362 | /* Make sure number of hash bytes are multiple of u32. */ | ||
| 363 | BUILD_BUG_ON(sizeof(long) % sizeof(u32)); | ||
| 364 | |||
| 365 | return jhash2(hash_key, hash_u32s, 0); | ||
| 366 | } | ||
| 367 | |||
| 368 | static int flow_key_start(const struct sw_flow_key *key) | ||
| 369 | { | ||
| 370 | if (key->tun_key.ipv4_dst) | ||
| 371 | return 0; | ||
| 372 | else | ||
| 373 | return rounddown(offsetof(struct sw_flow_key, phy), | ||
| 374 | sizeof(long)); | ||
| 375 | } | ||
| 376 | |||
| 377 | static bool cmp_key(const struct sw_flow_key *key1, | ||
| 378 | const struct sw_flow_key *key2, | ||
| 379 | int key_start, int key_end) | ||
| 380 | { | ||
| 381 | const long *cp1 = (long *)((u8 *)key1 + key_start); | ||
| 382 | const long *cp2 = (long *)((u8 *)key2 + key_start); | ||
| 383 | long diffs = 0; | ||
| 384 | int i; | ||
| 385 | |||
| 386 | for (i = key_start; i < key_end; i += sizeof(long)) | ||
| 387 | diffs |= *cp1++ ^ *cp2++; | ||
| 388 | |||
| 389 | return diffs == 0; | ||
| 390 | } | ||
| 391 | |||
| 392 | static bool flow_cmp_masked_key(const struct sw_flow *flow, | ||
| 393 | const struct sw_flow_key *key, | ||
| 394 | int key_start, int key_end) | ||
| 395 | { | ||
| 396 | return cmp_key(&flow->key, key, key_start, key_end); | ||
| 397 | } | ||
| 398 | |||
| 399 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 400 | struct sw_flow_match *match) | ||
| 401 | { | ||
| 402 | struct sw_flow_key *key = match->key; | ||
| 403 | int key_start = flow_key_start(key); | ||
| 404 | int key_end = match->range.end; | ||
| 405 | |||
| 406 | return cmp_key(&flow->unmasked_key, key, key_start, key_end); | ||
| 407 | } | ||
| 408 | |||
/* Look up 'unmasked' under one specific 'mask': apply the mask, hash only
 * the bytes in the mask's range, then scan the bucket for a flow with the
 * same mask pointer, hash, and masked key bytes.  RCU read-side.
 */
static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
					  const struct sw_flow_key *unmasked,
					  struct sw_flow_mask *mask)
{
	struct sw_flow *flow;
	struct hlist_head *head;
	int key_start = mask->range.start;
	int key_end = mask->range.end;
	u32 hash;
	struct sw_flow_key masked_key;	/* Only [key_start, key_end) is written. */

	ovs_flow_mask_key(&masked_key, unmasked, mask);
	hash = flow_hash(&masked_key, key_start, key_end);
	head = find_bucket(ti, hash);
	hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) {
		/* Cheap pointer/hash checks first, byte compare last. */
		if (flow->mask == mask && flow->hash == hash &&
		    flow_cmp_masked_key(flow, &masked_key,
					key_start, key_end))
			return flow;
	}
	return NULL;
}
| 431 | |||
/* Packet-path lookup: try each registered mask in list order until one
 * produces a match.  '*n_mask_hit' is set to the number of masks tried
 * (all of them on a miss).  RCU read-side.
 */
struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
				    const struct sw_flow_key *key,
				    u32 *n_mask_hit)
{
	struct table_instance *ti = rcu_dereference(tbl->ti);
	struct sw_flow_mask *mask;
	struct sw_flow *flow;

	*n_mask_hit = 0;
	list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
		(*n_mask_hit)++;
		flow = masked_flow_lookup(ti, key, mask);
		if (flow) /* Found */
			return flow;
	}
	return NULL;
}
| 449 | |||
| 450 | int ovs_flow_tbl_num_masks(const struct flow_table *table) | ||
| 451 | { | ||
| 452 | struct sw_flow_mask *mask; | ||
| 453 | int num = 0; | ||
| 454 | |||
| 455 | list_for_each_entry(mask, &table->mask_list, list) | ||
| 456 | num++; | ||
| 457 | |||
| 458 | return num; | ||
| 459 | } | ||
| 460 | |||
| 461 | static struct table_instance *table_instance_expand(struct table_instance *ti) | ||
| 462 | { | ||
| 463 | return table_instance_rehash(ti, ti->n_buckets * 2); | ||
| 464 | } | ||
| 465 | |||
/* Unlink 'flow' from the current instance and decrement the flow count.
 * The flow itself is not freed here -- callers use ovs_flow_free()
 * (typically deferred, since RCU readers may still traverse the node).
 * Uses ovsl_dereference, so the ovs lock must be held.
 */
void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
{
	struct table_instance *ti = ovsl_dereference(table->ti);

	BUG_ON(table->count == 0);
	hlist_del_rcu(&flow->hash_node[ti->node_ver]);
	table->count--;
}
| 474 | |||
| 475 | static struct sw_flow_mask *mask_alloc(void) | ||
| 476 | { | ||
| 477 | struct sw_flow_mask *mask; | ||
| 478 | |||
| 479 | mask = kmalloc(sizeof(*mask), GFP_KERNEL); | ||
| 480 | if (mask) | ||
| 481 | mask->ref_count = 0; | ||
| 482 | |||
| 483 | return mask; | ||
| 484 | } | ||
| 485 | |||
| 486 | static void mask_add_ref(struct sw_flow_mask *mask) | ||
| 487 | { | ||
| 488 | mask->ref_count++; | ||
| 489 | } | ||
| 490 | |||
| 491 | static bool mask_equal(const struct sw_flow_mask *a, | ||
| 492 | const struct sw_flow_mask *b) | ||
| 493 | { | ||
| 494 | u8 *a_ = (u8 *)&a->key + a->range.start; | ||
| 495 | u8 *b_ = (u8 *)&b->key + b->range.start; | ||
| 496 | |||
| 497 | return (a->range.end == b->range.end) | ||
| 498 | && (a->range.start == b->range.start) | ||
| 499 | && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0); | ||
| 500 | } | ||
| 501 | |||
| 502 | static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl, | ||
| 503 | const struct sw_flow_mask *mask) | ||
| 504 | { | ||
| 505 | struct list_head *ml; | ||
| 506 | |||
| 507 | list_for_each(ml, &tbl->mask_list) { | ||
| 508 | struct sw_flow_mask *m; | ||
| 509 | m = container_of(ml, struct sw_flow_mask, list); | ||
| 510 | if (mask_equal(mask, m)) | ||
| 511 | return m; | ||
| 512 | } | ||
| 513 | |||
| 514 | return NULL; | ||
| 515 | } | ||
| 516 | |||
/* Point flow->mask at a mask equivalent to 'new', reusing one already on
 * the table's mask list when possible (masks are shared between flows and
 * reference-counted); otherwise a copy of 'new' is allocated and added to
 * the list.  Returns 0 or -ENOMEM.
 */
static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
			    struct sw_flow_mask *new)
{
	struct sw_flow_mask *mask;
	mask = flow_mask_find(tbl, new);
	if (!mask) {
		/* Allocate a new mask if none exists. */
		mask = mask_alloc();
		if (!mask)
			return -ENOMEM;
		mask->key = new->key;
		mask->range = new->range;
		list_add_rcu(&mask->list, &tbl->mask_list);
	}

	mask_add_ref(mask);
	flow->mask = mask;
	return 0;
}
| 541 | |||
/* Insert 'flow' into the table using 'mask' (shared via flow_mask_insert),
 * hashing only the bytes inside the mask's range.  Afterwards the instance
 * may be replaced: expanded to twice the buckets once count exceeds
 * n_buckets, or rehashed at the same size (fresh seed) every
 * REHASH_INTERVAL.  The old instance is then destroyed via RCU; the copy
 * set keep_flows on it, so the flows survive.  If the replacement cannot
 * be allocated, the old instance simply stays in use.  Returns 0, or
 * -ENOMEM from mask insertion.
 */
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
			struct sw_flow_mask *mask)
{
	struct table_instance *new_ti = NULL;
	struct table_instance *ti;
	int err;

	err = flow_mask_insert(table, flow, mask);
	if (err)
		return err;

	flow->hash = flow_hash(&flow->key, flow->mask->range.start,
			       flow->mask->range.end);
	ti = ovsl_dereference(table->ti);
	table_instance_insert(ti, flow);
	table->count++;

	/* Expand table, if necessary, to make room. */
	if (table->count > ti->n_buckets)
		new_ti = table_instance_expand(ti);
	else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL))
		new_ti = table_instance_rehash(ti, ti->n_buckets);

	if (new_ti) {
		/* Publish the new instance before retiring the old one. */
		rcu_assign_pointer(table->ti, new_ti);
		table_instance_destroy(ti, true);
		table->last_rehash = jiffies;
	}
	return 0;
}
| 572 | |||
| 573 | /* Initializes the flow module. | ||
| 574 | * Returns zero if successful or a negative error code. */ | ||
| 575 | int ovs_flow_init(void) | ||
| 576 | { | ||
| 577 | BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long)); | ||
| 578 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | ||
| 579 | |||
| 580 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0, | ||
| 581 | 0, NULL); | ||
| 582 | if (flow_cache == NULL) | ||
| 583 | return -ENOMEM; | ||
| 584 | |||
| 585 | return 0; | ||
| 586 | } | ||
| 587 | |||
| 588 | /* Uninitializes the flow module. */ | ||
| 589 | void ovs_flow_exit(void) | ||
| 590 | { | ||
| 591 | kmem_cache_destroy(flow_cache); | ||
| 592 | } | ||
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h new file mode 100644 index 000000000000..fbe45d5ad07d --- /dev/null +++ b/net/openvswitch/flow_table.h | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2007-2013 Nicira, Inc. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of version 2 of the GNU General Public | ||
| 6 | * License as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, but | ||
| 9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 11 | * General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 16 | * 02110-1301, USA | ||
| 17 | */ | ||
| 18 | |||
| 19 | #ifndef FLOW_TABLE_H | ||
| 20 | #define FLOW_TABLE_H 1 | ||
| 21 | |||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/netlink.h> | ||
| 24 | #include <linux/openvswitch.h> | ||
| 25 | #include <linux/spinlock.h> | ||
| 26 | #include <linux/types.h> | ||
| 27 | #include <linux/rcupdate.h> | ||
| 28 | #include <linux/if_ether.h> | ||
| 29 | #include <linux/in6.h> | ||
| 30 | #include <linux/jiffies.h> | ||
| 31 | #include <linux/time.h> | ||
| 32 | #include <linux/flex_array.h> | ||
| 33 | |||
| 34 | #include <net/inet_ecn.h> | ||
| 35 | #include <net/ip_tunnels.h> | ||
| 36 | |||
| 37 | #include "flow.h" | ||
| 38 | |||
/* One hash-table generation; replaced wholesale (via RCU) on rehash. */
struct table_instance {
	struct flex_array *buckets;	/* Array of hlist_head buckets. */
	unsigned int n_buckets;		/* Bucket count (power of two). */
	struct rcu_head rcu;		/* For deferred destruction. */
	int node_ver;			/* Which sw_flow hash_node[] slot this instance links. */
	u32 hash_seed;			/* Per-instance seed mixed into bucket selection. */
	bool keep_flows;		/* Set during rehash: destroy skips freeing flows. */
};
| 47 | |||
/* Per-datapath flow table: current instance plus the shared mask list. */
struct flow_table {
	struct table_instance __rcu *ti;	/* Current instance; RCU-swapped on rehash/flush. */
	struct list_head mask_list;		/* Reference-counted sw_flow_masks, tried in order on lookup. */
	unsigned long last_rehash;		/* jiffies of the last rehash/flush. */
	unsigned int count;			/* Number of flows in the table. */
};
| 54 | |||
| 55 | int ovs_flow_init(void); | ||
| 56 | void ovs_flow_exit(void); | ||
| 57 | |||
| 58 | struct sw_flow *ovs_flow_alloc(void); | ||
| 59 | void ovs_flow_free(struct sw_flow *, bool deferred); | ||
| 60 | |||
| 61 | int ovs_flow_tbl_init(struct flow_table *); | ||
| 62 | int ovs_flow_tbl_count(struct flow_table *table); | ||
| 63 | void ovs_flow_tbl_destroy(struct flow_table *table); | ||
| 64 | int ovs_flow_tbl_flush(struct flow_table *flow_table); | ||
| 65 | |||
| 66 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | ||
| 67 | struct sw_flow_mask *mask); | ||
| 68 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); | ||
| 69 | int ovs_flow_tbl_num_masks(const struct flow_table *table); | ||
| 70 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, | ||
| 71 | u32 *bucket, u32 *idx); | ||
| 72 | struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, | ||
| 73 | const struct sw_flow_key *, | ||
| 74 | u32 *n_mask_hit); | ||
| 75 | |||
| 76 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | ||
| 77 | struct sw_flow_match *match); | ||
| 78 | |||
| 79 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | ||
| 80 | const struct sw_flow_mask *mask); | ||
| 81 | #endif /* flow_table.h */ | ||
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index c99dea543d64..a3d6951602db 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c | |||
| @@ -24,8 +24,6 @@ | |||
| 24 | #include <linux/if_tunnel.h> | 24 | #include <linux/if_tunnel.h> |
| 25 | #include <linux/if_vlan.h> | 25 | #include <linux/if_vlan.h> |
| 26 | #include <linux/in.h> | 26 | #include <linux/in.h> |
| 27 | #include <linux/if_vlan.h> | ||
| 28 | #include <linux/in.h> | ||
| 29 | #include <linux/in_route.h> | 27 | #include <linux/in_route.h> |
| 30 | #include <linux/inetdevice.h> | 28 | #include <linux/inetdevice.h> |
| 31 | #include <linux/jhash.h> | 29 | #include <linux/jhash.h> |
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 98d3edbbc235..729c68763fe7 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c | |||
| @@ -134,7 +134,7 @@ static void do_setup(struct net_device *netdev) | |||
| 134 | netdev->tx_queue_len = 0; | 134 | netdev->tx_queue_len = 0; |
| 135 | 135 | ||
| 136 | netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | | 136 | netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST | |
| 137 | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_TSO; | 137 | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; |
| 138 | 138 | ||
| 139 | netdev->vlan_features = netdev->features; | 139 | netdev->vlan_features = netdev->features; |
| 140 | netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; | 140 | netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; |
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 56e22b74cf96..e797a50ac2be 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c | |||
| @@ -29,7 +29,6 @@ | |||
| 29 | #include <net/ip.h> | 29 | #include <net/ip.h> |
| 30 | #include <net/udp.h> | 30 | #include <net/udp.h> |
| 31 | #include <net/ip_tunnels.h> | 31 | #include <net/ip_tunnels.h> |
| 32 | #include <net/udp.h> | ||
| 33 | #include <net/rtnetlink.h> | 32 | #include <net/rtnetlink.h> |
| 34 | #include <net/route.h> | 33 | #include <net/route.h> |
| 35 | #include <net/dsfield.h> | 34 | #include <net/dsfield.h> |
