author     Joe Stringer <joestringer@nicira.com>    2015-01-21 19:42:52 -0500
committer  David S. Miller <davem@davemloft.net>    2015-01-26 18:45:50 -0500
commit     74ed7ab9264c54471c7f057409d352052820d750
tree       f90152a0560337fceaf55538a9e57e8c9aaa9182
parent     7b1883cefc288b2725966357edd2d8f321605622
openvswitch: Add support for unique flow IDs.
Previously, flows were manipulated by userspace specifying a full,
unmasked flow key. This placed a significant burden on flow
serialization/deserialization, particularly when dumping flows.
This patch adds an alternative way to refer to flows using a
variable-length "unique flow identifier" (UFID). At flow setup time,
userspace may specify a UFID for a flow, which is stored with the flow
and inserted into a separate table for lookup, in addition to the
standard flow table. Flows created using a UFID must be fetched or
deleted using the UFID.
All flow dump operations may now be made more terse with OVS_UFID_F_*
flags. For example, the OVS_UFID_F_OMIT_KEY flag allows responses to
omit the flow key from a datapath operation if the flow has a
corresponding UFID. This significantly reduces the time spent assembling
and transacting netlink messages. With all OVS_UFID_F_OMIT_* flags
enabled, the datapath only returns the UFID and statistics for each flow
during flow dump, increasing ovs-vswitchd revalidator performance by 40%
or more.
Signed-off-by: Joe Stringer <joestringer@nicira.com>
Acked-by: Pravin B Shelar <pshelar@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
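
A rough user space sketch of the workflow described above (not part of this
patch): assuming libnl-3 and a uapi header that already contains the
OVS_FLOW_ATTR_UFID definition added below, a flow installed with a UFID can
later be deleted by sending only that UFID, with no flow key serialization at
all. The function name example_flow_del_by_ufid is invented for illustration
and error handling is trimmed.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/openvswitch.h>

/* Illustrative only: delete a flow by its UFID alone (libnl-3). */
static int example_flow_del_by_ufid(int dp_ifindex,
                                    const void *ufid, int ufid_len)
{
        struct nl_sock *sk;
        struct nl_msg *msg;
        struct ovs_header *ovs;
        int family, err;

        sk = nl_socket_alloc();
        if (!sk)
                return -1;
        if (genl_connect(sk) < 0)
                goto err_sock;

        family = genl_ctrl_resolve(sk, OVS_FLOW_FAMILY);
        if (family < 0)
                goto err_sock;

        msg = nlmsg_alloc();
        if (!msg)
                goto err_sock;

        ovs = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
                          sizeof(*ovs), 0, OVS_FLOW_CMD_DEL,
                          OVS_FLOW_VERSION);
        ovs->dp_ifindex = dp_ifindex;

        /* The UFID is the only flow identifier this request carries. */
        nla_put(msg, OVS_FLOW_ATTR_UFID, ufid_len, ufid);

        err = nl_send_auto(sk, msg);
        nlmsg_free(msg);
        nl_socket_free(sk);
        return err < 0 ? -1 : 0;

err_sock:
        nl_socket_free(sk);
        return -1;
}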
-rw-r--r--  Documentation/networking/openvswitch.txt |  13
-rw-r--r--  include/uapi/linux/openvswitch.h          |  20
-rw-r--r--  net/openvswitch/datapath.c                | 207
-rw-r--r--  net/openvswitch/flow.h                    |  28
-rw-r--r--  net/openvswitch/flow_netlink.c            |  68
-rw-r--r--  net/openvswitch/flow_netlink.h            |   8
-rw-r--r--  net/openvswitch/flow_table.c              | 187
-rw-r--r--  net/openvswitch/flow_table.h              |   8
8 files changed, 448 insertions(+), 91 deletions(-)
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
index 37c20ee2455e..b3b9ac61d29d 100644
--- a/Documentation/networking/openvswitch.txt
+++ b/Documentation/networking/openvswitch.txt
@@ -131,6 +131,19 @@ performs best-effort detection of overlapping wildcarded flows and may reject | |||
131 | some but not all of them. However, this behavior may change in future versions. | 131 | some but not all of them. However, this behavior may change in future versions. |
132 | 132 | ||
133 | 133 | ||
134 | Unique flow identifiers | ||
135 | ----------------------- | ||
136 | |||
137 | An alternative to using the original match portion of a key as the handle for | ||
138 | flow identification is a unique flow identifier, or "UFID". UFIDs are optional | ||
139 | for both the kernel and user space program. | ||
140 | |||
141 | User space programs that support UFID are expected to provide it during flow | ||
142 | setup in addition to the flow, then refer to the flow using the UFID for all | ||
143 | future operations. The kernel is not required to index flows by the original | ||
144 | flow key if a UFID is specified. | ||
145 | |||
146 | |||
134 | Basic rule for evolving flow keys | 147 | Basic rule for evolving flow keys |
135 | --------------------------------- | 148 | --------------------------------- |
136 | 149 | ||
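
The documentation added above leaves the UFID format entirely up to user
space; the kernel only requires 1-16 opaque octets. A hypothetical sketch of
how a user space program might mint a 128-bit UFID (the helper name and the
use of /dev/urandom are illustrative, not mandated by the interface):

#include <stdint.h>
#include <stdio.h>

/* Fill a 16-octet UFID from a random source; any collision-resistant
 * scheme would do, since the kernel treats the bytes as opaque. */
int example_ufid_generate(uint32_t ufid[4])
{
        FILE *f = fopen("/dev/urandom", "r");

        if (!f)
                return -1;
        if (fread(ufid, sizeof(uint32_t), 4, f) != 4) {
                fclose(f);
                return -1;
        }
        fclose(f);
        return 0;
}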
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index cd8d933963c2..7a8785a99243 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -459,6 +459,14 @@ struct ovs_key_nd { | |||
459 | * a wildcarded match. Omitting attribute is treated as wildcarding all | 459 | * a wildcarded match. Omitting attribute is treated as wildcarding all |
460 | * corresponding fields. Optional for all requests. If not present, | 460 | * corresponding fields. Optional for all requests. If not present, |
461 | * all flow key bits are exact match bits. | 461 | * all flow key bits are exact match bits. |
462 | * @OVS_FLOW_ATTR_UFID: A value between 1-16 octets specifying a unique | ||
463 | * identifier for the flow. Causes the flow to be indexed by this value rather | ||
464 | * than the value of the %OVS_FLOW_ATTR_KEY attribute. Optional for all | ||
465 | * requests. Present in notifications if the flow was created with this | ||
466 | * attribute. | ||
467 | * @OVS_FLOW_ATTR_UFID_FLAGS: A 32-bit value of OR'd %OVS_UFID_F_* | ||
468 | * flags that provide alternative semantics for flow installation and | ||
469 | * retrieval. Optional for all requests. | ||
462 | * | 470 | * |
463 | * These attributes follow the &struct ovs_header within the Generic Netlink | 471 | * These attributes follow the &struct ovs_header within the Generic Netlink |
464 | * payload for %OVS_FLOW_* commands. | 472 | * payload for %OVS_FLOW_* commands. |
@@ -474,12 +482,24 @@ enum ovs_flow_attr { | |||
474 | OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ | 482 | OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */ |
475 | OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error | 483 | OVS_FLOW_ATTR_PROBE, /* Flow operation is a feature probe, error |
476 | * logging should be suppressed. */ | 484 | * logging should be suppressed. */ |
485 | OVS_FLOW_ATTR_UFID, /* Variable length unique flow identifier. */ | ||
486 | OVS_FLOW_ATTR_UFID_FLAGS,/* u32 of OVS_UFID_F_*. */ | ||
477 | __OVS_FLOW_ATTR_MAX | 487 | __OVS_FLOW_ATTR_MAX |
478 | }; | 488 | }; |
479 | 489 | ||
480 | #define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) | 490 | #define OVS_FLOW_ATTR_MAX (__OVS_FLOW_ATTR_MAX - 1) |
481 | 491 | ||
482 | /** | 492 | /** |
493 | * Omit attributes for notifications. | ||
494 | * | ||
495 | * If a datapath request contains an %OVS_UFID_F_OMIT_* flag, then the datapath | ||
496 | * may omit the corresponding %OVS_FLOW_ATTR_* from the response. | ||
497 | */ | ||
498 | #define OVS_UFID_F_OMIT_KEY (1 << 0) | ||
499 | #define OVS_UFID_F_OMIT_MASK (1 << 1) | ||
500 | #define OVS_UFID_F_OMIT_ACTIONS (1 << 2) | ||
501 | |||
502 | /** | ||
483 | * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. | 503 | * enum ovs_sample_attr - Attributes for %OVS_ACTION_ATTR_SAMPLE action. |
484 | * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with | 504 | * @OVS_SAMPLE_ATTR_PROBABILITY: 32-bit fraction of packets to sample with |
485 | * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of | 505 | * @OVS_ACTION_ATTR_SAMPLE. A value of 0 samples no packets, a value of |
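
A minimal sketch of how the new flags compose, assuming a uapi header that
already contains the OVS_UFID_F_* hunk above: with all three OMIT bits set in
OVS_FLOW_ATTR_UFID_FLAGS of a dump request, each dumped flow carries only its
UFID and statistics, which is what the revalidator speed-up in the commit
message relies on.

#include <stdio.h>
#include <stdint.h>
#include <linux/openvswitch.h>  /* assumes this patch is applied */

int main(void)
{
        /* Request the tersest possible flow dump replies. */
        uint32_t ufid_flags = OVS_UFID_F_OMIT_KEY |
                              OVS_UFID_F_OMIT_MASK |
                              OVS_UFID_F_OMIT_ACTIONS;

        printf("terse dump ufid_flags = %#x\n", (unsigned)ufid_flags);
        return 0;
}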
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 257b97546b33..ae5e77cdc0ca 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -65,6 +65,8 @@ static struct genl_family dp_packet_genl_family; | |||
65 | static struct genl_family dp_flow_genl_family; | 65 | static struct genl_family dp_flow_genl_family; |
66 | static struct genl_family dp_datapath_genl_family; | 66 | static struct genl_family dp_datapath_genl_family; |
67 | 67 | ||
68 | static const struct nla_policy flow_policy[]; | ||
69 | |||
68 | static const struct genl_multicast_group ovs_dp_flow_multicast_group = { | 70 | static const struct genl_multicast_group ovs_dp_flow_multicast_group = { |
69 | .name = OVS_FLOW_MCGROUP, | 71 | .name = OVS_FLOW_MCGROUP, |
70 | }; | 72 | }; |
@@ -662,15 +664,48 @@ static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, | |||
662 | } | 664 | } |
663 | } | 665 | } |
664 | 666 | ||
665 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) | 667 | static bool should_fill_key(const struct sw_flow_id *sfid, uint32_t ufid_flags) |
668 | { | ||
669 | return ovs_identifier_is_ufid(sfid) && | ||
670 | !(ufid_flags & OVS_UFID_F_OMIT_KEY); | ||
671 | } | ||
672 | |||
673 | static bool should_fill_mask(uint32_t ufid_flags) | ||
674 | { | ||
675 | return !(ufid_flags & OVS_UFID_F_OMIT_MASK); | ||
676 | } | ||
677 | |||
678 | static bool should_fill_actions(uint32_t ufid_flags) | ||
666 | { | 679 | { |
667 | return NLMSG_ALIGN(sizeof(struct ovs_header)) | 680 | return !(ufid_flags & OVS_UFID_F_OMIT_ACTIONS); |
668 | + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */ | 681 | } |
669 | + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */ | 682 | |
683 | static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts, | ||
684 | const struct sw_flow_id *sfid, | ||
685 | uint32_t ufid_flags) | ||
686 | { | ||
687 | size_t len = NLMSG_ALIGN(sizeof(struct ovs_header)); | ||
688 | |||
689 | /* OVS_FLOW_ATTR_UFID */ | ||
690 | if (sfid && ovs_identifier_is_ufid(sfid)) | ||
691 | len += nla_total_size(sfid->ufid_len); | ||
692 | |||
693 | /* OVS_FLOW_ATTR_KEY */ | ||
694 | if (!sfid || should_fill_key(sfid, ufid_flags)) | ||
695 | len += nla_total_size(ovs_key_attr_size()); | ||
696 | |||
697 | /* OVS_FLOW_ATTR_MASK */ | ||
698 | if (should_fill_mask(ufid_flags)) | ||
699 | len += nla_total_size(ovs_key_attr_size()); | ||
700 | |||
701 | /* OVS_FLOW_ATTR_ACTIONS */ | ||
702 | if (should_fill_actions(ufid_flags)) | ||
703 | len += nla_total_size(acts->actions_len); | ||
704 | |||
705 | return len | ||
670 | + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ | 706 | + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ |
671 | + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ | 707 | + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ |
672 | + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ | 708 | + nla_total_size(8); /* OVS_FLOW_ATTR_USED */ |
673 | + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */ | ||
674 | } | 709 | } |
675 | 710 | ||
676 | /* Called with ovs_mutex or RCU read lock. */ | 711 | /* Called with ovs_mutex or RCU read lock. */ |
@@ -741,7 +776,7 @@ static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow, | |||
741 | /* Called with ovs_mutex or RCU read lock. */ | 776 | /* Called with ovs_mutex or RCU read lock. */ |
742 | static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, | 777 | static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, |
743 | struct sk_buff *skb, u32 portid, | 778 | struct sk_buff *skb, u32 portid, |
744 | u32 seq, u32 flags, u8 cmd) | 779 | u32 seq, u32 flags, u8 cmd, u32 ufid_flags) |
745 | { | 780 | { |
746 | const int skb_orig_len = skb->len; | 781 | const int skb_orig_len = skb->len; |
747 | struct ovs_header *ovs_header; | 782 | struct ovs_header *ovs_header; |
@@ -754,21 +789,31 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, | |||
754 | 789 | ||
755 | ovs_header->dp_ifindex = dp_ifindex; | 790 | ovs_header->dp_ifindex = dp_ifindex; |
756 | 791 | ||
757 | err = ovs_nla_put_unmasked_key(flow, skb); | 792 | err = ovs_nla_put_identifier(flow, skb); |
758 | if (err) | 793 | if (err) |
759 | goto error; | 794 | goto error; |
760 | 795 | ||
761 | err = ovs_nla_put_mask(flow, skb); | 796 | if (should_fill_key(&flow->id, ufid_flags)) { |
762 | if (err) | 797 | err = ovs_nla_put_masked_key(flow, skb); |
763 | goto error; | 798 | if (err) |
799 | goto error; | ||
800 | } | ||
801 | |||
802 | if (should_fill_mask(ufid_flags)) { | ||
803 | err = ovs_nla_put_mask(flow, skb); | ||
804 | if (err) | ||
805 | goto error; | ||
806 | } | ||
764 | 807 | ||
765 | err = ovs_flow_cmd_fill_stats(flow, skb); | 808 | err = ovs_flow_cmd_fill_stats(flow, skb); |
766 | if (err) | 809 | if (err) |
767 | goto error; | 810 | goto error; |
768 | 811 | ||
769 | err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); | 812 | if (should_fill_actions(ufid_flags)) { |
770 | if (err) | 813 | err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); |
771 | goto error; | 814 | if (err) |
815 | goto error; | ||
816 | } | ||
772 | 817 | ||
773 | genlmsg_end(skb, ovs_header); | 818 | genlmsg_end(skb, ovs_header); |
774 | return 0; | 819 | return 0; |
@@ -780,15 +825,19 @@ error: | |||
780 | 825 | ||
781 | /* May not be called with RCU read lock. */ | 826 | /* May not be called with RCU read lock. */ |
782 | static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts, | 827 | static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts, |
828 | const struct sw_flow_id *sfid, | ||
783 | struct genl_info *info, | 829 | struct genl_info *info, |
784 | bool always) | 830 | bool always, |
831 | uint32_t ufid_flags) | ||
785 | { | 832 | { |
786 | struct sk_buff *skb; | 833 | struct sk_buff *skb; |
834 | size_t len; | ||
787 | 835 | ||
788 | if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0)) | 836 | if (!always && !ovs_must_notify(&dp_flow_genl_family, info, 0)) |
789 | return NULL; | 837 | return NULL; |
790 | 838 | ||
791 | skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL); | 839 | len = ovs_flow_cmd_msg_size(acts, sfid, ufid_flags); |
840 | skb = genlmsg_new_unicast(len, info, GFP_KERNEL); | ||
792 | if (!skb) | 841 | if (!skb) |
793 | return ERR_PTR(-ENOMEM); | 842 | return ERR_PTR(-ENOMEM); |
794 | 843 | ||
@@ -799,19 +848,19 @@ static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *act | |||
799 | static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, | 848 | static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow, |
800 | int dp_ifindex, | 849 | int dp_ifindex, |
801 | struct genl_info *info, u8 cmd, | 850 | struct genl_info *info, u8 cmd, |
802 | bool always) | 851 | bool always, u32 ufid_flags) |
803 | { | 852 | { |
804 | struct sk_buff *skb; | 853 | struct sk_buff *skb; |
805 | int retval; | 854 | int retval; |
806 | 855 | ||
807 | skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info, | 856 | skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), |
808 | always); | 857 | &flow->id, info, always, ufid_flags); |
809 | if (IS_ERR_OR_NULL(skb)) | 858 | if (IS_ERR_OR_NULL(skb)) |
810 | return skb; | 859 | return skb; |
811 | 860 | ||
812 | retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, | 861 | retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb, |
813 | info->snd_portid, info->snd_seq, 0, | 862 | info->snd_portid, info->snd_seq, 0, |
814 | cmd); | 863 | cmd, ufid_flags); |
815 | BUG_ON(retval < 0); | 864 | BUG_ON(retval < 0); |
816 | return skb; | 865 | return skb; |
817 | } | 866 | } |
@@ -820,12 +869,14 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
820 | { | 869 | { |
821 | struct nlattr **a = info->attrs; | 870 | struct nlattr **a = info->attrs; |
822 | struct ovs_header *ovs_header = info->userhdr; | 871 | struct ovs_header *ovs_header = info->userhdr; |
823 | struct sw_flow *flow, *new_flow; | 872 | struct sw_flow *flow = NULL, *new_flow; |
824 | struct sw_flow_mask mask; | 873 | struct sw_flow_mask mask; |
825 | struct sk_buff *reply; | 874 | struct sk_buff *reply; |
826 | struct datapath *dp; | 875 | struct datapath *dp; |
876 | struct sw_flow_key key; | ||
827 | struct sw_flow_actions *acts; | 877 | struct sw_flow_actions *acts; |
828 | struct sw_flow_match match; | 878 | struct sw_flow_match match; |
879 | u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); | ||
829 | int error; | 880 | int error; |
830 | bool log = !a[OVS_FLOW_ATTR_PROBE]; | 881 | bool log = !a[OVS_FLOW_ATTR_PROBE]; |
831 | 882 | ||
@@ -850,13 +901,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
850 | } | 901 | } |
851 | 902 | ||
852 | /* Extract key. */ | 903 | /* Extract key. */ |
853 | ovs_match_init(&match, &new_flow->unmasked_key, &mask); | 904 | ovs_match_init(&match, &key, &mask); |
854 | error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], | 905 | error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], |
855 | a[OVS_FLOW_ATTR_MASK], log); | 906 | a[OVS_FLOW_ATTR_MASK], log); |
856 | if (error) | 907 | if (error) |
857 | goto err_kfree_flow; | 908 | goto err_kfree_flow; |
858 | 909 | ||
859 | ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); | 910 | ovs_flow_mask_key(&new_flow->key, &key, &mask); |
911 | |||
912 | /* Extract flow identifier. */ | ||
913 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], | ||
914 | &key, log); | ||
915 | if (error) | ||
916 | goto err_kfree_flow; | ||
860 | 917 | ||
861 | /* Validate actions. */ | 918 | /* Validate actions. */ |
862 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, | 919 | error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, |
@@ -866,7 +923,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
866 | goto err_kfree_flow; | 923 | goto err_kfree_flow; |
867 | } | 924 | } |
868 | 925 | ||
869 | reply = ovs_flow_cmd_alloc_info(acts, info, false); | 926 | reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false, |
927 | ufid_flags); | ||
870 | if (IS_ERR(reply)) { | 928 | if (IS_ERR(reply)) { |
871 | error = PTR_ERR(reply); | 929 | error = PTR_ERR(reply); |
872 | goto err_kfree_acts; | 930 | goto err_kfree_acts; |
@@ -878,8 +936,12 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
878 | error = -ENODEV; | 936 | error = -ENODEV; |
879 | goto err_unlock_ovs; | 937 | goto err_unlock_ovs; |
880 | } | 938 | } |
939 | |||
881 | /* Check if this is a duplicate flow */ | 940 | /* Check if this is a duplicate flow */ |
882 | flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key); | 941 | if (ovs_identifier_is_ufid(&new_flow->id)) |
942 | flow = ovs_flow_tbl_lookup_ufid(&dp->table, &new_flow->id); | ||
943 | if (!flow) | ||
944 | flow = ovs_flow_tbl_lookup(&dp->table, &key); | ||
883 | if (likely(!flow)) { | 945 | if (likely(!flow)) { |
884 | rcu_assign_pointer(new_flow->sf_acts, acts); | 946 | rcu_assign_pointer(new_flow->sf_acts, acts); |
885 | 947 | ||
@@ -895,7 +957,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
895 | ovs_header->dp_ifindex, | 957 | ovs_header->dp_ifindex, |
896 | reply, info->snd_portid, | 958 | reply, info->snd_portid, |
897 | info->snd_seq, 0, | 959 | info->snd_seq, 0, |
898 | OVS_FLOW_CMD_NEW); | 960 | OVS_FLOW_CMD_NEW, |
961 | ufid_flags); | ||
899 | BUG_ON(error < 0); | 962 | BUG_ON(error < 0); |
900 | } | 963 | } |
901 | ovs_unlock(); | 964 | ovs_unlock(); |
@@ -913,10 +976,15 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
913 | error = -EEXIST; | 976 | error = -EEXIST; |
914 | goto err_unlock_ovs; | 977 | goto err_unlock_ovs; |
915 | } | 978 | } |
916 | /* The unmasked key has to be the same for flow updates. */ | 979 | /* The flow identifier has to be the same for flow updates. |
917 | if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { | 980 | * Look for any overlapping flow. |
918 | /* Look for any overlapping flow. */ | 981 | */ |
919 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | 982 | if (unlikely(!ovs_flow_cmp(flow, &match))) { |
983 | if (ovs_identifier_is_key(&flow->id)) | ||
984 | flow = ovs_flow_tbl_lookup_exact(&dp->table, | ||
985 | &match); | ||
986 | else /* UFID matches but key is different */ | ||
987 | flow = NULL; | ||
920 | if (!flow) { | 988 | if (!flow) { |
921 | error = -ENOENT; | 989 | error = -ENOENT; |
922 | goto err_unlock_ovs; | 990 | goto err_unlock_ovs; |
@@ -931,7 +999,8 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) | |||
931 | ovs_header->dp_ifindex, | 999 | ovs_header->dp_ifindex, |
932 | reply, info->snd_portid, | 1000 | reply, info->snd_portid, |
933 | info->snd_seq, 0, | 1001 | info->snd_seq, 0, |
934 | OVS_FLOW_CMD_NEW); | 1002 | OVS_FLOW_CMD_NEW, |
1003 | ufid_flags); | ||
935 | BUG_ON(error < 0); | 1004 | BUG_ON(error < 0); |
936 | } | 1005 | } |
937 | ovs_unlock(); | 1006 | ovs_unlock(); |
@@ -987,8 +1056,11 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
987 | struct datapath *dp; | 1056 | struct datapath *dp; |
988 | struct sw_flow_actions *old_acts = NULL, *acts = NULL; | 1057 | struct sw_flow_actions *old_acts = NULL, *acts = NULL; |
989 | struct sw_flow_match match; | 1058 | struct sw_flow_match match; |
1059 | struct sw_flow_id sfid; | ||
1060 | u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); | ||
990 | int error; | 1061 | int error; |
991 | bool log = !a[OVS_FLOW_ATTR_PROBE]; | 1062 | bool log = !a[OVS_FLOW_ATTR_PROBE]; |
1063 | bool ufid_present; | ||
992 | 1064 | ||
993 | /* Extract key. */ | 1065 | /* Extract key. */ |
994 | error = -EINVAL; | 1066 | error = -EINVAL; |
@@ -997,6 +1069,7 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
997 | goto error; | 1069 | goto error; |
998 | } | 1070 | } |
999 | 1071 | ||
1072 | ufid_present = ovs_nla_get_ufid(&sfid, a[OVS_FLOW_ATTR_UFID], log); | ||
1000 | ovs_match_init(&match, &key, &mask); | 1073 | ovs_match_init(&match, &key, &mask); |
1001 | error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], | 1074 | error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], |
1002 | a[OVS_FLOW_ATTR_MASK], log); | 1075 | a[OVS_FLOW_ATTR_MASK], log); |
@@ -1013,7 +1086,8 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
1013 | } | 1086 | } |
1014 | 1087 | ||
1015 | /* Can allocate before locking if have acts. */ | 1088 | /* Can allocate before locking if have acts. */ |
1016 | reply = ovs_flow_cmd_alloc_info(acts, info, false); | 1089 | reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false, |
1090 | ufid_flags); | ||
1017 | if (IS_ERR(reply)) { | 1091 | if (IS_ERR(reply)) { |
1018 | error = PTR_ERR(reply); | 1092 | error = PTR_ERR(reply); |
1019 | goto err_kfree_acts; | 1093 | goto err_kfree_acts; |
@@ -1027,7 +1101,10 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
1027 | goto err_unlock_ovs; | 1101 | goto err_unlock_ovs; |
1028 | } | 1102 | } |
1029 | /* Check that the flow exists. */ | 1103 | /* Check that the flow exists. */ |
1030 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | 1104 | if (ufid_present) |
1105 | flow = ovs_flow_tbl_lookup_ufid(&dp->table, &sfid); | ||
1106 | else | ||
1107 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | ||
1031 | if (unlikely(!flow)) { | 1108 | if (unlikely(!flow)) { |
1032 | error = -ENOENT; | 1109 | error = -ENOENT; |
1033 | goto err_unlock_ovs; | 1110 | goto err_unlock_ovs; |
@@ -1043,13 +1120,16 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
1043 | ovs_header->dp_ifindex, | 1120 | ovs_header->dp_ifindex, |
1044 | reply, info->snd_portid, | 1121 | reply, info->snd_portid, |
1045 | info->snd_seq, 0, | 1122 | info->snd_seq, 0, |
1046 | OVS_FLOW_CMD_NEW); | 1123 | OVS_FLOW_CMD_NEW, |
1124 | ufid_flags); | ||
1047 | BUG_ON(error < 0); | 1125 | BUG_ON(error < 0); |
1048 | } | 1126 | } |
1049 | } else { | 1127 | } else { |
1050 | /* Could not alloc without acts before locking. */ | 1128 | /* Could not alloc without acts before locking. */ |
1051 | reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, | 1129 | reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, |
1052 | info, OVS_FLOW_CMD_NEW, false); | 1130 | info, OVS_FLOW_CMD_NEW, false, |
1131 | ufid_flags); | ||
1132 | |||
1053 | if (unlikely(IS_ERR(reply))) { | 1133 | if (unlikely(IS_ERR(reply))) { |
1054 | error = PTR_ERR(reply); | 1134 | error = PTR_ERR(reply); |
1055 | goto err_unlock_ovs; | 1135 | goto err_unlock_ovs; |
@@ -1086,17 +1166,22 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1086 | struct sw_flow *flow; | 1166 | struct sw_flow *flow; |
1087 | struct datapath *dp; | 1167 | struct datapath *dp; |
1088 | struct sw_flow_match match; | 1168 | struct sw_flow_match match; |
1089 | int err; | 1169 | struct sw_flow_id ufid; |
1170 | u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); | ||
1171 | int err = 0; | ||
1090 | bool log = !a[OVS_FLOW_ATTR_PROBE]; | 1172 | bool log = !a[OVS_FLOW_ATTR_PROBE]; |
1173 | bool ufid_present; | ||
1091 | 1174 | ||
1092 | if (!a[OVS_FLOW_ATTR_KEY]) { | 1175 | ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log); |
1176 | if (a[OVS_FLOW_ATTR_KEY]) { | ||
1177 | ovs_match_init(&match, &key, NULL); | ||
1178 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, | ||
1179 | log); | ||
1180 | } else if (!ufid_present) { | ||
1093 | OVS_NLERR(log, | 1181 | OVS_NLERR(log, |
1094 | "Flow get message rejected, Key attribute missing."); | 1182 | "Flow get message rejected, Key attribute missing."); |
1095 | return -EINVAL; | 1183 | err = -EINVAL; |
1096 | } | 1184 | } |
1097 | |||
1098 | ovs_match_init(&match, &key, NULL); | ||
1099 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log); | ||
1100 | if (err) | 1185 | if (err) |
1101 | return err; | 1186 | return err; |
1102 | 1187 | ||
@@ -1107,14 +1192,17 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) | |||
1107 | goto unlock; | 1192 | goto unlock; |
1108 | } | 1193 | } |
1109 | 1194 | ||
1110 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | 1195 | if (ufid_present) |
1196 | flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); | ||
1197 | else | ||
1198 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | ||
1111 | if (!flow) { | 1199 | if (!flow) { |
1112 | err = -ENOENT; | 1200 | err = -ENOENT; |
1113 | goto unlock; | 1201 | goto unlock; |
1114 | } | 1202 | } |
1115 | 1203 | ||
1116 | reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, | 1204 | reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, |
1117 | OVS_FLOW_CMD_NEW, true); | 1205 | OVS_FLOW_CMD_NEW, true, ufid_flags); |
1118 | if (IS_ERR(reply)) { | 1206 | if (IS_ERR(reply)) { |
1119 | err = PTR_ERR(reply); | 1207 | err = PTR_ERR(reply); |
1120 | goto unlock; | 1208 | goto unlock; |
@@ -1133,13 +1221,17 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1133 | struct ovs_header *ovs_header = info->userhdr; | 1221 | struct ovs_header *ovs_header = info->userhdr; |
1134 | struct sw_flow_key key; | 1222 | struct sw_flow_key key; |
1135 | struct sk_buff *reply; | 1223 | struct sk_buff *reply; |
1136 | struct sw_flow *flow; | 1224 | struct sw_flow *flow = NULL; |
1137 | struct datapath *dp; | 1225 | struct datapath *dp; |
1138 | struct sw_flow_match match; | 1226 | struct sw_flow_match match; |
1227 | struct sw_flow_id ufid; | ||
1228 | u32 ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); | ||
1139 | int err; | 1229 | int err; |
1140 | bool log = !a[OVS_FLOW_ATTR_PROBE]; | 1230 | bool log = !a[OVS_FLOW_ATTR_PROBE]; |
1231 | bool ufid_present; | ||
1141 | 1232 | ||
1142 | if (likely(a[OVS_FLOW_ATTR_KEY])) { | 1233 | ufid_present = ovs_nla_get_ufid(&ufid, a[OVS_FLOW_ATTR_UFID], log); |
1234 | if (a[OVS_FLOW_ATTR_KEY]) { | ||
1143 | ovs_match_init(&match, &key, NULL); | 1235 | ovs_match_init(&match, &key, NULL); |
1144 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, | 1236 | err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, |
1145 | log); | 1237 | log); |
@@ -1154,12 +1246,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1154 | goto unlock; | 1246 | goto unlock; |
1155 | } | 1247 | } |
1156 | 1248 | ||
1157 | if (unlikely(!a[OVS_FLOW_ATTR_KEY])) { | 1249 | if (unlikely(!a[OVS_FLOW_ATTR_KEY] && !ufid_present)) { |
1158 | err = ovs_flow_tbl_flush(&dp->table); | 1250 | err = ovs_flow_tbl_flush(&dp->table); |
1159 | goto unlock; | 1251 | goto unlock; |
1160 | } | 1252 | } |
1161 | 1253 | ||
1162 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | 1254 | if (ufid_present) |
1255 | flow = ovs_flow_tbl_lookup_ufid(&dp->table, &ufid); | ||
1256 | else | ||
1257 | flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); | ||
1163 | if (unlikely(!flow)) { | 1258 | if (unlikely(!flow)) { |
1164 | err = -ENOENT; | 1259 | err = -ENOENT; |
1165 | goto unlock; | 1260 | goto unlock; |
@@ -1169,14 +1264,15 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) | |||
1169 | ovs_unlock(); | 1264 | ovs_unlock(); |
1170 | 1265 | ||
1171 | reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts, | 1266 | reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts, |
1172 | info, false); | 1267 | &flow->id, info, false, ufid_flags); |
1173 | if (likely(reply)) { | 1268 | if (likely(reply)) { |
1174 | if (likely(!IS_ERR(reply))) { | 1269 | if (likely(!IS_ERR(reply))) { |
1175 | rcu_read_lock(); /*To keep RCU checker happy. */ | 1270 | rcu_read_lock(); /*To keep RCU checker happy. */ |
1176 | err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, | 1271 | err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, |
1177 | reply, info->snd_portid, | 1272 | reply, info->snd_portid, |
1178 | info->snd_seq, 0, | 1273 | info->snd_seq, 0, |
1179 | OVS_FLOW_CMD_DEL); | 1274 | OVS_FLOW_CMD_DEL, |
1275 | ufid_flags); | ||
1180 | rcu_read_unlock(); | 1276 | rcu_read_unlock(); |
1181 | BUG_ON(err < 0); | 1277 | BUG_ON(err < 0); |
1182 | 1278 | ||
@@ -1195,9 +1291,18 @@ unlock: | |||
1195 | 1291 | ||
1196 | static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | 1292 | static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) |
1197 | { | 1293 | { |
1294 | struct nlattr *a[__OVS_FLOW_ATTR_MAX]; | ||
1198 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); | 1295 | struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh)); |
1199 | struct table_instance *ti; | 1296 | struct table_instance *ti; |
1200 | struct datapath *dp; | 1297 | struct datapath *dp; |
1298 | u32 ufid_flags; | ||
1299 | int err; | ||
1300 | |||
1301 | err = genlmsg_parse(cb->nlh, &dp_flow_genl_family, a, | ||
1302 | OVS_FLOW_ATTR_MAX, flow_policy); | ||
1303 | if (err) | ||
1304 | return err; | ||
1305 | ufid_flags = ovs_nla_get_ufid_flags(a[OVS_FLOW_ATTR_UFID_FLAGS]); | ||
1201 | 1306 | ||
1202 | rcu_read_lock(); | 1307 | rcu_read_lock(); |
1203 | dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); | 1308 | dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); |
@@ -1220,7 +1325,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
1220 | if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb, | 1325 | if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb, |
1221 | NETLINK_CB(cb->skb).portid, | 1326 | NETLINK_CB(cb->skb).portid, |
1222 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 1327 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
1223 | OVS_FLOW_CMD_NEW) < 0) | 1328 | OVS_FLOW_CMD_NEW, ufid_flags) < 0) |
1224 | break; | 1329 | break; |
1225 | 1330 | ||
1226 | cb->args[0] = bucket; | 1331 | cb->args[0] = bucket; |
@@ -1236,6 +1341,8 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { | |||
1236 | [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, | 1341 | [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, |
1237 | [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, | 1342 | [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, |
1238 | [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG }, | 1343 | [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG }, |
1344 | [OVS_FLOW_ATTR_UFID] = { .type = NLA_UNSPEC, .len = 1 }, | ||
1345 | [OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 }, | ||
1239 | }; | 1346 | }; |
1240 | 1347 | ||
1241 | static const struct genl_ops dp_flow_genl_ops[] = { | 1348 | static const struct genl_ops dp_flow_genl_ops[] = { |
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index d3d0a406562d..a076e445ccc2 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -197,6 +197,16 @@ struct sw_flow_match { | |||
197 | struct sw_flow_mask *mask; | 197 | struct sw_flow_mask *mask; |
198 | }; | 198 | }; |
199 | 199 | ||
200 | #define MAX_UFID_LENGTH 16 /* 128 bits */ | ||
201 | |||
202 | struct sw_flow_id { | ||
203 | u32 ufid_len; | ||
204 | union { | ||
205 | u32 ufid[MAX_UFID_LENGTH / 4]; | ||
206 | struct sw_flow_key *unmasked_key; | ||
207 | }; | ||
208 | }; | ||
209 | |||
200 | struct sw_flow_actions { | 210 | struct sw_flow_actions { |
201 | struct rcu_head rcu; | 211 | struct rcu_head rcu; |
202 | u32 actions_len; | 212 | u32 actions_len; |
@@ -213,13 +223,15 @@ struct flow_stats { | |||
213 | 223 | ||
214 | struct sw_flow { | 224 | struct sw_flow { |
215 | struct rcu_head rcu; | 225 | struct rcu_head rcu; |
216 | struct hlist_node hash_node[2]; | 226 | struct { |
217 | u32 hash; | 227 | struct hlist_node node[2]; |
228 | u32 hash; | ||
229 | } flow_table, ufid_table; | ||
218 | int stats_last_writer; /* NUMA-node id of the last writer on | 230 | int stats_last_writer; /* NUMA-node id of the last writer on |
219 | * 'stats[0]'. | 231 | * 'stats[0]'. |
220 | */ | 232 | */ |
221 | struct sw_flow_key key; | 233 | struct sw_flow_key key; |
222 | struct sw_flow_key unmasked_key; | 234 | struct sw_flow_id id; |
223 | struct sw_flow_mask *mask; | 235 | struct sw_flow_mask *mask; |
224 | struct sw_flow_actions __rcu *sf_acts; | 236 | struct sw_flow_actions __rcu *sf_acts; |
225 | struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one | 237 | struct flow_stats __rcu *stats[]; /* One for each NUMA node. First one |
@@ -243,6 +255,16 @@ struct arp_eth_header { | |||
243 | unsigned char ar_tip[4]; /* target IP address */ | 255 | unsigned char ar_tip[4]; /* target IP address */ |
244 | } __packed; | 256 | } __packed; |
245 | 257 | ||
258 | static inline bool ovs_identifier_is_ufid(const struct sw_flow_id *sfid) | ||
259 | { | ||
260 | return sfid->ufid_len; | ||
261 | } | ||
262 | |||
263 | static inline bool ovs_identifier_is_key(const struct sw_flow_id *sfid) | ||
264 | { | ||
265 | return !ovs_identifier_is_ufid(sfid); | ||
266 | } | ||
267 | |||
246 | void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags, | 268 | void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags, |
247 | const struct sk_buff *); | 269 | const struct sk_buff *); |
248 | void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, | 270 | void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, |
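
The struct sw_flow_id added above is a discriminated union with ufid_len as
the tag: a zero length means the identifier is a pointer to a separately
allocated unmasked key, a non-zero length means 1-16 octets of UFID are
stored inline. A stand-alone illustration of the same pattern (names are
invented so it compiles on its own, mirroring but not reusing the patch):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define MAX_UFID_LENGTH 16      /* 128 bits, as in the patch */

struct example_flow_key { uint32_t dummy; };

struct example_flow_id {
        uint32_t ufid_len;
        union {
                uint32_t ufid[MAX_UFID_LENGTH / 4];
                struct example_flow_key *unmasked_key;
        };
};

bool example_id_is_ufid(const struct example_flow_id *id)
{
        return id->ufid_len != 0;       /* non-zero tag: inline UFID */
}

void example_id_set_ufid(struct example_flow_id *id,
                         const void *data, uint32_t len)
{
        id->ufid_len = len;             /* mark the inline UFID as active */
        memcpy(id->ufid, data, len);
}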
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 33751f81bfcb..8b9a612b39d1 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1180,6 +1180,59 @@ free_newmask: | |||
1180 | return err; | 1180 | return err; |
1181 | } | 1181 | } |
1182 | 1182 | ||
1183 | static size_t get_ufid_len(const struct nlattr *attr, bool log) | ||
1184 | { | ||
1185 | size_t len; | ||
1186 | |||
1187 | if (!attr) | ||
1188 | return 0; | ||
1189 | |||
1190 | len = nla_len(attr); | ||
1191 | if (len < 1 || len > MAX_UFID_LENGTH) { | ||
1192 | OVS_NLERR(log, "ufid size %u bytes exceeds the range (1, %d)", | ||
1193 | nla_len(attr), MAX_UFID_LENGTH); | ||
1194 | return 0; | ||
1195 | } | ||
1196 | |||
1197 | return len; | ||
1198 | } | ||
1199 | |||
1200 | /* Initializes 'flow->ufid', returning true if 'attr' contains a valid UFID, | ||
1201 | * or false otherwise. | ||
1202 | */ | ||
1203 | bool ovs_nla_get_ufid(struct sw_flow_id *sfid, const struct nlattr *attr, | ||
1204 | bool log) | ||
1205 | { | ||
1206 | sfid->ufid_len = get_ufid_len(attr, log); | ||
1207 | if (sfid->ufid_len) | ||
1208 | memcpy(sfid->ufid, nla_data(attr), sfid->ufid_len); | ||
1209 | |||
1210 | return sfid->ufid_len; | ||
1211 | } | ||
1212 | |||
1213 | int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid, | ||
1214 | const struct sw_flow_key *key, bool log) | ||
1215 | { | ||
1216 | struct sw_flow_key *new_key; | ||
1217 | |||
1218 | if (ovs_nla_get_ufid(sfid, ufid, log)) | ||
1219 | return 0; | ||
1220 | |||
1221 | /* If UFID was not provided, use unmasked key. */ | ||
1222 | new_key = kmalloc(sizeof(*new_key), GFP_KERNEL); | ||
1223 | if (!new_key) | ||
1224 | return -ENOMEM; | ||
1225 | memcpy(new_key, key, sizeof(*key)); | ||
1226 | sfid->unmasked_key = new_key; | ||
1227 | |||
1228 | return 0; | ||
1229 | } | ||
1230 | |||
1231 | u32 ovs_nla_get_ufid_flags(const struct nlattr *attr) | ||
1232 | { | ||
1233 | return attr ? nla_get_u32(attr) : 0; | ||
1234 | } | ||
1235 | |||
1183 | /** | 1236 | /** |
1184 | * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. | 1237 | * ovs_nla_get_flow_metadata - parses Netlink attributes into a flow key. |
1185 | * @key: Receives extracted in_port, priority, tun_key and skb_mark. | 1238 | * @key: Receives extracted in_port, priority, tun_key and skb_mark. |
@@ -1450,9 +1503,20 @@ int ovs_nla_put_key(const struct sw_flow_key *swkey, | |||
1450 | } | 1503 | } |
1451 | 1504 | ||
1452 | /* Called with ovs_mutex or RCU read lock. */ | 1505 | /* Called with ovs_mutex or RCU read lock. */ |
1453 | int ovs_nla_put_unmasked_key(const struct sw_flow *flow, struct sk_buff *skb) | 1506 | int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb) |
1507 | { | ||
1508 | if (ovs_identifier_is_ufid(&flow->id)) | ||
1509 | return nla_put(skb, OVS_FLOW_ATTR_UFID, flow->id.ufid_len, | ||
1510 | flow->id.ufid); | ||
1511 | |||
1512 | return ovs_nla_put_key(flow->id.unmasked_key, flow->id.unmasked_key, | ||
1513 | OVS_FLOW_ATTR_KEY, false, skb); | ||
1514 | } | ||
1515 | |||
1516 | /* Called with ovs_mutex or RCU read lock. */ | ||
1517 | int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) | ||
1454 | { | 1518 | { |
1455 | return ovs_nla_put_key(&flow->unmasked_key, &flow->unmasked_key, | 1519 | return ovs_nla_put_key(&flow->mask->key, &flow->key, |
1456 | OVS_FLOW_ATTR_KEY, false, skb); | 1520 | OVS_FLOW_ATTR_KEY, false, skb); |
1457 | } | 1521 | } |
1458 | 1522 | ||
diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h
index 9ed09e66876a..5c3d75bff310 100644
--- a/net/openvswitch/flow_netlink.h
+++ b/net/openvswitch/flow_netlink.h
@@ -48,7 +48,8 @@ int ovs_nla_put_key(const struct sw_flow_key *, const struct sw_flow_key *, | |||
48 | int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *, | 48 | int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *, |
49 | bool log); | 49 | bool log); |
50 | 50 | ||
51 | int ovs_nla_put_unmasked_key(const struct sw_flow *flow, struct sk_buff *skb); | 51 | int ovs_nla_put_identifier(const struct sw_flow *flow, struct sk_buff *skb); |
52 | int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb); | ||
52 | int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb); | 53 | int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb); |
53 | 54 | ||
54 | int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key, | 55 | int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key, |
@@ -56,6 +57,11 @@ int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key, | |||
56 | int ovs_nla_put_egress_tunnel_key(struct sk_buff *, | 57 | int ovs_nla_put_egress_tunnel_key(struct sk_buff *, |
57 | const struct ovs_tunnel_info *); | 58 | const struct ovs_tunnel_info *); |
58 | 59 | ||
60 | bool ovs_nla_get_ufid(struct sw_flow_id *, const struct nlattr *, bool log); | ||
61 | int ovs_nla_get_identifier(struct sw_flow_id *sfid, const struct nlattr *ufid, | ||
62 | const struct sw_flow_key *key, bool log); | ||
63 | u32 ovs_nla_get_ufid_flags(const struct nlattr *attr); | ||
64 | |||
59 | int ovs_nla_copy_actions(const struct nlattr *attr, | 65 | int ovs_nla_copy_actions(const struct nlattr *attr, |
60 | const struct sw_flow_key *key, | 66 | const struct sw_flow_key *key, |
61 | struct sw_flow_actions **sfa, bool log); | 67 | struct sw_flow_actions **sfa, bool log); |
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 9a3f41f26da8..5e57628e6584 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -139,6 +139,8 @@ static void flow_free(struct sw_flow *flow) | |||
139 | { | 139 | { |
140 | int node; | 140 | int node; |
141 | 141 | ||
142 | if (ovs_identifier_is_key(&flow->id)) | ||
143 | kfree(flow->id.unmasked_key); | ||
142 | kfree((struct sw_flow_actions __force *)flow->sf_acts); | 144 | kfree((struct sw_flow_actions __force *)flow->sf_acts); |
143 | for_each_node(node) | 145 | for_each_node(node) |
144 | if (flow->stats[node]) | 146 | if (flow->stats[node]) |
@@ -200,18 +202,28 @@ static struct table_instance *table_instance_alloc(int new_size) | |||
200 | 202 | ||
201 | int ovs_flow_tbl_init(struct flow_table *table) | 203 | int ovs_flow_tbl_init(struct flow_table *table) |
202 | { | 204 | { |
203 | struct table_instance *ti; | 205 | struct table_instance *ti, *ufid_ti; |
204 | 206 | ||
205 | ti = table_instance_alloc(TBL_MIN_BUCKETS); | 207 | ti = table_instance_alloc(TBL_MIN_BUCKETS); |
206 | 208 | ||
207 | if (!ti) | 209 | if (!ti) |
208 | return -ENOMEM; | 210 | return -ENOMEM; |
209 | 211 | ||
212 | ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS); | ||
213 | if (!ufid_ti) | ||
214 | goto free_ti; | ||
215 | |||
210 | rcu_assign_pointer(table->ti, ti); | 216 | rcu_assign_pointer(table->ti, ti); |
217 | rcu_assign_pointer(table->ufid_ti, ufid_ti); | ||
211 | INIT_LIST_HEAD(&table->mask_list); | 218 | INIT_LIST_HEAD(&table->mask_list); |
212 | table->last_rehash = jiffies; | 219 | table->last_rehash = jiffies; |
213 | table->count = 0; | 220 | table->count = 0; |
221 | table->ufid_count = 0; | ||
214 | return 0; | 222 | return 0; |
223 | |||
224 | free_ti: | ||
225 | __table_instance_destroy(ti); | ||
226 | return -ENOMEM; | ||
215 | } | 227 | } |
216 | 228 | ||
217 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | 229 | static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) |
@@ -221,13 +233,16 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu) | |||
221 | __table_instance_destroy(ti); | 233 | __table_instance_destroy(ti); |
222 | } | 234 | } |
223 | 235 | ||
224 | static void table_instance_destroy(struct table_instance *ti, bool deferred) | 236 | static void table_instance_destroy(struct table_instance *ti, |
237 | struct table_instance *ufid_ti, | ||
238 | bool deferred) | ||
225 | { | 239 | { |
226 | int i; | 240 | int i; |
227 | 241 | ||
228 | if (!ti) | 242 | if (!ti) |
229 | return; | 243 | return; |
230 | 244 | ||
245 | BUG_ON(!ufid_ti); | ||
231 | if (ti->keep_flows) | 246 | if (ti->keep_flows) |
232 | goto skip_flows; | 247 | goto skip_flows; |
233 | 248 | ||
@@ -236,18 +251,24 @@ static void table_instance_destroy(struct table_instance *ti, bool deferred) | |||
236 | struct hlist_head *head = flex_array_get(ti->buckets, i); | 251 | struct hlist_head *head = flex_array_get(ti->buckets, i); |
237 | struct hlist_node *n; | 252 | struct hlist_node *n; |
238 | int ver = ti->node_ver; | 253 | int ver = ti->node_ver; |
254 | int ufid_ver = ufid_ti->node_ver; | ||
239 | 255 | ||
240 | hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) { | 256 | hlist_for_each_entry_safe(flow, n, head, flow_table.node[ver]) { |
241 | hlist_del_rcu(&flow->hash_node[ver]); | 257 | hlist_del_rcu(&flow->flow_table.node[ver]); |
258 | if (ovs_identifier_is_ufid(&flow->id)) | ||
259 | hlist_del_rcu(&flow->ufid_table.node[ufid_ver]); | ||
242 | ovs_flow_free(flow, deferred); | 260 | ovs_flow_free(flow, deferred); |
243 | } | 261 | } |
244 | } | 262 | } |
245 | 263 | ||
246 | skip_flows: | 264 | skip_flows: |
247 | if (deferred) | 265 | if (deferred) { |
248 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | 266 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); |
249 | else | 267 | call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb); |
268 | } else { | ||
250 | __table_instance_destroy(ti); | 269 | __table_instance_destroy(ti); |
270 | __table_instance_destroy(ufid_ti); | ||
271 | } | ||
251 | } | 272 | } |
252 | 273 | ||
253 | /* No need for locking this function is called from RCU callback or | 274 | /* No need for locking this function is called from RCU callback or |
@@ -256,8 +277,9 @@ skip_flows: | |||
256 | void ovs_flow_tbl_destroy(struct flow_table *table) | 277 | void ovs_flow_tbl_destroy(struct flow_table *table) |
257 | { | 278 | { |
258 | struct table_instance *ti = rcu_dereference_raw(table->ti); | 279 | struct table_instance *ti = rcu_dereference_raw(table->ti); |
280 | struct table_instance *ufid_ti = rcu_dereference_raw(table->ufid_ti); | ||
259 | 281 | ||
260 | table_instance_destroy(ti, false); | 282 | table_instance_destroy(ti, ufid_ti, false); |
261 | } | 283 | } |
262 | 284 | ||
263 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | 285 | struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, |
@@ -272,7 +294,7 @@ struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, | |||
272 | while (*bucket < ti->n_buckets) { | 294 | while (*bucket < ti->n_buckets) { |
273 | i = 0; | 295 | i = 0; |
274 | head = flex_array_get(ti->buckets, *bucket); | 296 | head = flex_array_get(ti->buckets, *bucket); |
275 | hlist_for_each_entry_rcu(flow, head, hash_node[ver]) { | 297 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ver]) { |
276 | if (i < *last) { | 298 | if (i < *last) { |
277 | i++; | 299 | i++; |
278 | continue; | 300 | continue; |
@@ -294,16 +316,26 @@ static struct hlist_head *find_bucket(struct table_instance *ti, u32 hash) | |||
294 | (hash & (ti->n_buckets - 1))); | 316 | (hash & (ti->n_buckets - 1))); |
295 | } | 317 | } |
296 | 318 | ||
297 | static void table_instance_insert(struct table_instance *ti, struct sw_flow *flow) | 319 | static void table_instance_insert(struct table_instance *ti, |
320 | struct sw_flow *flow) | ||
298 | { | 321 | { |
299 | struct hlist_head *head; | 322 | struct hlist_head *head; |
300 | 323 | ||
301 | head = find_bucket(ti, flow->hash); | 324 | head = find_bucket(ti, flow->flow_table.hash); |
302 | hlist_add_head_rcu(&flow->hash_node[ti->node_ver], head); | 325 | hlist_add_head_rcu(&flow->flow_table.node[ti->node_ver], head); |
326 | } | ||
327 | |||
328 | static void ufid_table_instance_insert(struct table_instance *ti, | ||
329 | struct sw_flow *flow) | ||
330 | { | ||
331 | struct hlist_head *head; | ||
332 | |||
333 | head = find_bucket(ti, flow->ufid_table.hash); | ||
334 | hlist_add_head_rcu(&flow->ufid_table.node[ti->node_ver], head); | ||
303 | } | 335 | } |
304 | 336 | ||
305 | static void flow_table_copy_flows(struct table_instance *old, | 337 | static void flow_table_copy_flows(struct table_instance *old, |
306 | struct table_instance *new) | 338 | struct table_instance *new, bool ufid) |
307 | { | 339 | { |
308 | int old_ver; | 340 | int old_ver; |
309 | int i; | 341 | int i; |
@@ -318,15 +350,21 @@ static void flow_table_copy_flows(struct table_instance *old, | |||
318 | 350 | ||
319 | head = flex_array_get(old->buckets, i); | 351 | head = flex_array_get(old->buckets, i); |
320 | 352 | ||
321 | hlist_for_each_entry(flow, head, hash_node[old_ver]) | 353 | if (ufid) |
322 | table_instance_insert(new, flow); | 354 | hlist_for_each_entry(flow, head, |
355 | ufid_table.node[old_ver]) | ||
356 | ufid_table_instance_insert(new, flow); | ||
357 | else | ||
358 | hlist_for_each_entry(flow, head, | ||
359 | flow_table.node[old_ver]) | ||
360 | table_instance_insert(new, flow); | ||
323 | } | 361 | } |
324 | 362 | ||
325 | old->keep_flows = true; | 363 | old->keep_flows = true; |
326 | } | 364 | } |
327 | 365 | ||
328 | static struct table_instance *table_instance_rehash(struct table_instance *ti, | 366 | static struct table_instance *table_instance_rehash(struct table_instance *ti, |
329 | int n_buckets) | 367 | int n_buckets, bool ufid) |
330 | { | 368 | { |
331 | struct table_instance *new_ti; | 369 | struct table_instance *new_ti; |
332 | 370 | ||
@@ -334,27 +372,38 @@ static struct table_instance *table_instance_rehash(struct table_instance *ti, | |||
334 | if (!new_ti) | 372 | if (!new_ti) |
335 | return NULL; | 373 | return NULL; |
336 | 374 | ||
337 | flow_table_copy_flows(ti, new_ti); | 375 | flow_table_copy_flows(ti, new_ti, ufid); |
338 | 376 | ||
339 | return new_ti; | 377 | return new_ti; |
340 | } | 378 | } |
341 | 379 | ||
342 | int ovs_flow_tbl_flush(struct flow_table *flow_table) | 380 | int ovs_flow_tbl_flush(struct flow_table *flow_table) |
343 | { | 381 | { |
344 | struct table_instance *old_ti; | 382 | struct table_instance *old_ti, *new_ti; |
345 | struct table_instance *new_ti; | 383 | struct table_instance *old_ufid_ti, *new_ufid_ti; |
346 | 384 | ||
347 | old_ti = ovsl_dereference(flow_table->ti); | ||
348 | new_ti = table_instance_alloc(TBL_MIN_BUCKETS); | 385 | new_ti = table_instance_alloc(TBL_MIN_BUCKETS); |
349 | if (!new_ti) | 386 | if (!new_ti) |
350 | return -ENOMEM; | 387 | return -ENOMEM; |
388 | new_ufid_ti = table_instance_alloc(TBL_MIN_BUCKETS); | ||
389 | if (!new_ufid_ti) | ||
390 | goto err_free_ti; | ||
391 | |||
392 | old_ti = ovsl_dereference(flow_table->ti); | ||
393 | old_ufid_ti = ovsl_dereference(flow_table->ufid_ti); | ||
351 | 394 | ||
352 | rcu_assign_pointer(flow_table->ti, new_ti); | 395 | rcu_assign_pointer(flow_table->ti, new_ti); |
396 | rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti); | ||
353 | flow_table->last_rehash = jiffies; | 397 | flow_table->last_rehash = jiffies; |
354 | flow_table->count = 0; | 398 | flow_table->count = 0; |
399 | flow_table->ufid_count = 0; | ||
355 | 400 | ||
356 | table_instance_destroy(old_ti, true); | 401 | table_instance_destroy(old_ti, old_ufid_ti, true); |
357 | return 0; | 402 | return 0; |
403 | |||
404 | err_free_ti: | ||
405 | __table_instance_destroy(new_ti); | ||
406 | return -ENOMEM; | ||
358 | } | 407 | } |
359 | 408 | ||
360 | static u32 flow_hash(const struct sw_flow_key *key, | 409 | static u32 flow_hash(const struct sw_flow_key *key, |
@@ -402,14 +451,15 @@ static bool flow_cmp_masked_key(const struct sw_flow *flow, | |||
402 | return cmp_key(&flow->key, key, range->start, range->end); | 451 | return cmp_key(&flow->key, key, range->start, range->end); |
403 | } | 452 | } |
404 | 453 | ||
405 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | 454 | static bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, |
406 | const struct sw_flow_match *match) | 455 | const struct sw_flow_match *match) |
407 | { | 456 | { |
408 | struct sw_flow_key *key = match->key; | 457 | struct sw_flow_key *key = match->key; |
409 | int key_start = flow_key_start(key); | 458 | int key_start = flow_key_start(key); |
410 | int key_end = match->range.end; | 459 | int key_end = match->range.end; |
411 | 460 | ||
412 | return cmp_key(&flow->unmasked_key, key, key_start, key_end); | 461 | BUG_ON(ovs_identifier_is_ufid(&flow->id)); |
462 | return cmp_key(flow->id.unmasked_key, key, key_start, key_end); | ||
413 | } | 463 | } |
414 | 464 | ||
415 | static struct sw_flow *masked_flow_lookup(struct table_instance *ti, | 465 | static struct sw_flow *masked_flow_lookup(struct table_instance *ti, |
@@ -424,8 +474,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, | |||
424 | ovs_flow_mask_key(&masked_key, unmasked, mask); | 474 | ovs_flow_mask_key(&masked_key, unmasked, mask); |
425 | hash = flow_hash(&masked_key, &mask->range); | 475 | hash = flow_hash(&masked_key, &mask->range); |
426 | head = find_bucket(ti, hash); | 476 | head = find_bucket(ti, hash); |
427 | hlist_for_each_entry_rcu(flow, head, hash_node[ti->node_ver]) { | 477 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { |
428 | if (flow->mask == mask && flow->hash == hash && | 478 | if (flow->mask == mask && flow->flow_table.hash == hash && |
429 | flow_cmp_masked_key(flow, &masked_key, &mask->range)) | 479 | flow_cmp_masked_key(flow, &masked_key, &mask->range)) |
430 | return flow; | 480 | return flow; |
431 | } | 481 | } |
@@ -468,7 +518,48 @@ struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, | |||
468 | /* Always called under ovs-mutex. */ | 518 | /* Always called under ovs-mutex. */ |
469 | list_for_each_entry(mask, &tbl->mask_list, list) { | 519 | list_for_each_entry(mask, &tbl->mask_list, list) { |
470 | flow = masked_flow_lookup(ti, match->key, mask); | 520 | flow = masked_flow_lookup(ti, match->key, mask); |
471 | if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */ | 521 | if (flow && ovs_identifier_is_key(&flow->id) && |
522 | ovs_flow_cmp_unmasked_key(flow, match)) | ||
523 | return flow; | ||
524 | } | ||
525 | return NULL; | ||
526 | } | ||
527 | |||
528 | static u32 ufid_hash(const struct sw_flow_id *sfid) | ||
529 | { | ||
530 | return jhash(sfid->ufid, sfid->ufid_len, 0); | ||
531 | } | ||
532 | |||
533 | static bool ovs_flow_cmp_ufid(const struct sw_flow *flow, | ||
534 | const struct sw_flow_id *sfid) | ||
535 | { | ||
536 | if (flow->id.ufid_len != sfid->ufid_len) | ||
537 | return false; | ||
538 | |||
539 | return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len); | ||
540 | } | ||
541 | |||
542 | bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match) | ||
543 | { | ||
544 | if (ovs_identifier_is_ufid(&flow->id)) | ||
545 | return flow_cmp_masked_key(flow, match->key, &match->range); | ||
546 | |||
547 | return ovs_flow_cmp_unmasked_key(flow, match); | ||
548 | } | ||
549 | |||
550 | struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl, | ||
551 | const struct sw_flow_id *ufid) | ||
552 | { | ||
553 | struct table_instance *ti = rcu_dereference_ovsl(tbl->ufid_ti); | ||
554 | struct sw_flow *flow; | ||
555 | struct hlist_head *head; | ||
556 | u32 hash; | ||
557 | |||
558 | hash = ufid_hash(ufid); | ||
559 | head = find_bucket(ti, hash); | ||
560 | hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) { | ||
561 | if (flow->ufid_table.hash == hash && | ||
562 | ovs_flow_cmp_ufid(flow, ufid)) | ||
472 | return flow; | 563 | return flow; |
473 | } | 564 | } |
474 | return NULL; | 565 | return NULL; |
@@ -485,9 +576,10 @@ int ovs_flow_tbl_num_masks(const struct flow_table *table) | |||
485 | return num; | 576 | return num; |
486 | } | 577 | } |
487 | 578 | ||
488 | static struct table_instance *table_instance_expand(struct table_instance *ti) | 579 | static struct table_instance *table_instance_expand(struct table_instance *ti, |
580 | bool ufid) | ||
489 | { | 581 | { |
490 | return table_instance_rehash(ti, ti->n_buckets * 2); | 582 | return table_instance_rehash(ti, ti->n_buckets * 2, ufid); |
491 | } | 583 | } |
492 | 584 | ||
493 | /* Remove 'mask' from the mask list, if it is not needed any more. */ | 585 | /* Remove 'mask' from the mask list, if it is not needed any more. */ |
@@ -512,10 +604,15 @@ static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) | |||
512 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) | 604 | void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow) |
513 | { | 605 | { |
514 | struct table_instance *ti = ovsl_dereference(table->ti); | 606 | struct table_instance *ti = ovsl_dereference(table->ti); |
607 | struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti); | ||
515 | 608 | ||
516 | BUG_ON(table->count == 0); | 609 | BUG_ON(table->count == 0); |
517 | hlist_del_rcu(&flow->hash_node[ti->node_ver]); | 610 | hlist_del_rcu(&flow->flow_table.node[ti->node_ver]); |
518 | table->count--; | 611 | table->count--; |
612 | if (ovs_identifier_is_ufid(&flow->id)) { | ||
613 | hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]); | ||
614 | table->ufid_count--; | ||
615 | } | ||
519 | 616 | ||
520 | /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be | 617 | /* RCU delete the mask. 'flow->mask' is not NULLed, as it should be |
521 | * accessible as long as the RCU read lock is held. | 618 | * accessible as long as the RCU read lock is held. |
@@ -589,25 +686,47 @@ static void flow_key_insert(struct flow_table *table, struct sw_flow *flow) | |||
589 | struct table_instance *new_ti = NULL; | 686 | struct table_instance *new_ti = NULL; |
590 | struct table_instance *ti; | 687 | struct table_instance *ti; |
591 | 688 | ||
592 | flow->hash = flow_hash(&flow->key, &flow->mask->range); | 689 | flow->flow_table.hash = flow_hash(&flow->key, &flow->mask->range); |
593 | ti = ovsl_dereference(table->ti); | 690 | ti = ovsl_dereference(table->ti); |
594 | table_instance_insert(ti, flow); | 691 | table_instance_insert(ti, flow); |
595 | table->count++; | 692 | table->count++; |
596 | 693 | ||
597 | /* Expand table, if necessary, to make room. */ | 694 | /* Expand table, if necessary, to make room. */ |
598 | if (table->count > ti->n_buckets) | 695 | if (table->count > ti->n_buckets) |
599 | new_ti = table_instance_expand(ti); | 696 | new_ti = table_instance_expand(ti, false); |
600 | else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL)) | 697 | else if (time_after(jiffies, table->last_rehash + REHASH_INTERVAL)) |
601 | new_ti = table_instance_rehash(ti, ti->n_buckets); | 698 | new_ti = table_instance_rehash(ti, ti->n_buckets, false); |
602 | 699 | ||
603 | if (new_ti) { | 700 | if (new_ti) { |
604 | rcu_assign_pointer(table->ti, new_ti); | 701 | rcu_assign_pointer(table->ti, new_ti); |
605 | table_instance_destroy(ti, true); | 702 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); |
606 | table->last_rehash = jiffies; | 703 | table->last_rehash = jiffies; |
607 | } | 704 | } |
608 | } | 705 | } |
609 | 706 | ||
610 | /* Must be called with OVS mutex held. */ | 707 | /* Must be called with OVS mutex held. */ |
708 | static void flow_ufid_insert(struct flow_table *table, struct sw_flow *flow) | ||
709 | { | ||
710 | struct table_instance *ti; | ||
711 | |||
712 | flow->ufid_table.hash = ufid_hash(&flow->id); | ||
713 | ti = ovsl_dereference(table->ufid_ti); | ||
714 | ufid_table_instance_insert(ti, flow); | ||
715 | table->ufid_count++; | ||
716 | |||
717 | /* Expand table, if necessary, to make room. */ | ||
718 | if (table->ufid_count > ti->n_buckets) { | ||
719 | struct table_instance *new_ti; | ||
720 | |||
721 | new_ti = table_instance_expand(ti, true); | ||
722 | if (new_ti) { | ||
723 | rcu_assign_pointer(table->ufid_ti, new_ti); | ||
724 | call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb); | ||
725 | } | ||
726 | } | ||
727 | } | ||
728 | |||
729 | /* Must be called with OVS mutex held. */ | ||
611 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | 730 | int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, |
612 | const struct sw_flow_mask *mask) | 731 | const struct sw_flow_mask *mask) |
613 | { | 732 | { |
@@ -617,6 +736,8 @@ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, | |||
617 | if (err) | 736 | if (err) |
618 | return err; | 737 | return err; |
619 | flow_key_insert(table, flow); | 738 | flow_key_insert(table, flow); |
739 | if (ovs_identifier_is_ufid(&flow->id)) | ||
740 | flow_ufid_insert(table, flow); | ||
620 | 741 | ||
621 | return 0; | 742 | return 0; |
622 | } | 743 | } |
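
The flow_table.c changes above hinge on each flow carrying two independent
{node, hash} pairs, so one object can be linked into the masked-key table and
the UFID table at the same time with no extra allocation. A tiny userspace
analogue of that dual intrusive membership (all names invented here; the
kernel uses hlist buckets and jhash rather than this toy list):

#include <stdint.h>
#include <stdio.h>

struct example_node {
        struct example_node *next;
};

struct example_flow {
        struct {
                struct example_node node;
                uint32_t hash;
        } flow_table, ufid_table;       /* two memberships, one object */
};

static void example_push(struct example_node **head, struct example_node *n)
{
        n->next = *head;
        *head = n;
}

int main(void)
{
        struct example_node *key_bucket = NULL, *ufid_bucket = NULL;
        struct example_flow flow = {
                .flow_table.hash = 0x1234,
                .ufid_table.hash = 0xabcd,
        };

        /* Mirrors ovs_flow_tbl_insert(): link into the key table always,
         * and into the UFID table when a UFID is present. */
        example_push(&key_bucket, &flow.flow_table.node);
        example_push(&ufid_bucket, &flow.ufid_table.node);

        printf("key hash %#x, ufid hash %#x\n",
               flow.flow_table.hash, flow.ufid_table.hash);
        return 0;
}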
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 309fa6415689..616eda10d955 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -47,9 +47,11 @@ struct table_instance { | |||
47 | 47 | ||
48 | struct flow_table { | 48 | struct flow_table { |
49 | struct table_instance __rcu *ti; | 49 | struct table_instance __rcu *ti; |
50 | struct table_instance __rcu *ufid_ti; | ||
50 | struct list_head mask_list; | 51 | struct list_head mask_list; |
51 | unsigned long last_rehash; | 52 | unsigned long last_rehash; |
52 | unsigned int count; | 53 | unsigned int count; |
54 | unsigned int ufid_count; | ||
53 | }; | 55 | }; |
54 | 56 | ||
55 | extern struct kmem_cache *flow_stats_cache; | 57 | extern struct kmem_cache *flow_stats_cache; |
@@ -78,8 +80,10 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, | |||
78 | const struct sw_flow_key *); | 80 | const struct sw_flow_key *); |
79 | struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, | 81 | struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, |
80 | const struct sw_flow_match *match); | 82 | const struct sw_flow_match *match); |
81 | bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, | 83 | struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, |
82 | const struct sw_flow_match *match); | 84 | const struct sw_flow_id *); |
85 | |||
86 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); | ||
83 | 87 | ||
84 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, | 88 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
85 | const struct sw_flow_mask *mask); | 89 | const struct sw_flow_mask *mask); |