diff options
author | Pravin B Shelar <pshelar@nicira.com> | 2014-10-19 15:03:40 -0400 |
---|---|---|
committer | Pravin B Shelar <pshelar@nicira.com> | 2014-11-06 02:52:35 -0500 |
commit | a85311bf1f9f8185682990cafdd4e0572c0ed373 (patch) | |
tree | ff8b3d0e0d3a7da142d371ade5a5bb9c065cd07b /net/openvswitch/flow_netlink.c | |
parent | 2fdb957d634a906ae8939bff23d45968307acbf7 (diff) |
openvswitch: Avoid NULL mask check while building mask
OVS performs mask validation even when it does not need to convert
the netlink mask attributes into a mask structure. A caller of
ovs_nla_get_match() may pass a NULL mask-structure pointer if it
does not need the mask, so a NULL check is currently required in the
SW_FLOW_KEY* macros. The following patch skips converting the mask
netlink attributes when the mask pointer is NULL, so these checks
are no longer needed in the SW_FLOW_KEY* macros.
Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
Acked-by: Daniele Di Proietto <ddiproietto@vmware.com>
Acked-by: Andy Zhou <azhou@nicira.com>
Diffstat (limited to 'net/openvswitch/flow_netlink.c')
-rw-r--r-- | net/openvswitch/flow_netlink.c | 107 |
1 files changed, 53 insertions, 54 deletions
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 482a0cbb22e8..ed3109761827 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
@@ -50,21 +50,18 @@ | |||
50 | 50 | ||
51 | #include "flow_netlink.h" | 51 | #include "flow_netlink.h" |
52 | 52 | ||
53 | static void update_range__(struct sw_flow_match *match, | 53 | static void update_range(struct sw_flow_match *match, |
54 | size_t offset, size_t size, bool is_mask) | 54 | size_t offset, size_t size, bool is_mask) |
55 | { | 55 | { |
56 | struct sw_flow_key_range *range = NULL; | 56 | struct sw_flow_key_range *range; |
57 | size_t start = rounddown(offset, sizeof(long)); | 57 | size_t start = rounddown(offset, sizeof(long)); |
58 | size_t end = roundup(offset + size, sizeof(long)); | 58 | size_t end = roundup(offset + size, sizeof(long)); |
59 | 59 | ||
60 | if (!is_mask) | 60 | if (!is_mask) |
61 | range = &match->range; | 61 | range = &match->range; |
62 | else if (match->mask) | 62 | else |
63 | range = &match->mask->range; | 63 | range = &match->mask->range; |
64 | 64 | ||
65 | if (!range) | ||
66 | return; | ||
67 | |||
68 | if (range->start == range->end) { | 65 | if (range->start == range->end) { |
69 | range->start = start; | 66 | range->start = start; |
70 | range->end = end; | 67 | range->end = end; |
@@ -80,22 +77,20 @@ static void update_range__(struct sw_flow_match *match, | |||
80 | 77 | ||
81 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ | 78 | #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ |
82 | do { \ | 79 | do { \ |
83 | update_range__(match, offsetof(struct sw_flow_key, field), \ | 80 | update_range(match, offsetof(struct sw_flow_key, field), \ |
84 | sizeof((match)->key->field), is_mask); \ | 81 | sizeof((match)->key->field), is_mask); \ |
85 | if (is_mask) { \ | 82 | if (is_mask) \ |
86 | if ((match)->mask) \ | 83 | (match)->mask->key.field = value; \ |
87 | (match)->mask->key.field = value; \ | 84 | else \ |
88 | } else { \ | ||
89 | (match)->key->field = value; \ | 85 | (match)->key->field = value; \ |
90 | } \ | ||
91 | } while (0) | 86 | } while (0) |
92 | 87 | ||
93 | #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ | 88 | #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ |
94 | do { \ | 89 | do { \ |
95 | update_range__(match, offset, len, is_mask); \ | 90 | update_range(match, offset, len, is_mask); \ |
96 | if (is_mask) \ | 91 | if (is_mask) \ |
97 | memcpy((u8 *)&(match)->mask->key + offset, value_p, \ | 92 | memcpy((u8 *)&(match)->mask->key + offset, value_p, \ |
98 | len); \ | 93 | len); \ |
99 | else \ | 94 | else \ |
100 | memcpy((u8 *)(match)->key + offset, value_p, len); \ | 95 | memcpy((u8 *)(match)->key + offset, value_p, len); \ |
101 | } while (0) | 96 | } while (0) |
@@ -104,18 +99,16 @@ static void update_range__(struct sw_flow_match *match, | |||
104 | SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ | 99 | SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ |
105 | value_p, len, is_mask) | 100 | value_p, len, is_mask) |
106 | 101 | ||
107 | #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ | 102 | #define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ |
108 | do { \ | 103 | do { \ |
109 | update_range__(match, offsetof(struct sw_flow_key, field), \ | 104 | update_range(match, offsetof(struct sw_flow_key, field), \ |
110 | sizeof((match)->key->field), is_mask); \ | 105 | sizeof((match)->key->field), is_mask); \ |
111 | if (is_mask) { \ | 106 | if (is_mask) \ |
112 | if ((match)->mask) \ | 107 | memset((u8 *)&(match)->mask->key.field, value, \ |
113 | memset((u8 *)&(match)->mask->key.field, value,\ | 108 | sizeof((match)->mask->key.field)); \ |
114 | sizeof((match)->mask->key.field)); \ | 109 | else \ |
115 | } else { \ | ||
116 | memset((u8 *)&(match)->key->field, value, \ | 110 | memset((u8 *)&(match)->key->field, value, \ |
117 | sizeof((match)->key->field)); \ | 111 | sizeof((match)->key->field)); \ |
118 | } \ | ||
119 | } while (0) | 112 | } while (0) |
120 | 113 | ||
121 | static bool match_validate(const struct sw_flow_match *match, | 114 | static bool match_validate(const struct sw_flow_match *match, |
@@ -677,8 +670,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, | |||
677 | 670 | ||
678 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); | 671 | SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); |
679 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); | 672 | attrs &= ~(1 << OVS_KEY_ATTR_VLAN); |
680 | } else if (!is_mask) | 673 | } |
681 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
682 | 674 | ||
683 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { | 675 | if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { |
684 | __be16 eth_type; | 676 | __be16 eth_type; |
@@ -903,8 +895,8 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val) | |||
903 | * attribute specifies the mask field of the wildcarded flow. | 895 | * attribute specifies the mask field of the wildcarded flow. |
904 | */ | 896 | */ |
905 | int ovs_nla_get_match(struct sw_flow_match *match, | 897 | int ovs_nla_get_match(struct sw_flow_match *match, |
906 | const struct nlattr *key, | 898 | const struct nlattr *nla_key, |
907 | const struct nlattr *mask) | 899 | const struct nlattr *nla_mask) |
908 | { | 900 | { |
909 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; | 901 | const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; |
910 | const struct nlattr *encap; | 902 | const struct nlattr *encap; |
@@ -914,7 +906,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, | |||
914 | bool encap_valid = false; | 906 | bool encap_valid = false; |
915 | int err; | 907 | int err; |
916 | 908 | ||
917 | err = parse_flow_nlattrs(key, a, &key_attrs); | 909 | err = parse_flow_nlattrs(nla_key, a, &key_attrs); |
918 | if (err) | 910 | if (err) |
919 | return err; | 911 | return err; |
920 | 912 | ||
@@ -955,36 +947,43 @@ int ovs_nla_get_match(struct sw_flow_match *match, | |||
955 | if (err) | 947 | if (err) |
956 | return err; | 948 | return err; |
957 | 949 | ||
958 | if (match->mask && !mask) { | 950 | if (match->mask) { |
959 | /* Create an exact match mask. We need to set to 0xff all the | 951 | if (!nla_mask) { |
960 | * 'match->mask' fields that have been touched in 'match->key'. | 952 | /* Create an exact match mask. We need to set to 0xff |
961 | * We cannot simply memset 'match->mask', because padding bytes | 953 | * all the 'match->mask' fields that have been touched |
962 | * and fields not specified in 'match->key' should be left to 0. | 954 | * in 'match->key'. We cannot simply memset |
963 | * Instead, we use a stream of netlink attributes, copied from | 955 | * 'match->mask', because padding bytes and fields not |
964 | * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care | 956 | * specified in 'match->key' should be left to 0. |
965 | * of filling 'match->mask' appropriately. | 957 | * Instead, we use a stream of netlink attributes, |
966 | */ | 958 | * copied from 'key' and set to 0xff. |
967 | newmask = kmemdup(key, nla_total_size(nla_len(key)), | 959 | * ovs_key_from_nlattrs() will take care of filling |
968 | GFP_KERNEL); | 960 | * 'match->mask' appropriately. |
969 | if (!newmask) | 961 | */ |
970 | return -ENOMEM; | 962 | newmask = kmemdup(nla_key, |
963 | nla_total_size(nla_len(nla_key)), | ||
964 | GFP_KERNEL); | ||
965 | if (!newmask) | ||
966 | return -ENOMEM; | ||
971 | 967 | ||
972 | mask_set_nlattr(newmask, 0xff); | 968 | mask_set_nlattr(newmask, 0xff); |
973 | 969 | ||
974 | /* The userspace does not send tunnel attributes that are 0, | 970 | /* The userspace does not send tunnel attributes that |
975 | * but we should not wildcard them nonetheless. | 971 | * are 0, but we should not wildcard them nonetheless. |
976 | */ | 972 | */ |
977 | if (match->key->tun_key.ipv4_dst) | 973 | if (match->key->tun_key.ipv4_dst) |
978 | SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true); | 974 | SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, |
975 | 0xff, true); | ||
979 | 976 | ||
980 | mask = newmask; | 977 | nla_mask = newmask; |
981 | } | 978 | } |
982 | 979 | ||
983 | if (mask) { | 980 | err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs); |
984 | err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); | ||
985 | if (err) | 981 | if (err) |
986 | goto free_newmask; | 982 | goto free_newmask; |
987 | 983 | ||
984 | /* Always match on tci. */ | ||
985 | SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); | ||
986 | |||
988 | if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { | 987 | if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { |
989 | __be16 eth_type = 0; | 988 | __be16 eth_type = 0; |
990 | __be16 tci = 0; | 989 | __be16 tci = 0; |