author		Maor Gottlieb <maorg@mellanox.com>	2016-07-04 10:23:05 -0400
committer	David S. Miller <davem@davemloft.net>	2016-07-05 03:06:02 -0400
commit		c5bb17302e734967822be559cf661704b707b4ed (patch)
tree		5fe959504ac3236a3c7bafb9bd9401f5fec0c26e
parent		019d0c99364a818eb08f52d7ee4b75ea6df07d09 (diff)
net/mlx5: Refactor mlx5_add_flow_rule
Reduce the set of arguments passed to mlx5_add_flow_rule by introducing
the flow_spec structure.

Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
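For illustration only, a minimal sketch of the calling convention after this
refactor, mirroring the driver changes below. The helper name
example_add_ipv4_rule is hypothetical, and the flow table, destination and
IPv4 ethertype match are placeholders assumed to be set up by the caller:

	/* Hypothetical example of the post-refactor API; not part of the patch. */
	static int example_add_ipv4_rule(struct mlx5_flow_table *ft,
					 struct mlx5_flow_destination *dest,
					 struct mlx5_flow_rule **rule)
	{
		struct mlx5_flow_spec *spec;

		/* One vzalloc'ed spec replaces the old match_criteria_enable,
		 * match_criteria and match_value arguments.
		 */
		spec = mlx5_vzalloc(sizeof(*spec));
		if (!spec)
			return -ENOMEM;

		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ethertype, ETH_P_IP);

		*rule = mlx5_add_flow_rule(ft, spec,
					   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					   MLX5_FS_DEFAULT_FLOW_TAG, dest);
		/* The spec is only consumed while the rule is created. */
		kvfree(spec);

		return IS_ERR(*rule) ? PTR_ERR(*rule) : 0;
	}

Every caller converted by this patch follows the same pattern: fill a single
vzalloc'ed mlx5_flow_spec, pass it to mlx5_add_flow_rule(), and kvfree() it
once the rule has been created.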
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c                             21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c             68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c               96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c               31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c            100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c    55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c             22
-rw-r--r--  include/linux/mlx5/fs.h                                       10
8 files changed, 171 insertions(+), 232 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b48ad85315dc..dad63f038bb8 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1528,21 +1528,18 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 {
 	struct mlx5_flow_table *ft = ft_prio->flow_table;
 	struct mlx5_ib_flow_handler *handler;
+	struct mlx5_flow_spec *spec;
 	void *ib_flow = flow_attr + 1;
-	u8 match_criteria_enable = 0;
 	unsigned int spec_index;
-	u32 *match_c;
-	u32 *match_v;
 	u32 action;
 	int err = 0;

 	if (!is_valid_attr(flow_attr))
 		return ERR_PTR(-EINVAL);

-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+	spec = mlx5_vzalloc(sizeof(*spec));
 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
-	if (!handler || !match_c || !match_v) {
+	if (!handler || !spec) {
 		err = -ENOMEM;
 		goto free;
 	}
@@ -1550,7 +1547,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	INIT_LIST_HEAD(&handler->list);

 	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
-		err = parse_flow_attr(match_c, match_v, ib_flow);
+		err = parse_flow_attr(spec->match_criteria,
+				      spec->match_value, ib_flow);
 		if (err < 0)
 			goto free;

@@ -1558,11 +1556,11 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	}

 	/* Outer header support only */
-	match_criteria_enable = (!outer_header_zero(match_c)) << 0;
+	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria))
+		<< 0;
 	action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
 		MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-	handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-					   match_c, match_v,
+	handler->rule = mlx5_add_flow_rule(ft, spec,
 					   action,
 					   MLX5_FS_DEFAULT_FLOW_TAG,
 					   dst);
@@ -1578,8 +1576,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 free:
 	if (err)
 		kfree(handler);
-	kfree(match_c);
-	kfree(match_v);
+	kvfree(spec);
 	return err ? ERR_PTR(err) : handler;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 10f18d46b8ab..a8cb38789774 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -175,15 +175,12 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 {
 	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
 	struct mlx5_flow_destination dest;
-	u8 match_criteria_enable = 0;
 	struct mlx5e_tir *tir = priv->indir_tir;
-	u32 *match_criteria;
-	u32 *match_value;
+	struct mlx5_flow_spec *spec;
 	int err = 0;

-	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
 		err = -ENOMEM;
 		goto out;
@@ -208,8 +205,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 		goto out;
 	}

-	arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
-						  match_criteria, match_value,
+	arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
 						  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 						  MLX5_FS_DEFAULT_FLOW_TAG,
 						  &dest);
@@ -220,8 +216,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
 			   __func__, type);
 	}
 out:
-	kvfree(match_criteria);
-	kvfree(match_value);
+	kvfree(spec);
 	return err;
 }

@@ -475,23 +470,20 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
 	struct mlx5_flow_rule *rule = NULL;
 	struct mlx5_flow_destination dest;
 	struct arfs_table *arfs_table;
-	u8 match_criteria_enable = 0;
+	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_table *ft;
-	u32 *match_criteria;
-	u32 *match_value;
 	int err = 0;

-	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
 		err = -ENOMEM;
 		goto out;
 	}
-	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 			 outer_headers.ethertype);
-	MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
 		 ntohs(tuple->etype));
 	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
 	if (!arfs_table) {
@@ -501,59 +493,58 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,

 	ft = arfs_table->ft.t;
 	if (tuple->ip_proto == IPPROTO_TCP) {
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.tcp_dport);
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.tcp_sport);
-		MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
 			 ntohs(tuple->dst_port));
-		MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
 			 ntohs(tuple->src_port));
 	} else {
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.udp_dport);
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.udp_sport);
-		MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
 			 ntohs(tuple->dst_port));
-		MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
 			 ntohs(tuple->src_port));
 	}
 	if (tuple->etype == htons(ETH_P_IP)) {
-		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
 		       &tuple->src_ipv4,
 		       4);
-		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
 		       &tuple->dst_ipv4,
 		       4);
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
 				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
 	} else {
-		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       &tuple->src_ipv6,
 		       16);
-		memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
 				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       &tuple->dst_ipv6,
 		       16);
-		memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
 		       0xff,
 		       16);
-		memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
 		       0xff,
 		       16);
 	}
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-	rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
-				  match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+	rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				  MLX5_FS_DEFAULT_FLOW_TAG,
 				  &dest);
 	if (IS_ERR(rule)) {
@@ -563,8 +554,7 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
 	}

 out:
-	kvfree(match_criteria);
-	kvfree(match_value);
+	kvfree(spec);
 	return err ? ERR_PTR(err) : rule;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 606e69b4babc..2e1e86316fe7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -156,19 +156,18 @@ enum mlx5e_vlan_rule_type {

 static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 				 enum mlx5e_vlan_rule_type rule_type,
-				 u16 vid, u32 *mc, u32 *mv)
+				 u16 vid, struct mlx5_flow_spec *spec)
 {
 	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
 	struct mlx5_flow_destination dest;
-	u8 match_criteria_enable = 0;
 	struct mlx5_flow_rule **rule_p;
 	int err = 0;

 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest.ft = priv->fs.l2.ft.t;

-	match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

 	switch (rule_type) {
 	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -176,17 +175,19 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 		break;
 	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
 		rule_p = &priv->fs.vlan.any_vlan_rule;
-		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
 		break;
 	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
 		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
-		MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
-		MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.first_vid);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+			 vid);
 		break;
 	}

-	*rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+	*rule_p = mlx5_add_flow_rule(ft, spec,
 				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				     MLX5_FS_DEFAULT_FLOW_TAG,
 				     &dest);
@@ -203,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
 			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
 {
-	u32 *match_criteria;
-	u32 *match_value;
+	struct mlx5_flow_spec *spec;
 	int err = 0;

-	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto add_vlan_rule_out;
+		return -ENOMEM;
 	}

 	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
 		mlx5e_vport_context_update_vlans(priv);

-	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
-				    match_value);
+	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

-add_vlan_rule_out:
-	kvfree(match_criteria);
-	kvfree(match_value);
+	kvfree(spec);

 	return err;
 }
@@ -598,32 +593,27 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 					      u8 proto)
 {
 	struct mlx5_flow_rule *rule;
-	u8 match_criteria_enable = 0;
-	u32 *match_criteria;
-	u32 *match_value;
+	struct mlx5_flow_spec *spec;
 	int err = 0;

-	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto out;
+		return ERR_PTR(-ENOMEM);
 	}

 	if (proto) {
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
-		MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
 	}
 	if (etype) {
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
-		MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
 	}

-	rule = mlx5_add_flow_rule(ft, match_criteria_enable,
-				  match_criteria, match_value,
+	rule = mlx5_add_flow_rule(ft, spec,
 				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				  MLX5_FS_DEFAULT_FLOW_TAG,
 				  dest);
@@ -631,9 +621,8 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
 		err = PTR_ERR(rule);
 		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
 	}
-out:
-	kvfree(match_criteria);
-	kvfree(match_value);
+
+	kvfree(spec);
 	return err ? ERR_PTR(err) : rule;
 }

@@ -792,24 +781,20 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 {
 	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
 	struct mlx5_flow_destination dest;
-	u8 match_criteria_enable = 0;
-	u32 *match_criteria;
-	u32 *match_value;
+	struct mlx5_flow_spec *spec;
 	int err = 0;
 	u8 *mc_dmac;
 	u8 *mv_dmac;

-	match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
-	if (!match_value || !match_criteria) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto add_l2_rule_out;
+		return -ENOMEM;
 	}

-	mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 			       outer_headers.dmac_47_16);
-	mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 			       outer_headers.dmac_47_16);

 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
@@ -817,13 +802,13 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,

 	switch (type) {
 	case MLX5E_FULLMATCH:
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 		eth_broadcast_addr(mc_dmac);
 		ether_addr_copy(mv_dmac, ai->addr);
 		break;

 	case MLX5E_ALLMULTI:
-		match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 		mc_dmac[0] = 0x01;
 		mv_dmac[0] = 0x01;
 		break;
@@ -832,8 +817,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 		break;
 	}

-	ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
-				      match_value,
+	ai->rule = mlx5_add_flow_rule(ft, spec,
 				      MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				      MLX5_FS_DEFAULT_FLOW_TAG, &dest);
 	if (IS_ERR(ai->rule)) {
@@ -843,9 +827,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 		ai->rule = NULL;
 	}

-add_l2_rule_out:
-	kvfree(match_criteria);
-	kvfree(match_value);
+	kvfree(spec);

 	return err;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 704c3d30493e..3261e8b1286e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -50,7 +50,7 @@ struct mlx5e_tc_flow {
 #define MLX5E_TC_TABLE_NUM_GROUPS 4

 static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
-						u32 *match_c, u32 *match_v,
+						struct mlx5_flow_spec *spec,
 						u32 action, u32 flow_tag)
 {
 	struct mlx5_core_dev *dev = priv->mdev;
@@ -88,8 +88,8 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
 		table_created = true;
 	}

-	rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
-				  match_c, match_v,
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
 				  action, flow_tag,
 				  &dest);

@@ -126,12 +126,13 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
 	}
 }

-static int parse_cls_flower(struct mlx5e_priv *priv,
-			    u32 *match_c, u32 *match_v,
+static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
 			    struct tc_cls_flower_offload *f)
 {
-	void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
-	void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				       outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				       outer_headers);
 	u16 addr_type = 0;
 	u8 ip_proto = 0;

@@ -342,12 +343,11 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 			   struct tc_cls_flower_offload *f)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
-	u32 *match_c;
-	u32 *match_v;
 	int err = 0;
 	u32 flow_tag;
 	u32 action;
 	struct mlx5e_tc_flow *flow;
+	struct mlx5_flow_spec *spec;
 	struct mlx5_flow_rule *old = NULL;

 	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
@@ -357,16 +357,15 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	else
 		flow = kzalloc(sizeof(*flow), GFP_KERNEL);

-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_c || !match_v || !flow) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec || !flow) {
 		err = -ENOMEM;
 		goto err_free;
 	}

 	flow->cookie = f->cookie;

-	err = parse_cls_flower(priv, match_c, match_v, f);
+	err = parse_cls_flower(priv, spec, f);
 	if (err < 0)
 		goto err_free;

@@ -379,8 +378,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 	if (err)
 		goto err_free;

-	flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
-				       flow_tag);
+	flow->rule = mlx5e_tc_add_flow(priv, spec, action, flow_tag);
 	if (IS_ERR(flow->rule)) {
 		err = PTR_ERR(flow->rule);
 		goto err_hash_del;
@@ -398,8 +396,7 @@ err_free:
 	if (!old)
 		kfree(flow);
 out:
-	kfree(match_c);
-	kfree(match_v);
+	kvfree(spec);
 	return err;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f0a973557f7b..f6d667797ee1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -329,25 +329,23 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 					    MLX5_MATCH_OUTER_HEADERS);
 	struct mlx5_flow_rule *flow_rule = NULL;
 	struct mlx5_flow_destination dest;
+	struct mlx5_flow_spec *spec;
 	void *mv_misc = NULL;
 	void *mc_misc = NULL;
 	u8 *dmac_v = NULL;
 	u8 *dmac_c = NULL;
-	u32 *match_v;
-	u32 *match_c;

 	if (rx_rule)
 		match_header |= MLX5_MATCH_MISC_PARAMETERS;
-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		pr_warn("FDB: Failed to alloc match parameters\n");
-		goto out;
+		return NULL;
 	}
-
-	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
 			      outer_headers.dmac_47_16);
-	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
 			      outer_headers.dmac_47_16);

 	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
@@ -356,8 +354,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 	}

 	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
-		mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
-		mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				       misc_parameters);
+		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				       misc_parameters);
 		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
 		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
 	}
@@ -368,11 +368,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 	esw_debug(esw->dev,
 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
 		  dmac_v, dmac_c, vport);
+	spec->match_criteria_enable = match_header;
 	flow_rule =
-		mlx5_add_flow_rule(esw->fdb_table.fdb,
-				   match_header,
-				   match_c,
-				   match_v,
+		mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
 				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				   0, &dest);
 	if (IS_ERR(flow_rule)) {
@@ -381,9 +379,8 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
 		flow_rule = NULL;
 	}
-out:
-	kfree(match_v);
-	kfree(match_c);
+
+	kvfree(spec);
 	return flow_rule;
 }

@@ -1293,9 +1290,8 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	struct mlx5_flow_spec *spec;
 	u8 smac[ETH_ALEN];
-	u32 *match_v;
-	u32 *match_c;
 	int err = 0;
 	u8 *smac_v;

@@ -1329,9 +1325,8 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
 		  vport->vport, vport->vlan, vport->qos);

-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		err = -ENOMEM;
 		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
 			 vport->vport, err);
@@ -1339,22 +1334,20 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 	}

 	if (vport->vlan || vport->qos)
-		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);

 	if (vport->spoofchk) {
-		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
-		MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
 		smac_v = MLX5_ADDR_OF(fte_match_param,
-				      match_v,
+				      spec->match_value,
 				      outer_headers.smac_47_16);
 		ether_addr_copy(smac_v, smac);
 	}

+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	vport->ingress.allow_rule =
-		mlx5_add_flow_rule(vport->ingress.acl,
-				   MLX5_MATCH_OUTER_HEADERS,
-				   match_c,
-				   match_v,
+		mlx5_add_flow_rule(vport->ingress.acl, spec,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
 	if (IS_ERR(vport->ingress.allow_rule)) {
@@ -1365,13 +1358,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 		goto out;
 	}

-	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
-	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	memset(spec, 0, sizeof(*spec));
 	vport->ingress.drop_rule =
-		mlx5_add_flow_rule(vport->ingress.acl,
-				   0,
-				   match_c,
-				   match_v,
+		mlx5_add_flow_rule(vport->ingress.acl, spec,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
 	if (IS_ERR(vport->ingress.drop_rule)) {
@@ -1385,17 +1374,14 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 out:
 	if (err)
 		esw_vport_cleanup_ingress_rules(esw, vport);
-
-	kfree(match_v);
-	kfree(match_c);
+	kvfree(spec);
 	return err;
 }

 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   struct mlx5_vport *vport)
 {
-	u32 *match_v;
-	u32 *match_c;
+	struct mlx5_flow_spec *spec;
 	int err = 0;

 	esw_vport_cleanup_egress_rules(esw, vport);
@@ -1411,9 +1397,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
 		  vport->vport, vport->vlan, vport->qos);

-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		err = -ENOMEM;
 		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
 			 vport->vport, err);
@@ -1421,16 +1406,14 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	}

 	/* Allowed vlan rule */
-	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
-	MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);

+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 	vport->egress.allowed_vlan =
-		mlx5_add_flow_rule(vport->egress.acl,
-				   MLX5_MATCH_OUTER_HEADERS,
-				   match_c,
-				   match_v,
+		mlx5_add_flow_rule(vport->egress.acl, spec,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
 	if (IS_ERR(vport->egress.allowed_vlan)) {
@@ -1442,13 +1425,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	}

 	/* Drop others rule (star rule) */
-	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
-	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+	memset(spec, 0, sizeof(*spec));
 	vport->egress.drop_rule =
-		mlx5_add_flow_rule(vport->egress.acl,
-				   0,
-				   match_c,
-				   match_v,
+		mlx5_add_flow_rule(vport->egress.acl, spec,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
 	if (IS_ERR(vport->egress.drop_rule)) {
@@ -1458,8 +1437,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 		vport->egress.drop_rule = NULL;
 	}
 out:
-	kfree(match_v);
-	kfree(match_c);
+	kvfree(spec);
 	return err;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index ed8ad988f07a..1842dfb4636b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -43,37 +43,35 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 {
 	struct mlx5_flow_destination dest;
 	struct mlx5_flow_rule *flow_rule;
-	int match_header = MLX5_MATCH_MISC_PARAMETERS;
-	u32 *match_v, *match_c;
+	struct mlx5_flow_spec *spec;
 	void *misc;

-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
 		flow_rule = ERR_PTR(-ENOMEM);
 		goto out;
 	}

-	misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
 	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
 	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

-	misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = vport;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, match_header, match_c,
-				       match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+	flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				       0, &dest);
 	if (IS_ERR(flow_rule))
 		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
 out:
-	kfree(match_v);
-	kfree(match_c);
+	kvfree(spec);
 	return flow_rule;
 }

@@ -138,12 +136,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 {
 	struct mlx5_flow_destination dest;
 	struct mlx5_flow_rule *flow_rule = NULL;
-	u32 *match_v, *match_c;
+	struct mlx5_flow_spec *spec;
 	int err = 0;

-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
 		err = -ENOMEM;
 		goto out;
@@ -152,8 +149,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport_num = 0;

-	flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, 0, match_c, match_v,
-				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, 0, &dest);
+	flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+				       0, &dest);
 	if (IS_ERR(flow_rule)) {
 		err = PTR_ERR(flow_rule);
 		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
@@ -162,8 +160,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)

 	esw->fdb_table.offloads.miss_rule = flow_rule;
 out:
-	kfree(match_v);
-	kfree(match_c);
+	kvfree(spec);
 	return err;
 }

@@ -351,29 +348,28 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 {
 	struct mlx5_flow_destination dest;
 	struct mlx5_flow_rule *flow_rule;
-	int match_header = MLX5_MATCH_MISC_PARAMETERS;
-	u32 *match_v, *match_c;
+	struct mlx5_flow_spec *spec;
 	void *misc;

-	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
-	if (!match_v || !match_c) {
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
 		esw_warn(esw->dev, "Failed to alloc match parameters\n");
 		flow_rule = ERR_PTR(-ENOMEM);
 		goto out;
 	}

-	misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
 	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

-	misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
 	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest.tir_num = tirn;

-	flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, match_header, match_c,
-				       match_v, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+	flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
+				       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				       0, &dest);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
@@ -381,8 +377,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
 	}

 out:
-	kfree(match_v);
-	kfree(match_c);
+	kvfree(spec);
 	return flow_rule;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index b0401104afb9..7fcdae1cf053 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1160,9 +1160,7 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,

 static struct mlx5_flow_rule *
 _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		    u8 match_criteria_enable,
-		    u32 *match_criteria,
-		    u32 *match_value,
+		    struct mlx5_flow_spec *spec,
 		    u32 action,
 		    u32 flow_tag,
 		    struct mlx5_flow_destination *dest)
@@ -1176,22 +1174,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
 	fs_for_each_fg(g, ft)
 		if (compare_match_criteria(g->mask.match_criteria_enable,
-					   match_criteria_enable,
+					   spec->match_criteria_enable,
 					   g->mask.match_criteria,
-					   match_criteria)) {
-			rule = add_rule_fg(g, match_value,
+					   spec->match_criteria)) {
+			rule = add_rule_fg(g, spec->match_value,
 					   action, flow_tag, dest);
 			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
 				goto unlock;
 		}

-	g = create_autogroup(ft, match_criteria_enable, match_criteria);
+	g = create_autogroup(ft, spec->match_criteria_enable,
+			     spec->match_criteria);
 	if (IS_ERR(g)) {
 		rule = (void *)g;
 		goto unlock;
 	}

-	rule = add_rule_fg(g, match_value,
+	rule = add_rule_fg(g, spec->match_value,
 			   action, flow_tag, dest);
 	if (IS_ERR(rule)) {
 		/* Remove assumes refcount > 0 and autogroup creates a group
@@ -1215,9 +1214,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)

 struct mlx5_flow_rule *
 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   u8 match_criteria_enable,
-		   u32 *match_criteria,
-		   u32 *match_value,
+		   struct mlx5_flow_spec *spec,
 		   u32 action,
 		   u32 flow_tag,
 		   struct mlx5_flow_destination *dest)
@@ -1248,8 +1245,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
 		}
 	}

-	rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
-				   match_value, action, flow_tag, dest);
+	rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);

 	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		if (!IS_ERR_OR_NULL(rule) &&
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 6ad111938709..d22fe7e5a39a 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -68,6 +68,12 @@ struct mlx5_flow_group;
 struct mlx5_flow_rule;
 struct mlx5_flow_namespace;

+struct mlx5_flow_spec {
+	u8   match_criteria_enable;
+	u32  match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+	u32  match_value[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
 struct mlx5_flow_destination {
 	enum mlx5_flow_destination_type type;
 	union {
@@ -116,9 +122,7 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
  */
 struct mlx5_flow_rule *
 mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   u8 match_criteria_enable,
-		   u32 *match_criteria,
-		   u32 *match_value,
+		   struct mlx5_flow_spec *spec,
 		   u32 action,
 		   u32 flow_tag,
 		   struct mlx5_flow_destination *dest);