author	David S. Miller <davem@davemloft.net>	2017-12-28 19:32:59 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-28 19:32:59 -0500
commit	d367341b25bd5aef3bf5524baa6f73e16ceced85 (patch)
tree	e970b889efb8baf73d3114d4fb9d3a36e87ef955
parent	8d1666fdfcea94abc0c2f84b0aa512269d28bdf1 (diff)
parent	9b93ab981e3bf62ff95a8cbb6faf652cd400decd (diff)
Merge tag 'mlx5-shared-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Saeed Mahameed says:

====================
Mellanox, mlx5 E-Switch updates 2017-12-19

This series includes updates to the mlx5 E-Switch infrastructure, to be
merged into the net-next and rdma-next trees.

Mark's patches provide an E-Switch refactoring that generalizes the mlx5
E-Switch vf representor interfaces and data structures. The series is
mainly focused on moving ethernet (netdev) specific representor logic out
of the E-Switch core (eswitch.c) into the mlx5e representor module
(en_rep.c), which provides better separation and allows future support for
other types of vf representors (e.g. RDMA).

Gal's patches at the end of this series provide a simple syntax fix and
two other patches that rework the vport ingress/egress ACL steering
namespaces to be aligned with the Firmware/Hardware specs.

V1->V2:
 - Addressed coding style comments in patches #1 and #7
 - The series is still based on rc4, as now I see net-next is also @rc4.

V2->V3:
 - Fixed compilation warning, reported by Dave.

Please pull and let me know if there's any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rep.c	147
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_rep.h	14
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_tc.c	15
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch.c	48
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch.h	45
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c	216
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fs_core.c	145
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/fs_core.h	4
-rw-r--r--	include/linux/mlx5/fs.h	4
9 files changed, 424 insertions(+), 214 deletions(-)
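
Note: the central change in this series is visible throughout the diffs below. struct mlx5_eswitch_rep no longer embeds netdev-specific callbacks; each rep instead carries an array of per-type interfaces, rep_if[NUM_REP_TYPES], and a consumer registers a struct mlx5_eswitch_rep_if per vport and per rep type. A minimal sketch of the new registration flow, modeled on the en_rep.c changes in this merge (the my_* names are hypothetical placeholders, not driver code):

	/* Sketch only: a consumer fills in a mlx5_eswitch_rep_if and registers
	 * it per vport under its rep type; the e-switch core copies the
	 * callbacks and priv pointer without knowing what priv holds.
	 */
	static int my_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
	{
		/* Allocate type-specific state and publish it through
		 * rep->rep_if[REP_ETH].priv so later callbacks can find it.
		 */
		return 0;
	}

	static void my_rep_unload(struct mlx5_eswitch_rep *rep)
	{
		/* Tear down whatever my_rep_load() created. */
	}

	static void my_register_reps(struct mlx5_eswitch *esw, int nvports)
	{
		int vport;

		for (vport = 0; vport < nvports; vport++) {
			struct mlx5_eswitch_rep_if rep_if = {};

			rep_if.load = my_rep_load;
			rep_if.unload = my_rep_unload;
			mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
		}
	}

The e-switch core invokes these callbacks when vports are loaded or unloaded, as the eswitch_offloads.c diff below shows.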
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 2c43606c26b5..c6a77f8e99a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 	return 0;
 }
 
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_priv *rpriv;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+		list_del(&rep_sq->list);
+		kfree(rep_sq);
+	}
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_sq *rep_sq;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	for (i = 0; i < sqns_num; i++) {
+		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+		if (!rep_sq) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		/* Add re-inject rule to the PF/representor sqs */
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+								rep->vport,
+								sqns_array[i]);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
+			kfree(rep_sq);
+			goto out_err;
+		}
+		rep_sq->send_to_vport_rule = flow_rule;
+		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+	}
+	return 0;
+
+out_err:
+	mlx5e_sqs2vport_stop(esw, rep);
+	return err;
+}
+
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 			sqs[num_sqs++] = c->sq[tc].sqn;
 	}
 
-	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
 	kfree(sqs);
 
 out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
+	mlx5e_sqs2vport_stop(esw, rep);
 }
 
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 #endif
 	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
 						DELAY_PROBE_TIME);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
 	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
 						    neigh_update.neigh_stats_work.work);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe;
 
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
 						    neigh_update.netevent_nb);
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe = NULL;
 	struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ out_err:
 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
 {
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	unregister_netevent_notifier(&neigh_update->netevent_nb);
 
@@ -904,7 +961,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 		err = PTR_ERR(flow_rule);
 		goto err_destroy_direct_tirs;
 	}
-	rep->vport_rx_rule = flow_rule;
+	rpriv->vport_rx_rule = flow_rule;
 
 	err = mlx5e_tc_init(priv);
 	if (err)
@@ -913,7 +970,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	return 0;
 
 err_del_flow_rule:
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -924,10 +981,9 @@ err_destroy_direct_rqts:
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
 	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 }
@@ -967,10 +1023,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 /* e-Switch vport representors */
 
 static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	int err;
 
@@ -992,10 +1048,10 @@ err_remove_sqs:
 }
 
 static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
 		mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1064,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5e_rep_priv *rpriv;
 	struct net_device *netdev;
 	struct mlx5e_priv *upriv;
@@ -1019,7 +1076,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 	if (!rpriv)
 		return -ENOMEM;
 
-	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
 	if (!netdev) {
 		pr_warn("Failed to create representor netdev for vport %d\n",
 			rep->vport);
@@ -1027,8 +1084,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		return -EINVAL;
 	}
 
-	rep->netdev = netdev;
+	rpriv->netdev = netdev;
 	rpriv->rep = rep;
+	rep->rep_if[REP_ETH].priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	err = mlx5e_attach_netdev(netdev_priv(netdev));
 	if (err) {
@@ -1044,7 +1103,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		goto err_detach_netdev;
 	}
 
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
 					 upriv);
 	if (err)
@@ -1076,16 +1136,19 @@ err_destroy_netdev:
 }
 
 static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct net_device *netdev = rep->netdev;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	void *ppriv = priv->ppriv;
 	struct mlx5e_priv *upriv;
 
-	unregister_netdev(rep->netdev);
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	unregister_netdev(netdev);
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
+						    REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
 				     upriv);
 	mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1163,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
-	u8 mac[ETH_ALEN];
-
-	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 
 	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep rep;
+		struct mlx5_eswitch_rep_if rep_if = {};
 
-		rep.load = mlx5e_vport_rep_load;
-		rep.unload = mlx5e_vport_rep_unload;
-		rep.vport = vport;
-		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+		rep_if.load = mlx5e_vport_rep_load;
+		rep_if.unload = mlx5e_vport_rep_unload;
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
 	}
 }
 
@@ -1123,21 +1181,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
 	int vport;
 
 	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
+		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
 }
 
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep rep;
+	struct mlx5_eswitch_rep_if rep_if;
+	struct mlx5e_rep_priv *rpriv;
+
+	rpriv = priv->ppriv;
+	rpriv->netdev = priv->netdev;
 
-	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-	rep.load = mlx5e_nic_rep_load;
-	rep.unload = mlx5e_nic_rep_unload;
-	rep.vport = FDB_UPLINK_VPORT;
-	rep.netdev = priv->netdev;
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+	rep_if.load = mlx5e_nic_rep_load;
+	rep_if.unload = mlx5e_nic_rep_unload;
+	rep_if.priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
 
 	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
 }
@@ -1148,7 +1209,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
 	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
 }
 
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 5659ed9f51e6..b9b481f2833a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
 	struct mlx5_eswitch_rep *rep;
 	struct mlx5e_neigh_update_table neigh_update;
+	struct net_device	*netdev;
+	struct mlx5_flow_handle *vport_rx_rule;
+	struct list_head	vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
 struct mlx5e_neigh {
 	struct net_device *dev;
 	union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
 	int encap_size;
 };
 
+struct mlx5e_rep_sq {
+	struct mlx5_flow_handle	*send_to_vport_rule;
+	struct list_head	list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
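
Note: with the netdev, vport_rx_rule, and vport_sqs_list now owned by struct mlx5e_rep_priv, the lookup path from a generic rep to its ethernet state always goes through the REP_ETH priv pointer. A sketch of the chain using the accessor added above (the helper name is hypothetical):

	/* rep -> rep_if[REP_ETH].priv (rpriv) -> rpriv->netdev -> mlx5e_priv */
	static struct mlx5e_priv *my_rep_to_mlx5e_priv(struct mlx5_eswitch_rep *rep)
	{
		struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

		return netdev_priv(rpriv->netdev);
	}

This is exactly the pattern mlx5e_nic_rep_load() and mlx5e_vport_rep_unload() use in the en_rep.c diff above.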
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3e03d2e8f96a..25a8073f15d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -617,7 +617,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 							  FLOW_DISSECTOR_KEY_ENC_PORTS,
 							  f->mask);
 		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+		struct net_device *up_dev = uplink_rpriv->netdev;
 		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
@@ -1507,6 +1508,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   int *out_ttl)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 
@@ -1520,9 +1522,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = rt->dst.dev;
 
@@ -1543,6 +1546,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 				   struct neighbour **out_n,
 				   int *out_ttl)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct neighbour *n = NULL;
 	struct dst_entry *dst;
 
@@ -1557,9 +1561,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = dst->dev;
 #else
@@ -1859,7 +1864,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+									   REP_ETH);
+	struct net_device *up_dev = uplink_rpriv->netdev;
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
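
Note: every en_tc.c call site that previously used mlx5_eswitch_get_uplink_netdev() now resolves the uplink netdev in two steps through the REP_ETH priv. A sketch of the recurring pattern, factored into a hypothetical helper:

	static struct net_device *my_get_uplink_netdev(struct mlx5_eswitch *esw)
	{
		struct mlx5e_rep_priv *uplink_rpriv;

		/* The uplink rep's REP_ETH priv is its mlx5e_rep_priv,
		 * which now owns the netdev pointer.
		 */
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return uplink_rpriv->netdev;
	}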
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bbb140f517c4..7649e36653d9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -867,9 +867,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -984,9 +985,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -1290,7 +1292,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &tsar_ctx,
+						 tsar_ctx,
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1335,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
 	if (vport->qos.enabled)
 		return -EEXIST;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 initial_max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 &vport->qos.esw_tsar_ix);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1394,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
 	err = mlx5_modify_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 vport->qos.esw_tsar_ix,
 						 bitmask);
 	if (err) {
@@ -1644,13 +1646,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
-	esw->offloads.vport_reps =
-		kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-			GFP_KERNEL);
-	if (!esw->offloads.vport_reps) {
-		err = -ENOMEM;
+	err = esw_offloads_init_reps(esw);
+	if (err)
 		goto abort;
-	}
 
 	hash_init(esw->offloads.encap_tbl);
 	hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1679,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
-	kfree(esw->offloads.vport_reps);
 	kfree(esw);
 	return err;
 }
@@ -1696,7 +1694,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
-	kfree(esw->offloads.vport_reps);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
 	kfree(esw);
 }
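
Note: Gal's syntax fix in the QoS hunks above drops the address-of operator. tsar_ctx and sched_ctx are declared as u32 arrays in the surrounding functions, so the bare name already decays to the u32 pointer that mlx5_create_scheduling_element_cmd() and the MLX5_SET()/MLX5_ADDR_OF() macros expect, whereas &sched_ctx yields a pointer-to-array: the same address with the wrong type. A standalone illustration of the distinction (plain C, not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ctx[4] = {0};

		/* ctx decays to unsigned int *, while &ctx has type
		 * unsigned int (*)[4]; both print the same address, but
		 * only the first matches a parameter declared as a
		 * buffer pointer.
		 */
		printf("%p\n%p\n", (void *)ctx, (void *)&ctx);
		return 0;
	}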
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 565c8b7a399a..3b481182f13a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
 	SRIOV_OFFLOADS
 };
 
+enum {
+	REP_ETH,
+	NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -133,25 +138,21 @@ struct mlx5_eswitch_fdb {
 	};
 };
 
-struct mlx5_esw_sq {
-	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct list_head	list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+	int		       (*load)(struct mlx5_core_dev *dev,
+				       struct mlx5_eswitch_rep *rep);
+	void		       (*unload)(struct mlx5_eswitch_rep *rep);
+	void			*priv;
+	bool		       valid;
 };
 
 struct mlx5_eswitch_rep {
-	int		       (*load)(struct mlx5_eswitch *esw,
-				       struct mlx5_eswitch_rep *rep);
-	void		       (*unload)(struct mlx5_eswitch *esw,
-					 struct mlx5_eswitch_rep *rep);
+	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
 	u16		       vport;
 	u8		       hw_id[ETH_ALEN];
-	struct net_device      *netdev;
-
-	struct mlx5_flow_handle *vport_rx_rule;
-	struct list_head       vport_sqs_list;
 	u16		       vlan;
 	u32		       vlan_refcount;
-	bool		       valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +198,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +224,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+				    u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +264,6 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +273,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *rep);
+				     struct mlx5_eswitch_rep_if *rep_if,
+				     u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+				       int vport_index,
+				       u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 				 struct mlx5_esw_flow_attr *attr);
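
Note: because every per-rep interface is indexed by rep type and the core load/unload loops run over NUM_REP_TYPES, adding the RDMA representors the cover letter anticipates would largely be an enum extension. A hypothetical sketch (REP_IB is not part of this merge):

	enum {
		REP_ETH,
		REP_IB,		/* hypothetical future RDMA rep type */
		NUM_REP_TYPES,
	};

A new consumer would then register its own mlx5_eswitch_rep_if per vport under REP_IB, and the esw_offloads_load_reps()/esw_offloads_unload_reps() loops in eswitch_offloads.c below would pick it up without further e-switch changes.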
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1143d80119bd..99f583a15cc3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (!rep->valid)
+		if (!rep->rep_if[REP_ETH].valid)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ out:
 	return err;
 }
 
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ out:
 	return flow_rule;
 }
 
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_esw_sq *esw_sq, *tmp;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return;
-
-	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-		list_del(&esw_sq->list);
-		kfree(esw_sq);
-	}
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 {
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_esw_sq *esw_sq;
-	int err;
-	int i;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return 0;
-
-	for (i = 0; i < sqns_num; i++) {
-		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-		if (!esw_sq) {
-			err = -ENOMEM;
-			goto out_err;
-		}
-
-		/* Add re-inject rule to the PF/representor sqs */
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								rep->vport,
-								sqns_array[i]);
-		if (IS_ERR(flow_rule)) {
-			err = PTR_ERR(flow_rule);
-			kfree(esw_sq);
-			goto out_err;
-		}
-		esw_sq->send_to_vport_rule = flow_rule;
-		list_add(&esw_sq->list, &rep->vport_sqs_list);
-	}
-	return 0;
-
-out_err:
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-	return err;
+	mlx5_del_flow_rules(rule);
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,12 +684,111 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 	return err;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+	kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_esw_offload *offloads;
+	struct mlx5_eswitch_rep *rep;
+	u8 hw_id[ETH_ALEN];
+	int vport;
+
+	esw->offloads.vport_reps = kcalloc(total_vfs,
+					   sizeof(struct mlx5_eswitch_rep),
+					   GFP_KERNEL);
+	if (!esw->offloads.vport_reps)
+		return -ENOMEM;
+
+	offloads = &esw->offloads;
+	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+	for (vport = 0; vport < total_vfs; vport++) {
+		rep = &offloads->vport_reps[vport];
+
+		rep->vport = vport;
+		ether_addr_copy(rep->hw_id, hw_id);
+	}
+
+	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+	return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+					  u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+
+	for (vport = nvports - 1; vport >= 0; vport--) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		rep->rep_if[rep_type].unload(rep);
+	}
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = NUM_REP_TYPES;
+
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+				       u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
 	int vport;
 	int err;
 
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		if (err)
+			goto err_reps;
+	}
+
+	return 0;
+
+err_reps:
+	esw_offloads_unload_reps_type(esw, vport, rep_type);
+	return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = 0;
+	int err;
+
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+		if (err)
+			goto err_reps;
+	}
+
+	return err;
+
+err_reps:
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+	return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+	int err;
+
 	/* disable PF RoCE so missed packets don't go through RoCE steering */
 	mlx5_dev_list_lock();
 	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto create_fg_err;
 
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-
-		err = rep->load(esw, rep);
-		if (err)
-			goto err_reps;
-	}
+	err = esw_offloads_load_reps(esw, nvports);
+	if (err)
+		goto err_reps;
 
 	return 0;
 
 err_reps:
-	for (vport--; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
 	esw_destroy_vport_rx_group(esw);
 
 create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	for (vport = nvports - 1; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-
+	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *__rep)
+				     struct mlx5_eswitch_rep_if *__rep_if,
+				     u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
+	struct mlx5_eswitch_rep_if *rep_if;
 
-	rep = &offloads->vport_reps[vport_index];
-
-	memset(rep, 0, sizeof(*rep));
+	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
 
-	rep->load = __rep->load;
-	rep->unload = __rep->unload;
-	rep->vport = __rep->vport;
-	rep->netdev = __rep->netdev;
-	ether_addr_copy(rep->hw_id, __rep->hw_id);
+	rep_if->load = __rep_if->load;
+	rep_if->unload = __rep_if->unload;
+	rep_if->priv = __rep_if->priv;
 
-	INIT_LIST_HEAD(&rep->vport_sqs_list);
-	rep->valid = true;
+	rep_if->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index)
+				       int vport_index, u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
 	rep = &offloads->vport_reps[vport_index];
 
 	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-		rep->unload(esw, rep);
+		rep->rep_if[rep_type].unload(rep);
 
-	rep->valid = false;
+	rep->rep_if[rep_type].valid = false;
 }
 
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
 #define UPLINK_REP_INDEX 0
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
 
 	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-	return rep->netdev;
+	return rep->rep_if[rep_type].priv;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index dfaad9ecb2b8..cc4f6ab9374a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2026,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 		return &steering->fdb_root_ns->ns;
 	else
 		return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-		if (steering->esw_egress_root_ns)
-			return &steering->esw_egress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-		if (steering->esw_ingress_root_ns)
-			return &steering->esw_ingress_root_ns->ns;
-		else
-			return NULL;
 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
 		if (steering->sniffer_rx_root_ns)
 			return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+							       enum mlx5_flow_namespace_type type,
+							       int vport)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+		return NULL;
+
+	switch (type) {
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (steering->esw_egress_root_ns &&
+		    steering->esw_egress_root_ns[vport])
+			return &steering->esw_egress_root_ns[vport]->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (steering->esw_ingress_root_ns &&
+		    steering->esw_ingress_root_ns[vport])
+			return &steering->esw_ingress_root_ns[vport]->ns;
+		else
+			return NULL;
+	default:
+		return NULL;
+	}
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 				      unsigned int prio, int num_levels)
 {
@@ -2343,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_egress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+	kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_ingress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+	kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering = dev->priv.steering;
 
 	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->esw_egress_root_ns);
-	cleanup_root_ns(steering->esw_ingress_root_ns);
+	cleanup_egress_acls_root_ns(dev);
+	cleanup_ingress_acls_root_ns(dev);
 	cleanup_root_ns(steering->fdb_root_ns);
 	cleanup_root_ns(steering->sniffer_rx_root_ns);
 	cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2463,86 @@ out_err:
 	return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-	if (!steering->esw_egress_root_ns)
+	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+	if (!steering->esw_egress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio*/
-	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-	if (!steering->esw_ingress_root_ns)
+	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+	if (!steering->esw_ingress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio*/
-	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+					       sizeof(*steering->esw_egress_root_ns),
+					       GFP_KERNEL);
+	if (!steering->esw_egress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_egress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+	kfree(steering->esw_egress_root_ns);
+	return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+						sizeof(*steering->esw_ingress_root_ns),
+						GFP_KERNEL);
+	if (!steering->esw_ingress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_ingress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+	kfree(steering->esw_ingress_root_ns);
+	return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering;
@@ -2488,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		goto err;
 	}
 	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-		err = init_egress_acl_root_ns(steering);
+		err = init_egress_acls_root_ns(dev);
 		if (err)
 			goto err;
 	}
 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-		err = init_ingress_acl_root_ns(steering);
+		err = init_ingress_acls_root_ns(dev);
 		if (err)
 			goto err;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 397d24a621a4..3e571045626f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
 	struct kmem_cache		*ftes_cache;
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_root_namespace *fdb_root_ns;
-	struct mlx5_flow_root_namespace *esw_egress_root_ns;
-	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace **esw_egress_root_ns;
+	struct mlx5_flow_root_namespace **esw_ingress_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
 };
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index b25e7baa273e..a0b48afcb422 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+				  enum mlx5_flow_namespace_type type,
+				  int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,