author     David S. Miller <davem@davemloft.net>    2017-01-30 15:44:05 -0500
committer  David S. Miller <davem@davemloft.net>    2017-01-30 15:44:05 -0500
commit     6415aa5039bd40022e0377afb8ce53f8637080e7 (patch)
tree       511db8af6ac75841b9e4d7325216ed2b371e4842
parent     051a2e0860cd1554fda6beba36487b23252fd3c6 (diff)
parent     d15118af268324ecfc968dd90396e966f4f9b3ff (diff)
Merge tag 'mlx5-fixes-2017-01-27' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:

====================
mlx5-fixes-2017-01-27

A couple of mlx5 core and ethernet driver fixes.

From Or, a couple of error return values and error handling fixes.

From Hadar, support for TC encapsulation offloads even when the mlx5e
uplink device is stacked under an upper device.

From Gal, two patches to fix RSS hash modifications via ethtool.

From Moshe, a needed ets capability check.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
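One recurring theme in the diffs below is replacing ENOTSUPP with EOPNOTSUPP in
capability checks: ENOTSUPP (524) is a kernel-internal NFS value that user space
prints as "Unknown error 524", while EOPNOTSUPP maps to the standard "Operation
not supported" errno. A minimal sketch of the resulting pattern, in the driver's
context but with a made-up function name (not code from this series):

static int mlx5_example_cap_gate(struct mlx5_core_dev *mdev)
{
        /* Hypothetical capability gate: when the device lacks the feature,
         * return the user-visible -EOPNOTSUPP rather than the kernel-internal
         * -ENOTSUPP, which strerror() reports as "Unknown error 524".
         */
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;

        return 0;
}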
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c               |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h                |   7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c          |  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c        |  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c             |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c     |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c           | 202
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c             |  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c           |  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  |  36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c            |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c           |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c              |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c              |   4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c             |   2
15 files changed, 181 insertions(+), 157 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
         if (cmd->cmdif_rev > CMD_IF_REV) {
                 dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
                         CMD_IF_REV, cmd->cmdif_rev);
-                err = -ENOTSUPP;
+                err = -EOPNOTSUPP;
                 goto err_free_page;
         }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
 int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
 
 int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                    enum mlx5e_traffic_types tt);
 
 int mlx5e_open_locked(struct net_device *netdev);
 int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
 
 static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
 {
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 #else
 int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
         int i;
 
         if (!MLX5_CAP_GEN(priv->mdev, ets))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
         for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
         int err;
 
         if (!MLX5_CAP_GEN(priv->mdev, ets))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         err = mlx5e_dbcnl_validate_ets(netdev, ets);
         if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
         struct mlx5_core_dev *mdev = priv->mdev;
         struct ieee_ets ets;
         struct ieee_pfc pfc;
-        int err = -ENOTSUPP;
+        int err = -EOPNOTSUPP;
         int i;
 
         if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
         struct mlx5e_priv *priv = netdev_priv(netdev);
         struct mlx5_core_dev *mdev = priv->mdev;
 
+        if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+                netdev_err(netdev, "%s, ets is not supported\n", __func__);
+                return;
+        }
+
         if (priority >= CEE_DCBX_MAX_PRIO) {
                 netdev_err(netdev,
                            "%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5197817e4b2f..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
         struct mlx5e_priv *priv = netdev_priv(netdev);
 
         if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
         coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
         int i;
 
         if (!MLX5_CAP_GEN(mdev, cq_moderation))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         mutex_lock(&priv->state_lock);
 
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 
 static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
 {
-        struct mlx5_core_dev *mdev = priv->mdev;
         void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-        int i;
+        struct mlx5_core_dev *mdev = priv->mdev;
+        int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+        int tt;
 
         MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-        mlx5e_build_tir_ctx_hash(tirc, priv);
 
-        for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-                mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+                memset(tirc, 0, ctxlen);
+                mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+                mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+        }
 }
 
 static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 {
         struct mlx5e_priv *priv = netdev_priv(dev);
         int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+        bool hash_changed = false;
         void *in;
 
         if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
                 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
         }
 
-        if (key)
+        if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+            hfunc != priv->params.rss_hfunc) {
+                priv->params.rss_hfunc = hfunc;
+                hash_changed = true;
+        }
+
+        if (key) {
                 memcpy(priv->params.toeplitz_hash_key, key,
                        sizeof(priv->params.toeplitz_hash_key));
+                hash_changed = hash_changed ||
+                               priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+        }
 
-        if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-                priv->params.rss_hfunc = hfunc;
-
-        mlx5e_modify_tirs_hash(priv, in, inlen);
+        if (hash_changed)
+                mlx5e_modify_tirs_hash(priv, in, inlen);
 
         mutex_unlock(&priv->state_lock);
 
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
         u32 mlx5_wol_mode;
 
         if (!wol_supported)
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         if (wol->wolopts & ~wol_supported)
                 return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
 
         if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
             !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         if (!rx_mode_changed)
                 return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
         bool reset;
 
         if (!MLX5_CAP_GEN(mdev, cqe_compression))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
                 netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
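The mlx5e_set_rxfh() hunk above reduces to a small rule: only push a TIR hash
update when the hash function actually changes, or when a new Toeplitz key is
supplied while Toeplitz is the resulting hash function. A standalone restatement
of that rule as a sketch (hypothetical helper, not part of the driver):

#include <linux/ethtool.h>

/* Hypothetical distillation of the hash_changed logic in mlx5e_set_rxfh(). */
static bool rss_hash_needs_update(u8 cur_hfunc, u8 req_hfunc, bool new_key)
{
        /* ETH_RSS_HASH_NO_CHANGE means "keep the current hash function". */
        u8 hfunc = (req_hfunc == ETH_RSS_HASH_NO_CHANGE) ? cur_hfunc : req_hfunc;

        if (hfunc != cur_hfunc)
                return true;    /* hash function switched */

        /* A new key only matters if Toeplitz ends up as the hash function. */
        return new_key && hfunc == ETH_RSS_HASH_TOP;
}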
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
                                                MLX5_FLOW_NAMESPACE_KERNEL);
 
         if (!priv->fs.ns)
-                return -EINVAL;
+                return -EOPNOTSUPP;
 
         err = mlx5e_arfs_create_tables(priv);
         if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
         ns = mlx5_get_flow_namespace(priv->mdev,
                                      MLX5_FLOW_NAMESPACE_ETHTOOL);
         if (!ns)
-                return ERR_PTR(-ENOTSUPP);
+                return ERR_PTR(-EOPNOTSUPP);
 
         table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
                                         flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
         MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                    enum mlx5e_traffic_types tt)
 {
+        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
         MLX5_SET(tirc, tirc, rx_hash_fn,
                  mlx5e_rx_hash_fn(priv->params.rss_hfunc));
         if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
                 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
         }
+
+        switch (tt) {
+        case MLX5E_TT_IPV4_TCP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV4);
+                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                         MLX5_L4_PROT_TYPE_TCP);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_L4PORTS);
+                break;
+
+        case MLX5E_TT_IPV6_TCP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV6);
+                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                         MLX5_L4_PROT_TYPE_TCP);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_L4PORTS);
+                break;
+
+        case MLX5E_TT_IPV4_UDP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV4);
+                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                         MLX5_L4_PROT_TYPE_UDP);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_L4PORTS);
+                break;
+
+        case MLX5E_TT_IPV6_UDP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV6);
+                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                         MLX5_L4_PROT_TYPE_UDP);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_L4PORTS);
+                break;
+
+        case MLX5E_TT_IPV4_IPSEC_AH:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV4);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_IPSEC_SPI);
+                break;
+
+        case MLX5E_TT_IPV6_IPSEC_AH:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV6);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_IPSEC_SPI);
+                break;
+
+        case MLX5E_TT_IPV4_IPSEC_ESP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV4);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_IPSEC_SPI);
+                break;
+
+        case MLX5E_TT_IPV6_IPSEC_ESP:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV6);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP_IPSEC_SPI);
+                break;
+
+        case MLX5E_TT_IPV4:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV4);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP);
+                break;
+
+        case MLX5E_TT_IPV6:
+                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                         MLX5_L3_PROT_TYPE_IPV6);
+                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                         MLX5_HASH_IP);
+                break;
+        default:
+                WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+        }
 }
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
                                       enum mlx5e_traffic_types tt)
 {
-        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
         MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                                 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                 MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
         mlx5e_build_tir_ctx_lro(tirc, priv);
 
         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
         MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-        mlx5e_build_tir_ctx_hash(tirc, priv);
-
-        switch (tt) {
-        case MLX5E_TT_IPV4_TCP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV4);
-                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                         MLX5_L4_PROT_TYPE_TCP);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_L4PORTS);
-                break;
-
-        case MLX5E_TT_IPV6_TCP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV6);
-                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                         MLX5_L4_PROT_TYPE_TCP);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_L4PORTS);
-                break;
-
-        case MLX5E_TT_IPV4_UDP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV4);
-                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                         MLX5_L4_PROT_TYPE_UDP);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_L4PORTS);
-                break;
-
-        case MLX5E_TT_IPV6_UDP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV6);
-                MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                         MLX5_L4_PROT_TYPE_UDP);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_L4PORTS);
-                break;
-
-        case MLX5E_TT_IPV4_IPSEC_AH:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV4);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_IPSEC_SPI);
-                break;
-
-        case MLX5E_TT_IPV6_IPSEC_AH:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV6);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_IPSEC_SPI);
-                break;
-
-        case MLX5E_TT_IPV4_IPSEC_ESP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV4);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_IPSEC_SPI);
-                break;
-
-        case MLX5E_TT_IPV6_IPSEC_ESP:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV6);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP_IPSEC_SPI);
-                break;
-
-        case MLX5E_TT_IPV4:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV4);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP);
-                break;
-
-        case MLX5E_TT_IPV6:
-                MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                         MLX5_L3_PROT_TYPE_IPV6);
-                MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                         MLX5_HASH_IP);
-                break;
-        default:
-                WARN_ONCE(true,
-                          "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-        }
+        mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
         if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
         if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
             !MLX5_CAP_GEN(mdev, nic_flow_table) ||
             !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
             < 3) {
                 mlx5_core_warn(mdev,
                                "Not creating net device, some required device capabilities are missing\n");
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
         }
         if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                    __be32 *saddr,
                                    int *out_ttl)
 {
+        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
         struct rtable *rt;
         struct neighbour *n = NULL;
         int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
         return -EOPNOTSUPP;
 #endif
-
-        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-                pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
-                ip_rt_put(rt);
-                return -EOPNOTSUPP;
-        }
+        /* if the egress device isn't on the same HW e-switch, we use the uplink */
+        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+                *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+        else
+                *out_dev = rt->dst.dev;
 
         ttl = ip4_dst_hoplimit(&rt->dst);
         n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
         *out_n = n;
         *saddr = fl4->saddr;
         *out_ttl = ttl;
-        *out_dev = rt->dst.dev;
 
         return 0;
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
 
         if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
             !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
                   vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
         root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
         if (!root_ns) {
                 esw_warn(dev, "Failed to get FDB flow namespace\n");
-                return -ENOMEM;
+                return -EOPNOTSUPP;
         }
 
         flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
         root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
         if (!root_ns) {
                 esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-                return -EIO;
+                return -EOPNOTSUPP;
         }
 
         flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
         root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
         if (!root_ns) {
                 esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-                return -EIO;
+                return -EOPNOTSUPP;
         }
 
         flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
         if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
             !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                 esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
         }
 
         if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
         return 0;
 
 out_notsupp:
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
         root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
         if (!root_ns) {
                 esw_warn(dev, "Failed to get FDB flow namespace\n");
+                err = -EOPNOTSUPP;
                 goto ns_err;
         }
 
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
         ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
         if (!ns) {
                 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-                return -ENOMEM;
+                return -EOPNOTSUPP;
         }
 
         ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
                 esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                 err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                 if (err1)
-                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
         }
         if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                 if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
         int vport;
         int err;
 
+        /* disable PF RoCE so missed packets don't go through RoCE steering */
+        mlx5_dev_list_lock();
+        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+        mlx5_dev_list_unlock();
+
         err = esw_create_offloads_fdb_table(esw, nvports);
         if (err)
-                return err;
+                goto create_fdb_err;
 
         err = esw_create_offloads_table(esw);
         if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
                         goto err_reps;
         }
 
-        /* disable PF RoCE so missed packets don't go through RoCE steering */
-        mlx5_dev_list_lock();
-        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-        mlx5_dev_list_unlock();
-
         return 0;
 
 err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
 
 create_ft_err:
         esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+        /* enable back PF RoCE */
+        mlx5_dev_list_lock();
+        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+        mlx5_dev_list_unlock();
+
         return err;
 }
 
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
         int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
-        /* enable back PF RoCE */
-        mlx5_dev_list_lock();
-        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-        mlx5_dev_list_unlock();
-
         mlx5_eswitch_disable_sriov(esw);
         err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
         if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
                 esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
         }
 
+        /* enable back PF RoCE */
+        mlx5_dev_list_lock();
+        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+        mlx5_dev_list_unlock();
+
         return err;
 }
 
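The esw_offloads_init()/esw_offloads_stop() hunks above also reorder when PF RoCE
is toggled around the mode switch: RoCE is now removed before any offloads table
is created (so missed packets cannot hit RoCE steering while the FDB is being set
up), re-added on the new create_fdb_err unwind path, and re-added in
esw_offloads_stop() only after the switch back to legacy. A simplified,
self-contained outline of that ordering (all names below are stand-ins, not
driver code):

/* Hypothetical outline of the "disable RoCE first, re-enable on unwind" flow. */
struct toy_esw { int unused; };

static void toy_remove_roce(struct toy_esw *esw) { (void)esw; }
static void toy_add_roce(struct toy_esw *esw) { (void)esw; }
static int toy_create_offloads_tables(struct toy_esw *esw) { (void)esw; return 0; }

static int toy_offloads_init(struct toy_esw *esw)
{
        int err;

        toy_remove_roce(esw);           /* before any offloads steering exists */

        err = toy_create_offloads_tables(esw);
        if (err)
                goto create_err;

        return 0;

create_err:
        toy_add_roce(esw);              /* failure path restores RoCE */
        return err;
}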
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
                                         flow_table_properties_nic_receive.
                                         flow_modify_en);
         if (!atomic_mod_cap)
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
         opmod = 1;
 
         return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
         struct mlx5_flow_table *ft;
 
         ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-        if (!ns)
+        if (WARN_ON(!ns))
                 return -EINVAL;
         ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
         if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
                 return 0;
         }
 
-        return -ENOTSUPP;
+        return -EOPNOTSUPP;
 }
 
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
         u32 out[MLX5_ST_SZ_DW(qtct_reg)];
 
         if (!MLX5_CAP_GEN(mdev, ets))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
                                     MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
         u32 in[MLX5_ST_SZ_DW(qtct_reg)];
 
         if (!MLX5_CAP_GEN(mdev, ets))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         memset(in, 0, sizeof(in));
         return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
         if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                 return -EACCES;
         if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-                return -ENOTSUPP;
+                return -EOPNOTSUPP;
 
         in = mlx5_vzalloc(inlen);
         if (!in)