diff options
| author | David S. Miller <davem@davemloft.net> | 2019-03-11 19:22:49 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-03-11 19:22:49 -0400 |
| commit | a3b1933d34d5bb26d7503752e3528315a9e28339 (patch) | |
| tree | f18a9ecaf28651557531e2b3bbc5a93b9386c2a6 /drivers/net | |
| parent | c6873d18cb4a5be9623d468c626b5650451ba44a (diff) | |
| parent | 24319258660a84dd77f4be026a55b10a12524919 (diff) | |
Merge tag 'mlx5-fixes-2019-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
Mellanox, mlx5 fixes 2019-03-11
For -stable v5.0:
('net/mlx5e: Fix access to non-existing receive queue')
('net/mlx5e: Properly get the PF number phys port name ndo')
('net/mlx5: Fix multiple updates of steering rules in parallel')
('net/mlx5: Avoid panic when setting vport mac, getting vport config')
('net/mlx5: Avoid panic when setting vport rate')
('net/mlx5e: IPoIB, Fix RX checksum statistics update')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 3 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 13 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 17 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 161 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/lag.c | 21 | ||||
| -rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 |
8 files changed, 128 insertions(+), 101 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 0804b478ad19..a0987cc5fe4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -424,6 +424,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
| 424 | 424 | ||
| 425 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 425 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
| 426 | priv->channels.params = new_channels.params; | 426 | priv->channels.params = new_channels.params; |
| 427 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
| 428 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
| 429 | MLX5E_INDIR_RQT_SIZE, count); | ||
| 427 | goto out; | 430 | goto out; |
| 428 | } | 431 | } |
| 429 | 432 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index a1a3e2774989..a66b6ed80b30 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -1129,16 +1129,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, | |||
| 1129 | struct mlx5e_priv *priv = netdev_priv(dev); | 1129 | struct mlx5e_priv *priv = netdev_priv(dev); |
| 1130 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1130 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
| 1131 | struct mlx5_eswitch_rep *rep = rpriv->rep; | 1131 | struct mlx5_eswitch_rep *rep = rpriv->rep; |
| 1132 | int ret, pf_num; | 1132 | unsigned int fn; |
| 1133 | int ret; | ||
| 1133 | 1134 | ||
| 1134 | ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); | 1135 | fn = PCI_FUNC(priv->mdev->pdev->devfn); |
| 1135 | if (ret) | 1136 | if (fn >= MLX5_MAX_PORTS) |
| 1136 | return ret; | 1137 | return -EOPNOTSUPP; |
| 1137 | 1138 | ||
| 1138 | if (rep->vport == MLX5_VPORT_UPLINK) | 1139 | if (rep->vport == MLX5_VPORT_UPLINK) |
| 1139 | ret = snprintf(buf, len, "p%d", pf_num); | 1140 | ret = snprintf(buf, len, "p%d", fn); |
| 1140 | else | 1141 | else |
| 1141 | ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); | 1142 | ret = snprintf(buf, len, "pf%dvf%d", fn, rep->vport - 1); |
| 1142 | 1143 | ||
| 1143 | if (ret >= len) | 1144 | if (ret >= len) |
| 1144 | return -EOPNOTSUPP; | 1145 | return -EOPNOTSUPP; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index be396e5e4e39..3dde5c7e0739 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -1295,8 +1295,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
| 1295 | 1295 | ||
| 1296 | skb->protocol = *((__be16 *)(skb->data)); | 1296 | skb->protocol = *((__be16 *)(skb->data)); |
| 1297 | 1297 | ||
| 1298 | skb->ip_summed = CHECKSUM_COMPLETE; | 1298 | if (netdev->features & NETIF_F_RXCSUM) { |
| 1299 | skb->csum = csum_unfold((__force __sum16)cqe->check_sum); | 1299 | skb->ip_summed = CHECKSUM_COMPLETE; |
| 1300 | skb->csum = csum_unfold((__force __sum16)cqe->check_sum); | ||
| 1301 | stats->csum_complete++; | ||
| 1302 | } else { | ||
| 1303 | skb->ip_summed = CHECKSUM_NONE; | ||
| 1304 | stats->csum_none++; | ||
| 1305 | } | ||
| 1300 | 1306 | ||
| 1301 | if (unlikely(mlx5e_rx_hw_stamp(tstamp))) | 1307 | if (unlikely(mlx5e_rx_hw_stamp(tstamp))) |
| 1302 | skb_hwtstamps(skb)->hwtstamp = | 1308 | skb_hwtstamps(skb)->hwtstamp = |
| @@ -1315,7 +1321,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq, | |||
| 1315 | 1321 | ||
| 1316 | skb->dev = netdev; | 1322 | skb->dev = netdev; |
| 1317 | 1323 | ||
| 1318 | stats->csum_complete++; | ||
| 1319 | stats->packets++; | 1324 | stats->packets++; |
| 1320 | stats->bytes += cqe_bcnt; | 1325 | stats->bytes += cqe_bcnt; |
| 1321 | } | 1326 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d0b28251abf2..ecd2c747f726 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -1931,7 +1931,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1931 | u64 node_guid; | 1931 | u64 node_guid; |
| 1932 | int err = 0; | 1932 | int err = 0; |
| 1933 | 1933 | ||
| 1934 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) | 1934 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
| 1935 | return -EPERM; | 1935 | return -EPERM; |
| 1936 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) | 1936 | if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac)) |
| 1937 | return -EINVAL; | 1937 | return -EINVAL; |
| @@ -2005,7 +2005,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, | |||
| 2005 | { | 2005 | { |
| 2006 | struct mlx5_vport *evport; | 2006 | struct mlx5_vport *evport; |
| 2007 | 2007 | ||
| 2008 | if (!MLX5_CAP_GEN(esw->dev, vport_group_manager)) | 2008 | if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager)) |
| 2009 | return -EPERM; | 2009 | return -EPERM; |
| 2010 | if (!LEGAL_VPORT(esw, vport)) | 2010 | if (!LEGAL_VPORT(esw, vport)) |
| 2011 | return -EINVAL; | 2011 | return -EINVAL; |
| @@ -2297,19 +2297,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider) | |||
| 2297 | int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, | 2297 | int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, |
| 2298 | u32 max_rate, u32 min_rate) | 2298 | u32 max_rate, u32 min_rate) |
| 2299 | { | 2299 | { |
| 2300 | u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); | ||
| 2301 | bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && | ||
| 2302 | fw_max_bw_share >= MLX5_MIN_BW_SHARE; | ||
| 2303 | bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); | ||
| 2304 | struct mlx5_vport *evport; | 2300 | struct mlx5_vport *evport; |
| 2301 | u32 fw_max_bw_share; | ||
| 2305 | u32 previous_min_rate; | 2302 | u32 previous_min_rate; |
| 2306 | u32 divider; | 2303 | u32 divider; |
| 2304 | bool min_rate_supported; | ||
| 2305 | bool max_rate_supported; | ||
| 2307 | int err = 0; | 2306 | int err = 0; |
| 2308 | 2307 | ||
| 2309 | if (!ESW_ALLOWED(esw)) | 2308 | if (!ESW_ALLOWED(esw)) |
| 2310 | return -EPERM; | 2309 | return -EPERM; |
| 2311 | if (!LEGAL_VPORT(esw, vport)) | 2310 | if (!LEGAL_VPORT(esw, vport)) |
| 2312 | return -EINVAL; | 2311 | return -EINVAL; |
| 2312 | |||
| 2313 | fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); | ||
| 2314 | min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && | ||
| 2315 | fw_max_bw_share >= MLX5_MIN_BW_SHARE; | ||
| 2316 | max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); | ||
| 2317 | |||
| 2313 | if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) | 2318 | if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) |
| 2314 | return -EOPNOTSUPP; | 2319 | return -EOPNOTSUPP; |
| 2315 | 2320 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f2cfa012315e..0be3eb86dd84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -263,10 +263,11 @@ static void nested_down_write_ref_node(struct fs_node *node, | |||
| 263 | } | 263 | } |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | static void down_write_ref_node(struct fs_node *node) | 266 | static void down_write_ref_node(struct fs_node *node, bool locked) |
| 267 | { | 267 | { |
| 268 | if (node) { | 268 | if (node) { |
| 269 | down_write(&node->lock); | 269 | if (!locked) |
| 270 | down_write(&node->lock); | ||
| 270 | refcount_inc(&node->refcount); | 271 | refcount_inc(&node->refcount); |
| 271 | } | 272 | } |
| 272 | } | 273 | } |
| @@ -277,13 +278,14 @@ static void up_read_ref_node(struct fs_node *node) | |||
| 277 | up_read(&node->lock); | 278 | up_read(&node->lock); |
| 278 | } | 279 | } |
| 279 | 280 | ||
| 280 | static void up_write_ref_node(struct fs_node *node) | 281 | static void up_write_ref_node(struct fs_node *node, bool locked) |
| 281 | { | 282 | { |
| 282 | refcount_dec(&node->refcount); | 283 | refcount_dec(&node->refcount); |
| 283 | up_write(&node->lock); | 284 | if (!locked) |
| 285 | up_write(&node->lock); | ||
| 284 | } | 286 | } |
| 285 | 287 | ||
| 286 | static void tree_put_node(struct fs_node *node) | 288 | static void tree_put_node(struct fs_node *node, bool locked) |
| 287 | { | 289 | { |
| 288 | struct fs_node *parent_node = node->parent; | 290 | struct fs_node *parent_node = node->parent; |
| 289 | 291 | ||
| @@ -294,27 +296,27 @@ static void tree_put_node(struct fs_node *node) | |||
| 294 | /* Only root namespace doesn't have parent and we just | 296 | /* Only root namespace doesn't have parent and we just |
| 295 | * need to free its node. | 297 | * need to free its node. |
| 296 | */ | 298 | */ |
| 297 | down_write_ref_node(parent_node); | 299 | down_write_ref_node(parent_node, locked); |
| 298 | list_del_init(&node->list); | 300 | list_del_init(&node->list); |
| 299 | if (node->del_sw_func) | 301 | if (node->del_sw_func) |
| 300 | node->del_sw_func(node); | 302 | node->del_sw_func(node); |
| 301 | up_write_ref_node(parent_node); | 303 | up_write_ref_node(parent_node, locked); |
| 302 | } else { | 304 | } else { |
| 303 | kfree(node); | 305 | kfree(node); |
| 304 | } | 306 | } |
| 305 | node = NULL; | 307 | node = NULL; |
| 306 | } | 308 | } |
| 307 | if (!node && parent_node) | 309 | if (!node && parent_node) |
| 308 | tree_put_node(parent_node); | 310 | tree_put_node(parent_node, locked); |
| 309 | } | 311 | } |
| 310 | 312 | ||
| 311 | static int tree_remove_node(struct fs_node *node) | 313 | static int tree_remove_node(struct fs_node *node, bool locked) |
| 312 | { | 314 | { |
| 313 | if (refcount_read(&node->refcount) > 1) { | 315 | if (refcount_read(&node->refcount) > 1) { |
| 314 | refcount_dec(&node->refcount); | 316 | refcount_dec(&node->refcount); |
| 315 | return -EEXIST; | 317 | return -EEXIST; |
| 316 | } | 318 | } |
| 317 | tree_put_node(node); | 319 | tree_put_node(node, locked); |
| 318 | return 0; | 320 | return 0; |
| 319 | } | 321 | } |
| 320 | 322 | ||
| @@ -420,22 +422,34 @@ static void del_sw_flow_table(struct fs_node *node) | |||
| 420 | kfree(ft); | 422 | kfree(ft); |
| 421 | } | 423 | } |
| 422 | 424 | ||
| 423 | static void del_sw_hw_rule(struct fs_node *node) | 425 | static void modify_fte(struct fs_fte *fte) |
| 424 | { | 426 | { |
| 425 | struct mlx5_flow_root_namespace *root; | 427 | struct mlx5_flow_root_namespace *root; |
| 426 | struct mlx5_flow_rule *rule; | ||
| 427 | struct mlx5_flow_table *ft; | 428 | struct mlx5_flow_table *ft; |
| 428 | struct mlx5_flow_group *fg; | 429 | struct mlx5_flow_group *fg; |
| 429 | struct fs_fte *fte; | 430 | struct mlx5_core_dev *dev; |
| 430 | int modify_mask; | ||
| 431 | struct mlx5_core_dev *dev = get_dev(node); | ||
| 432 | int err; | 431 | int err; |
| 433 | bool update_fte = false; | ||
| 434 | 432 | ||
| 435 | fs_get_obj(rule, node); | ||
| 436 | fs_get_obj(fte, rule->node.parent); | ||
| 437 | fs_get_obj(fg, fte->node.parent); | 433 | fs_get_obj(fg, fte->node.parent); |
| 438 | fs_get_obj(ft, fg->node.parent); | 434 | fs_get_obj(ft, fg->node.parent); |
| 435 | dev = get_dev(&fte->node); | ||
| 436 | |||
| 437 | root = find_root(&ft->node); | ||
| 438 | err = root->cmds->update_fte(dev, ft, fg->id, fte->modify_mask, fte); | ||
| 439 | if (err) | ||
| 440 | mlx5_core_warn(dev, | ||
| 441 | "%s can't del rule fg id=%d fte_index=%d\n", | ||
| 442 | __func__, fg->id, fte->index); | ||
| 443 | fte->modify_mask = 0; | ||
| 444 | } | ||
| 445 | |||
| 446 | static void del_sw_hw_rule(struct fs_node *node) | ||
| 447 | { | ||
| 448 | struct mlx5_flow_rule *rule; | ||
| 449 | struct fs_fte *fte; | ||
| 450 | |||
| 451 | fs_get_obj(rule, node); | ||
| 452 | fs_get_obj(fte, rule->node.parent); | ||
| 439 | trace_mlx5_fs_del_rule(rule); | 453 | trace_mlx5_fs_del_rule(rule); |
| 440 | if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { | 454 | if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { |
| 441 | mutex_lock(&rule->dest_attr.ft->lock); | 455 | mutex_lock(&rule->dest_attr.ft->lock); |
| @@ -445,27 +459,19 @@ static void del_sw_hw_rule(struct fs_node *node) | |||
| 445 | 459 | ||
| 446 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && | 460 | if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER && |
| 447 | --fte->dests_size) { | 461 | --fte->dests_size) { |
| 448 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | | 462 | fte->modify_mask |= |
| 449 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); | 463 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) | |
| 464 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS); | ||
| 450 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; | 465 | fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT; |
| 451 | update_fte = true; | ||
| 452 | goto out; | 466 | goto out; |
| 453 | } | 467 | } |
| 454 | 468 | ||
| 455 | if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && | 469 | if ((fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && |
| 456 | --fte->dests_size) { | 470 | --fte->dests_size) { |
| 457 | modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); | 471 | fte->modify_mask |= |
| 458 | update_fte = true; | 472 | BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST); |
| 459 | } | 473 | } |
| 460 | out: | 474 | out: |
| 461 | root = find_root(&ft->node); | ||
| 462 | if (update_fte && fte->dests_size) { | ||
| 463 | err = root->cmds->update_fte(dev, ft, fg->id, modify_mask, fte); | ||
| 464 | if (err) | ||
| 465 | mlx5_core_warn(dev, | ||
| 466 | "%s can't del rule fg id=%d fte_index=%d\n", | ||
| 467 | __func__, fg->id, fte->index); | ||
| 468 | } | ||
| 469 | kfree(rule); | 475 | kfree(rule); |
| 470 | } | 476 | } |
| 471 | 477 | ||
| @@ -491,6 +497,7 @@ static void del_hw_fte(struct fs_node *node) | |||
| 491 | mlx5_core_warn(dev, | 497 | mlx5_core_warn(dev, |
| 492 | "flow steering can't delete fte in index %d of flow group id %d\n", | 498 | "flow steering can't delete fte in index %d of flow group id %d\n", |
| 493 | fte->index, fg->id); | 499 | fte->index, fg->id); |
| 500 | node->active = 0; | ||
| 494 | } | 501 | } |
| 495 | } | 502 | } |
| 496 | 503 | ||
| @@ -591,7 +598,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, | |||
| 591 | fte->node.type = FS_TYPE_FLOW_ENTRY; | 598 | fte->node.type = FS_TYPE_FLOW_ENTRY; |
| 592 | fte->action = *flow_act; | 599 | fte->action = *flow_act; |
| 593 | 600 | ||
| 594 | tree_init_node(&fte->node, del_hw_fte, del_sw_fte); | 601 | tree_init_node(&fte->node, NULL, del_sw_fte); |
| 595 | 602 | ||
| 596 | return fte; | 603 | return fte; |
| 597 | } | 604 | } |
| @@ -858,7 +865,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, | |||
| 858 | fs_get_obj(fte, rule->node.parent); | 865 | fs_get_obj(fte, rule->node.parent); |
| 859 | if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) | 866 | if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) |
| 860 | return -EINVAL; | 867 | return -EINVAL; |
| 861 | down_write_ref_node(&fte->node); | 868 | down_write_ref_node(&fte->node, false); |
| 862 | fs_get_obj(fg, fte->node.parent); | 869 | fs_get_obj(fg, fte->node.parent); |
| 863 | fs_get_obj(ft, fg->node.parent); | 870 | fs_get_obj(ft, fg->node.parent); |
| 864 | 871 | ||
| @@ -866,7 +873,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule, | |||
| 866 | root = find_root(&ft->node); | 873 | root = find_root(&ft->node); |
| 867 | err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, | 874 | err = root->cmds->update_fte(get_dev(&ft->node), ft, fg->id, |
| 868 | modify_mask, fte); | 875 | modify_mask, fte); |
| 869 | up_write_ref_node(&fte->node); | 876 | up_write_ref_node(&fte->node, false); |
| 870 | 877 | ||
| 871 | return err; | 878 | return err; |
| 872 | } | 879 | } |
| @@ -1016,11 +1023,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa | |||
| 1016 | if (err) | 1023 | if (err) |
| 1017 | goto destroy_ft; | 1024 | goto destroy_ft; |
| 1018 | ft->node.active = true; | 1025 | ft->node.active = true; |
| 1019 | down_write_ref_node(&fs_prio->node); | 1026 | down_write_ref_node(&fs_prio->node, false); |
| 1020 | tree_add_node(&ft->node, &fs_prio->node); | 1027 | tree_add_node(&ft->node, &fs_prio->node); |
| 1021 | list_add_flow_table(ft, fs_prio); | 1028 | list_add_flow_table(ft, fs_prio); |
| 1022 | fs_prio->num_ft++; | 1029 | fs_prio->num_ft++; |
| 1023 | up_write_ref_node(&fs_prio->node); | 1030 | up_write_ref_node(&fs_prio->node, false); |
| 1024 | mutex_unlock(&root->chain_lock); | 1031 | mutex_unlock(&root->chain_lock); |
| 1025 | trace_mlx5_fs_add_ft(ft); | 1032 | trace_mlx5_fs_add_ft(ft); |
| 1026 | return ft; | 1033 | return ft; |
| @@ -1114,17 +1121,17 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft, | |||
| 1114 | if (ft->autogroup.active) | 1121 | if (ft->autogroup.active) |
| 1115 | return ERR_PTR(-EPERM); | 1122 | return ERR_PTR(-EPERM); |
| 1116 | 1123 | ||
| 1117 | down_write_ref_node(&ft->node); | 1124 | down_write_ref_node(&ft->node, false); |
| 1118 | fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, | 1125 | fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria, |
| 1119 | start_index, end_index, | 1126 | start_index, end_index, |
| 1120 | ft->node.children.prev); | 1127 | ft->node.children.prev); |
| 1121 | up_write_ref_node(&ft->node); | 1128 | up_write_ref_node(&ft->node, false); |
| 1122 | if (IS_ERR(fg)) | 1129 | if (IS_ERR(fg)) |
| 1123 | return fg; | 1130 | return fg; |
| 1124 | 1131 | ||
| 1125 | err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); | 1132 | err = root->cmds->create_flow_group(dev, ft, fg_in, &fg->id); |
| 1126 | if (err) { | 1133 | if (err) { |
| 1127 | tree_put_node(&fg->node); | 1134 | tree_put_node(&fg->node, false); |
| 1128 | return ERR_PTR(err); | 1135 | return ERR_PTR(err); |
| 1129 | } | 1136 | } |
| 1130 | trace_mlx5_fs_add_fg(fg); | 1137 | trace_mlx5_fs_add_fg(fg); |
| @@ -1521,10 +1528,10 @@ static void free_match_list(struct match_list_head *head) | |||
| 1521 | struct match_list *iter, *match_tmp; | 1528 | struct match_list *iter, *match_tmp; |
| 1522 | 1529 | ||
| 1523 | list_del(&head->first.list); | 1530 | list_del(&head->first.list); |
| 1524 | tree_put_node(&head->first.g->node); | 1531 | tree_put_node(&head->first.g->node, false); |
| 1525 | list_for_each_entry_safe(iter, match_tmp, &head->list, | 1532 | list_for_each_entry_safe(iter, match_tmp, &head->list, |
| 1526 | list) { | 1533 | list) { |
| 1527 | tree_put_node(&iter->g->node); | 1534 | tree_put_node(&iter->g->node, false); |
| 1528 | list_del(&iter->list); | 1535 | list_del(&iter->list); |
| 1529 | kfree(iter); | 1536 | kfree(iter); |
| 1530 | } | 1537 | } |
| @@ -1601,11 +1608,16 @@ lookup_fte_locked(struct mlx5_flow_group *g, | |||
| 1601 | fte_tmp = NULL; | 1608 | fte_tmp = NULL; |
| 1602 | goto out; | 1609 | goto out; |
| 1603 | } | 1610 | } |
| 1611 | if (!fte_tmp->node.active) { | ||
| 1612 | tree_put_node(&fte_tmp->node, false); | ||
| 1613 | fte_tmp = NULL; | ||
| 1614 | goto out; | ||
| 1615 | } | ||
| 1604 | 1616 | ||
| 1605 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); | 1617 | nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD); |
| 1606 | out: | 1618 | out: |
| 1607 | if (take_write) | 1619 | if (take_write) |
| 1608 | up_write_ref_node(&g->node); | 1620 | up_write_ref_node(&g->node, false); |
| 1609 | else | 1621 | else |
| 1610 | up_read_ref_node(&g->node); | 1622 | up_read_ref_node(&g->node); |
| 1611 | return fte_tmp; | 1623 | return fte_tmp; |
| @@ -1647,8 +1659,8 @@ search_again_locked: | |||
| 1647 | continue; | 1659 | continue; |
| 1648 | rule = add_rule_fg(g, spec->match_value, | 1660 | rule = add_rule_fg(g, spec->match_value, |
| 1649 | flow_act, dest, dest_num, fte_tmp); | 1661 | flow_act, dest, dest_num, fte_tmp); |
| 1650 | up_write_ref_node(&fte_tmp->node); | 1662 | up_write_ref_node(&fte_tmp->node, false); |
| 1651 | tree_put_node(&fte_tmp->node); | 1663 | tree_put_node(&fte_tmp->node, false); |
| 1652 | kmem_cache_free(steering->ftes_cache, fte); | 1664 | kmem_cache_free(steering->ftes_cache, fte); |
| 1653 | return rule; | 1665 | return rule; |
| 1654 | } | 1666 | } |
| @@ -1684,7 +1696,7 @@ skip_search: | |||
| 1684 | 1696 | ||
| 1685 | err = insert_fte(g, fte); | 1697 | err = insert_fte(g, fte); |
| 1686 | if (err) { | 1698 | if (err) { |
| 1687 | up_write_ref_node(&g->node); | 1699 | up_write_ref_node(&g->node, false); |
| 1688 | if (err == -ENOSPC) | 1700 | if (err == -ENOSPC) |
| 1689 | continue; | 1701 | continue; |
| 1690 | kmem_cache_free(steering->ftes_cache, fte); | 1702 | kmem_cache_free(steering->ftes_cache, fte); |
| @@ -1692,11 +1704,11 @@ skip_search: | |||
| 1692 | } | 1704 | } |
| 1693 | 1705 | ||
| 1694 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); | 1706 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); |
| 1695 | up_write_ref_node(&g->node); | 1707 | up_write_ref_node(&g->node, false); |
| 1696 | rule = add_rule_fg(g, spec->match_value, | 1708 | rule = add_rule_fg(g, spec->match_value, |
| 1697 | flow_act, dest, dest_num, fte); | 1709 | flow_act, dest, dest_num, fte); |
| 1698 | up_write_ref_node(&fte->node); | 1710 | up_write_ref_node(&fte->node, false); |
| 1699 | tree_put_node(&fte->node); | 1711 | tree_put_node(&fte->node, false); |
| 1700 | return rule; | 1712 | return rule; |
| 1701 | } | 1713 | } |
| 1702 | rule = ERR_PTR(-ENOENT); | 1714 | rule = ERR_PTR(-ENOENT); |
| @@ -1738,7 +1750,7 @@ search_again_locked: | |||
| 1738 | err = build_match_list(&match_head, ft, spec); | 1750 | err = build_match_list(&match_head, ft, spec); |
| 1739 | if (err) { | 1751 | if (err) { |
| 1740 | if (take_write) | 1752 | if (take_write) |
| 1741 | up_write_ref_node(&ft->node); | 1753 | up_write_ref_node(&ft->node, false); |
| 1742 | else | 1754 | else |
| 1743 | up_read_ref_node(&ft->node); | 1755 | up_read_ref_node(&ft->node); |
| 1744 | return ERR_PTR(err); | 1756 | return ERR_PTR(err); |
| @@ -1753,7 +1765,7 @@ search_again_locked: | |||
| 1753 | if (!IS_ERR(rule) || | 1765 | if (!IS_ERR(rule) || |
| 1754 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { | 1766 | (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) { |
| 1755 | if (take_write) | 1767 | if (take_write) |
| 1756 | up_write_ref_node(&ft->node); | 1768 | up_write_ref_node(&ft->node, false); |
| 1757 | return rule; | 1769 | return rule; |
| 1758 | } | 1770 | } |
| 1759 | 1771 | ||
| @@ -1769,12 +1781,12 @@ search_again_locked: | |||
| 1769 | g = alloc_auto_flow_group(ft, spec); | 1781 | g = alloc_auto_flow_group(ft, spec); |
| 1770 | if (IS_ERR(g)) { | 1782 | if (IS_ERR(g)) { |
| 1771 | rule = ERR_CAST(g); | 1783 | rule = ERR_CAST(g); |
| 1772 | up_write_ref_node(&ft->node); | 1784 | up_write_ref_node(&ft->node, false); |
| 1773 | return rule; | 1785 | return rule; |
| 1774 | } | 1786 | } |
| 1775 | 1787 | ||
| 1776 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); | 1788 | nested_down_write_ref_node(&g->node, FS_LOCK_PARENT); |
| 1777 | up_write_ref_node(&ft->node); | 1789 | up_write_ref_node(&ft->node, false); |
| 1778 | 1790 | ||
| 1779 | err = create_auto_flow_group(ft, g); | 1791 | err = create_auto_flow_group(ft, g); |
| 1780 | if (err) | 1792 | if (err) |
| @@ -1793,17 +1805,17 @@ search_again_locked: | |||
| 1793 | } | 1805 | } |
| 1794 | 1806 | ||
| 1795 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); | 1807 | nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD); |
| 1796 | up_write_ref_node(&g->node); | 1808 | up_write_ref_node(&g->node, false); |
| 1797 | rule = add_rule_fg(g, spec->match_value, flow_act, dest, | 1809 | rule = add_rule_fg(g, spec->match_value, flow_act, dest, |
| 1798 | dest_num, fte); | 1810 | dest_num, fte); |
| 1799 | up_write_ref_node(&fte->node); | 1811 | up_write_ref_node(&fte->node, false); |
| 1800 | tree_put_node(&fte->node); | 1812 | tree_put_node(&fte->node, false); |
| 1801 | tree_put_node(&g->node); | 1813 | tree_put_node(&g->node, false); |
| 1802 | return rule; | 1814 | return rule; |
| 1803 | 1815 | ||
| 1804 | err_release_fg: | 1816 | err_release_fg: |
| 1805 | up_write_ref_node(&g->node); | 1817 | up_write_ref_node(&g->node, false); |
| 1806 | tree_put_node(&g->node); | 1818 | tree_put_node(&g->node, false); |
| 1807 | return ERR_PTR(err); | 1819 | return ERR_PTR(err); |
| 1808 | } | 1820 | } |
| 1809 | 1821 | ||
| @@ -1866,10 +1878,33 @@ EXPORT_SYMBOL(mlx5_add_flow_rules); | |||
| 1866 | 1878 | ||
| 1867 | void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) | 1879 | void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) |
| 1868 | { | 1880 | { |
| 1881 | struct fs_fte *fte; | ||
| 1869 | int i; | 1882 | int i; |
| 1870 | 1883 | ||
| 1884 | /* In order to consolidate the HW changes we lock the FTE for other | ||
| 1885 | * changes, and increase its refcount, in order not to perform the | ||
| 1886 | * "del" functions of the FTE. Will handle them here. | ||
| 1887 | * The removal of the rules is done under locked FTE. | ||
| 1888 | * After removing all the handle's rules, if there are remaining | ||
| 1889 | * rules, it means we just need to modify the FTE in FW, and | ||
| 1890 | * unlock/decrease the refcount we increased before. | ||
| 1891 | * Otherwise, it means the FTE should be deleted. First delete the | ||
| 1892 | * FTE in FW. Then, unlock the FTE, and proceed the tree_put_node of | ||
| 1893 | * the FTE, which will handle the last decrease of the refcount, as | ||
| 1894 | * well as required handling of its parent. | ||
| 1895 | */ | ||
| 1896 | fs_get_obj(fte, handle->rule[0]->node.parent); | ||
| 1897 | down_write_ref_node(&fte->node, false); | ||
| 1871 | for (i = handle->num_rules - 1; i >= 0; i--) | 1898 | for (i = handle->num_rules - 1; i >= 0; i--) |
| 1872 | tree_remove_node(&handle->rule[i]->node); | 1899 | tree_remove_node(&handle->rule[i]->node, true); |
| 1900 | if (fte->modify_mask && fte->dests_size) { | ||
| 1901 | modify_fte(fte); | ||
| 1902 | up_write_ref_node(&fte->node, false); | ||
| 1903 | } else { | ||
| 1904 | del_hw_fte(&fte->node); | ||
| 1905 | up_write(&fte->node.lock); | ||
| 1906 | tree_put_node(&fte->node, false); | ||
| 1907 | } | ||
| 1873 | kfree(handle); | 1908 | kfree(handle); |
| 1874 | } | 1909 | } |
| 1875 | EXPORT_SYMBOL(mlx5_del_flow_rules); | 1910 | EXPORT_SYMBOL(mlx5_del_flow_rules); |
| @@ -1972,7 +2007,7 @@ int mlx5_destroy_flow_table(struct mlx5_flow_table *ft) | |||
| 1972 | mutex_unlock(&root->chain_lock); | 2007 | mutex_unlock(&root->chain_lock); |
| 1973 | return err; | 2008 | return err; |
| 1974 | } | 2009 | } |
| 1975 | if (tree_remove_node(&ft->node)) | 2010 | if (tree_remove_node(&ft->node, false)) |
| 1976 | mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", | 2011 | mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n", |
| 1977 | ft->id); | 2012 | ft->id); |
| 1978 | mutex_unlock(&root->chain_lock); | 2013 | mutex_unlock(&root->chain_lock); |
| @@ -1983,7 +2018,7 @@ EXPORT_SYMBOL(mlx5_destroy_flow_table); | |||
| 1983 | 2018 | ||
| 1984 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) | 2019 | void mlx5_destroy_flow_group(struct mlx5_flow_group *fg) |
| 1985 | { | 2020 | { |
| 1986 | if (tree_remove_node(&fg->node)) | 2021 | if (tree_remove_node(&fg->node, false)) |
| 1987 | mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", | 2022 | mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n", |
| 1988 | fg->id); | 2023 | fg->id); |
| 1989 | } | 2024 | } |
| @@ -2367,8 +2402,8 @@ static void clean_tree(struct fs_node *node) | |||
| 2367 | tree_get_node(node); | 2402 | tree_get_node(node); |
| 2368 | list_for_each_entry_safe(iter, temp, &node->children, list) | 2403 | list_for_each_entry_safe(iter, temp, &node->children, list) |
| 2369 | clean_tree(iter); | 2404 | clean_tree(iter); |
| 2370 | tree_put_node(node); | 2405 | tree_put_node(node, false); |
| 2371 | tree_remove_node(node); | 2406 | tree_remove_node(node, false); |
| 2372 | } | 2407 | } |
| 2373 | } | 2408 | } |
| 2374 | 2409 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 2dc86347af58..87de0e4d9124 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
| @@ -172,6 +172,7 @@ struct fs_fte { | |||
| 172 | enum fs_fte_status status; | 172 | enum fs_fte_status status; |
| 173 | struct mlx5_fc *counter; | 173 | struct mlx5_fc *counter; |
| 174 | struct rhash_head hash; | 174 | struct rhash_head hash; |
| 175 | int modify_mask; | ||
| 175 | }; | 176 | }; |
| 176 | 177 | ||
| 177 | /* Type of children is mlx5_flow_table/namespace */ | 178 | /* Type of children is mlx5_flow_table/namespace */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 48aa6e030bcf..959605559858 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c | |||
| @@ -595,27 +595,6 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) | |||
| 595 | err); | 595 | err); |
| 596 | } | 596 | } |
| 597 | 597 | ||
| 598 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) | ||
| 599 | { | ||
| 600 | struct mlx5_lag *ldev; | ||
| 601 | int n; | ||
| 602 | |||
| 603 | ldev = mlx5_lag_dev_get(dev); | ||
| 604 | if (!ldev) { | ||
| 605 | mlx5_core_warn(dev, "no lag device, can't get pf num\n"); | ||
| 606 | return -EINVAL; | ||
| 607 | } | ||
| 608 | |||
| 609 | for (n = 0; n < MLX5_MAX_PORTS; n++) | ||
| 610 | if (ldev->pf[n].dev == dev) { | ||
| 611 | *pf_num = n; | ||
| 612 | return 0; | ||
| 613 | } | ||
| 614 | |||
| 615 | mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n"); | ||
| 616 | return -EINVAL; | ||
| 617 | } | ||
| 618 | |||
| 619 | /* Must be called with intf_mutex held */ | 598 | /* Must be called with intf_mutex held */ |
| 620 | void mlx5_lag_remove(struct mlx5_core_dev *dev) | 599 | void mlx5_lag_remove(struct mlx5_core_dev *dev) |
| 621 | { | 600 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 9529cf9623e3..7b331674622c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
| @@ -188,8 +188,6 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) | |||
| 188 | MLX5_CAP_GEN(dev, lag_master); | 188 | MLX5_CAP_GEN(dev, lag_master); |
| 189 | } | 189 | } |
| 190 | 190 | ||
| 191 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); | ||
| 192 | |||
| 193 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); | 191 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); |
| 194 | void mlx5_lag_update(struct mlx5_core_dev *dev); | 192 | void mlx5_lag_update(struct mlx5_core_dev *dev); |
| 195 | 193 | ||
