author     Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>  2019-02-28 18:24:24 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>                 2019-04-18 11:38:47 -0400
commit     7b9ffc76bf5998aad8feaa26d9d3fcb65ec7a21b (patch)
tree       9935f6c7b53786d7f9ce06d804aa4a42ce541ec4 /drivers/net/ethernet/intel/ice/ice_main.c
parent     0ebd3ff13ccad2940516ba522ca8d21cea4f56f6 (diff)
ice: Add code for DCB initialization part 3/4
This patch adds a new function ice_pf_dcb_cfg (and related helpers)
which applies the DCB configuration obtained from the firmware. As
part of this, VSIs/netdevs are updated with traffic class information.
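For context, a hedged sketch (not code from this patch) of what
"updating a netdev with traffic class information" typically looks like
in a kernel driver, using the standard netdev TC helpers. The function
name below is hypothetical; the real ice helpers arrive with the DCB
patches rather than in this ice_main.c diff.

#include <linux/netdevice.h>

/* Hypothetical illustration: map each traffic class to one queue via
 * the standard netdev_set_num_tc()/netdev_set_tc_queue() helpers.
 */
static void example_update_netdev_tc(struct net_device *netdev, u8 num_tc)
{
	u8 tc;

	netdev_set_num_tc(netdev, num_tc);
	for (tc = 0; tc < num_tc; tc++)
		netdev_set_tc_queue(netdev, tc, 1 /* count */, tc /* offset */);
}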
This patch requires a bit of a refactor of existing code.
1. For a MIB change event, the associated VSI is closed and brought up
   again. The gap between closing and re-opening the VSI can cause a
   race condition. Fix this by grabbing the rtnl_lock prior to closing
   the VSI and releasing it only after re-opening the VSI during a MIB
   change event (a usage sketch follows this list).
2. ice_sched_query_elem is used in ice_sched.c and, with this patch, in
   ice_dcb.c as well. However, ice_dcb.c is not built when CONFIG_DCB is
   unset, which results in namespace warnings (ice_sched.o: Externally
   defined symbols with no external references). To avoid this, move
   ice_sched_query_elem from ice_sched.c to ice_common.c (see the
   header-side sketch after this list).
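The sketch below illustrates the locking pattern from item 1; it is a
hedged example, not code from this patch. A DCB MIB-change handler can
hold the rtnl_lock across the whole pause/reconfigure/resume window and
tell the helpers the lock is already held. The handler name
example_handle_mib_change and the reconfiguration step are
hypothetical; ice_pf_dis_all_vsi(), ice_pf_ena_all_vsi(), and their
'locked' parameter are taken from the diff below.

#include <linux/rtnetlink.h>
#include "ice.h"

/* Hypothetical handler, for illustration only: rtnl_lock is held for
 * the whole close -> reconfigure -> reopen window, so the helpers are
 * called with locked = true and do not take the lock themselves.
 */
static void example_handle_mib_change(struct ice_pf *pf)
{
	rtnl_lock();

	/* pause all VSIs; ndo_stop() runs under the lock we already hold */
	ice_pf_dis_all_vsi(pf, true);

	/* apply the new DCB configuration here, e.g. via ice_pf_dcb_cfg() */

	/* resume all VSIs before the lock is dropped */
	if (ice_pf_ena_all_vsi(pf, true))
		dev_err(&pf->pdev->dev, "failed to re-enable VSIs\n");

	rtnl_unlock();
}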
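Item 2's CONFIG_DCB concern is also visible in the diff below, where
ice_pf_dis_all_vsi() and ice_pf_ena_all_vsi() get external linkage only
when CONFIG_DCB is set, so a DCB-less build exports nothing that a
namespace check would flag as unreferenced. A minimal sketch of the
matching header-side declarations follows; the exact header file is an
assumption.

/* Header counterpart to the #ifdef CONFIG_DCB linkage in the diff;
 * the header name (e.g. ice.h) is an assumption. With CONFIG_DCB set,
 * DCB code in another object file can call the helpers; without it,
 * they stay static to ice_main.c.
 */
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */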
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  118
1 file changed, 69 insertions(+), 49 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 22fe0605aa9f..ff84a6c318a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
 static struct workqueue_struct *ice_wq;
 static const struct net_device_ops ice_netdev_ops;
 
-static void ice_pf_dis_all_vsi(struct ice_pf *pf);
 static void ice_rebuild(struct ice_pf *pf);
 
 static void ice_vsi_release_all(struct ice_pf *pf);
@@ -398,6 +397,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
 }
 
 /**
+ * ice_dis_vsi - pause a VSI
+ * @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
+ */
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
+{
+	if (test_bit(__ICE_DOWN, vsi->state))
+		return;
+
+	set_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
+		if (netif_running(vsi->netdev)) {
+			if (!locked) {
+				rtnl_lock();
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+				rtnl_unlock();
+			} else {
+				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+			}
+		} else {
+			ice_vsi_close(vsi);
+		}
+	}
+}
+
+/**
+ * ice_pf_dis_all_vsi - Pause all VSIs on a PF
+ * @pf: the PF
+ * @locked: is the rtnl_lock already held
+ */
+#ifdef CONFIG_DCB
+void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
+{
+	int v;
+
+	ice_for_each_vsi(pf, v)
+		if (pf->vsi[v])
+			ice_dis_vsi(pf->vsi[v], locked);
+}
+
+/**
  * ice_prepare_for_reset - prep for the core to reset
  * @pf: board private structure
  *
@@ -417,7 +461,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
 	ice_vc_notify_reset(pf);
 
 	/* disable the VSIs and their queues that are not already DOWN */
-	ice_pf_dis_all_vsi(pf);
+	ice_pf_dis_all_vsi(pf, false);
 
 	if (hw->port_info)
 		ice_sched_clear_port(hw->port_info);
@@ -3581,47 +3625,31 @@ static void ice_vsi_release_all(struct ice_pf *pf)
 }
 
 /**
- * ice_dis_vsi - pause a VSI
- * @vsi: the VSI being paused
- * @locked: is the rtnl_lock already held
- */
-static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
-{
-	if (test_bit(__ICE_DOWN, vsi->state))
-		return;
-
-	set_bit(__ICE_NEEDS_RESTART, vsi->state);
-
-	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
-		if (netif_running(vsi->netdev)) {
-			if (!locked) {
-				rtnl_lock();
-				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-				rtnl_unlock();
-			} else {
-				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-			}
-		} else {
-			ice_vsi_close(vsi);
-		}
-	}
-}
-
-/**
  * ice_ena_vsi - resume a VSI
  * @vsi: the VSI being resume
+ * @locked: is the rtnl_lock already held
  */
-static int ice_ena_vsi(struct ice_vsi *vsi)
+static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 {
 	int err = 0;
 
-	if (test_and_clear_bit(__ICE_NEEDS_RESTART, vsi->state) &&
-	    vsi->netdev) {
+	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
+		return err;
+
+	clear_bit(__ICE_NEEDS_RESTART, vsi->state);
+
+	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+		struct net_device *netd = vsi->netdev;
+
 		if (netif_running(vsi->netdev)) {
-			rtnl_lock();
-			err = vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
-			rtnl_unlock();
+			if (locked) {
+				err = netd->netdev_ops->ndo_open(netd);
+			} else {
+				rtnl_lock();
+				err = netd->netdev_ops->ndo_open(netd);
+				rtnl_unlock();
+			}
 		} else {
 			err = ice_vsi_open(vsi);
 		}
 	}
@@ -3630,29 +3658,21 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
 }
 
 /**
- * ice_pf_dis_all_vsi - Pause all VSIs on a PF
- * @pf: the PF
- */
-static void ice_pf_dis_all_vsi(struct ice_pf *pf)
-{
-	int v;
-
-	ice_for_each_vsi(pf, v)
-		if (pf->vsi[v])
-			ice_dis_vsi(pf->vsi[v], false);
-}
-
-/**
  * ice_pf_ena_all_vsi - Resume all VSIs on a PF
  * @pf: the PF
+ * @locked: is the rtnl_lock already held
  */
-static int ice_pf_ena_all_vsi(struct ice_pf *pf)
+#ifdef CONFIG_DCB
+int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#else
+static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
+#endif /* CONFIG_DCB */
 {
 	int v;
 
 	ice_for_each_vsi(pf, v)
 		if (pf->vsi[v])
-			if (ice_ena_vsi(pf->vsi[v]))
+			if (ice_ena_vsi(pf->vsi[v], locked))
 				return -EIO;
 
 	return 0;
@@ -3800,7 +3820,7 @@ static void ice_rebuild(struct ice_pf *pf)
 	}
 
 	/* restart the VSIs that were rebuilt and running before the reset */
-	err = ice_pf_ena_all_vsi(pf);
+	err = ice_pf_ena_all_vsi(pf, false);
 	if (err) {
 		dev_err(&pf->pdev->dev, "error enabling VSIs\n");
 		/* no need to disable VSIs in tear down path in ice_rebuild()