author:    Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>  2018-09-19 20:23:09 -0400
committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>  2018-10-01 15:50:06 -0400
commit:    df0f847915b4311fc107e8e803c69b9f426c4f7b
tree:      fef734ba96961a9065666a2eacd849488ccbee6d /drivers/net/ethernet/intel/ice/ice_lib.c
parent:    07309a0e59edf4247bbf64ed852f95ced207f27a
ice: Move common functions out of ice_main.c part 6/7
This patch continues the code move out of ice_main.c.

The following top-level functions (and related dependency functions) were
moved to ice_lib.c:
ice_vsi_setup_vector_base
ice_vsi_alloc_q_vectors
ice_vsi_get_qs

The following functions were made static again:
ice_vsi_free_arrays
ice_vsi_clear_rings

This patch also decouples the netdev and NAPI registration logic from the
VSI creation logic (ice_vsi_setup): for SR-IOV we want to create VF VSIs
using ice_vsi_setup, but we do not want to create netdevs for them.
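
To illustrate the shape of that split, here is a standalone sketch (not
driver code; all names are hypothetical stand-ins): creating the VSI object
is one step, and registering a netdev/NAPI for it is a separate, optional
caller-side step that the SR-IOV (VF) path simply skips.

/*
 * Toy model of "VSI creation does not imply netdev creation".
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_vsi {
	int id;
	bool has_netdev;	/* stands in for vsi->netdev */
};

/* like ice_vsi_setup(): builds the VSI with no netdev/NAPI side effects */
static struct toy_vsi *toy_vsi_setup(int id)
{
	struct toy_vsi *vsi = calloc(1, sizeof(*vsi));

	if (vsi)
		vsi->id = id;
	return vsi;
}

/* PF-only step, performed by the caller when a netdev is actually wanted */
static void toy_register_netdev(struct toy_vsi *vsi)
{
	vsi->has_netdev = true;
	printf("vsi %d: netdev and NAPI registered\n", vsi->id);
}

int main(void)
{
	struct toy_vsi *pf_vsi = toy_vsi_setup(0);
	struct toy_vsi *vf_vsi = toy_vsi_setup(1);

	if (!pf_vsi || !vf_vsi)
		return 1;

	toy_register_netdev(pf_vsi);	/* PF path opts in */
	/* VF path: vf_vsi never gets a netdev */

	free(pf_vsi);
	free(vf_vsi);
	return 0;
}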
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 463
1 file changed, 461 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 6ba82337d017..232ca06974ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -346,7 +346,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
  * @vsi: pointer to VSI being cleared
  * @free_qvectors: bool to specify if q_vectors should be deallocated
  */
-void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
 {
 	struct ice_pf *pf = vsi->back;
 
@@ -424,6 +424,141 @@ irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
 }
 
 /**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int offset, ret = 0;
+
+	mutex_lock(&pf->avail_q_mutex);
+	/* look for contiguous block of queues for Tx */
+	offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+					    0, vsi->alloc_txq, 0);
+	if (offset < ICE_MAX_TXQS) {
+		int i;
+
+		bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+		for (i = 0; i < vsi->alloc_txq; i++)
+			vsi->txq_map[i] = i + offset;
+	} else {
+		ret = -ENOMEM;
+		vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	}
+
+	/* look for contiguous block of queues for Rx */
+	offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+					    0, vsi->alloc_rxq, 0);
+	if (offset < ICE_MAX_RXQS) {
+		int i;
+
+		bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+		for (i = 0; i < vsi->alloc_rxq; i++)
+			vsi->rxq_map[i] = i + offset;
+	} else {
+		ret = -ENOMEM;
+		vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	}
+	mutex_unlock(&pf->avail_q_mutex);
+
+	return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int i, index = 0;
+
+	mutex_lock(&pf->avail_q_mutex);
+
+	if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+		for (i = 0; i < vsi->alloc_txq; i++) {
+			index = find_next_zero_bit(pf->avail_txqs,
+						   ICE_MAX_TXQS, index);
+			if (index < ICE_MAX_TXQS) {
+				set_bit(index, pf->avail_txqs);
+				vsi->txq_map[i] = index;
+			} else {
+				goto err_scatter_tx;
+			}
+		}
+	}
+
+	if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+		for (i = 0; i < vsi->alloc_rxq; i++) {
+			index = find_next_zero_bit(pf->avail_rxqs,
+						   ICE_MAX_RXQS, index);
+			if (index < ICE_MAX_RXQS) {
+				set_bit(index, pf->avail_rxqs);
+				vsi->rxq_map[i] = index;
+			} else {
+				goto err_scatter_rx;
+			}
+		}
+	}
+
+	mutex_unlock(&pf->avail_q_mutex);
+	return 0;
+
+err_scatter_rx:
+	/* unflag any queues we have grabbed (i is failed position) */
+	for (index = 0; index < i; index++) {
+		clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+		vsi->rxq_map[index] = 0;
+	}
+	i = vsi->alloc_txq;
+err_scatter_tx:
+	/* i is either position of failed attempt or vsi->alloc_txq */
+	for (index = 0; index < i; index++) {
+		clear_bit(vsi->txq_map[index], pf->avail_txqs);
+		vsi->txq_map[index] = 0;
+	}
+
+	mutex_unlock(&pf->avail_q_mutex);
+	return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+	int ret = 0;
+
+	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+	/* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
+	 * modes individually to scatter if assigning contiguous queues
+	 * to Rx or Tx fails
+	 */
+	ret = ice_vsi_get_qs_contig(vsi);
+	if (ret < 0) {
+		if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+			vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+					       ICE_MAX_SCATTER_TXQS);
+		if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+			vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+					       ICE_MAX_SCATTER_RXQS);
+		ret = ice_vsi_get_qs_scatter(vsi);
+	}
+
+	return ret;
+}
+
+/**
  * ice_vsi_put_qs - Release queues from VSI to PF
  * @vsi: the VSI that is going to release queues
  */
@@ -448,6 +583,22 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf;
+
+	pf = vsi->back;
+
+	if (vsi->rss_hkey_user)
+		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+	if (vsi->rss_lut_user)
+		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
  * @vsi: the VSI being configured
  */
@@ -686,10 +837,182 @@ int ice_vsi_init(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_ring *ring;
+
+	if (!vsi->q_vectors[v_idx]) {
+		dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+			v_idx);
+		return;
+	}
+	q_vector = vsi->q_vectors[v_idx];
+
+	ice_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+	ice_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
+
+	/* only VSI with an associated netdev is set up with NAPI */
+	if (vsi->netdev)
+		netif_napi_del(&q_vector->napi);
+
+	devm_kfree(&vsi->back->pdev->dev, q_vector);
+	vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+	/* only set affinity_mask if the CPU is online */
+	if (cpu_online(v_idx))
+		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+	/* This will not be called in the driver load path because the netdev
+	 * will not be created yet. All other cases will register the NAPI
+	 * handler here (e.g. resume, reset/rebuild, etc.)
+	 */
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+			       NAPI_POLL_WEIGHT);
+
+	/* tie q_vector and VSI together */
+	vsi->q_vectors[v_idx] = q_vector;
+
+	return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int v_idx = 0, num_q_vectors;
+	int err;
+
+	if (vsi->q_vectors[0]) {
+		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+			vsi->vsi_num);
+		return -EEXIST;
+	}
+
+	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+		num_q_vectors = vsi->num_q_vectors;
+	} else {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+		err = ice_vsi_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	while (v_idx--)
+		ice_free_q_vector(vsi, v_idx);
+
+	dev_err(&pf->pdev->dev,
+		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+		vsi->num_q_vectors, vsi->vsi_num, err);
+	vsi->num_q_vectors = 0;
+	return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ */
+int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int num_q_vectors = 0;
+
+	if (vsi->base_vector) {
+		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+			vsi->vsi_num, vsi->base_vector);
+		return -EEXIST;
+	}
+
+	if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+		return -ENOENT;
+
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		num_q_vectors = vsi->num_q_vectors;
+		break;
+	default:
+		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+			 vsi->type);
+		break;
+	}
+
+	if (num_q_vectors)
+		vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
+					       num_q_vectors, vsi->idx);
+
+	if (vsi->base_vector < 0) {
+		dev_err(&pf->pdev->dev,
+			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+			num_q_vectors, vsi->vsi_num, vsi->base_vector);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/**
  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
  * @vsi: the VSI having rings deallocated
  */
-void ice_vsi_clear_rings(struct ice_vsi *vsi)
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
 {
 	int i;
 
@@ -1675,6 +1998,142 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf;
+
+	if (!vsi->back)
+		return -ENODEV;
+	pf = vsi->back;
+	/* do not unregister and free netdevs while driver is in the reset
+	 * recovery pending state. Since reset/rebuild happens through PF
+	 * service task workqueue, it's not a good idea to unregister a netdev
+	 * that is associated with the PF that is running the work queue items
+	 * currently. This is done to avoid check_flush_dependency() warning
+	 * on this wq
+	 */
+	if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+
+	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+		ice_rss_clean(vsi);
+
+	/* Disable VSI and free resources */
+	ice_vsi_dis_irq(vsi);
+	ice_vsi_close(vsi);
+
+	/* reclaim interrupt vectors back to PF */
+	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+	pf->num_avail_msix += vsi->num_q_vectors;
+
+	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
+	ice_vsi_delete(vsi);
+	ice_vsi_free_q_vectors(vsi);
+	ice_vsi_clear_rings(vsi);
+
+	ice_vsi_put_qs(vsi);
+	pf->q_left_tx += vsi->alloc_txq;
+	pf->q_left_rx += vsi->alloc_rxq;
+
+	/* retain SW VSI data structure since it is needed to unregister and
+	 * free VSI netdev when PF is not in reset recovery pending state,
+	 * e.g. during rmmod.
+	 */
+	if (!ice_is_reset_recovery_pending(pf->state))
+		ice_vsi_clear(vsi);
+
+	return 0;
+}
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	int ret, i;
+
+	if (!vsi)
+		return -EINVAL;
+
+	ice_vsi_free_q_vectors(vsi);
+	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+	vsi->base_vector = 0;
+	ice_vsi_clear_rings(vsi);
+	ice_vsi_free_arrays(vsi, false);
+	ice_vsi_set_num_qs(vsi);
+
+	/* Initialize VSI struct elements and create VSI in FW */
+	ret = ice_vsi_init(vsi);
+	if (ret < 0)
+		goto err_vsi;
+
+	ret = ice_vsi_alloc_arrays(vsi, false);
+	if (ret < 0)
+		goto err_vsi;
+
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		ret = ice_vsi_alloc_q_vectors(vsi);
+		if (ret)
+			goto err_rings;
+
+		ret = ice_vsi_setup_vector_base(vsi);
+		if (ret)
+			goto err_vectors;
+
+		ret = ice_vsi_alloc_rings(vsi);
+		if (ret)
+			goto err_vectors;
+
+		ice_vsi_map_rings_to_vectors(vsi);
+		break;
+	default:
+		break;
+	}
+
+	ice_vsi_set_tc_cfg(vsi);
+
+	/* configure VSI nodes based on number of queues and TC's */
+	for (i = 0; i < vsi->tc_cfg.numtc; i++)
+		max_txqs[i] = vsi->num_txq;
+
+	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
+			      vsi->tc_cfg.ena_tc, max_txqs);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed VSI lan queue config\n");
+		goto err_vectors;
+	}
+	return 0;
+
+err_vectors:
+	ice_vsi_free_q_vectors(vsi);
+err_rings:
+	if (vsi->netdev) {
+		vsi->current_netdev_flags = 0;
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+err_vsi:
+	ice_vsi_clear(vsi);
+	set_bit(__ICE_RESET_FAILED, vsi->back->state);
+	return ret;
+}
+
+/**
  * ice_is_reset_recovery_pending - schedule a reset
  * @state: pf state field
  */
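
For readers following ice_vsi_get_qs() above: the strategy is to carve a
contiguous run out of the PF-wide queue bitmap first, and only fall back to
collecting scattered free slots one bit at a time. Below is a minimal
standalone sketch of that idiom (not driver code; toy sizes, and plain
arrays stand in for the kernel's bitmap_find_next_zero_area() and
find_next_zero_bit() helpers).

#include <stdio.h>

#define MAX_QS 16

/* find `n` consecutive free (zero) slots; return start index or -1 */
static int find_contig(const unsigned char *used, int n)
{
	for (int start = 0; start + n <= MAX_QS; start++) {
		int run_is_free = 1;

		for (int i = 0; i < n; i++)
			if (used[start + i])
				run_is_free = 0;
		if (run_is_free)
			return start;
	}
	return -1;
}

/* assign `n` queues into map[]: contiguous first, scattered as fallback */
static int get_qs(unsigned char *used, int *map, int n)
{
	int start = find_contig(used, n);

	if (start >= 0) {
		for (int i = 0; i < n; i++) {
			used[start + i] = 1;
			map[i] = start + i;
		}
		return 0;
	}

	/* scatter: grab any free slots, one at a time */
	for (int i = 0, q = 0; i < n; q++) {
		if (q >= MAX_QS)
			return -1;	/* caller must unwind, like err_scatter_* */
		if (!used[q]) {
			used[q] = 1;
			map[i++] = q;
		}
	}
	return 0;
}

int main(void)
{
	/* deliberately fragmented: no run of 4 free slots exists */
	unsigned char used[MAX_QS] = {
		0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0
	};
	int map[4];

	if (get_qs(used, map, 4) == 0)
		for (int i = 0; i < 4; i++)
			printf("vsi queue %d -> pf slot %d\n", i, map[i]);
	return 0;
}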