Diffstat (limited to 'drivers/net/ethernet/intel')
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c   463
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h    16
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c  492
 3 files changed, 521 insertions(+), 450 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 6ba82337d017..232ca06974ea 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -346,7 +346,7 @@ void ice_vsi_delete(struct ice_vsi *vsi)
  * @vsi: pointer to VSI being cleared
  * @free_qvectors: bool to specify if q_vectors should be deallocated
  */
-void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
+static void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors)
 {
	struct ice_pf *pf = vsi->back;
 
@@ -424,6 +424,141 @@ irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
 }
 
 /**
+ * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int offset, ret = 0;
+
+	mutex_lock(&pf->avail_q_mutex);
+	/* look for contiguous block of queues for Tx */
+	offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
+					    0, vsi->alloc_txq, 0);
+	if (offset < ICE_MAX_TXQS) {
+		int i;
+
+		bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
+		for (i = 0; i < vsi->alloc_txq; i++)
+			vsi->txq_map[i] = i + offset;
+	} else {
+		ret = -ENOMEM;
+		vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	}
+
+	/* look for contiguous block of queues for Rx */
+	offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
+					    0, vsi->alloc_rxq, 0);
+	if (offset < ICE_MAX_RXQS) {
+		int i;
+
+		bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
+		for (i = 0; i < vsi->alloc_rxq; i++)
+			vsi->rxq_map[i] = i + offset;
+	} else {
+		ret = -ENOMEM;
+		vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	}
+	mutex_unlock(&pf->avail_q_mutex);
+
+	return ret;
+}
+
+/**
+ * ice_vsi_get_qs_scatter - Assign scattered queues to VSI
+ * @vsi: the VSI getting queues
+ *
+ * Return 0 on success and a negative value on error
+ */
+static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int i, index = 0;
+
+	mutex_lock(&pf->avail_q_mutex);
+
+	if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+		for (i = 0; i < vsi->alloc_txq; i++) {
+			index = find_next_zero_bit(pf->avail_txqs,
+						   ICE_MAX_TXQS, index);
+			if (index < ICE_MAX_TXQS) {
+				set_bit(index, pf->avail_txqs);
+				vsi->txq_map[i] = index;
+			} else {
+				goto err_scatter_tx;
+			}
+		}
+	}
+
+	if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
+		for (i = 0; i < vsi->alloc_rxq; i++) {
+			index = find_next_zero_bit(pf->avail_rxqs,
+						   ICE_MAX_RXQS, index);
+			if (index < ICE_MAX_RXQS) {
+				set_bit(index, pf->avail_rxqs);
+				vsi->rxq_map[i] = index;
+			} else {
+				goto err_scatter_rx;
+			}
+		}
+	}
+
+	mutex_unlock(&pf->avail_q_mutex);
+	return 0;
+
+err_scatter_rx:
+	/* unflag any queues we have grabbed (i is failed position) */
+	for (index = 0; index < i; index++) {
+		clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
+		vsi->rxq_map[index] = 0;
+	}
+	i = vsi->alloc_txq;
+err_scatter_tx:
+	/* i is either position of failed attempt or vsi->alloc_txq */
+	for (index = 0; index < i; index++) {
+		clear_bit(vsi->txq_map[index], pf->avail_txqs);
+		vsi->txq_map[index] = 0;
+	}
+
+	mutex_unlock(&pf->avail_q_mutex);
+	return -ENOMEM;
+}
+
+/**
+ * ice_vsi_get_qs - Assign queues from PF to VSI
+ * @vsi: the VSI to assign queues to
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_get_qs(struct ice_vsi *vsi)
+{
+	int ret = 0;
+
+	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
+	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
+
+	/* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
+	 * modes individually to scatter if assigning contiguous queues
+	 * to Rx or Tx fails
+	 */
+	ret = ice_vsi_get_qs_contig(vsi);
+	if (ret < 0) {
+		if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
+			vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
+					       ICE_MAX_SCATTER_TXQS);
+		if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
+			vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
+					       ICE_MAX_SCATTER_RXQS);
+		ret = ice_vsi_get_qs_scatter(vsi);
+	}
+
+	return ret;
+}
+
+/**
  * ice_vsi_put_qs - Release queues from VSI to PF
  * @vsi: the VSI that is going to release queues
  */
@@ -448,6 +583,22 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
+ * @vsi: the VSI being removed
+ */
+static void ice_rss_clean(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf;
+
+	pf = vsi->back;
+
+	if (vsi->rss_hkey_user)
+		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
+	if (vsi->rss_lut_user)
+		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
+}
+
+/**
  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
  * @vsi: the VSI being configured
  */
@@ -686,10 +837,182 @@ int ice_vsi_init(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_free_q_vector - Free memory allocated for a specific interrupt vector
+ * @vsi: VSI having the memory freed
+ * @v_idx: index of the vector to be freed
+ */
+static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_ring *ring;
+
+	if (!vsi->q_vectors[v_idx]) {
+		dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
+			v_idx);
+		return;
+	}
+	q_vector = vsi->q_vectors[v_idx];
+
+	ice_for_each_ring(ring, q_vector->tx)
+		ring->q_vector = NULL;
+	ice_for_each_ring(ring, q_vector->rx)
+		ring->q_vector = NULL;
+
+	/* only VSI with an associated netdev is set up with NAPI */
+	if (vsi->netdev)
+		netif_napi_del(&q_vector->napi);
+
+	devm_kfree(&vsi->back->pdev->dev, q_vector);
+	vsi->q_vectors[v_idx] = NULL;
+}
+
+/**
+ * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
+ * @vsi: the VSI having memory freed
+ */
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		ice_free_q_vector(vsi, v_idx);
+}
+
+/**
+ * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @vsi: the VSI being configured
+ * @v_idx: index of the vector in the VSI struct
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ */
+static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_q_vector *q_vector;
+
+	/* allocate q_vector */
+	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
+	if (!q_vector)
+		return -ENOMEM;
+
+	q_vector->vsi = vsi;
+	q_vector->v_idx = v_idx;
+	/* only set affinity_mask if the CPU is online */
+	if (cpu_online(v_idx))
+		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+
+	/* This will not be called in the driver load path because the netdev
+	 * will not be created yet. All other cases will register the NAPI
+	 * handler here (i.e. resume, reset/rebuild, etc.)
+	 */
+	if (vsi->netdev)
+		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
+			       NAPI_POLL_WEIGHT);
+
+	/* tie q_vector and VSI together */
+	vsi->q_vectors[v_idx] = q_vector;
+
+	return 0;
+}
+
+/**
+ * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @vsi: the VSI being configured
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ */
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int v_idx = 0, num_q_vectors;
+	int err;
+
+	if (vsi->q_vectors[0]) {
+		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
+			vsi->vsi_num);
+		return -EEXIST;
+	}
+
+	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+		num_q_vectors = vsi->num_q_vectors;
+	} else {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+		err = ice_vsi_alloc_q_vector(vsi, v_idx);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	while (v_idx--)
+		ice_free_q_vector(vsi, v_idx);
+
+	dev_err(&pf->pdev->dev,
+		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
+		vsi->num_q_vectors, vsi->vsi_num, err);
+	vsi->num_q_vectors = 0;
+	return err;
+}
+
+/**
+ * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
+ * @vsi: ptr to the VSI
+ *
+ * This should only be called after ice_vsi_alloc() which allocates the
+ * corresponding SW VSI structure and initializes num_queue_pairs for the
+ * newly allocated VSI.
+ *
+ * Returns 0 on success or negative on failure
+ */
+int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int num_q_vectors = 0;
+
+	if (vsi->base_vector) {
+		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
+			vsi->vsi_num, vsi->base_vector);
+		return -EEXIST;
+	}
+
+	if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
+		return -ENOENT;
+
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		num_q_vectors = vsi->num_q_vectors;
+		break;
+	default:
+		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
+			 vsi->type);
+		break;
+	}
+
+	if (num_q_vectors)
+		vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
+					       num_q_vectors, vsi->idx);
+
+	if (vsi->base_vector < 0) {
+		dev_err(&pf->pdev->dev,
+			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+			num_q_vectors, vsi->vsi_num, vsi->base_vector);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/**
  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
  * @vsi: the VSI having rings deallocated
  */
-void ice_vsi_clear_rings(struct ice_vsi *vsi)
+static void ice_vsi_clear_rings(struct ice_vsi *vsi)
 {
	int i;
 
@@ -1675,6 +1998,142 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
 }
 
 /**
+ * ice_vsi_release - Delete a VSI and free its resources
+ * @vsi: the VSI being removed
+ *
+ * Returns 0 on success or < 0 on error
+ */
+int ice_vsi_release(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf;
+
+	if (!vsi->back)
+		return -ENODEV;
+	pf = vsi->back;
+	/* do not unregister and free netdevs while driver is in the reset
+	 * recovery pending state. Since the reset/rebuild happens through
+	 * the PF service task workqueue, it's not a good idea to unregister
+	 * a netdev that is associated with the PF that is running the work
+	 * queue items currently. This is done to avoid a
+	 * check_flush_dependency() warning on this wq
+	 */
+	if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+
+	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
+		ice_rss_clean(vsi);
+
+	/* Disable VSI and free resources */
+	ice_vsi_dis_irq(vsi);
+	ice_vsi_close(vsi);
+
+	/* reclaim interrupt vectors back to PF */
+	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+	pf->num_avail_msix += vsi->num_q_vectors;
+
+	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
+	ice_vsi_delete(vsi);
+	ice_vsi_free_q_vectors(vsi);
+	ice_vsi_clear_rings(vsi);
+
+	ice_vsi_put_qs(vsi);
+	pf->q_left_tx += vsi->alloc_txq;
+	pf->q_left_rx += vsi->alloc_rxq;
+
+	/* retain SW VSI data structure since it is needed to unregister and
+	 * free VSI netdev when PF is not in reset recovery pending state,
+	 * for ex: during rmmod.
+	 */
+	if (!ice_is_reset_recovery_pending(pf->state))
+		ice_vsi_clear(vsi);
+
+	return 0;
+}
+
+/**
+ * ice_vsi_rebuild - Rebuild VSI after reset
+ * @vsi: VSI to be rebuilt
+ *
+ * Returns 0 on success and negative value on failure
+ */
+int ice_vsi_rebuild(struct ice_vsi *vsi)
+{
+	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+	int ret, i;
+
+	if (!vsi)
+		return -EINVAL;
+
+	ice_vsi_free_q_vectors(vsi);
+	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
+	vsi->base_vector = 0;
+	ice_vsi_clear_rings(vsi);
+	ice_vsi_free_arrays(vsi, false);
+	ice_vsi_set_num_qs(vsi);
+
+	/* Initialize VSI struct elements and create VSI in FW */
+	ret = ice_vsi_init(vsi);
+	if (ret < 0)
+		goto err_vsi;
+
+	ret = ice_vsi_alloc_arrays(vsi, false);
+	if (ret < 0)
+		goto err_vsi;
+
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		ret = ice_vsi_alloc_q_vectors(vsi);
+		if (ret)
+			goto err_rings;
+
+		ret = ice_vsi_setup_vector_base(vsi);
+		if (ret)
+			goto err_vectors;
+
+		ret = ice_vsi_alloc_rings(vsi);
+		if (ret)
+			goto err_vectors;
+
+		ice_vsi_map_rings_to_vectors(vsi);
+		break;
+	default:
+		break;
+	}
+
+	ice_vsi_set_tc_cfg(vsi);
+
+	/* configure VSI nodes based on number of queues and TCs */
+	for (i = 0; i < vsi->tc_cfg.numtc; i++)
+		max_txqs[i] = vsi->num_txq;
+
+	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
+			      vsi->tc_cfg.ena_tc, max_txqs);
+	if (ret) {
+		dev_info(&vsi->back->pdev->dev,
+			 "Failed VSI lan queue config\n");
+		goto err_vectors;
+	}
+	return 0;
+
+err_vectors:
+	ice_vsi_free_q_vectors(vsi);
+err_rings:
+	if (vsi->netdev) {
+		vsi->current_netdev_flags = 0;
+		unregister_netdev(vsi->netdev);
+		free_netdev(vsi->netdev);
+		vsi->netdev = NULL;
+	}
+err_vsi:
+	ice_vsi_clear(vsi);
+	set_bit(__ICE_RESET_FAILED, vsi->back->state);
+	return ret;
+}
+
+/**
  * ice_is_reset_recovery_pending - schedule a reset
  * @state: pf state field
  */
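
For readers skimming the ice_lib.c side of the move: the queue-assignment logic above tries one contiguous block first and only falls back to scattered allocation when no block is free, rolling back partial claims on failure. Below is a standalone sketch of that pattern, with a plain bool array standing in for the driver's pf->avail_txqs bitmap; the kernel bitmap helpers, locking, and all names here are hypothetical stand-ins, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define MAX_QS 16

static bool in_use[MAX_QS];	/* stand-in for the PF's avail-queue bitmap */

/* try to claim n adjacent free queues; return start index or -1 */
static int get_qs_contig(int n, int *queue_map)
{
	for (int start = 0; start + n <= MAX_QS; start++) {
		int len = 0;

		while (len < n && !in_use[start + len])
			len++;
		if (len == n) {
			for (int i = 0; i < n; i++) {
				in_use[start + i] = true;
				queue_map[i] = start + i;
			}
			return start;
		}
	}
	return -1;
}

/* fall back to claiming n free queues wherever they sit */
static int get_qs_scatter(int n, int *queue_map)
{
	int found = 0;

	for (int i = 0; i < MAX_QS && found < n; i++)
		if (!in_use[i]) {
			in_use[i] = true;
			queue_map[found++] = i;
		}
	if (found == n)
		return 0;
	/* roll back partial claims, like the err_scatter_tx/rx labels above */
	while (found--)
		in_use[queue_map[found]] = false;
	return -1;
}

int main(void)
{
	int map[4];

	/* fragment the space so no 4-queue contiguous block survives */
	in_use[2] = in_use[6] = in_use[10] = in_use[13] = true;
	if (get_qs_contig(4, map) < 0 && get_qs_scatter(4, map) < 0)
		return 1;
	for (int i = 0; i < 4; i++)
		printf("queue %d -> %d\n", i, map[i]);
	return 0;
}

The unwind loop in get_qs_scatter mirrors what err_scatter_tx/err_scatter_rx do in the patch: a partially satisfied request releases every queue it grabbed before returning -ENOMEM.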
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 002bbca8e7ea..aaab3fc4b018 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -6,6 +6,12 @@
 
 #include "ice.h"
 
+int ice_vsi_setup_vector_base(struct ice_vsi *vsi);
+
+int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
+
+int ice_vsi_get_qs(struct ice_vsi *vsi);
+
 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
 
 int ice_vsi_alloc_rings(struct ice_vsi *vsi);
@@ -18,10 +24,6 @@ int ice_get_free_slot(void *array, int size, int curr);
 
 int ice_vsi_init(struct ice_vsi *vsi);
 
-void ice_vsi_free_arrays(struct ice_vsi *vsi, bool free_qvectors);
-
-void ice_vsi_clear_rings(struct ice_vsi *vsi);
-
 int ice_vsi_alloc_arrays(struct ice_vsi *vsi, bool alloc_qvectors);
 
 int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
@@ -57,6 +59,8 @@ void ice_vsi_delete(struct ice_vsi *vsi);
 
 int ice_vsi_clear(struct ice_vsi *vsi);
 
+int ice_vsi_release(struct ice_vsi *vsi);
+
 void ice_vsi_close(struct ice_vsi *vsi);
 
 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -64,8 +68,12 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
 int
 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
 
+int ice_vsi_rebuild(struct ice_vsi *vsi);
+
 bool ice_is_reset_recovery_pending(unsigned long *state);
 
+void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
+
 void ice_vsi_put_qs(struct ice_vsi *vsi);
 
 void ice_vsi_dis_irq(struct ice_vsi *vsi);
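
The prototypes newly exported from ice_lib.h imply an ordering that ice_vsi_rebuild() above follows: queues are assigned first, q_vectors are allocated next, and the MSI-X base vector is reserved last. A minimal sketch of that call order, using stub functions rather than the real driver API (all names and values here are hypothetical):

#include <stdio.h>

struct vsi { int txq, rxq, vectors, base_vector; };

/* stubs standing in for ice_vsi_get_qs() and friends */
static int vsi_get_qs(struct vsi *v)            { v->txq = v->rxq = 4; return 0; }
static int vsi_alloc_q_vectors(struct vsi *v)   { v->vectors = v->txq; return 0; }
static int vsi_setup_vector_base(struct vsi *v) { v->base_vector = 64; return 0; }

int main(void)
{
	struct vsi v = { 0 };

	/* queues first, then per-queue vectors, then the MSI-X base */
	if (vsi_get_qs(&v) || vsi_alloc_q_vectors(&v) ||
	    vsi_setup_vector_base(&v))
		return 1;
	printf("%d Tx / %d Rx queues, %d vectors at base %d\n",
	       v.txq, v.rxq, v.vectors, v.base_vector);
	return 0;
}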
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f458ff285a34..3927b18b45a0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -32,7 +32,6 @@ static const struct net_device_ops ice_netdev_ops;
 
 static void ice_pf_dis_all_vsi(struct ice_pf *pf);
 static void ice_rebuild(struct ice_pf *pf);
-static int ice_vsi_release(struct ice_vsi *vsi);
 
 static void ice_vsi_release_all(struct ice_pf *pf);
 static void ice_update_vsi_stats(struct ice_vsi *vsi);
@@ -1465,185 +1464,43 @@ skip_req_irq:
 }
 
 /**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
- */
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	int offset, ret = 0;
-
-	mutex_lock(&pf->avail_q_mutex);
-	/* look for contiguous block of queues for tx */
-	offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
-					    0, vsi->alloc_txq, 0);
-	if (offset < ICE_MAX_TXQS) {
-		int i;
-
-		bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
-		for (i = 0; i < vsi->alloc_txq; i++)
-			vsi->txq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
-	}
-
-	/* look for contiguous block of queues for rx */
-	offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
-					    0, vsi->alloc_rxq, 0);
-	if (offset < ICE_MAX_RXQS) {
-		int i;
-
-		bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
-		for (i = 0; i < vsi->alloc_rxq; i++)
-			vsi->rxq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
-	}
-	mutex_unlock(&pf->avail_q_mutex);
-
-	return ret;
-}
-
-/**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
- *
- * Return 0 on success and a negative value on error
+ * ice_napi_del - Remove NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be removed
  */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static void ice_napi_del(struct ice_vsi *vsi)
 {
-	struct ice_pf *pf = vsi->back;
-	int i, index = 0;
-
-	mutex_lock(&pf->avail_q_mutex);
-
-	if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_txq; i++) {
-			index = find_next_zero_bit(pf->avail_txqs,
-						   ICE_MAX_TXQS, index);
-			if (index < ICE_MAX_TXQS) {
-				set_bit(index, pf->avail_txqs);
-				vsi->txq_map[i] = index;
-			} else {
-				goto err_scatter_tx;
-			}
-		}
-	}
-
-	if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_rxq; i++) {
-			index = find_next_zero_bit(pf->avail_rxqs,
-						   ICE_MAX_RXQS, index);
-			if (index < ICE_MAX_RXQS) {
-				set_bit(index, pf->avail_rxqs);
-				vsi->rxq_map[i] = index;
-			} else {
-				goto err_scatter_rx;
-			}
-		}
-	}
-
-	mutex_unlock(&pf->avail_q_mutex);
-	return 0;
+	int v_idx;
 
-err_scatter_rx:
-	/* unflag any queues we have grabbed (i is failed position) */
-	for (index = 0; index < i; index++) {
-		clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
-		vsi->rxq_map[index] = 0;
-	}
-	i = vsi->alloc_txq;
-err_scatter_tx:
-	/* i is either position of failed attempt or vsi->alloc_txq */
-	for (index = 0; index < i; index++) {
-		clear_bit(vsi->txq_map[index], pf->avail_txqs);
-		vsi->txq_map[index] = 0;
-	}
+	if (!vsi->netdev)
+		return;
 
-	mutex_unlock(&pf->avail_q_mutex);
-	return -ENOMEM;
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
+		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
 }
 
 /**
- * ice_vsi_get_qs - Assign queues from PF to VSI
- * @vsi: the VSI to assign queues to
+ * ice_napi_add - register NAPI handler for the VSI
+ * @vsi: VSI for which NAPI handler is to be registered
  *
- * Returns 0 on success and a negative value on error
+ * This function is only called in the driver's load path. Registering the NAPI
+ * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume,
+ * reset/rebuild, etc.)
  */
-static int ice_vsi_get_qs(struct ice_vsi *vsi)
+static void ice_napi_add(struct ice_vsi *vsi)
 {
-	int ret = 0;
-
-	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
-	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
-
-	/* NOTE: ice_vsi_get_qs_contig() will set the rx/tx mapping
-	 * modes individually to scatter if assigning contiguous queues
-	 * to rx or tx fails
-	 */
-	ret = ice_vsi_get_qs_contig(vsi);
-	if (ret < 0) {
-		if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
-					       ICE_MAX_SCATTER_TXQS);
-		if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
-					       ICE_MAX_SCATTER_RXQS);
-		ret = ice_vsi_get_qs_scatter(vsi);
-	}
-
-	return ret;
-}
-
-/**
- * ice_free_q_vector - Free memory allocated for a specific interrupt vector
- * @vsi: VSI having the memory freed
- * @v_idx: index of the vector to be freed
- */
-static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
-{
-	struct ice_q_vector *q_vector;
-	struct ice_ring *ring;
+	int v_idx;
 
-	if (!vsi->q_vectors[v_idx]) {
-		dev_dbg(&vsi->back->pdev->dev, "Queue vector at index %d not found\n",
-			v_idx);
+	if (!vsi->netdev)
 		return;
-	}
-	q_vector = vsi->q_vectors[v_idx];
-
-	ice_for_each_ring(ring, q_vector->tx)
-		ring->q_vector = NULL;
-	ice_for_each_ring(ring, q_vector->rx)
-		ring->q_vector = NULL;
-
-	/* only VSI with an associated netdev is set up with NAPI */
-	if (vsi->netdev)
-		netif_napi_del(&q_vector->napi);
-
-	devm_kfree(&vsi->back->pdev->dev, q_vector);
-	vsi->q_vectors[v_idx] = NULL;
-}
-
-/**
- * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
- * @vsi: the VSI having memory freed
- */
-static void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
-{
-	int v_idx;
 
 	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
-		ice_free_q_vector(vsi, v_idx);
+		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
+			       ice_napi_poll, NAPI_POLL_WEIGHT);
 }
 
 /**
- * ice_cfg_netdev - Setup the netdev flags
- * @vsi: the VSI being configured
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
  *
  * Returns 0 on success, negative value on failure
  */
@@ -1656,6 +1513,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
 	struct ice_netdev_priv *np;
 	struct net_device *netdev;
 	u8 mac_addr[ETH_ALEN];
+	int err;
 
 	netdev = alloc_etherdev_mqs(sizeof(struct ice_netdev_priv),
 				    vsi->alloc_txq, vsi->alloc_rxq);
@@ -1713,130 +1571,14 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
 	netdev->min_mtu = ETH_MIN_MTU;
 	netdev->max_mtu = ICE_MAX_MTU;
 
-	return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
- * @vsi: the VSI being configured
- * @v_idx: index of the vector in the vsi struct
- *
- * We allocate one q_vector. If allocation fails we return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
-{
-	struct ice_pf *pf = vsi->back;
-	struct ice_q_vector *q_vector;
-
-	/* allocate q_vector */
-	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
-	if (!q_vector)
-		return -ENOMEM;
-
-	q_vector->vsi = vsi;
-	q_vector->v_idx = v_idx;
-	/* only set affinity_mask if the CPU is online */
-	if (cpu_online(v_idx))
-		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
-	if (vsi->netdev)
-		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
-			       NAPI_POLL_WEIGHT);
-	/* tie q_vector and vsi together */
-	vsi->q_vectors[v_idx] = q_vector;
-
-	return 0;
-}
-
-/**
- * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
- * @vsi: the VSI being configured
- *
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
- */
-static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	int v_idx = 0, num_q_vectors;
-	int err;
-
-	if (vsi->q_vectors[0]) {
-		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
-			vsi->vsi_num);
-		return -EEXIST;
-	}
-
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-		num_q_vectors = vsi->num_q_vectors;
-	} else {
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-		err = ice_vsi_alloc_q_vector(vsi, v_idx);
-		if (err)
-			goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	while (v_idx--)
-		ice_free_q_vector(vsi, v_idx);
-
-	dev_err(&pf->pdev->dev,
-		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
-		vsi->num_q_vectors, vsi->vsi_num, err);
-	vsi->num_q_vectors = 0;
-	return err;
-}
-
-/**
- * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
- * @vsi: ptr to the VSI
- *
- * This should only be called after ice_vsi_alloc() which allocates the
- * corresponding SW VSI structure and initializes num_queue_pairs for the
- * newly allocated VSI.
- *
- * Returns 0 on success or negative on failure
- */
-static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	int num_q_vectors = 0;
-
-	if (vsi->base_vector) {
-		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
-			vsi->vsi_num, vsi->base_vector);
-		return -EEXIST;
-	}
-
-	if (!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
-		return -ENOENT;
-
-	switch (vsi->type) {
-	case ICE_VSI_PF:
-		num_q_vectors = vsi->num_q_vectors;
-		break;
-	default:
-		dev_warn(&vsi->back->pdev->dev, "Unknown VSI type %d\n",
-			 vsi->type);
-		break;
-	}
+	err = register_netdev(vsi->netdev);
+	if (err)
+		return err;
 
-	if (num_q_vectors)
-		vsi->base_vector = ice_get_res(pf, pf->irq_tracker,
-					       num_q_vectors, vsi->idx);
+	netif_carrier_off(vsi->netdev);
 
-	if (vsi->base_vector < 0) {
-		dev_err(&pf->pdev->dev,
-			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
-			num_q_vectors, vsi->vsi_num, vsi->base_vector);
-		return -ENOENT;
-	}
+	/* make sure transmit queues start off as stopped */
+	netif_tx_stop_all_queues(vsi->netdev);
 
 	return 0;
 }
@@ -1919,87 +1661,6 @@ ice_vsi_cfg_rss_exit:
 }
 
 /**
- * ice_vsi_rebuild - Rebuild VSI after reset
- * @vsi: vsi to be rebuild
- *
- * Returns 0 on success and negative value on failure
- */
-static int ice_vsi_rebuild(struct ice_vsi *vsi)
-{
-	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int ret, i;
-
-	if (!vsi)
-		return -EINVAL;
-
-	ice_vsi_free_q_vectors(vsi);
-	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
-	vsi->base_vector = 0;
-	ice_vsi_clear_rings(vsi);
-	ice_vsi_free_arrays(vsi, false);
-	ice_vsi_set_num_qs(vsi);
-
-	/* Initialize VSI struct elements and create VSI in FW */
-	ret = ice_vsi_init(vsi);
-	if (ret < 0)
-		goto err_vsi;
-
-	ret = ice_vsi_alloc_arrays(vsi, false);
-	if (ret < 0)
-		goto err_vsi;
-
-	switch (vsi->type) {
-	case ICE_VSI_PF:
-		/* fall through */
-		ret = ice_vsi_alloc_q_vectors(vsi);
-		if (ret)
-			goto err_rings;
-
-		ret = ice_vsi_setup_vector_base(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ret = ice_vsi_alloc_rings(vsi);
-		if (ret)
-			goto err_vectors;
-
-		ice_vsi_map_rings_to_vectors(vsi);
-		break;
-	default:
-		break;
-	}
-
-	ice_vsi_set_tc_cfg(vsi);
-
-	/* configure VSI nodes based on number of queues and TC's */
-	for (i = 0; i < vsi->tc_cfg.numtc; i++)
-		max_txqs[i] = vsi->num_txq;
-
-	ret = ice_cfg_vsi_lan(vsi->port_info, vsi->vsi_num,
-			      vsi->tc_cfg.ena_tc, max_txqs);
-	if (ret) {
-		dev_info(&vsi->back->pdev->dev,
-			 "Failed VSI lan queue config\n");
-		goto err_vectors;
-	}
-	return 0;
-
-err_vectors:
-	ice_vsi_free_q_vectors(vsi);
-err_rings:
-	if (vsi->netdev) {
-		vsi->current_netdev_flags = 0;
-		unregister_netdev(vsi->netdev);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
-err_vsi:
-	ice_vsi_clear(vsi);
-	set_bit(__ICE_RESET_FAILED, vsi->back->state);
-	return ret;
-}
-
-/**
  * ice_vsi_setup - Set up a VSI by a given type
  * @pf: board private structure
  * @pi: pointer to the port_info instance
@@ -2237,6 +1898,18 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 		goto unroll_vsi_setup;
 	}
 
+	status = ice_cfg_netdev(vsi);
+	if (status) {
+		status = -ENODEV;
+		goto unroll_vsi_setup;
+	}
+
+	/* registering the NAPI handler requires both the queues and
+	 * netdev to be created, which are done in ice_pf_vsi_setup()
+	 * and ice_cfg_netdev() respectively
+	 */
+	ice_napi_add(vsi);
+
 	/* To add a MAC filter, first add the MAC to a list and then
 	 * pass the list to ice_add_mac.
 	 */
@@ -2245,7 +1918,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 	status = ice_add_mac_to_list(vsi, &tmp_add_list,
 				     vsi->port_info->mac.perm_addr);
 	if (status)
-		goto unroll_vsi_setup;
+		goto unroll_napi_add;
 
 	/* VSI needs to receive broadcast traffic, so add the broadcast
 	 * MAC address to the list as well.
@@ -2269,16 +1942,20 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
 free_mac_list:
 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
 
-unroll_vsi_setup:
+unroll_napi_add:
 	if (vsi) {
-		ice_vsi_free_q_vectors(vsi);
-		if (vsi->netdev && vsi->netdev->reg_state == NETREG_REGISTERED)
-			unregister_netdev(vsi->netdev);
+		ice_napi_del(vsi);
 		if (vsi->netdev) {
+			if (vsi->netdev->reg_state == NETREG_REGISTERED)
+				unregister_netdev(vsi->netdev);
 			free_netdev(vsi->netdev);
 			vsi->netdev = NULL;
 		}
+	}
 
+unroll_vsi_setup:
+	if (vsi) {
+		ice_vsi_free_q_vectors(vsi);
 		ice_vsi_delete(vsi);
 		ice_vsi_put_qs(vsi);
 		pf->q_left_tx += vsi->alloc_txq;
@@ -3590,79 +3267,6 @@ err_setup_tx:
 	return err;
 }
 
-
-/**
- * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
- * @vsi: the VSI being removed
- */
-static void ice_rss_clean(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf;
-
-	pf = vsi->back;
-
-	if (vsi->rss_hkey_user)
-		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
-	if (vsi->rss_lut_user)
-		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
-}
-
-/**
- * ice_vsi_release - Delete a VSI and free its resources
- * @vsi: the VSI being removed
- *
- * Returns 0 on success or < 0 on error
- */
-static int ice_vsi_release(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf;
-
-	if (!vsi->back)
-		return -ENODEV;
-	pf = vsi->back;
-	/* do not unregister and free netdevs while driver is in the reset
-	 * recovery pending state. Since reset/rebuild happens through PF
-	 * service task workqueue, its not a good idea to unregister netdev
-	 * that is associated to the PF that is running the work queue items
-	 * currently. This is done to avoid check_flush_dependency() warning
-	 * on this wq
-	 */
-	if (vsi->netdev && !ice_is_reset_recovery_pending(pf->state)) {
-		unregister_netdev(vsi->netdev);
-		free_netdev(vsi->netdev);
-		vsi->netdev = NULL;
-	}
-
-	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
-		ice_rss_clean(vsi);
-
-	/* Disable VSI and free resources */
-	ice_vsi_dis_irq(vsi);
-	ice_vsi_close(vsi);
-
-	/* reclaim interrupt vectors back to PF */
-	ice_free_res(vsi->back->irq_tracker, vsi->base_vector, vsi->idx);
-	pf->num_avail_msix += vsi->num_q_vectors;
-
-	ice_remove_vsi_fltr(&pf->hw, vsi->vsi_num);
-	ice_vsi_delete(vsi);
-	ice_vsi_free_q_vectors(vsi);
-	ice_vsi_clear_rings(vsi);
-
-	ice_vsi_put_qs(vsi);
-	pf->q_left_tx += vsi->alloc_txq;
-	pf->q_left_rx += vsi->alloc_rxq;
-
-	/* retain SW VSI data structure since it is needed to unregister and
-	 * free VSI netdev when PF is not in reset recovery pending state,
-	 * for ex: during rmmod.
-	 */
-	if (!ice_is_reset_recovery_pending(pf->state))
-		ice_vsi_clear(vsi);
-
-	return 0;
-}
-
 /**
  * ice_vsi_release_all - Delete all VSIs
  * @pf: PF from which all VSIs are being removed
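
The ice_setup_pf_sw() hunks above split one unroll label into two: unroll_napi_add undoes the late netdev/NAPI registration and then falls through to unroll_vsi_setup for the earlier VSI teardown, so a MAC-filter failure unwinds everything while a netdev failure unwinds only the VSI. A compact sketch of that two-stage unwind idiom, with hypothetical stand-in functions in place of the driver calls:

#include <stdbool.h>
#include <stdio.h>

static bool setup_vsi(void)  { puts("vsi + queues set up"); return true; }
static bool cfg_netdev(void) { puts("netdev registered, napi added"); return true; }
static bool add_mac(void)    { puts("adding mac filter"); return false; /* fails */ }

static void napi_del_and_free_netdev(void) { puts("napi del, netdev freed"); }
static void vsi_teardown(void)             { puts("vectors freed, vsi deleted"); }

static int setup_pf_sw(void)
{
	if (!setup_vsi())
		return -1;	/* nothing to unwind yet */
	if (!cfg_netdev())
		goto unroll_vsi_setup;
	if (!add_mac())
		goto unroll_napi_add;
	return 0;

unroll_napi_add:
	napi_del_and_free_netdev();
	/* fall through to the teardown of earlier stages */
unroll_vsi_setup:
	vsi_teardown();
	return -1;
}

int main(void)
{
	return setup_pf_sw() ? 1 : 0;
}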