 drivers/net/ethernet/intel/ice/ice_lib.c  | 388 ++++++++++++++++++++++++++++++
 drivers/net/ethernet/intel/ice/ice_lib.h  |  22 +
 drivers/net/ethernet/intel/ice/ice_main.c | 386 ------------------------------
 3 files changed, 410 insertions(+), 386 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 06a54d79fba8..474ce5828bd4 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -226,6 +226,67 @@ static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
 }
 
 /**
+ * ice_vsi_delete - delete a VSI from the switch
+ * @vsi: pointer to VSI being removed
+ */
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_vsi_ctx ctxt;
+	enum ice_status status;
+
+	ctxt.vsi_num = vsi->vsi_num;
+
+	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
+
+	status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
+	if (status)
+		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
+			vsi->vsi_num);
+}
+
+/**
+ * ice_msix_clean_rings - MSIX mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
+{
+	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+
+	if (!q_vector->tx.ring && !q_vector->rx.ring)
+		return IRQ_HANDLED;
+
+	napi_schedule(&q_vector->napi);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * ice_vsi_put_qs - Release queues from VSI to PF
+ * @vsi: the VSI that is going to release queues
+ */
+void ice_vsi_put_qs(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int i;
+
+	mutex_lock(&pf->avail_q_mutex);
+
+	for (i = 0; i < vsi->alloc_txq; i++) {
+		clear_bit(vsi->txq_map[i], pf->avail_txqs);
+		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
+	}
+
+	for (i = 0; i < vsi->alloc_rxq; i++) {
+		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
+		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
+	}
+
+	mutex_unlock(&pf->avail_q_mutex);
+}
+
+/**
  * ice_add_mac_to_list - Add a mac address filter entry to the list
  * @vsi: the VSI to be forwarded to
  * @add_list: pointer to the list which contains MAC filter entries
@@ -747,3 +808,330 @@ err_alloc_q_ids:
 
 	return err;
 }
+
+/**
+ * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
+ * @vsi: VSI to enable or disable VLAN pruning on
+ * @ena: set to true to enable VLAN pruning and false to disable it
+ *
+ * returns 0 if VSI is updated, negative otherwise
+ */
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+{
+	struct ice_vsi_ctx *ctxt;
+	struct device *dev;
+	int status;
+
+	if (!vsi)
+		return -EINVAL;
+
+	dev = &vsi->back->pdev->dev;
+	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
+	if (!ctxt)
+		return -ENOMEM;
+
+	ctxt->info = vsi->info;
+
+	if (ena) {
+		ctxt->info.sec_flags |=
+			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
+		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+	} else {
+		ctxt->info.sec_flags &=
+			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+	}
+
+	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+						ICE_AQ_VSI_PROP_SW_VALID);
+	ctxt->vsi_num = vsi->vsi_num;
+	status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
+	if (status) {
+		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
+			   ena ? "Ena" : "Dis", vsi->vsi_num, status,
+			   vsi->back->hw.adminq.sq_last_status);
+		goto err_out;
+	}
+
+	vsi->info.sec_flags = ctxt->info.sec_flags;
+	vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+
+	devm_kfree(dev, ctxt);
+	return 0;
+
+err_out:
+	devm_kfree(dev, ctxt);
+	return -EIO;
+}
+
+/**
+ * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
+ * @vsi: the VSI being cleaned up
+ */
+static void ice_vsi_release_msix(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	u16 vector = vsi->base_vector;
+	struct ice_hw *hw = &pf->hw;
+	u32 txq = 0;
+	u32 rxq = 0;
+	int i, q;
+
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
+		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
+		for (q = 0; q < q_vector->num_ring_tx; q++) {
+			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
+			txq++;
+		}
+
+		for (q = 0; q < q_vector->num_ring_rx; q++) {
+			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
+			rxq++;
+		}
+	}
+
+	ice_flush(hw);
+}
+
+/**
+ * ice_vsi_free_irq - Free the IRQ association with the OS
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_free_irq(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	int base = vsi->base_vector;
+
+	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+		int i;
+
+		if (!vsi->q_vectors || !vsi->irqs_ready)
+			return;
+
+		vsi->irqs_ready = false;
+		for (i = 0; i < vsi->num_q_vectors; i++) {
+			u16 vector = i + base;
+			int irq_num;
+
+			irq_num = pf->msix_entries[vector].vector;
+
+			/* free only the irqs that were actually requested */
+			if (!vsi->q_vectors[i] ||
+			    !(vsi->q_vectors[i]->num_ring_tx ||
+			      vsi->q_vectors[i]->num_ring_rx))
+				continue;
+
+			/* clear the affinity notifier in the IRQ descriptor */
+			irq_set_affinity_notifier(irq_num, NULL);
+
+			/* clear the affinity_mask in the IRQ descriptor */
+			irq_set_affinity_hint(irq_num, NULL);
+			synchronize_irq(irq_num);
+			devm_free_irq(&pf->pdev->dev, irq_num,
+				      vsi->q_vectors[i]);
+		}
+		ice_vsi_release_msix(vsi);
+	}
+}
+
+/**
+ * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
+{
+	int i;
+
+	if (!vsi->tx_rings)
+		return;
+
+	ice_for_each_txq(vsi, i)
+		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+			ice_free_tx_ring(vsi->tx_rings[i]);
+}
+
+/**
+ * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
+ * @vsi: the VSI having resources freed
+ */
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
+{
+	int i;
+
+	if (!vsi->rx_rings)
+		return;
+
+	ice_for_each_rxq(vsi, i)
+		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
+			ice_free_rx_ring(vsi->rx_rings[i]);
+}
+
+/**
+ * ice_free_res - free a block of resources
+ * @res: pointer to the resource
+ * @index: starting index previously returned by ice_get_res
+ * @id: identifier to track owner
+ *
+ * Returns number of resources freed
+ */
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
+{
+	int count = 0;
+	int i;
+
+	if (!res || index >= res->num_entries)
+		return -EINVAL;
+
+	id |= ICE_RES_VALID_BIT;
+	for (i = index; i < res->num_entries && res->list[i] == id; i++) {
+		res->list[i] = 0;
+		count++;
+	}
+
+	return count;
+}
+
+/**
+ * ice_search_res - Search the tracker for a block of resources
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ */
+static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
+{
+	int start = res->search_hint;
+	int end = start;
+
+	id |= ICE_RES_VALID_BIT;
+
+	do {
+		/* skip already allocated entries */
+		if (res->list[end++] & ICE_RES_VALID_BIT) {
+			start = end;
+			if ((start + needed) > res->num_entries)
+				break;
+		}
+
+		if (end == (start + needed)) {
+			int i = start;
+
+			/* there was enough, so assign it to the requestor */
+			while (i != end)
+				res->list[i++] = id;
+
+			if (end == res->num_entries)
+				end = 0;
+
+			res->search_hint = end;
+			return start;
+		}
+	} while (1);
+
+	return -ENOMEM;
+}
+
+/**
+ * ice_get_res - get a block of resources
+ * @pf: board private structure
+ * @res: pointer to the resource
+ * @needed: size of the block needed
+ * @id: identifier to track owner
+ *
+ * Returns the base item index of the block, or -ENOMEM for error
+ * The search_hint trick and lack of advanced fit-finding only works
+ * because we're highly likely to have all the same sized requests.
+ * Linear search time and any fragmentation should be minimal.
+ */
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
+{
+	int ret;
+
+	if (!res || !pf)
+		return -EINVAL;
+
+	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
+		dev_err(&pf->pdev->dev,
+			"param err: needed=%d, num_entries = %d id=0x%04x\n",
+			needed, res->num_entries, id);
+		return -EINVAL;
+	}
+
+	/* search based on search_hint */
+	ret = ice_search_res(res, needed, id);
+
+	if (ret < 0) {
+		/* previous search failed. Reset search hint and try again */
+		res->search_hint = 0;
+		ret = ice_search_res(res, needed, id);
+	}
+
+	return ret;
+}
+
+/**
+ * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
+ * @vsi: the VSI being un-configured
+ */
+void ice_vsi_dis_irq(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_hw *hw = &pf->hw;
+	int base = vsi->base_vector;
+	u32 val;
+	int i;
+
+	/* disable interrupt causation from each queue */
+	if (vsi->tx_rings) {
+		ice_for_each_txq(vsi, i) {
+			if (vsi->tx_rings[i]) {
+				u16 reg;
+
+				reg = vsi->tx_rings[i]->reg_idx;
+				val = rd32(hw, QINT_TQCTL(reg));
+				val &= ~QINT_TQCTL_CAUSE_ENA_M;
+				wr32(hw, QINT_TQCTL(reg), val);
+			}
+		}
+	}
+
+	if (vsi->rx_rings) {
+		ice_for_each_rxq(vsi, i) {
+			if (vsi->rx_rings[i]) {
+				u16 reg;
+
+				reg = vsi->rx_rings[i]->reg_idx;
+				val = rd32(hw, QINT_RQCTL(reg));
+				val &= ~QINT_RQCTL_CAUSE_ENA_M;
+				wr32(hw, QINT_RQCTL(reg), val);
+			}
+		}
+	}
+
+	/* disable each interrupt */
+	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
+		for (i = vsi->base_vector;
+		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
+			wr32(hw, GLINT_DYN_CTL(i), 0);
+
+		ice_flush(hw);
+		for (i = 0; i < vsi->num_q_vectors; i++)
+			synchronize_irq(pf->msix_entries[i + base].vector);
+	}
+}
+
+/**
+ * ice_is_reset_recovery_pending - schedule a reset
+ * @state: pf state field
+ */
+bool ice_is_reset_recovery_pending(unsigned long *state)
+{
+	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index ad4257929b9b..cbde99f38798 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -33,4 +33,26 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi);
 
 int ice_vsi_stop_tx_rings(struct ice_vsi *vsi);
 
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+
+void ice_vsi_delete(struct ice_vsi *vsi);
+
+int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
+
+int
+ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
+
+bool ice_is_reset_recovery_pending(unsigned long *state);
+
+void ice_vsi_put_qs(struct ice_vsi *vsi);
+
+void ice_vsi_dis_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_irq(struct ice_vsi *vsi);
+
+void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
+
+void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
+
+irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data);
 #endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index ececf3dabf7e..839c11198e14 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -142,109 +142,6 @@ static int ice_get_free_slot(void *array, int size, int curr)
 }
 
 /**
- * ice_search_res - Search the tracker for a block of resources
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- * Returns the base item index of the block, or -ENOMEM for error
- */
-static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
-{
-	int start = res->search_hint;
-	int end = start;
-
-	id |= ICE_RES_VALID_BIT;
-
-	do {
-		/* skip already allocated entries */
-		if (res->list[end++] & ICE_RES_VALID_BIT) {
-			start = end;
-			if ((start + needed) > res->num_entries)
-				break;
-		}
-
-		if (end == (start + needed)) {
-			int i = start;
-
-			/* there was enough, so assign it to the requestor */
-			while (i != end)
-				res->list[i++] = id;
-
-			if (end == res->num_entries)
-				end = 0;
-
-			res->search_hint = end;
-			return start;
-		}
-	} while (1);
-
-	return -ENOMEM;
-}
-
-/**
- * ice_get_res - get a block of resources
- * @pf: board private structure
- * @res: pointer to the resource
- * @needed: size of the block needed
- * @id: identifier to track owner
- *
- * Returns the base item index of the block, or -ENOMEM for error
- * The search_hint trick and lack of advanced fit-finding only works
- * because we're highly likely to have all the same sized requests.
- * Linear search time and any fragmentation should be minimal.
- */
-static int
-ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
-{
-	int ret;
-
-	if (!res || !pf)
-		return -EINVAL;
-
-	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
-		dev_err(&pf->pdev->dev,
-			"param err: needed=%d, num_entries = %d id=0x%04x\n",
-			needed, res->num_entries, id);
-		return -EINVAL;
-	}
-
-	/* search based on search_hint */
-	ret = ice_search_res(res, needed, id);
-
-	if (ret < 0) {
-		/* previous search failed. Reset search hint and try again */
-		res->search_hint = 0;
-		ret = ice_search_res(res, needed, id);
-	}
-
-	return ret;
-}
-
-/**
- * ice_free_res - free a block of resources
- * @res: pointer to the resource
- * @index: starting index previously returned by ice_get_res
- * @id: identifier to track owner
- * Returns number of resources freed
- */
-static int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
-{
-	int count = 0;
-	int i;
-
-	if (!res || index >= res->num_entries)
-		return -EINVAL;
-
-	id |= ICE_RES_VALID_BIT;
-	for (i = index; i < res->num_entries && res->list[i] == id; i++) {
-		res->list[i] = 0;
-		count++;
-	}
-
-	return count;
-}
-
-/**
  * ice_add_mac_to_sync_list - creates list of mac addresses to be synced
  * @netdev: the net device on which the sync is happening
  * @addr: mac address to sync
@@ -300,63 +197,6 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
 }
 
 /**
- * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
- * @vsi: VSI to enable or disable VLAN pruning on
- * @ena: set to true to enable VLAN pruning and false to disable it
- *
- * returns 0 if VSI is updated, negative otherwise
- */
-static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
-{
-	struct ice_vsi_ctx *ctxt;
-	struct device *dev;
-	int status;
-
-	if (!vsi)
-		return -EINVAL;
-
-	dev = &vsi->back->pdev->dev;
-	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
-	if (!ctxt)
-		return -ENOMEM;
-
-	ctxt->info = vsi->info;
-
-	if (ena) {
-		ctxt->info.sec_flags |=
-			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
-			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
-		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-	} else {
-		ctxt->info.sec_flags &=
-			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
-			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
-		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
-	}
-
-	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
-						ICE_AQ_VSI_PROP_SW_VALID);
-	ctxt->vsi_num = vsi->vsi_num;
-	status = ice_aq_update_vsi(&vsi->back->hw, ctxt, NULL);
-	if (status) {
-		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI %d failed, err = %d, aq_err = %d\n",
-			   ena ? "Ena" : "Dis", vsi->vsi_num, status,
-			   vsi->back->hw.adminq.sq_last_status);
-		goto err_out;
-	}
-
-	vsi->info.sec_flags = ctxt->info.sec_flags;
-	vsi->info.sw_flags2 = ctxt->info.sw_flags2;
-
-	devm_kfree(dev, ctxt);
-	return 0;
-
-err_out:
-	devm_kfree(dev, ctxt);
-	return -EIO;
-}
-
-/**
  * ice_vsi_sync_fltr - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
  *
@@ -521,15 +361,6 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
 }
 
 /**
- * ice_is_reset_recovery_pending - schedule a reset
- * @state: pf state field
- */
-static bool ice_is_reset_recovery_pending(unsigned long int *state)
-{
-	return test_bit(__ICE_RESET_RECOVERY_PENDING, state);
-}
-
-/**
  * ice_prepare_for_reset - prep for the core to reset
  * @pf: board private structure
  *
@@ -1293,57 +1124,6 @@ static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
 static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
 
 /**
- * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
- * @vsi: the VSI being un-configured
- */
-static void ice_vsi_dis_irq(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	struct ice_hw *hw = &pf->hw;
-	int base = vsi->base_vector;
-	u32 val;
-	int i;
-
-	/* disable interrupt causation from each queue */
-	if (vsi->tx_rings) {
-		ice_for_each_txq(vsi, i) {
-			if (vsi->tx_rings[i]) {
-				u16 reg;
-
-				reg = vsi->tx_rings[i]->reg_idx;
-				val = rd32(hw, QINT_TQCTL(reg));
-				val &= ~QINT_TQCTL_CAUSE_ENA_M;
-				wr32(hw, QINT_TQCTL(reg), val);
-			}
-		}
-	}
-
-	if (vsi->rx_rings) {
-		ice_for_each_rxq(vsi, i) {
-			if (vsi->rx_rings[i]) {
-				u16 reg;
-
-				reg = vsi->rx_rings[i]->reg_idx;
-				val = rd32(hw, QINT_RQCTL(reg));
-				val &= ~QINT_RQCTL_CAUSE_ENA_M;
-				wr32(hw, QINT_RQCTL(reg), val);
-			}
-		}
-	}
-
-	/* disable each interrupt */
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-		for (i = vsi->base_vector;
-		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
-			wr32(hw, GLINT_DYN_CTL(i), 0);
-
-		ice_flush(hw);
-		for (i = 0; i < vsi->num_q_vectors; i++)
-			synchronize_irq(pf->msix_entries[i + base].vector);
-	}
-}
-
-/**
  * ice_vsi_ena_irq - Enable IRQ for the given VSI
  * @vsi: the VSI being configured
  */
@@ -1364,26 +1144,6 @@ static int ice_vsi_ena_irq(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_delete - delete a VSI from the switch
- * @vsi: pointer to VSI being removed
- */
-static void ice_vsi_delete(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	struct ice_vsi_ctx ctxt;
-	enum ice_status status;
-
-	ctxt.vsi_num = vsi->vsi_num;
-
-	memcpy(&ctxt.info, &vsi->info, sizeof(struct ice_aqc_vsi_props));
-
-	status = ice_free_vsi(&pf->hw, vsi->idx, &ctxt, false, NULL);
-	if (status)
-		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
-			vsi->vsi_num);
-}
-
-/**
  * ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI
  * @vsi: the VSI being configured
  * @basename: name for the vector
@@ -1690,38 +1450,6 @@ static int ice_vsi_init(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
- * @vsi: the VSI being cleaned up
- */
-static void ice_vsi_release_msix(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	u16 vector = vsi->base_vector;
-	struct ice_hw *hw = &pf->hw;
-	u32 txq = 0;
-	u32 rxq = 0;
-	int i, q;
-
-	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[i];
-
-		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), 0);
-		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), 0);
-		for (q = 0; q < q_vector->num_ring_tx; q++) {
-			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
-			txq++;
-		}
-
-		for (q = 0; q < q_vector->num_ring_rx; q++) {
-			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
-			rxq++;
-		}
-	}
-
-	ice_flush(hw);
-}
-
-/**
  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
  * @vsi: the VSI having rings deallocated
  */
@@ -1804,47 +1532,6 @@ err_out:
 }
 
 /**
- * ice_vsi_free_irq - Free the irq association with the OS
- * @vsi: the VSI being configured
- */
-static void ice_vsi_free_irq(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	int base = vsi->base_vector;
-
-	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
-		int i;
-
-		if (!vsi->q_vectors || !vsi->irqs_ready)
-			return;
-
-		vsi->irqs_ready = false;
-		for (i = 0; i < vsi->num_q_vectors; i++) {
-			u16 vector = i + base;
-			int irq_num;
-
-			irq_num = pf->msix_entries[vector].vector;
-
-			/* free only the irqs that were actually requested */
-			if (!vsi->q_vectors[i] ||
-			    !(vsi->q_vectors[i]->num_ring_tx ||
-			      vsi->q_vectors[i]->num_ring_rx))
-				continue;
-
-			/* clear the affinity notifier in the IRQ descriptor */
-			irq_set_affinity_notifier(irq_num, NULL);
-
-			/* clear the affinity_mask in the IRQ descriptor */
-			irq_set_affinity_hint(irq_num, NULL);
-			synchronize_irq(irq_num);
-			devm_free_irq(&pf->pdev->dev, irq_num,
-				      vsi->q_vectors[i]);
-		}
-		ice_vsi_release_msix(vsi);
-	}
-}
-
-/**
  * ice_ena_misc_vector - enable the non-queue interrupts
  * @pf: board private structure
  */
@@ -2097,23 +1784,6 @@ err_txrings:
 }
 
 /**
- * ice_msix_clean_rings - MSIX mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a q_vector
- */
-static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
-{
-	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
-
-	if (!q_vector->tx.ring && !q_vector->rx.ring)
-		return IRQ_HANDLED;
-
-	napi_schedule(&q_vector->napi);
-
-	return IRQ_HANDLED;
-}
-
-/**
  * ice_vsi_alloc - Allocates the next available struct vsi in the PF
  * @pf: board private structure
  * @type: type of VSI
@@ -2400,30 +2070,6 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
 }
 
 /**
- * ice_vsi_put_qs - Release queues from VSI to PF
- * @vsi: the VSI thats going to release queues
- */
-static void ice_vsi_put_qs(struct ice_vsi *vsi)
-{
-	struct ice_pf *pf = vsi->back;
-	int i;
-
-	mutex_lock(&pf->avail_q_mutex);
-
-	for (i = 0; i < vsi->alloc_txq; i++) {
-		clear_bit(vsi->txq_map[i], pf->avail_txqs);
-		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
-	}
-
-	for (i = 0; i < vsi->alloc_rxq; i++) {
-		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
-		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
-	}
-
-	mutex_unlock(&pf->avail_q_mutex);
-}
-
-/**
  * ice_free_q_vector - Free memory allocated for a specific interrupt vector
  * @vsi: VSI having the memory freed
  * @v_idx: index of the vector to be freed
@@ -4420,38 +4066,6 @@ static int ice_vsi_req_irq(struct ice_vsi *vsi, char *basename)
 }
 
 /**
- * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
-{
-	int i;
-
-	if (!vsi->tx_rings)
-		return;
-
-	ice_for_each_txq(vsi, i)
-		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
-			ice_free_tx_ring(vsi->tx_rings[i]);
-}
-
-/**
- * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
- * @vsi: the VSI having resources freed
- */
-static void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
-{
-	int i;
-
-	if (!vsi->rx_rings)
-		return;
-
-	ice_for_each_rxq(vsi, i)
-		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
-			ice_free_rx_ring(vsi->rx_rings[i]);
-}
-
-/**
  * ice_vsi_open - Called when a network interface is made active
  * @vsi: the VSI to open
  *