Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--	drivers/net/ethernet/intel/ice/ice_lib.c	491
1 file changed, 491 insertions, 0 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1cf4dca12495..06a54d79fba8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -5,6 +5,227 @@
 #include "ice_lib.h"
 
 /**
+ * ice_setup_rx_ctx - Configure a receive ring context
+ * @ring: The Rx ring to configure
+ *
+ * Configure the Rx descriptor ring in RLAN context.
+ */
+static int ice_setup_rx_ctx(struct ice_ring *ring)
+{
+	struct ice_vsi *vsi = ring->vsi;
+	struct ice_hw *hw = &vsi->back->hw;
+	u32 rxdid = ICE_RXDID_FLEX_NIC;
+	struct ice_rlan_ctx rlan_ctx;
+	u32 regval;
+	u16 pf_q;
+	int err;
+
+	/* what is the Rx queue number in global space of 2K Rx queues */
+	pf_q = vsi->rxq_map[ring->q_index];
+
+	/* clear the context structure first */
+	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
+
+	rlan_ctx.base = ring->dma >> 7;
+
+	rlan_ctx.qlen = ring->count;
+
+	/* Receive Packet Data Buffer Size.
+	 * The Packet Data Buffer Size is defined in 128 byte units.
+	 */
+	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+
+	/* use 32 byte descriptors */
+	rlan_ctx.dsize = 1;
+
+	/* Strip the Ethernet CRC bytes before the packet is posted to host
+	 * memory.
+	 */
+	rlan_ctx.crcstrip = 1;
+
+	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
+	rlan_ctx.l2tsel = 1;
+
+	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
+
+	/* This controls whether VLAN is stripped from inner headers.
+	 * The VLAN in the inner L2 header is stripped to the receive
+	 * descriptor if enabled by this flag.
+	 */
+	rlan_ctx.showiv = 0;
+
+	/* Max packet size for this queue - must not be set to a larger value
+	 * than 5 x DBUF
+	 */
+	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
+			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
+
+	/* Rx queue threshold in units of 64 */
+	rlan_ctx.lrxqthresh = 1;
+
+	/* Enable Flexible Descriptors in the queue context, which
+	 * allows this driver to select a specific receive descriptor format
+	 */
+	regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
+	regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
+		QRXFLXP_CNTXT_RXDID_IDX_M;
+
+	/* increasing context priority to pick up the profile ID;
+	 * default is 0x01; setting it to 0x03 ensures the profile
+	 * is programmed even if the previous context had the same priority
+	 */
+	regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
+		QRXFLXP_CNTXT_RXDID_PRIO_M;
+
+	wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
+
+	/* Absolute queue number out of 2K needs to be passed */
+	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
+	if (err) {
+		dev_err(&vsi->back->pdev->dev,
+			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+			pf_q, err);
+		return -EIO;
+	}
+
+	/* init queue specific tail register */
+	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
+	writel(0, ring->tail);
+	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
+
+	return 0;
+}
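
ice_alloc_rx_bufs() is handed the number of free descriptors via ICE_DESC_UNUSED, which this patch does not define. A minimal sketch of the conventional ring math, assuming the macro follows the same pattern as Intel's other ring drivers (one slot is kept empty so a full ring stays distinguishable from an empty one):

	/* hypothetical definition, for orientation only */
	#define ICE_DESC_UNUSED(R) \
		((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
		 (R)->next_to_clean - (R)->next_to_use - 1)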
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: The Tx ring to configure
+ * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ */
+static void
+ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+	struct ice_vsi *vsi = ring->vsi;
+	struct ice_hw *hw = &vsi->back->hw;
+
+	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+
+	tlan_ctx->port_num = vsi->port_info->lport;
+
+	/* Transmit Queue Length */
+	tlan_ctx->qlen = ring->count;
+
+	/* PF number */
+	tlan_ctx->pf_num = hw->pf_id;
+
+	/* queue belongs to a specific VSI type
+	 * VF / VM index should be programmed per vmvf_type setting:
+	 * for vmvf_type = VF, it is the VF number between 0-255
+	 * for vmvf_type = VM, it is the VM number between 0-767
+	 * for PF or EMP this field should be set to zero
+	 */
+	switch (vsi->type) {
+	case ICE_VSI_PF:
+		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+		break;
+	default:
+		return;
+	}
+
+	/* make sure the context is associated with the right VSI */
+	tlan_ctx->src_vsi = vsi->vsi_num;
+
+	tlan_ctx->tso_ena = ICE_TX_LEGACY;
+	tlan_ctx->tso_qnum = pf_q;
+
+	/* Legacy or Advanced Host Interface:
+	 * 0: Advanced Host Interface
+	 * 1: Legacy Host Interface
+	 */
+	tlan_ctx->legacy_int = ICE_TX_LEGACY;
+}
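
Only PF-owned queues are programmed here; every other VSI type returns before the context is finished. For orientation, a hedged sketch of how a VF case could later slot into the switch above (ICE_TLAN_CTX_VMVF_TYPE_VF and the vmvf_num assignment are assumptions, not part of this patch):

	case ICE_VSI_VF:
		/* hypothetical: attribute completions to the owning VF */
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		tlan_ctx->vmvf_num = vsi->vf_id;	/* assumed field names */
		break;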
+
+/**
+ * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @ena: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT in case of failing to reach the requested state after
+ * multiple retries; else will return 0 in case of success.
+ */
+static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
+{
+	int i;
+
+	for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+		u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
+
+		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+			break;
+
+		usleep_range(10, 20);
+	}
+	if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+		return -ETIMEDOUT;
+
+	return 0;
+}
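
For illustration, a caller flips the request bit and then polls; this is exactly the shape ice_vsi_ctrl_rx_rings() below uses (pf_q and the warning here are illustrative, not from the patch):

	/* request enable on PF queue pf_q, then wait for hardware to ack */
	wr32(&pf->hw, QRX_CTRL(pf_q),
	     rd32(&pf->hw, QRX_CTRL(pf_q)) | QRX_CTRL_QENA_REQ_M);
	if (ice_pf_rxq_wait(pf, pf_q, true))
		dev_warn(&pf->pdev->dev, "Rx queue %d enable timed out\n", pf_q);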
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_hw *hw = &pf->hw;
+	int i, j, ret = 0;
+
+	for (i = 0; i < vsi->num_rxq; i++) {
+		int pf_q = vsi->rxq_map[i];
+		u32 rx_reg;
+
+		for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) {
+			rx_reg = rd32(hw, QRX_CTRL(pf_q));
+			if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) ==
+			    ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1))
+				break;
+			usleep_range(1000, 2000);
+		}
+
+		/* Skip if the queue is already in the requested state */
+		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+			continue;
+
+		/* turn on/off the queue */
+		if (ena)
+			rx_reg |= QRX_CTRL_QENA_REQ_M;
+		else
+			rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+		wr32(hw, QRX_CTRL(pf_q), rx_reg);
+
+		/* wait for the change to finish */
+		ret = ice_pf_rxq_wait(pf, pf_q, ena);
+		if (ret) {
+			dev_err(&pf->pdev->dev,
+				"VSI idx %d Rx ring %d %sable timeout\n",
+				vsi->idx, pf_q, (ena ? "en" : "dis"));
+			break;
+		}
+	}
+
+	return ret;
+}
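
The two QENA bits form a small request/acknowledge handshake, which is why there are two wait loops:

	/* QRX_CTRL_QENA_REQ_M  - what software last requested
	 * QRX_CTRL_QENA_STAT_M - what hardware currently reports
	 * The inner loop above waits for any prior request to settle
	 * (REQ == STAT); ice_pf_rxq_wait() then waits for the new
	 * request to take effect.
	 */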
+
+/**
  * ice_add_mac_to_list - Add a mac address filter entry to the list
  * @vsi: the VSI to be forwarded to
  * @add_list: pointer to the list which contains MAC filter entries
@@ -186,6 +407,174 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
 }
 
 /**
+ * ice_vsi_cfg_rxqs - Configure the VSI for Rx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Rx VSI for operation.
+ */
+int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+{
+	int err = 0;
+	u16 i;
+
+	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
+		vsi->max_frame = vsi->netdev->mtu +
+			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+	else
+		vsi->max_frame = ICE_RXBUF_2048;
+
+	vsi->rx_buf_len = ICE_RXBUF_2048;
+	/* set up individual rings */
+	for (i = 0; i < vsi->num_rxq && !err; i++)
+		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
+
+	if (err) {
+		dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n");
+		return -EIO;
+	}
+	return err;
+}
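
A worked example of the max_frame arithmetic, using the standard header sizes from <linux/if_ether.h> and <linux/if_vlan.h>:

	/* jumbo MTU 9000: 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4)
	 *                 + VLAN_HLEN (4) = 9022 bytes
	 * default MTU 1500 (== ETH_DATA_LEN): max_frame stays ICE_RXBUF_2048
	 */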
+
+/**
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+{
+	struct ice_aqc_add_tx_qgrp *qg_buf;
+	struct ice_aqc_add_txqs_perq *txq;
+	struct ice_pf *pf = vsi->back;
+	enum ice_status status;
+	u16 buf_len, i, pf_q;
+	int err = 0, tc = 0;
+	u8 num_q_grps;
+
+	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
+	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+	if (!qg_buf)
+		return -ENOMEM;
+
+	if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
+		err = -EINVAL;
+		goto err_cfg_txqs;
+	}
+	qg_buf->num_txqs = 1;
+	num_q_grps = 1;
+
+	/* set up and configure the Tx queues */
+	ice_for_each_txq(vsi, i) {
+		struct ice_tlan_ctx tlan_ctx = { 0 };
+
+		pf_q = vsi->txq_map[i];
+		ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
+		/* copy context contents into the qg_buf */
+		qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+		ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+			    ice_tlan_ctx_info);
+
+		/* init queue specific tail reg. It is referred to as the
+		 * transmit comm scheduler queue doorbell.
+		 */
+		vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+		status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc,
+					 num_q_grps, qg_buf, buf_len, NULL);
+		if (status) {
+			dev_err(&vsi->back->pdev->dev,
+				"Failed to set LAN Tx queue context, error: %d\n",
+				status);
+			err = -ENODEV;
+			goto err_cfg_txqs;
+		}
+
+		/* Add Tx Queue TEID into the VSI Tx ring from the response.
+		 * This will complete configuring and enabling the queue.
+		 */
+		txq = &qg_buf->txqs[0];
+		if (pf_q == le16_to_cpu(txq->txq_id))
+			vsi->tx_rings[i]->txq_teid =
+				le32_to_cpu(txq->q_teid);
+	}
+err_cfg_txqs:
+	devm_kfree(&pf->pdev->dev, qg_buf);
+	return err;
+}
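
Together with ice_vsi_cfg_rxqs() above, this suggests the bring-up ordering a caller would use. A minimal sketch, assuming a wrapper along these lines lives in ice_main.c (the name and error handling are illustrative, not part of this patch):

	static int ice_vsi_cfg_sketch(struct ice_vsi *vsi)
	{
		int err;

		err = ice_vsi_cfg_txqs(vsi);	/* TLAN contexts via admin queue */
		if (!err)
			err = ice_vsi_cfg_rxqs(vsi);	/* RLAN contexts + Rx buffers */
		return err;
	}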
+
+/**
+ * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
+ * @vsi: the VSI being configured
+ */
+void ice_vsi_cfg_msix(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	u16 vector = vsi->base_vector;
+	struct ice_hw *hw = &pf->hw;
+	u32 txq = 0, rxq = 0;
+	int i, q, itr;
+	u8 itr_gran;
+
+	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[i];
+
+		itr_gran = hw->itr_gran_200;
+
+		if (q_vector->num_ring_rx) {
+			q_vector->rx.itr =
+				ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting,
+					   itr_gran);
+			q_vector->rx.latency_range = ICE_LOW_LATENCY;
+		}
+
+		if (q_vector->num_ring_tx) {
+			q_vector->tx.itr =
+				ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting,
+					   itr_gran);
+			q_vector->tx.latency_range = ICE_LOW_LATENCY;
+		}
+		wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr);
+		wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr);
+
+		/* Both the Transmit Queue Interrupt Cause Control register
+		 * and the Receive Queue Interrupt Cause Control register
+		 * expect the MSIX_INDX field to be the vector index
+		 * within the function space and not the absolute
+		 * vector index across the PF or across the device.
+		 * For SR-IOV VF VSIs the queue vector index always starts
+		 * at 1 since the first vector index (0) is used for OICR
+		 * in VF space. Since VMDq and other PF VSIs are within
+		 * the PF function space, use the vector index that is
+		 * tracked for this PF.
+		 */
+		for (q = 0; q < q_vector->num_ring_tx; q++) {
+			u32 val;
+
+			itr = ICE_ITR_NONE;
+			val = QINT_TQCTL_CAUSE_ENA_M |
+			      (itr << QINT_TQCTL_ITR_INDX_S) |
+			      (vector << QINT_TQCTL_MSIX_INDX_S);
+			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
+			txq++;
+		}
+
+		for (q = 0; q < q_vector->num_ring_rx; q++) {
+			u32 val;
+
+			itr = ICE_ITR_NONE;
+			val = QINT_RQCTL_CAUSE_ENA_M |
+			      (itr << QINT_RQCTL_ITR_INDX_S) |
+			      (vector << QINT_RQCTL_MSIX_INDX_S);
+			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
+			rxq++;
+		}
+	}
+
+	ice_flush(hw);
+}
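
ITR_TO_REG() and hw->itr_gran_200 are defined outside this patch. The working assumption is that ITR settings are kept in microseconds and GLINT_ITR counts granularity-sized ticks, roughly:

	/* hypothetical reading of the conversion, for orientation only */
	static inline u16 itr_to_reg_sketch(u16 itr_usecs, u8 itr_gran)
	{
		return itr_usecs / itr_gran;	/* e.g. 50 us / 2 us gran = 25 */
	}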
+
+/**
  * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
  * @vsi: the VSI being changed
  */
@@ -256,3 +645,105 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
+
+/**
+ * ice_vsi_start_rx_rings - start VSI's Rx rings
+ * @vsi: the VSI whose rings are to be started
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
+{
+	return ice_vsi_ctrl_rx_rings(vsi, true);
+}
+
+/**
+ * ice_vsi_stop_rx_rings - stop VSI's Rx rings
+ * @vsi: the VSI
+ *
+ * Returns 0 on success and a negative value on error
+ */
+int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
+{
+	return ice_vsi_ctrl_rx_rings(vsi, false);
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ */
+int ice_vsi_stop_tx_rings(struct ice_vsi *vsi)
+{
+	struct ice_pf *pf = vsi->back;
+	struct ice_hw *hw = &pf->hw;
+	enum ice_status status;
+	u32 *q_teids, val;
+	u16 *q_ids, i;
+	int err = 0;
+
+	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+		return -EINVAL;
+
+	q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
+			       GFP_KERNEL);
+	if (!q_teids)
+		return -ENOMEM;
+
+	q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
+			     GFP_KERNEL);
+	if (!q_ids) {
+		err = -ENOMEM;
+		goto err_alloc_q_ids;
+	}
+
+	/* set up the Tx queue list to be disabled */
+	ice_for_each_txq(vsi, i) {
+		u16 v_idx;
+
+		if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+			err = -EINVAL;
+			goto err_out;
+		}
+
+		q_ids[i] = vsi->txq_map[i];
+		q_teids[i] = vsi->tx_rings[i]->txq_teid;
+
+		/* clear cause_ena bit for disabled queues */
+		val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+		val &= ~QINT_TQCTL_CAUSE_ENA_M;
+		wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+
+		/* software is expected to wait for 100 ns */
+		ndelay(100);
+
+		/* trigger a software interrupt for the vector associated
+		 * with the queue to schedule the NAPI handler
+		 */
+		v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+		wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx),
+		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
+	}
+	status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids,
+				 NULL);
+	/* if the disable queue command was exercised during an active reset
+	 * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as
+	 * the reset operation disables queues at the hardware level anyway.
+	 */
+	if (status == ICE_ERR_RESET_ONGOING) {
+		dev_info(&pf->pdev->dev,
+			 "Reset in progress. LAN Tx queues already disabled\n");
+	} else if (status) {
+		dev_err(&pf->pdev->dev,
+			"Failed to disable LAN Tx queues, error: %d\n",
+			status);
+		err = -ENODEV;
+	}
+
+err_out:
+	devm_kfree(&pf->pdev->dev, q_ids);
+
+err_alloc_q_ids:
+	devm_kfree(&pf->pdev->dev, q_teids);
+
+	return err;
+}
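
A matching teardown sketch for a caller such as an ice_down() path (the quiesce-Tx-then-Rx ordering is the usual convention, assumed rather than shown in this patch):

	err = ice_vsi_stop_tx_rings(vsi);	/* AQ disable using saved TEIDs */
	if (!err)
		err = ice_vsi_stop_rx_rings(vsi);	/* clear QENA_REQ, poll STAT */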