Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c  | 491
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h  |  13
 -rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 541
 3 files changed, 526 insertions, 519 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1cf4dca12495..06a54d79fba8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -5,6 +5,227 @@
| 5 | #include "ice_lib.h" | 5 | #include "ice_lib.h" |
| 6 | 6 | ||
| 7 | /** | 7 | /** |
| 8 | * ice_setup_rx_ctx - Configure a receive ring context | ||
| 9 | * @ring: The Rx ring to configure | ||
| 10 | * | ||
| 11 | * Configure the Rx descriptor ring in RLAN context. | ||
| 12 | */ | ||
| 13 | static int ice_setup_rx_ctx(struct ice_ring *ring) | ||
| 14 | { | ||
| 15 | struct ice_vsi *vsi = ring->vsi; | ||
| 16 | struct ice_hw *hw = &vsi->back->hw; | ||
| 17 | u32 rxdid = ICE_RXDID_FLEX_NIC; | ||
| 18 | struct ice_rlan_ctx rlan_ctx; | ||
| 19 | u32 regval; | ||
| 20 | u16 pf_q; | ||
| 21 | int err; | ||
| 22 | |||
| 23 | /* what is RX queue number in global space of 2K Rx queues */ | ||
| 24 | pf_q = vsi->rxq_map[ring->q_index]; | ||
| 25 | |||
| 26 | /* clear the context structure first */ | ||
| 27 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); | ||
| 28 | |||
| 29 | rlan_ctx.base = ring->dma >> 7; | ||
| 30 | |||
| 31 | rlan_ctx.qlen = ring->count; | ||
| 32 | |||
| 33 | /* Receive Packet Data Buffer Size. | ||
| 34 | * The Packet Data Buffer Size is defined in 128 byte units. | ||
| 35 | */ | ||
| 36 | rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; | ||
| 37 | |||
| 38 | /* use 32 byte descriptors */ | ||
| 39 | rlan_ctx.dsize = 1; | ||
| 40 | |||
| 41 | /* Strip the Ethernet CRC bytes before the packet is posted to host | ||
| 42 | * memory. | ||
| 43 | */ | ||
| 44 | rlan_ctx.crcstrip = 1; | ||
| 45 | |||
| 46 | /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ | ||
| 47 | rlan_ctx.l2tsel = 1; | ||
| 48 | |||
| 49 | rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; | ||
| 50 | rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; | ||
| 51 | rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; | ||
| 52 | |||
| 53 | /* This controls whether VLAN is stripped from inner headers | ||
| 54 | * The VLAN in the inner L2 header is stripped to the receive | ||
| 55 | * descriptor if enabled by this flag. | ||
| 56 | */ | ||
| 57 | rlan_ctx.showiv = 0; | ||
| 58 | |||
| 59 | /* Max packet size for this queue - must not be set to a larger value | ||
| 60 | * than 5 x DBUF | ||
| 61 | */ | ||
| 62 | rlan_ctx.rxmax = min_t(u16, vsi->max_frame, | ||
| 63 | ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); | ||
| 64 | |||
| 65 | /* Rx queue threshold in units of 64 */ | ||
| 66 | rlan_ctx.lrxqthresh = 1; | ||
| 67 | |||
| 68 | /* Enable Flexible Descriptors in the queue context which | ||
| 69 | * allows this driver to select a specific receive descriptor format | ||
| 70 | */ | ||
| 71 | regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); | ||
| 72 | regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & | ||
| 73 | QRXFLXP_CNTXT_RXDID_IDX_M; | ||
| 74 | |||
| 75 | /* increasing context priority to pick up profile id; | ||
| 76 | * default is 0x01; setting to 0x03 to ensure profile | ||
| 77 | * is programming if prev context is of same priority | ||
| 78 | */ | ||
| 79 | regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & | ||
| 80 | QRXFLXP_CNTXT_RXDID_PRIO_M; | ||
| 81 | |||
| 82 | wr32(hw, QRXFLXP_CNTXT(pf_q), regval); | ||
| 83 | |||
| 84 | /* Absolute queue number out of 2K needs to be passed */ | ||
| 85 | err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); | ||
| 86 | if (err) { | ||
| 87 | dev_err(&vsi->back->pdev->dev, | ||
| 88 | "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", | ||
| 89 | pf_q, err); | ||
| 90 | return -EIO; | ||
| 91 | } | ||
| 92 | |||
| 93 | /* init queue specific tail register */ | ||
| 94 | ring->tail = hw->hw_addr + QRX_TAIL(pf_q); | ||
| 95 | writel(0, ring->tail); | ||
| 96 | ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | /** | ||
| 102 | * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance | ||
| 103 | * @ring: The Tx ring to configure | ||
| 104 | * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized | ||
| 105 | * @pf_q: queue index in the PF space | ||
| 106 | * | ||
| 107 | * Configure the Tx descriptor ring in TLAN context. | ||
| 108 | */ | ||
| 109 | static void | ||
| 110 | ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) | ||
| 111 | { | ||
| 112 | struct ice_vsi *vsi = ring->vsi; | ||
| 113 | struct ice_hw *hw = &vsi->back->hw; | ||
| 114 | |||
| 115 | tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; | ||
| 116 | |||
| 117 | tlan_ctx->port_num = vsi->port_info->lport; | ||
| 118 | |||
| 119 | /* Transmit Queue Length */ | ||
| 120 | tlan_ctx->qlen = ring->count; | ||
| 121 | |||
| 122 | /* PF number */ | ||
| 123 | tlan_ctx->pf_num = hw->pf_id; | ||
| 124 | |||
| 125 | /* queue belongs to a specific VSI type | ||
| 126 | * VF / VM index should be programmed per vmvf_type setting: | ||
| 127 | * for vmvf_type = VF, it is VF number between 0-256 | ||
| 128 | * for vmvf_type = VM, it is VM number between 0-767 | ||
| 129 | * for PF or EMP this field should be set to zero | ||
| 130 | */ | ||
| 131 | switch (vsi->type) { | ||
| 132 | case ICE_VSI_PF: | ||
| 133 | tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; | ||
| 134 | break; | ||
| 135 | default: | ||
| 136 | return; | ||
| 137 | } | ||
| 138 | |||
| 139 | /* make sure the context is associated with the right VSI */ | ||
| 140 | tlan_ctx->src_vsi = vsi->vsi_num; | ||
| 141 | |||
| 142 | tlan_ctx->tso_ena = ICE_TX_LEGACY; | ||
| 143 | tlan_ctx->tso_qnum = pf_q; | ||
| 144 | |||
| 145 | /* Legacy or Advanced Host Interface: | ||
| 146 | * 0: Advanced Host Interface | ||
| 147 | * 1: Legacy Host Interface | ||
| 148 | */ | ||
| 149 | tlan_ctx->legacy_int = ICE_TX_LEGACY; | ||
| 150 | } | ||
| 151 | |||
| 152 | /** | ||
| 153 | * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled | ||
| 154 | * @pf: the PF being configured | ||
| 155 | * @pf_q: the PF queue | ||
| 156 | * @ena: enable or disable state of the queue | ||
| 157 | * | ||
| 158 | * This routine will wait for the given Rx queue of the PF to reach the | ||
| 159 | * enabled or disabled state. | ||
| 160 | * Returns -ETIMEDOUT in case of failing to reach the requested state after | ||
| 161 | * multiple retries; else will return 0 in case of success. | ||
| 162 | */ | ||
| 163 | static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) | ||
| 164 | { | ||
| 165 | int i; | ||
| 166 | |||
| 167 | for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { | ||
| 168 | u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); | ||
| 169 | |||
| 170 | if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) | ||
| 171 | break; | ||
| 172 | |||
| 173 | usleep_range(10, 20); | ||
| 174 | } | ||
| 175 | if (i >= ICE_Q_WAIT_RETRY_LIMIT) | ||
| 176 | return -ETIMEDOUT; | ||
| 177 | |||
| 178 | return 0; | ||
| 179 | } | ||
| 180 | |||
| 181 | /** | ||
| 182 | * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings | ||
| 183 | * @vsi: the VSI being configured | ||
| 184 | * @ena: start or stop the Rx rings | ||
| 185 | */ | ||
| 186 | static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) | ||
| 187 | { | ||
| 188 | struct ice_pf *pf = vsi->back; | ||
| 189 | struct ice_hw *hw = &pf->hw; | ||
| 190 | int i, j, ret = 0; | ||
| 191 | |||
| 192 | for (i = 0; i < vsi->num_rxq; i++) { | ||
| 193 | int pf_q = vsi->rxq_map[i]; | ||
| 194 | u32 rx_reg; | ||
| 195 | |||
| 196 | for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { | ||
| 197 | rx_reg = rd32(hw, QRX_CTRL(pf_q)); | ||
| 198 | if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == | ||
| 199 | ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) | ||
| 200 | break; | ||
| 201 | usleep_range(1000, 2000); | ||
| 202 | } | ||
| 203 | |||
| 204 | /* Skip if the queue is already in the requested state */ | ||
| 205 | if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) | ||
| 206 | continue; | ||
| 207 | |||
| 208 | /* turn on/off the queue */ | ||
| 209 | if (ena) | ||
| 210 | rx_reg |= QRX_CTRL_QENA_REQ_M; | ||
| 211 | else | ||
| 212 | rx_reg &= ~QRX_CTRL_QENA_REQ_M; | ||
| 213 | wr32(hw, QRX_CTRL(pf_q), rx_reg); | ||
| 214 | |||
| 215 | /* wait for the change to finish */ | ||
| 216 | ret = ice_pf_rxq_wait(pf, pf_q, ena); | ||
| 217 | if (ret) { | ||
| 218 | dev_err(&pf->pdev->dev, | ||
| 219 | "VSI idx %d Rx ring %d %sable timeout\n", | ||
| 220 | vsi->idx, pf_q, (ena ? "en" : "dis")); | ||
| 221 | break; | ||
| 222 | } | ||
| 223 | } | ||
| 224 | |||
| 225 | return ret; | ||
| 226 | } | ||
| 227 | |||
| 228 | /** | ||
| 8 | * ice_add_mac_to_list - Add a mac address filter entry to the list | 229 | * ice_add_mac_to_list - Add a mac address filter entry to the list |
| 9 | * @vsi: the VSI to be forwarded to | 230 | * @vsi: the VSI to be forwarded to |
| 10 | * @add_list: pointer to the list which contains MAC filter entries | 231 | * @add_list: pointer to the list which contains MAC filter entries |
@@ -186,6 +407,174 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
| 186 | } | 407 | } |
| 187 | 408 | ||
| 188 | /** | 409 | /** |
| 410 | * ice_vsi_cfg_rxqs - Configure the VSI for Rx | ||
| 411 | * @vsi: the VSI being configured | ||
| 412 | * | ||
| 413 | * Return 0 on success and a negative value on error | ||
| 414 | * Configure the Rx VSI for operation. | ||
| 415 | */ | ||
| 416 | int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) | ||
| 417 | { | ||
| 418 | int err = 0; | ||
| 419 | u16 i; | ||
| 420 | |||
| 421 | if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) | ||
| 422 | vsi->max_frame = vsi->netdev->mtu + | ||
| 423 | ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | ||
| 424 | else | ||
| 425 | vsi->max_frame = ICE_RXBUF_2048; | ||
| 426 | |||
| 427 | vsi->rx_buf_len = ICE_RXBUF_2048; | ||
| 428 | /* set up individual rings */ | ||
| 429 | for (i = 0; i < vsi->num_rxq && !err; i++) | ||
| 430 | err = ice_setup_rx_ctx(vsi->rx_rings[i]); | ||
| 431 | |||
| 432 | if (err) { | ||
| 433 | dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); | ||
| 434 | return -EIO; | ||
| 435 | } | ||
| 436 | return err; | ||
| 437 | } | ||
| 438 | |||
| 439 | /** | ||
| 440 | * ice_vsi_cfg_txqs - Configure the VSI for Tx | ||
| 441 | * @vsi: the VSI being configured | ||
| 442 | * | ||
| 443 | * Return 0 on success and a negative value on error | ||
| 444 | * Configure the Tx VSI for operation. | ||
| 445 | */ | ||
| 446 | int ice_vsi_cfg_txqs(struct ice_vsi *vsi) | ||
| 447 | { | ||
| 448 | struct ice_aqc_add_tx_qgrp *qg_buf; | ||
| 449 | struct ice_aqc_add_txqs_perq *txq; | ||
| 450 | struct ice_pf *pf = vsi->back; | ||
| 451 | enum ice_status status; | ||
| 452 | u16 buf_len, i, pf_q; | ||
| 453 | int err = 0, tc = 0; | ||
| 454 | u8 num_q_grps; | ||
| 455 | |||
| 456 | buf_len = sizeof(struct ice_aqc_add_tx_qgrp); | ||
| 457 | qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); | ||
| 458 | if (!qg_buf) | ||
| 459 | return -ENOMEM; | ||
| 460 | |||
| 461 | if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { | ||
| 462 | err = -EINVAL; | ||
| 463 | goto err_cfg_txqs; | ||
| 464 | } | ||
| 465 | qg_buf->num_txqs = 1; | ||
| 466 | num_q_grps = 1; | ||
| 467 | |||
| 468 | /* set up and configure the Tx queues */ | ||
| 469 | ice_for_each_txq(vsi, i) { | ||
| 470 | struct ice_tlan_ctx tlan_ctx = { 0 }; | ||
| 471 | |||
| 472 | pf_q = vsi->txq_map[i]; | ||
| 473 | ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); | ||
| 474 | /* copy context contents into the qg_buf */ | ||
| 475 | qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); | ||
| 476 | ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, | ||
| 477 | ice_tlan_ctx_info); | ||
| 478 | |||
| 479 | /* init queue specific tail reg. It is referred as transmit | ||
| 480 | * comm scheduler queue doorbell. | ||
| 481 | */ | ||
| 482 | vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); | ||
| 483 | status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, | ||
| 484 | num_q_grps, qg_buf, buf_len, NULL); | ||
| 485 | if (status) { | ||
| 486 | dev_err(&vsi->back->pdev->dev, | ||
| 487 | "Failed to set LAN Tx queue context, error: %d\n", | ||
| 488 | status); | ||
| 489 | err = -ENODEV; | ||
| 490 | goto err_cfg_txqs; | ||
| 491 | } | ||
| 492 | |||
| 493 | /* Add Tx Queue TEID into the VSI Tx ring from the response | ||
| 494 | * This will complete configuring and enabling the queue. | ||
| 495 | */ | ||
| 496 | txq = &qg_buf->txqs[0]; | ||
| 497 | if (pf_q == le16_to_cpu(txq->txq_id)) | ||
| 498 | vsi->tx_rings[i]->txq_teid = | ||
| 499 | le32_to_cpu(txq->q_teid); | ||
| 500 | } | ||
| 501 | err_cfg_txqs: | ||
| 502 | devm_kfree(&pf->pdev->dev, qg_buf); | ||
| 503 | return err; | ||
| 504 | } | ||
| 505 | |||
| 506 | /** | ||
| 507 | * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW | ||
| 508 | * @vsi: the VSI being configured | ||
| 509 | */ | ||
| 510 | void ice_vsi_cfg_msix(struct ice_vsi *vsi) | ||
| 511 | { | ||
| 512 | struct ice_pf *pf = vsi->back; | ||
| 513 | u16 vector = vsi->base_vector; | ||
| 514 | struct ice_hw *hw = &pf->hw; | ||
| 515 | u32 txq = 0, rxq = 0; | ||
| 516 | int i, q, itr; | ||
| 517 | u8 itr_gran; | ||
| 518 | |||
| 519 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { | ||
| 520 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; | ||
| 521 | |||
| 522 | itr_gran = hw->itr_gran_200; | ||
| 523 | |||
| 524 | if (q_vector->num_ring_rx) { | ||
| 525 | q_vector->rx.itr = | ||
| 526 | ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, | ||
| 527 | itr_gran); | ||
| 528 | q_vector->rx.latency_range = ICE_LOW_LATENCY; | ||
| 529 | } | ||
| 530 | |||
| 531 | if (q_vector->num_ring_tx) { | ||
| 532 | q_vector->tx.itr = | ||
| 533 | ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, | ||
| 534 | itr_gran); | ||
| 535 | q_vector->tx.latency_range = ICE_LOW_LATENCY; | ||
| 536 | } | ||
| 537 | wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); | ||
| 538 | wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); | ||
| 539 | |||
| 540 | /* Both Transmit Queue Interrupt Cause Control register | ||
| 541 | * and Receive Queue Interrupt Cause control register | ||
| 542 | * expects MSIX_INDX field to be the vector index | ||
| 543 | * within the function space and not the absolute | ||
| 544 | * vector index across PF or across device. | ||
| 545 | * For SR-IOV VF VSIs queue vector index always starts | ||
| 546 | * with 1 since first vector index(0) is used for OICR | ||
| 547 | * in VF space. Since VMDq and other PF VSIs are within | ||
| 548 | * the PF function space, use the vector index that is | ||
| 549 | * tracked for this PF. | ||
| 550 | */ | ||
| 551 | for (q = 0; q < q_vector->num_ring_tx; q++) { | ||
| 552 | u32 val; | ||
| 553 | |||
| 554 | itr = ICE_ITR_NONE; | ||
| 555 | val = QINT_TQCTL_CAUSE_ENA_M | | ||
| 556 | (itr << QINT_TQCTL_ITR_INDX_S) | | ||
| 557 | (vector << QINT_TQCTL_MSIX_INDX_S); | ||
| 558 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); | ||
| 559 | txq++; | ||
| 560 | } | ||
| 561 | |||
| 562 | for (q = 0; q < q_vector->num_ring_rx; q++) { | ||
| 563 | u32 val; | ||
| 564 | |||
| 565 | itr = ICE_ITR_NONE; | ||
| 566 | val = QINT_RQCTL_CAUSE_ENA_M | | ||
| 567 | (itr << QINT_RQCTL_ITR_INDX_S) | | ||
| 568 | (vector << QINT_RQCTL_MSIX_INDX_S); | ||
| 569 | wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); | ||
| 570 | rxq++; | ||
| 571 | } | ||
| 572 | } | ||
| 573 | |||
| 574 | ice_flush(hw); | ||
| 575 | } | ||
| 576 | |||
| 577 | /** | ||
| 189 | * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx | 578 | * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx |
| 190 | * @vsi: the VSI being changed | 579 | * @vsi: the VSI being changed |
| 191 | */ | 580 | */ |
@@ -256,3 +645,105 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
| 256 | vsi->info.vlan_flags = ctxt.info.vlan_flags; | 645 | vsi->info.vlan_flags = ctxt.info.vlan_flags; |
| 257 | return 0; | 646 | return 0; |
| 258 | } | 647 | } |
| 648 | |||
| 649 | /** | ||
| 650 | * ice_vsi_start_rx_rings - start VSI's Rx rings | ||
| 651 | * @vsi: the VSI whose rings are to be started | ||
| 652 | * | ||
| 653 | * Returns 0 on success and a negative value on error | ||
| 654 | */ | ||
| 655 | int ice_vsi_start_rx_rings(struct ice_vsi *vsi) | ||
| 656 | { | ||
| 657 | return ice_vsi_ctrl_rx_rings(vsi, true); | ||
| 658 | } | ||
| 659 | |||
| 660 | /** | ||
| 661 | * ice_vsi_stop_rx_rings - stop VSI's Rx rings | ||
| 662 | * @vsi: the VSI | ||
| 663 | * | ||
| 664 | * Returns 0 on success and a negative value on error | ||
| 665 | */ | ||
| 666 | int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) | ||
| 667 | { | ||
| 668 | return ice_vsi_ctrl_rx_rings(vsi, false); | ||
| 669 | } | ||
| 670 | |||
| 671 | /** | ||
| 672 | * ice_vsi_stop_tx_rings - Disable Tx rings | ||
| 673 | * @vsi: the VSI being configured | ||
| 674 | */ | ||
| 675 | int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) | ||
| 676 | { | ||
| 677 | struct ice_pf *pf = vsi->back; | ||
| 678 | struct ice_hw *hw = &pf->hw; | ||
| 679 | enum ice_status status; | ||
| 680 | u32 *q_teids, val; | ||
| 681 | u16 *q_ids, i; | ||
| 682 | int err = 0; | ||
| 683 | |||
| 684 | if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) | ||
| 685 | return -EINVAL; | ||
| 686 | |||
| 687 | q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), | ||
| 688 | GFP_KERNEL); | ||
| 689 | if (!q_teids) | ||
| 690 | return -ENOMEM; | ||
| 691 | |||
| 692 | q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), | ||
| 693 | GFP_KERNEL); | ||
| 694 | if (!q_ids) { | ||
| 695 | err = -ENOMEM; | ||
| 696 | goto err_alloc_q_ids; | ||
| 697 | } | ||
| 698 | |||
| 699 | /* set up the Tx queue list to be disabled */ | ||
| 700 | ice_for_each_txq(vsi, i) { | ||
| 701 | u16 v_idx; | ||
| 702 | |||
| 703 | if (!vsi->tx_rings || !vsi->tx_rings[i]) { | ||
| 704 | err = -EINVAL; | ||
| 705 | goto err_out; | ||
| 706 | } | ||
| 707 | |||
| 708 | q_ids[i] = vsi->txq_map[i]; | ||
| 709 | q_teids[i] = vsi->tx_rings[i]->txq_teid; | ||
| 710 | |||
| 711 | /* clear cause_ena bit for disabled queues */ | ||
| 712 | val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); | ||
| 713 | val &= ~QINT_TQCTL_CAUSE_ENA_M; | ||
| 714 | wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); | ||
| 715 | |||
| 716 | /* software is expected to wait for 100 ns */ | ||
| 717 | ndelay(100); | ||
| 718 | |||
| 719 | /* trigger a software interrupt for the vector associated to | ||
| 720 | * the queue to schedule NAPI handler | ||
| 721 | */ | ||
| 722 | v_idx = vsi->tx_rings[i]->q_vector->v_idx; | ||
| 723 | wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), | ||
| 724 | GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); | ||
| 725 | } | ||
| 726 | status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, | ||
| 727 | NULL); | ||
| 728 | /* if the disable queue command was exercised during an active reset | ||
| 729 | * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as | ||
| 730 | * the reset operation disables queues at the hardware level anyway. | ||
| 731 | */ | ||
| 732 | if (status == ICE_ERR_RESET_ONGOING) { | ||
| 733 | dev_info(&pf->pdev->dev, | ||
| 734 | "Reset in progress. LAN Tx queues already disabled\n"); | ||
| 735 | } else if (status) { | ||
| 736 | dev_err(&pf->pdev->dev, | ||
| 737 | "Failed to disable LAN Tx queues, error: %d\n", | ||
| 738 | status); | ||
| 739 | err = -ENODEV; | ||
| 740 | } | ||
| 741 | |||
| 742 | err_out: | ||
| 743 | devm_kfree(&pf->pdev->dev, q_ids); | ||
| 744 | |||
| 745 | err_alloc_q_ids: | ||
| 746 | devm_kfree(&pf->pdev->dev, q_teids); | ||
| 747 | |||
| 748 | return err; | ||
| 749 | } | ||
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index c10874d26eee..ad4257929b9b 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -13,6 +13,12 @@ void ice_free_fltr_list(struct device *dev, struct list_head *h);
| 13 | 13 | ||
| 14 | void ice_update_eth_stats(struct ice_vsi *vsi); | 14 | void ice_update_eth_stats(struct ice_vsi *vsi); |
| 15 | 15 | ||
| 16 | int ice_vsi_cfg_rxqs(struct ice_vsi *vsi); | ||
| 17 | |||
| 18 | int ice_vsi_cfg_txqs(struct ice_vsi *vsi); | ||
| 19 | |||
| 20 | void ice_vsi_cfg_msix(struct ice_vsi *vsi); | ||
| 21 | |||
| 16 | int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); | 22 | int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); |
| 17 | 23 | ||
| 18 | int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); | 24 | int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); |
@@ -20,4 +26,11 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
| 20 | int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi); | 26 | int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi); |
| 21 | 27 | ||
| 22 | int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena); | 28 | int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena); |
| 29 | |||
| 30 | int ice_vsi_start_rx_rings(struct ice_vsi *vsi); | ||
| 31 | |||
| 32 | int ice_vsi_stop_rx_rings(struct ice_vsi *vsi); | ||
| 33 | |||
| 34 | int ice_vsi_stop_tx_rings(struct ice_vsi *vsi); | ||
| 35 | |||
| 23 | #endif /* !_ICE_LIB_H_ */ | 36 | #endif /* !_ICE_LIB_H_ */ |
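For context, the helpers newly exported above are meant to be driven from ice_main.c in the usual VSI bring-up order (Tx contexts, then Rx contexts, then interrupt mapping, then Rx enable). A minimal sketch of a hypothetical caller, not part of this patch; the surrounding VSI allocation and IRQ request paths are assumed to exist elsewhere:

	/* example_ice_vsi_bring_up() is a hypothetical illustration only */
	static int example_ice_vsi_bring_up(struct ice_vsi *vsi)
	{
		int err;

		/* program the Tx queue contexts via the AQ, then the Rx contexts */
		err = ice_vsi_cfg_txqs(vsi);
		if (!err)
			err = ice_vsi_cfg_rxqs(vsi);
		if (err)
			return err;

		/* map queues onto MSI-X vectors and write the ITR settings */
		ice_vsi_cfg_msix(vsi);

		/* finally ask the hardware to start the Rx queues */
		return ice_vsi_start_rx_rings(vsi);
	}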
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 2673430ec2e9..ececf3dabf7e 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1845,77 +1845,6 @@ static void ice_vsi_free_irq(struct ice_vsi *vsi)
| 1845 | } | 1845 | } |
| 1846 | 1846 | ||
| 1847 | /** | 1847 | /** |
| 1848 | * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW | ||
| 1849 | * @vsi: the VSI being configured | ||
| 1850 | */ | ||
| 1851 | static void ice_vsi_cfg_msix(struct ice_vsi *vsi) | ||
| 1852 | { | ||
| 1853 | struct ice_pf *pf = vsi->back; | ||
| 1854 | u16 vector = vsi->base_vector; | ||
| 1855 | struct ice_hw *hw = &pf->hw; | ||
| 1856 | u32 txq = 0, rxq = 0; | ||
| 1857 | int i, q, itr; | ||
| 1858 | u8 itr_gran; | ||
| 1859 | |||
| 1860 | for (i = 0; i < vsi->num_q_vectors; i++, vector++) { | ||
| 1861 | struct ice_q_vector *q_vector = vsi->q_vectors[i]; | ||
| 1862 | |||
| 1863 | itr_gran = hw->itr_gran_200; | ||
| 1864 | |||
| 1865 | if (q_vector->num_ring_rx) { | ||
| 1866 | q_vector->rx.itr = | ||
| 1867 | ITR_TO_REG(vsi->rx_rings[rxq]->rx_itr_setting, | ||
| 1868 | itr_gran); | ||
| 1869 | q_vector->rx.latency_range = ICE_LOW_LATENCY; | ||
| 1870 | } | ||
| 1871 | |||
| 1872 | if (q_vector->num_ring_tx) { | ||
| 1873 | q_vector->tx.itr = | ||
| 1874 | ITR_TO_REG(vsi->tx_rings[txq]->tx_itr_setting, | ||
| 1875 | itr_gran); | ||
| 1876 | q_vector->tx.latency_range = ICE_LOW_LATENCY; | ||
| 1877 | } | ||
| 1878 | wr32(hw, GLINT_ITR(ICE_RX_ITR, vector), q_vector->rx.itr); | ||
| 1879 | wr32(hw, GLINT_ITR(ICE_TX_ITR, vector), q_vector->tx.itr); | ||
| 1880 | |||
| 1881 | /* Both Transmit Queue Interrupt Cause Control register | ||
| 1882 | * and Receive Queue Interrupt Cause control register | ||
| 1883 | * expects MSIX_INDX field to be the vector index | ||
| 1884 | * within the function space and not the absolute | ||
| 1885 | * vector index across PF or across device. | ||
| 1886 | * For SR-IOV VF VSIs queue vector index always starts | ||
| 1887 | * with 1 since first vector index(0) is used for OICR | ||
| 1888 | * in VF space. Since VMDq and other PF VSIs are withtin | ||
| 1889 | * the PF function space, use the vector index thats | ||
| 1890 | * tracked for this PF. | ||
| 1891 | */ | ||
| 1892 | for (q = 0; q < q_vector->num_ring_tx; q++) { | ||
| 1893 | u32 val; | ||
| 1894 | |||
| 1895 | itr = ICE_TX_ITR; | ||
| 1896 | val = QINT_TQCTL_CAUSE_ENA_M | | ||
| 1897 | (itr << QINT_TQCTL_ITR_INDX_S) | | ||
| 1898 | (vector << QINT_TQCTL_MSIX_INDX_S); | ||
| 1899 | wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val); | ||
| 1900 | txq++; | ||
| 1901 | } | ||
| 1902 | |||
| 1903 | for (q = 0; q < q_vector->num_ring_rx; q++) { | ||
| 1904 | u32 val; | ||
| 1905 | |||
| 1906 | itr = ICE_RX_ITR; | ||
| 1907 | val = QINT_RQCTL_CAUSE_ENA_M | | ||
| 1908 | (itr << QINT_RQCTL_ITR_INDX_S) | | ||
| 1909 | (vector << QINT_RQCTL_MSIX_INDX_S); | ||
| 1910 | wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val); | ||
| 1911 | rxq++; | ||
| 1912 | } | ||
| 1913 | } | ||
| 1914 | |||
| 1915 | ice_flush(hw); | ||
| 1916 | } | ||
| 1917 | |||
| 1918 | /** | ||
| 1919 | * ice_ena_misc_vector - enable the non-queue interrupts | 1848 | * ice_ena_misc_vector - enable the non-queue interrupts |
| 1920 | * @pf: board private structure | 1849 | * @pf: board private structure |
| 1921 | */ | 1850 | */ |
@@ -3967,248 +3896,6 @@ static int ice_restore_vlan(struct ice_vsi *vsi)
| 3967 | } | 3896 | } |
| 3968 | 3897 | ||
| 3969 | /** | 3898 | /** |
| 3970 | * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance | ||
| 3971 | * @ring: The Tx ring to configure | ||
| 3972 | * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized | ||
| 3973 | * @pf_q: queue index in the PF space | ||
| 3974 | * | ||
| 3975 | * Configure the Tx descriptor ring in TLAN context. | ||
| 3976 | */ | ||
| 3977 | static void | ||
| 3978 | ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q) | ||
| 3979 | { | ||
| 3980 | struct ice_vsi *vsi = ring->vsi; | ||
| 3981 | struct ice_hw *hw = &vsi->back->hw; | ||
| 3982 | |||
| 3983 | tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S; | ||
| 3984 | |||
| 3985 | tlan_ctx->port_num = vsi->port_info->lport; | ||
| 3986 | |||
| 3987 | /* Transmit Queue Length */ | ||
| 3988 | tlan_ctx->qlen = ring->count; | ||
| 3989 | |||
| 3990 | /* PF number */ | ||
| 3991 | tlan_ctx->pf_num = hw->pf_id; | ||
| 3992 | |||
| 3993 | /* queue belongs to a specific VSI type | ||
| 3994 | * VF / VM index should be programmed per vmvf_type setting: | ||
| 3995 | * for vmvf_type = VF, it is VF number between 0-256 | ||
| 3996 | * for vmvf_type = VM, it is VM number between 0-767 | ||
| 3997 | * for PF or EMP this field should be set to zero | ||
| 3998 | */ | ||
| 3999 | switch (vsi->type) { | ||
| 4000 | case ICE_VSI_PF: | ||
| 4001 | tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF; | ||
| 4002 | break; | ||
| 4003 | default: | ||
| 4004 | return; | ||
| 4005 | } | ||
| 4006 | |||
| 4007 | /* make sure the context is associated with the right VSI */ | ||
| 4008 | tlan_ctx->src_vsi = vsi->vsi_num; | ||
| 4009 | |||
| 4010 | tlan_ctx->tso_ena = ICE_TX_LEGACY; | ||
| 4011 | tlan_ctx->tso_qnum = pf_q; | ||
| 4012 | |||
| 4013 | /* Legacy or Advanced Host Interface: | ||
| 4014 | * 0: Advanced Host Interface | ||
| 4015 | * 1: Legacy Host Interface | ||
| 4016 | */ | ||
| 4017 | tlan_ctx->legacy_int = ICE_TX_LEGACY; | ||
| 4018 | } | ||
| 4019 | |||
| 4020 | /** | ||
| 4021 | * ice_vsi_cfg_txqs - Configure the VSI for Tx | ||
| 4022 | * @vsi: the VSI being configured | ||
| 4023 | * | ||
| 4024 | * Return 0 on success and a negative value on error | ||
| 4025 | * Configure the Tx VSI for operation. | ||
| 4026 | */ | ||
| 4027 | static int ice_vsi_cfg_txqs(struct ice_vsi *vsi) | ||
| 4028 | { | ||
| 4029 | struct ice_aqc_add_tx_qgrp *qg_buf; | ||
| 4030 | struct ice_aqc_add_txqs_perq *txq; | ||
| 4031 | struct ice_pf *pf = vsi->back; | ||
| 4032 | enum ice_status status; | ||
| 4033 | u16 buf_len, i, pf_q; | ||
| 4034 | int err = 0, tc = 0; | ||
| 4035 | u8 num_q_grps; | ||
| 4036 | |||
| 4037 | buf_len = sizeof(struct ice_aqc_add_tx_qgrp); | ||
| 4038 | qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); | ||
| 4039 | if (!qg_buf) | ||
| 4040 | return -ENOMEM; | ||
| 4041 | |||
| 4042 | if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { | ||
| 4043 | err = -EINVAL; | ||
| 4044 | goto err_cfg_txqs; | ||
| 4045 | } | ||
| 4046 | qg_buf->num_txqs = 1; | ||
| 4047 | num_q_grps = 1; | ||
| 4048 | |||
| 4049 | /* set up and configure the tx queues */ | ||
| 4050 | ice_for_each_txq(vsi, i) { | ||
| 4051 | struct ice_tlan_ctx tlan_ctx = { 0 }; | ||
| 4052 | |||
| 4053 | pf_q = vsi->txq_map[i]; | ||
| 4054 | ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); | ||
| 4055 | /* copy context contents into the qg_buf */ | ||
| 4056 | qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); | ||
| 4057 | ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, | ||
| 4058 | ice_tlan_ctx_info); | ||
| 4059 | |||
| 4060 | /* init queue specific tail reg. It is referred as transmit | ||
| 4061 | * comm scheduler queue doorbell. | ||
| 4062 | */ | ||
| 4063 | vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); | ||
| 4064 | status = ice_ena_vsi_txq(vsi->port_info, vsi->vsi_num, tc, | ||
| 4065 | num_q_grps, qg_buf, buf_len, NULL); | ||
| 4066 | if (status) { | ||
| 4067 | dev_err(&vsi->back->pdev->dev, | ||
| 4068 | "Failed to set LAN Tx queue context, error: %d\n", | ||
| 4069 | status); | ||
| 4070 | err = -ENODEV; | ||
| 4071 | goto err_cfg_txqs; | ||
| 4072 | } | ||
| 4073 | |||
| 4074 | /* Add Tx Queue TEID into the VSI tx ring from the response | ||
| 4075 | * This will complete configuring and enabling the queue. | ||
| 4076 | */ | ||
| 4077 | txq = &qg_buf->txqs[0]; | ||
| 4078 | if (pf_q == le16_to_cpu(txq->txq_id)) | ||
| 4079 | vsi->tx_rings[i]->txq_teid = | ||
| 4080 | le32_to_cpu(txq->q_teid); | ||
| 4081 | } | ||
| 4082 | err_cfg_txqs: | ||
| 4083 | devm_kfree(&pf->pdev->dev, qg_buf); | ||
| 4084 | return err; | ||
| 4085 | } | ||
| 4086 | |||
| 4087 | /** | ||
| 4088 | * ice_setup_rx_ctx - Configure a receive ring context | ||
| 4089 | * @ring: The Rx ring to configure | ||
| 4090 | * | ||
| 4091 | * Configure the Rx descriptor ring in RLAN context. | ||
| 4092 | */ | ||
| 4093 | static int ice_setup_rx_ctx(struct ice_ring *ring) | ||
| 4094 | { | ||
| 4095 | struct ice_vsi *vsi = ring->vsi; | ||
| 4096 | struct ice_hw *hw = &vsi->back->hw; | ||
| 4097 | u32 rxdid = ICE_RXDID_FLEX_NIC; | ||
| 4098 | struct ice_rlan_ctx rlan_ctx; | ||
| 4099 | u32 regval; | ||
| 4100 | u16 pf_q; | ||
| 4101 | int err; | ||
| 4102 | |||
| 4103 | /* what is RX queue number in global space of 2K rx queues */ | ||
| 4104 | pf_q = vsi->rxq_map[ring->q_index]; | ||
| 4105 | |||
| 4106 | /* clear the context structure first */ | ||
| 4107 | memset(&rlan_ctx, 0, sizeof(rlan_ctx)); | ||
| 4108 | |||
| 4109 | rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S; | ||
| 4110 | |||
| 4111 | rlan_ctx.qlen = ring->count; | ||
| 4112 | |||
| 4113 | /* Receive Packet Data Buffer Size. | ||
| 4114 | * The Packet Data Buffer Size is defined in 128 byte units. | ||
| 4115 | */ | ||
| 4116 | rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; | ||
| 4117 | |||
| 4118 | /* use 32 byte descriptors */ | ||
| 4119 | rlan_ctx.dsize = 1; | ||
| 4120 | |||
| 4121 | /* Strip the Ethernet CRC bytes before the packet is posted to host | ||
| 4122 | * memory. | ||
| 4123 | */ | ||
| 4124 | rlan_ctx.crcstrip = 1; | ||
| 4125 | |||
| 4126 | /* L2TSEL flag defines the reported L2 Tags in the receive descriptor */ | ||
| 4127 | rlan_ctx.l2tsel = 1; | ||
| 4128 | |||
| 4129 | rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT; | ||
| 4130 | rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT; | ||
| 4131 | rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT; | ||
| 4132 | |||
| 4133 | /* This controls whether VLAN is stripped from inner headers | ||
| 4134 | * The VLAN in the inner L2 header is stripped to the receive | ||
| 4135 | * descriptor if enabled by this flag. | ||
| 4136 | */ | ||
| 4137 | rlan_ctx.showiv = 0; | ||
| 4138 | |||
| 4139 | /* Max packet size for this queue - must not be set to a larger value | ||
| 4140 | * than 5 x DBUF | ||
| 4141 | */ | ||
| 4142 | rlan_ctx.rxmax = min_t(u16, vsi->max_frame, | ||
| 4143 | ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len); | ||
| 4144 | |||
| 4145 | /* Rx queue threshold in units of 64 */ | ||
| 4146 | rlan_ctx.lrxqthresh = 1; | ||
| 4147 | |||
| 4148 | /* Enable Flexible Descriptors in the queue context which | ||
| 4149 | * allows this driver to select a specific receive descriptor format | ||
| 4150 | */ | ||
| 4151 | regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); | ||
| 4152 | regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) & | ||
| 4153 | QRXFLXP_CNTXT_RXDID_IDX_M; | ||
| 4154 | |||
| 4155 | /* increasing context priority to pick up profile id; | ||
| 4156 | * default is 0x01; setting to 0x03 to ensure profile | ||
| 4157 | * is programming if prev context is of same priority | ||
| 4158 | */ | ||
| 4159 | regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) & | ||
| 4160 | QRXFLXP_CNTXT_RXDID_PRIO_M; | ||
| 4161 | |||
| 4162 | wr32(hw, QRXFLXP_CNTXT(pf_q), regval); | ||
| 4163 | |||
| 4164 | /* Absolute queue number out of 2K needs to be passed */ | ||
| 4165 | err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); | ||
| 4166 | if (err) { | ||
| 4167 | dev_err(&vsi->back->pdev->dev, | ||
| 4168 | "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n", | ||
| 4169 | pf_q, err); | ||
| 4170 | return -EIO; | ||
| 4171 | } | ||
| 4172 | |||
| 4173 | /* init queue specific tail register */ | ||
| 4174 | ring->tail = hw->hw_addr + QRX_TAIL(pf_q); | ||
| 4175 | writel(0, ring->tail); | ||
| 4176 | ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring)); | ||
| 4177 | |||
| 4178 | return 0; | ||
| 4179 | } | ||
| 4180 | |||
| 4181 | /** | ||
| 4182 | * ice_vsi_cfg_rxqs - Configure the VSI for Rx | ||
| 4183 | * @vsi: the VSI being configured | ||
| 4184 | * | ||
| 4185 | * Return 0 on success and a negative value on error | ||
| 4186 | * Configure the Rx VSI for operation. | ||
| 4187 | */ | ||
| 4188 | static int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) | ||
| 4189 | { | ||
| 4190 | int err = 0; | ||
| 4191 | u16 i; | ||
| 4192 | |||
| 4193 | if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN) | ||
| 4194 | vsi->max_frame = vsi->netdev->mtu + | ||
| 4195 | ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | ||
| 4196 | else | ||
| 4197 | vsi->max_frame = ICE_RXBUF_2048; | ||
| 4198 | |||
| 4199 | vsi->rx_buf_len = ICE_RXBUF_2048; | ||
| 4200 | /* set up individual rings */ | ||
| 4201 | for (i = 0; i < vsi->num_rxq && !err; i++) | ||
| 4202 | err = ice_setup_rx_ctx(vsi->rx_rings[i]); | ||
| 4203 | |||
| 4204 | if (err) { | ||
| 4205 | dev_err(&vsi->back->pdev->dev, "ice_setup_rx_ctx failed\n"); | ||
| 4206 | return -EIO; | ||
| 4207 | } | ||
| 4208 | return err; | ||
| 4209 | } | ||
| 4210 | |||
| 4211 | /** | ||
| 4212 | * ice_vsi_cfg - Setup the VSI | 3899 | * ice_vsi_cfg - Setup the VSI |
| 4213 | * @vsi: the VSI being configured | 3900 | * @vsi: the VSI being configured |
| 4214 | * | 3901 | * |
@@ -4233,207 +3920,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
| 4233 | } | 3920 | } |
| 4234 | 3921 | ||
| 4235 | /** | 3922 | /** |
| 4236 | * ice_vsi_stop_tx_rings - Disable Tx rings | ||
| 4237 | * @vsi: the VSI being configured | ||
| 4238 | */ | ||
| 4239 | static int ice_vsi_stop_tx_rings(struct ice_vsi *vsi) | ||
| 4240 | { | ||
| 4241 | struct ice_pf *pf = vsi->back; | ||
| 4242 | struct ice_hw *hw = &pf->hw; | ||
| 4243 | enum ice_status status; | ||
| 4244 | u32 *q_teids, val; | ||
| 4245 | u16 *q_ids, i; | ||
| 4246 | int err = 0; | ||
| 4247 | |||
| 4248 | if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS) | ||
| 4249 | return -EINVAL; | ||
| 4250 | |||
| 4251 | q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids), | ||
| 4252 | GFP_KERNEL); | ||
| 4253 | if (!q_teids) | ||
| 4254 | return -ENOMEM; | ||
| 4255 | |||
| 4256 | q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids), | ||
| 4257 | GFP_KERNEL); | ||
| 4258 | if (!q_ids) { | ||
| 4259 | err = -ENOMEM; | ||
| 4260 | goto err_alloc_q_ids; | ||
| 4261 | } | ||
| 4262 | |||
| 4263 | /* set up the tx queue list to be disabled */ | ||
| 4264 | ice_for_each_txq(vsi, i) { | ||
| 4265 | u16 v_idx; | ||
| 4266 | |||
| 4267 | if (!vsi->tx_rings || !vsi->tx_rings[i]) { | ||
| 4268 | err = -EINVAL; | ||
| 4269 | goto err_out; | ||
| 4270 | } | ||
| 4271 | |||
| 4272 | q_ids[i] = vsi->txq_map[i]; | ||
| 4273 | q_teids[i] = vsi->tx_rings[i]->txq_teid; | ||
| 4274 | |||
| 4275 | /* clear cause_ena bit for disabled queues */ | ||
| 4276 | val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); | ||
| 4277 | val &= ~QINT_TQCTL_CAUSE_ENA_M; | ||
| 4278 | wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); | ||
| 4279 | |||
| 4280 | /* software is expected to wait for 100 ns */ | ||
| 4281 | ndelay(100); | ||
| 4282 | |||
| 4283 | /* trigger a software interrupt for the vector associated to | ||
| 4284 | * the queue to schedule napi handler | ||
| 4285 | */ | ||
| 4286 | v_idx = vsi->tx_rings[i]->q_vector->v_idx; | ||
| 4287 | wr32(hw, GLINT_DYN_CTL(vsi->base_vector + v_idx), | ||
| 4288 | GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M); | ||
| 4289 | } | ||
| 4290 | status = ice_dis_vsi_txq(vsi->port_info, vsi->num_txq, q_ids, q_teids, | ||
| 4291 | NULL); | ||
| 4292 | /* if the disable queue command was exercised during an active reset | ||
| 4293 | * flow, ICE_ERR_RESET_ONGOING is returned. This is not an error as | ||
| 4294 | * the reset operation disables queues at the hardware level anyway. | ||
| 4295 | */ | ||
| 4296 | if (status == ICE_ERR_RESET_ONGOING) { | ||
| 4297 | dev_dbg(&pf->pdev->dev, | ||
| 4298 | "Reset in progress. LAN Tx queues already disabled\n"); | ||
| 4299 | } else if (status) { | ||
| 4300 | dev_err(&pf->pdev->dev, | ||
| 4301 | "Failed to disable LAN Tx queues, error: %d\n", | ||
| 4302 | status); | ||
| 4303 | err = -ENODEV; | ||
| 4304 | } | ||
| 4305 | |||
| 4306 | err_out: | ||
| 4307 | devm_kfree(&pf->pdev->dev, q_ids); | ||
| 4308 | |||
| 4309 | err_alloc_q_ids: | ||
| 4310 | devm_kfree(&pf->pdev->dev, q_teids); | ||
| 4311 | |||
| 4312 | return err; | ||
| 4313 | } | ||
| 4314 | |||
| 4315 | /** | ||
| 4316 | * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled | ||
| 4317 | * @pf: the PF being configured | ||
| 4318 | * @pf_q: the PF queue | ||
| 4319 | * @ena: enable or disable state of the queue | ||
| 4320 | * | ||
| 4321 | * This routine will wait for the given Rx queue of the PF to reach the | ||
| 4322 | * enabled or disabled state. | ||
| 4323 | * Returns -ETIMEDOUT in case of failing to reach the requested state after | ||
| 4324 | * multiple retries; else will return 0 in case of success. | ||
| 4325 | */ | ||
| 4326 | static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) | ||
| 4327 | { | ||
| 4328 | int i; | ||
| 4329 | |||
| 4330 | for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { | ||
| 4331 | u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); | ||
| 4332 | |||
| 4333 | if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) | ||
| 4334 | break; | ||
| 4335 | |||
| 4336 | usleep_range(10, 20); | ||
| 4337 | } | ||
| 4338 | if (i >= ICE_Q_WAIT_RETRY_LIMIT) | ||
| 4339 | return -ETIMEDOUT; | ||
| 4340 | |||
| 4341 | return 0; | ||
| 4342 | } | ||
| 4343 | |||
| 4344 | /** | ||
| 4345 | * ice_vsi_ctrl_rx_rings - Start or stop a VSI's rx rings | ||
| 4346 | * @vsi: the VSI being configured | ||
| 4347 | * @ena: start or stop the rx rings | ||
| 4348 | */ | ||
| 4349 | static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena) | ||
| 4350 | { | ||
| 4351 | struct ice_pf *pf = vsi->back; | ||
| 4352 | struct ice_hw *hw = &pf->hw; | ||
| 4353 | int i, j, ret = 0; | ||
| 4354 | |||
| 4355 | for (i = 0; i < vsi->num_rxq; i++) { | ||
| 4356 | int pf_q = vsi->rxq_map[i]; | ||
| 4357 | u32 rx_reg; | ||
| 4358 | |||
| 4359 | for (j = 0; j < ICE_Q_WAIT_MAX_RETRY; j++) { | ||
| 4360 | rx_reg = rd32(hw, QRX_CTRL(pf_q)); | ||
| 4361 | if (((rx_reg >> QRX_CTRL_QENA_REQ_S) & 1) == | ||
| 4362 | ((rx_reg >> QRX_CTRL_QENA_STAT_S) & 1)) | ||
| 4363 | break; | ||
| 4364 | usleep_range(1000, 2000); | ||
| 4365 | } | ||
| 4366 | |||
| 4367 | /* Skip if the queue is already in the requested state */ | ||
| 4368 | if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) | ||
| 4369 | continue; | ||
| 4370 | |||
| 4371 | /* turn on/off the queue */ | ||
| 4372 | if (ena) | ||
| 4373 | rx_reg |= QRX_CTRL_QENA_REQ_M; | ||
| 4374 | else | ||
| 4375 | rx_reg &= ~QRX_CTRL_QENA_REQ_M; | ||
| 4376 | wr32(hw, QRX_CTRL(pf_q), rx_reg); | ||
| 4377 | |||
| 4378 | /* wait for the change to finish */ | ||
| 4379 | ret = ice_pf_rxq_wait(pf, pf_q, ena); | ||
| 4380 | if (ret) { | ||
| 4381 | dev_err(&pf->pdev->dev, | ||
| 4382 | "VSI idx %d Rx ring %d %sable timeout\n", | ||
| 4383 | vsi->idx, pf_q, (ena ? "en" : "dis")); | ||
| 4384 | break; | ||
| 4385 | } | ||
| 4386 | } | ||
| 4387 | |||
| 4388 | return ret; | ||
| 4389 | } | ||
| 4390 | |||
| 4391 | /** | ||
| 4392 | * ice_vsi_start_rx_rings - start VSI's rx rings | ||
| 4393 | * @vsi: the VSI whose rings are to be started | ||
| 4394 | * | ||
| 4395 | * Returns 0 on success and a negative value on error | ||
| 4396 | */ | ||
| 4397 | static int ice_vsi_start_rx_rings(struct ice_vsi *vsi) | ||
| 4398 | { | ||
| 4399 | return ice_vsi_ctrl_rx_rings(vsi, true); | ||
| 4400 | } | ||
| 4401 | |||
| 4402 | /** | ||
| 4403 | * ice_vsi_stop_rx_rings - stop VSI's rx rings | ||
| 4404 | * @vsi: the VSI | ||
| 4405 | * | ||
| 4406 | * Returns 0 on success and a negative value on error | ||
| 4407 | */ | ||
| 4408 | static int ice_vsi_stop_rx_rings(struct ice_vsi *vsi) | ||
| 4409 | { | ||
| 4410 | return ice_vsi_ctrl_rx_rings(vsi, false); | ||
| 4411 | } | ||
| 4412 | |||
| 4413 | /** | ||
| 4414 | * ice_vsi_stop_tx_rx_rings - stop VSI's tx and rx rings | ||
| 4415 | * @vsi: the VSI | ||
| 4416 | * Returns 0 on success and a negative value on error | ||
| 4417 | */ | ||
| 4418 | static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi) | ||
| 4419 | { | ||
| 4420 | int err_tx, err_rx; | ||
| 4421 | |||
| 4422 | err_tx = ice_vsi_stop_tx_rings(vsi); | ||
| 4423 | if (err_tx) | ||
| 4424 | dev_dbg(&vsi->back->pdev->dev, "Failed to disable Tx rings\n"); | ||
| 4425 | |||
| 4426 | err_rx = ice_vsi_stop_rx_rings(vsi); | ||
| 4427 | if (err_rx) | ||
| 4428 | dev_dbg(&vsi->back->pdev->dev, "Failed to disable Rx rings\n"); | ||
| 4429 | |||
| 4430 | if (err_tx || err_rx) | ||
| 4431 | return -EIO; | ||
| 4432 | |||
| 4433 | return 0; | ||
| 4434 | } | ||
| 4435 | |||
| 4436 | /** | ||
| 4437 | * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI | 3923 | * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI |
| 4438 | * @vsi: the VSI being configured | 3924 | * @vsi: the VSI being configured |
| 4439 | */ | 3925 | */ |
@@ -4822,7 +4308,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
| 4822 | */ | 4308 | */ |
| 4823 | int ice_down(struct ice_vsi *vsi) | 4309 | int ice_down(struct ice_vsi *vsi) |
| 4824 | { | 4310 | { |
| 4825 | int i, err; | 4311 | int i, tx_err, rx_err; |
| 4826 | 4312 | ||
| 4827 | /* Caller of this function is expected to set the | 4313 | /* Caller of this function is expected to set the |
| 4828 | * vsi->state __ICE_DOWN bit | 4314 | * vsi->state __ICE_DOWN bit |
@@ -4833,7 +4319,18 @@ int ice_down(struct ice_vsi *vsi)
| 4833 | } | 4319 | } |
| 4834 | 4320 | ||
| 4835 | ice_vsi_dis_irq(vsi); | 4321 | ice_vsi_dis_irq(vsi); |
| 4836 | err = ice_vsi_stop_tx_rx_rings(vsi); | 4322 | tx_err = ice_vsi_stop_tx_rings(vsi); |
| 4323 | if (tx_err) | ||
| 4324 | netdev_err(vsi->netdev, | ||
| 4325 | "Failed stop Tx rings, VSI %d error %d\n", | ||
| 4326 | vsi->vsi_num, tx_err); | ||
| 4327 | |||
| 4328 | rx_err = ice_vsi_stop_rx_rings(vsi); | ||
| 4329 | if (rx_err) | ||
| 4330 | netdev_err(vsi->netdev, | ||
| 4331 | "Failed stop Rx rings, VSI %d error %d\n", | ||
| 4332 | vsi->vsi_num, rx_err); | ||
| 4333 | |||
| 4837 | ice_napi_disable_all(vsi); | 4334 | ice_napi_disable_all(vsi); |
| 4838 | 4335 | ||
| 4839 | ice_for_each_txq(vsi, i) | 4336 | ice_for_each_txq(vsi, i) |
@@ -4842,10 +4339,14 @@ int ice_down(struct ice_vsi *vsi)
| 4842 | ice_for_each_rxq(vsi, i) | 4339 | ice_for_each_rxq(vsi, i) |
| 4843 | ice_clean_rx_ring(vsi->rx_rings[i]); | 4340 | ice_clean_rx_ring(vsi->rx_rings[i]); |
| 4844 | 4341 | ||
| 4845 | if (err) | 4342 | if (tx_err || rx_err) { |
| 4846 | netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", | 4343 | netdev_err(vsi->netdev, |
| 4344 | "Failed to close VSI 0x%04X on switch 0x%04X\n", | ||
| 4847 | vsi->vsi_num, vsi->vsw->sw_id); | 4345 | vsi->vsi_num, vsi->vsw->sw_id); |
| 4848 | return err; | 4346 | return -EIO; |
| 4347 | } | ||
| 4348 | |||
| 4349 | return 0; | ||
| 4849 | } | 4350 | } |
| 4850 | 4351 | ||
| 4851 | /** | 4352 | /** |
@@ -4865,6 +4366,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
| 4865 | } | 4366 | } |
| 4866 | 4367 | ||
| 4867 | ice_for_each_txq(vsi, i) { | 4368 | ice_for_each_txq(vsi, i) { |
| 4369 | vsi->tx_rings[i]->netdev = vsi->netdev; | ||
| 4868 | err = ice_setup_tx_ring(vsi->tx_rings[i]); | 4370 | err = ice_setup_tx_ring(vsi->tx_rings[i]); |
| 4869 | if (err) | 4371 | if (err) |
| 4870 | break; | 4372 | break; |
@@ -4890,6 +4392,7 @@ static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
| 4890 | } | 4392 | } |
| 4891 | 4393 | ||
| 4892 | ice_for_each_rxq(vsi, i) { | 4394 | ice_for_each_rxq(vsi, i) { |
| 4395 | vsi->rx_rings[i]->netdev = vsi->netdev; | ||
| 4893 | err = ice_setup_rx_ring(vsi->rx_rings[i]); | 4396 | err = ice_setup_rx_ring(vsi->rx_rings[i]); |
| 4894 | if (err) | 4397 | if (err) |
| 4895 | break; | 4398 | break; |
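The teardown side follows the same split: ice_down() above now stops the Tx and Rx rings through separate calls so each failure is logged individually instead of being folded into a single return value. A hedged sketch of that pattern, using a hypothetical wrapper name:

	/* example_ice_vsi_shut_down() is a hypothetical illustration only */
	static int example_ice_vsi_shut_down(struct ice_vsi *vsi)
	{
		int tx_err, rx_err;

		/* disable the LAN Tx queues through the scheduler first */
		tx_err = ice_vsi_stop_tx_rings(vsi);

		/* then request the Rx queues to stop and wait on QENA_STAT */
		rx_err = ice_vsi_stop_rx_rings(vsi);

		/* surface a combined error rather than masking one failure */
		return (tx_err || rx_err) ? -EIO : 0;
	}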
