author     Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>  2018-12-19 13:03:27 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>                 2019-01-15 14:11:10 -0500
commit     03f7a986684597607de02f3023aa6c3657a78ab5 (patch)
tree       a68e18437a07c01c3f4a771941207a3f5dfbdb80 /drivers/net/ethernet/intel/ice/ice_lib.c
parent     ab4ab73fc1ec6dec548fa36c5e383ef5faa7b4c1 (diff)
ice: Rework queue management code for reuse
This patch reworks the queue management code to allow for reuse with the
XDP feature (to be added in a future patch).

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_lib.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 236
1 file changed, 127 insertions(+), 109 deletions(-)
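Note: the new helpers below operate on a small parameter block, struct ice_qs_cfg, rather than on the VSI directly. Its definition lives outside this file (in ice_lib.h) and so is not part of this file-limited diff; the sketch below is reconstructed from the fields initialized and dereferenced in the hunks that follow, so the exact types and comments are assumptions, not the authoritative header.

/* Reconstructed sketch of the queue-assignment config block; field names
 * match the initializers in ice_vsi_get_qs() below, types are best guesses.
 */
struct ice_qs_cfg {
	struct mutex *qs_mutex;		/* PF queue bitmap lock (&pf->avail_q_mutex) */
	unsigned long *pf_map;		/* PF-wide Tx or Rx queue bitmap */
	unsigned long pf_map_size;	/* number of bits in pf_map */
	unsigned int q_count;		/* queues requested for this VSI */
	unsigned int scatter_count;	/* queue cap when falling back to scatter */
	u16 *vsi_map;			/* vsi->txq_map or vsi->rxq_map to fill */
	u16 vsi_map_offset;		/* first index to write within vsi_map */
	u8 mapping_mode;		/* ICE_VSI_MAP_CONTIG or ICE_VSI_MAP_SCATTER */
};

__ice_vsi_get_qs() first tries __ice_vsi_get_qs_contig() and, on failure, clamps q_count to scatter_count and retries with __ice_vsi_get_qs_sc().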
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 3bd24173b39c..a1f523a9d39d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -514,110 +514,89 @@ unlock_pf:
 }
 
 /**
- * ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
  *
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
  */
-static int ice_vsi_get_qs_contig(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
 {
-	struct ice_pf *pf = vsi->back;
-	int offset, ret = 0;
-
-	mutex_lock(&pf->avail_q_mutex);
-	/* look for contiguous block of queues for Tx */
-	offset = bitmap_find_next_zero_area(pf->avail_txqs, ICE_MAX_TXQS,
-					    0, vsi->alloc_txq, 0);
-	if (offset < ICE_MAX_TXQS) {
-		int i;
+	int offset, i;
 
-		bitmap_set(pf->avail_txqs, offset, vsi->alloc_txq);
-		for (i = 0; i < vsi->alloc_txq; i++)
-			vsi->txq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->tx_mapping_mode = ICE_VSI_MAP_SCATTER;
+	mutex_lock(qs_cfg->qs_mutex);
+	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
+					    0, qs_cfg->q_count, 0);
+	if (offset >= qs_cfg->pf_map_size) {
+		mutex_unlock(qs_cfg->qs_mutex);
+		return -ENOMEM;
 	}
 
-	/* look for contiguous block of queues for Rx */
-	offset = bitmap_find_next_zero_area(pf->avail_rxqs, ICE_MAX_RXQS,
-					    0, vsi->alloc_rxq, 0);
-	if (offset < ICE_MAX_RXQS) {
-		int i;
-
-		bitmap_set(pf->avail_rxqs, offset, vsi->alloc_rxq);
-		for (i = 0; i < vsi->alloc_rxq; i++)
-			vsi->rxq_map[i] = i + offset;
-	} else {
-		ret = -ENOMEM;
-		vsi->rx_mapping_mode = ICE_VSI_MAP_SCATTER;
-	}
-	mutex_unlock(&pf->avail_q_mutex);
+	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
+	for (i = 0; i < qs_cfg->q_count; i++)
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	return ret;
+	return 0;
 }
 
 /**
- * ice_vsi_get_qs_scatter - Assign a scattered queues to VSI
- * @vsi: the VSI getting queues
+ * __ice_vsi_get_qs_sc - Assign a scattered queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
  *
- * Return 0 on success and a negative value on error
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
  */
-static int ice_vsi_get_qs_scatter(struct ice_vsi *vsi)
+static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
 {
-	struct ice_pf *pf = vsi->back;
 	int i, index = 0;
 
-	mutex_lock(&pf->avail_q_mutex);
-
-	if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_txq; i++) {
-			index = find_next_zero_bit(pf->avail_txqs,
-						   ICE_MAX_TXQS, index);
-			if (index < ICE_MAX_TXQS) {
-				set_bit(index, pf->avail_txqs);
-				vsi->txq_map[i] = index;
-			} else {
-				goto err_scatter_tx;
-			}
-		}
-	}
-
-	if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER) {
-		for (i = 0; i < vsi->alloc_rxq; i++) {
-			index = find_next_zero_bit(pf->avail_rxqs,
-						   ICE_MAX_RXQS, index);
-			if (index < ICE_MAX_RXQS) {
-				set_bit(index, pf->avail_rxqs);
-				vsi->rxq_map[i] = index;
-			} else {
-				goto err_scatter_rx;
-			}
-		}
+	mutex_lock(qs_cfg->qs_mutex);
+	for (i = 0; i < qs_cfg->q_count; i++) {
+		index = find_next_zero_bit(qs_cfg->pf_map,
+					   qs_cfg->pf_map_size, index);
+		if (index >= qs_cfg->pf_map_size)
+			goto err_scatter;
+		set_bit(index, qs_cfg->pf_map);
+		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
 	}
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	mutex_unlock(&pf->avail_q_mutex);
 	return 0;
-
-err_scatter_rx:
-	/* unflag any queues we have grabbed (i is failed position) */
+err_scatter:
 	for (index = 0; index < i; index++) {
-		clear_bit(vsi->rxq_map[index], pf->avail_rxqs);
-		vsi->rxq_map[index] = 0;
-	}
-	i = vsi->alloc_txq;
-err_scatter_tx:
-	/* i is either position of failed attempt or vsi->alloc_txq */
-	for (index = 0; index < i; index++) {
-		clear_bit(vsi->txq_map[index], pf->avail_txqs);
-		vsi->txq_map[index] = 0;
+		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
+		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
 	}
+	mutex_unlock(qs_cfg->qs_mutex);
 
-	mutex_unlock(&pf->avail_q_mutex);
 	return -ENOMEM;
 }
 
 /**
+ * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
+ * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ *
+ * This is an internal function for assigning queues from the PF to VSI and
+ * initially tries to find contiguous space. If it is not successful to find
+ * contiguous space, then it tries with the scatter approach.
+ *
+ * Return 0 on success and -ENOMEM in case of no left space in PF queue bitmap
+ */
+static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
+{
+	int ret = 0;
+
+	ret = __ice_vsi_get_qs_contig(qs_cfg);
+	if (ret) {
+		/* contig failed, so try with scatter approach */
+		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
+		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
+					qs_cfg->scatter_count);
+		ret = __ice_vsi_get_qs_sc(qs_cfg);
+	}
+	return ret;
+}
+
+/**
  * ice_vsi_get_qs - Assign queues from PF to VSI
  * @vsi: the VSI to assign queues to
  *
@@ -625,25 +604,35 @@ err_scatter_tx:
  */
 static int ice_vsi_get_qs(struct ice_vsi *vsi)
 {
+	struct ice_pf *pf = vsi->back;
+	struct ice_qs_cfg tx_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_txqs,
+		.pf_map_size = ICE_MAX_TXQS,
+		.q_count = vsi->alloc_txq,
+		.scatter_count = ICE_MAX_SCATTER_TXQS,
+		.vsi_map = vsi->txq_map,
+		.vsi_map_offset = 0,
+		.mapping_mode = vsi->tx_mapping_mode
+	};
+	struct ice_qs_cfg rx_qs_cfg = {
+		.qs_mutex = &pf->avail_q_mutex,
+		.pf_map = pf->avail_rxqs,
+		.pf_map_size = ICE_MAX_RXQS,
+		.q_count = vsi->alloc_rxq,
+		.scatter_count = ICE_MAX_SCATTER_RXQS,
+		.vsi_map = vsi->rxq_map,
+		.vsi_map_offset = 0,
+		.mapping_mode = vsi->rx_mapping_mode
+	};
 	int ret = 0;
 
 	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
 	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
 
-	/* NOTE: ice_vsi_get_qs_contig() will set the Rx/Tx mapping
-	 * modes individually to scatter if assigning contiguous queues
-	 * to Rx or Tx fails
-	 */
-	ret = ice_vsi_get_qs_contig(vsi);
-	if (ret < 0) {
-		if (vsi->tx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_txq = max_t(u16, vsi->alloc_txq,
-					       ICE_MAX_SCATTER_TXQS);
-		if (vsi->rx_mapping_mode == ICE_VSI_MAP_SCATTER)
-			vsi->alloc_rxq = max_t(u16, vsi->alloc_rxq,
-					       ICE_MAX_SCATTER_RXQS);
-		ret = ice_vsi_get_qs_scatter(vsi);
-	}
+	ret = __ice_vsi_get_qs(&tx_qs_cfg);
+	if (!ret)
+		ret = __ice_vsi_get_qs(&rx_qs_cfg);
 
 	return ret;
 }
@@ -1614,11 +1603,14 @@ setup_rings:
 /**
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
+ * @offset: offset within vsi->txq_map
  *
  * Return 0 on success and a negative value on error
  * Configure the Tx VSI for operation.
  */
-int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
+static int
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
 {
 	struct ice_aqc_add_tx_qgrp *qg_buf;
 	struct ice_aqc_add_txqs_perq *txq;
@@ -1626,7 +1618,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 	u8 num_q_grps, q_idx = 0;
 	enum ice_status status;
 	u16 buf_len, i, pf_q;
-	int err = 0, tc = 0;
+	int err = 0, tc;
 
 	buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
 	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
@@ -1644,9 +1636,8 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
 			struct ice_tlan_ctx tlan_ctx = { 0 };
 
-			pf_q = vsi->txq_map[q_idx];
-			ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
-					 pf_q);
+			pf_q = vsi->txq_map[q_idx + offset];
+			ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
 			/* copy context contents into the qg_buf */
 			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
 			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
@@ -1655,7 +1646,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 			/* init queue specific tail reg. It is referred as
 			 * transmit comm scheduler queue doorbell.
 			 */
-			vsi->tx_rings[q_idx]->tail =
+			rings[q_idx]->tail =
 				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
 			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
 						 num_q_grps, qg_buf, buf_len,
@@ -1674,7 +1665,7 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
 			 */
 			txq = &qg_buf->txqs[0];
 			if (pf_q == le16_to_cpu(txq->txq_id))
-				vsi->tx_rings[q_idx]->txq_teid =
+				rings[q_idx]->txq_teid =
 					le32_to_cpu(txq->q_teid);
 
 			q_idx++;
@@ -1686,6 +1677,18 @@ err_cfg_txqs:
 }
 
 /**
+ * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+{
+	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
+}
+
+/**
  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
  * @intrl: interrupt rate limit in usecs
  * @gran: interrupt rate limit granularity in usecs
@@ -1897,9 +1900,12 @@ int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
  * @vsi: the VSI being configured
  * @rst_src: reset source
  * @rel_vmvf_num: Relative id of VF/VM
+ * @rings: Tx ring array to be stopped
+ * @offset: offset within vsi->txq_map
  */
-int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-			  u16 rel_vmvf_num)
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+		      u16 rel_vmvf_num, struct ice_ring **rings, int offset)
 {
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
@@ -1927,19 +1933,18 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	ice_for_each_txq(vsi, i) {
 		u16 v_idx;
 
-		if (!vsi->tx_rings || !vsi->tx_rings[i] ||
-		    !vsi->tx_rings[i]->q_vector) {
+		if (!rings || !rings[i] || !rings[i]->q_vector) {
 			err = -EINVAL;
 			goto err_out;
 		}
 
-		q_ids[i] = vsi->txq_map[i];
-		q_teids[i] = vsi->tx_rings[i]->txq_teid;
+		q_ids[i] = vsi->txq_map[i + offset];
+		q_teids[i] = rings[i]->txq_teid;
 
 		/* clear cause_ena bit for disabled queues */
-		val = rd32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
+		val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
 		val &= ~QINT_TQCTL_CAUSE_ENA_M;
-		wr32(hw, QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);
+		wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
 
 		/* software is expected to wait for 100 ns */
 		ndelay(100);
@@ -1947,7 +1952,7 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 		/* trigger a software interrupt for the vector associated to
 		 * the queue to schedule NAPI handler
 		 */
-		v_idx = vsi->tx_rings[i]->q_vector->v_idx;
+		v_idx = rings[i]->q_vector->v_idx;
 		wr32(hw, GLINT_DYN_CTL(vsi->hw_base_vector + v_idx),
 		     GLINT_DYN_CTL_SWINT_TRIG_M | GLINT_DYN_CTL_INTENA_MSK_M);
 	}
@@ -1977,6 +1982,19 @@ err_alloc_q_ids:
 }
 
 /**
+ * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative id of VF/VM
+ */
+int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
+			      enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+{
+	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
+				     0);
+}
+
+/**
  * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
  * @vsi: VSI to enable or disable VLAN pruning on
  * @ena: set to true to enable VLAN pruning and false to disable it
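
Note: since ice_vsi_cfg_txqs() and ice_vsi_stop_tx_rings() become static here, callers elsewhere in the driver (for example in ice_main.c, not covered by this file-limited diffstat) are expected to move to the new LAN wrappers. A hypothetical before/after sketch for such a caller, assuming the ICE_NO_RESET disable source used by non-reset paths:

/* hypothetical caller update outside ice_lib.c; not part of this diff */

/* before */
err = ice_vsi_cfg_txqs(vsi);
...
err = ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0);

/* after: the LAN wrappers forward vsi->tx_rings and offset 0 internally,
 * leaving the ring-array/offset variants free for later XDP reuse
 */
err = ice_vsi_cfg_lan_txqs(vsi);
...
err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);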