 drivers/net/ethernet/amazon/ena/ena_ethtool.c |   1 +
 drivers/net/ethernet/amazon/ena/ena_netdev.c  | 387 ++++++++++++++++++-------
 drivers/net/ethernet/amazon/ena/ena_netdev.h  |   6 ++
 3 files changed, 251 insertions(+), 143 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 521607bc4393..fd28bd0d1c1e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -81,6 +81,7 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
 	ENA_STAT_TX_ENTRY(bad_req_id),
+	ENA_STAT_TX_ENTRY(llq_buffer_copy),
 	ENA_STAT_TX_ENTRY(missed_tx),
 };
 
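This one-line ethtool change is all that is needed to export the new counter, because the ENA_STAT_TX_ENTRY() macro records both the stat's name and its offset inside struct ena_stats_tx (extended in ena_netdev.h below). A rough paraphrase of the helper macros already present in ena_ethtool.c, shown for context only and not part of this patch:

    /* Context-only paraphrase of the existing ena_ethtool.c helpers;
     * this patch does not modify them.
     */
    struct ena_stats {
    	char name[ETH_GSTRING_LEN];
    	int stat_offset;
    };

    #define ENA_STAT_ENTRY(stat, stat_type) { \
    	.name = #stat, \
    	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
    }

    #define ENA_STAT_TX_ENTRY(stat) ENA_STAT_ENTRY(stat, tx)

With the entry in place, "ethtool -S <iface>" gains a per-TX-queue llq_buffer_copy row, counting packets whose pushed header had to be assembled in the new intermediate buffer.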
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 79a4e87326b7..98314b28a000 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -237,6 +237,17 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
 		}
 	}
 
+	size = tx_ring->tx_max_header_size;
+	tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node);
+	if (!tx_ring->push_buf_intermediate_buf) {
+		tx_ring->push_buf_intermediate_buf = vzalloc(size);
+		if (!tx_ring->push_buf_intermediate_buf) {
+			vfree(tx_ring->tx_buffer_info);
+			vfree(tx_ring->free_tx_ids);
+			return -ENOMEM;
+		}
+	}
+
 	/* Req id ring for TX out of order completions */
 	for (i = 0; i < tx_ring->ring_size; i++)
 		tx_ring->free_tx_ids[i] = i;
@@ -265,6 +276,9 @@ static void ena_free_tx_resources(struct ena_adapter *adapter, int qid)
 
 	vfree(tx_ring->free_tx_ids);
 	tx_ring->free_tx_ids = NULL;
+
+	vfree(tx_ring->push_buf_intermediate_buf);
+	tx_ring->push_buf_intermediate_buf = NULL;
 }
 
 /* ena_setup_all_tx_resources - allocate I/O Tx queues resources for All queues
@@ -602,6 +616,36 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
 		ena_free_rx_bufs(adapter, i);
 }
 
+static inline void ena_unmap_tx_skb(struct ena_ring *tx_ring,
+				    struct ena_tx_buffer *tx_info)
+{
+	struct ena_com_buf *ena_buf;
+	u32 cnt;
+	int i;
+
+	ena_buf = tx_info->bufs;
+	cnt = tx_info->num_of_bufs;
+
+	if (unlikely(!cnt))
+		return;
+
+	if (tx_info->map_linear_data) {
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(ena_buf, paddr),
+				 dma_unmap_len(ena_buf, len),
+				 DMA_TO_DEVICE);
+		ena_buf++;
+		cnt--;
+	}
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < cnt; i++) {
+		dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
+			       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
+		ena_buf++;
+	}
+}
+
 /* ena_free_tx_bufs - Free Tx Buffers per Queue
  * @tx_ring: TX ring for which buffers be freed
  */
@@ -612,9 +656,6 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 
 	for (i = 0; i < tx_ring->ring_size; i++) {
 		struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i];
-		struct ena_com_buf *ena_buf;
-		int nr_frags;
-		int j;
 
 		if (!tx_info->skb)
 			continue;
@@ -630,21 +671,7 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 				   tx_ring->qid, i);
 		}
 
-		ena_buf = tx_info->bufs;
-		dma_unmap_single(tx_ring->dev,
-				 ena_buf->paddr,
-				 ena_buf->len,
-				 DMA_TO_DEVICE);
-
-		/* unmap remaining mapped pages */
-		nr_frags = tx_info->num_of_bufs - 1;
-		for (j = 0; j < nr_frags; j++) {
-			ena_buf++;
-			dma_unmap_page(tx_ring->dev,
-				       ena_buf->paddr,
-				       ena_buf->len,
-				       DMA_TO_DEVICE);
-		}
+		ena_unmap_tx_skb(tx_ring, tx_info);
 
 		dev_kfree_skb_any(tx_info->skb);
 	}
@@ -735,8 +762,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 	while (tx_pkts < budget) {
 		struct ena_tx_buffer *tx_info;
 		struct sk_buff *skb;
-		struct ena_com_buf *ena_buf;
-		int i, nr_frags;
 
 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
 						&req_id);
@@ -756,24 +781,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 		tx_info->skb = NULL;
 		tx_info->last_jiffies = 0;
 
-		if (likely(tx_info->num_of_bufs != 0)) {
-			ena_buf = tx_info->bufs;
-
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(ena_buf, paddr),
-					 dma_unmap_len(ena_buf, len),
-					 DMA_TO_DEVICE);
-
-			/* unmap remaining mapped pages */
-			nr_frags = tx_info->num_of_bufs - 1;
-			for (i = 0; i < nr_frags; i++) {
-				ena_buf++;
-				dma_unmap_page(tx_ring->dev,
-					       dma_unmap_addr(ena_buf, paddr),
-					       dma_unmap_len(ena_buf, len),
-					       DMA_TO_DEVICE);
-			}
-		}
+		ena_unmap_tx_skb(tx_ring, tx_info);
 
 		netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
 			  "tx_poll: q %d skb %p completed\n", tx_ring->qid,
@@ -1300,7 +1308,6 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
 
 	/* Reserved the max msix vectors we might need */
 	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
-
 	netif_dbg(adapter, probe, adapter->netdev,
 		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
 
@@ -1593,7 +1600,7 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
 static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 {
-	struct ena_com_create_io_ctx ctx = { 0 };
+	struct ena_com_create_io_ctx ctx;
 	struct ena_com_dev *ena_dev;
 	struct ena_ring *tx_ring;
 	u32 msix_vector;
@@ -1606,6 +1613,8 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 	msix_vector = ENA_IO_IRQ_IDX(qid);
 	ena_qid = ENA_IO_TXQ_IDX(qid);
 
+	memset(&ctx, 0x0, sizeof(ctx));
+
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
 	ctx.qid = ena_qid;
 	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
@@ -1659,7 +1668,7 @@ create_err:
 static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 {
 	struct ena_com_dev *ena_dev;
-	struct ena_com_create_io_ctx ctx = { 0 };
+	struct ena_com_create_io_ctx ctx;
 	struct ena_ring *rx_ring;
 	u32 msix_vector;
 	u16 ena_qid;
@@ -1671,6 +1680,8 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 	msix_vector = ENA_IO_IRQ_IDX(qid);
 	ena_qid = ENA_IO_RXQ_IDX(qid);
 
+	memset(&ctx, 0x0, sizeof(ctx));
+
 	ctx.qid = ena_qid;
 	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
 	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
@@ -1981,73 +1992,70 @@ static int ena_check_and_linearize_skb(struct ena_ring *tx_ring,
 	return rc;
 }
 
-/* Called with netif_tx_lock. */
-static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int ena_tx_map_skb(struct ena_ring *tx_ring,
+			  struct ena_tx_buffer *tx_info,
+			  struct sk_buff *skb,
+			  void **push_hdr,
+			  u16 *header_len)
 {
-	struct ena_adapter *adapter = netdev_priv(dev);
-	struct ena_tx_buffer *tx_info;
-	struct ena_com_tx_ctx ena_tx_ctx;
-	struct ena_ring *tx_ring;
-	struct netdev_queue *txq;
+	struct ena_adapter *adapter = tx_ring->adapter;
 	struct ena_com_buf *ena_buf;
-	void *push_hdr;
-	u32 len, last_frag;
-	u16 next_to_use;
-	u16 req_id;
-	u16 push_len;
-	u16 header_len;
 	dma_addr_t dma;
-	int qid, rc, nb_hw_desc;
-	int i = -1;
-
-	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
-	/* Determine which tx ring we will be placed on */
-	qid = skb_get_queue_mapping(skb);
-	tx_ring = &adapter->tx_ring[qid];
-	txq = netdev_get_tx_queue(dev, qid);
-
-	rc = ena_check_and_linearize_skb(tx_ring, skb);
-	if (unlikely(rc))
-		goto error_drop_packet;
-
-	skb_tx_timestamp(skb);
-	len = skb_headlen(skb);
+	u32 skb_head_len, frag_len, last_frag;
+	u16 push_len = 0;
+	u16 delta = 0;
+	int i = 0;
 
-	next_to_use = tx_ring->next_to_use;
-	req_id = tx_ring->free_tx_ids[next_to_use];
-	tx_info = &tx_ring->tx_buffer_info[req_id];
-	tx_info->num_of_bufs = 0;
-
-	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
-	ena_buf = tx_info->bufs;
+	skb_head_len = skb_headlen(skb);
 	tx_info->skb = skb;
+	ena_buf = tx_info->bufs;
 
 	if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		/* prepared the push buffer */
-		push_len = min_t(u32, len, tx_ring->tx_max_header_size);
-		header_len = push_len;
-		push_hdr = skb->data;
+		/* When the device is LLQ mode, the driver will copy
+		 * the header into the device memory space.
+		 * the ena_com layer assume the header is in a linear
+		 * memory space.
+		 * This assumption might be wrong since part of the header
+		 * can be in the fragmented buffers.
+		 * Use skb_header_pointer to make sure the header is in a
+		 * linear memory space.
+		 */
+
+		push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size);
+		*push_hdr = skb_header_pointer(skb, 0, push_len,
+					       tx_ring->push_buf_intermediate_buf);
+		*header_len = push_len;
+		if (unlikely(skb->data != *push_hdr)) {
+			u64_stats_update_begin(&tx_ring->syncp);
+			tx_ring->tx_stats.llq_buffer_copy++;
+			u64_stats_update_end(&tx_ring->syncp);
+
+			delta = push_len - skb_head_len;
+		}
 	} else {
-		push_len = 0;
-		header_len = min_t(u32, len, tx_ring->tx_max_header_size);
-		push_hdr = NULL;
+		*push_hdr = NULL;
+		*header_len = min_t(u32, skb_head_len,
+				    tx_ring->tx_max_header_size);
 	}
 
-	netif_dbg(adapter, tx_queued, dev,
+	netif_dbg(adapter, tx_queued, adapter->netdev,
 		  "skb: %p header_buf->vaddr: %p push_len: %d\n", skb,
-		  push_hdr, push_len);
+		  *push_hdr, push_len);
 
-	if (len > push_len) {
+	if (skb_head_len > push_len) {
 		dma = dma_map_single(tx_ring->dev, skb->data + push_len,
-				     len - push_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
+				     skb_head_len - push_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
 			goto error_report_dma_error;
 
 		ena_buf->paddr = dma;
-		ena_buf->len = len - push_len;
+		ena_buf->len = skb_head_len - push_len;
 
 		ena_buf++;
 		tx_info->num_of_bufs++;
+		tx_info->map_linear_data = 1;
+	} else {
+		tx_info->map_linear_data = 0;
 	}
 
 	last_frag = skb_shinfo(skb)->nr_frags;
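An aside on the hunk above: the copy detection hinges on the contract of skb_header_pointer(). When the first push_len bytes already live in the linear area, it returns skb->data itself; otherwise it copies them into the caller-supplied buffer and returns that buffer, so the pointer comparison in ena_tx_map_skb() is true exactly when the header spilled into the fragments and was assembled in push_buf_intermediate_buf. A simplified model (not the kernel implementation, which also returns NULL when the packet is shorter than the requested length):

    /* Simplified model of the skb_header_pointer() behavior that
     * ena_tx_map_skb() relies on; not the kernel implementation.
     */
    static void *model_header_pointer(void *linear, unsigned int linear_len,
    				  unsigned int len, void *buffer)
    {
    	if (len <= linear_len)
    		return linear;	/* header fully linear, no copy */
    	/* the kernel helper copies the missing bytes from the frags */
    	return buffer;		/* buffer != linear => llq_buffer_copy++ */
    }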
@@ -2055,18 +2063,75 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	for (i = 0; i < last_frag; i++) {
 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-		len = skb_frag_size(frag);
-		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
-				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
+		frag_len = skb_frag_size(frag);
+
+		if (unlikely(delta >= frag_len)) {
+			delta -= frag_len;
+			continue;
+		}
+
+		dma = skb_frag_dma_map(tx_ring->dev, frag, delta,
+				       frag_len - delta, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
 			goto error_report_dma_error;
 
 		ena_buf->paddr = dma;
-		ena_buf->len = len;
+		ena_buf->len = frag_len - delta;
 		ena_buf++;
+		tx_info->num_of_bufs++;
+		delta = 0;
 	}
 
-	tx_info->num_of_bufs += last_frag;
+	return 0;
+
+error_report_dma_error:
+	u64_stats_update_begin(&tx_ring->syncp);
+	tx_ring->tx_stats.dma_mapping_err++;
+	u64_stats_update_end(&tx_ring->syncp);
+	netdev_warn(adapter->netdev, "failed to map skb\n");
+
+	tx_info->skb = NULL;
+
+	tx_info->num_of_bufs += i;
+	ena_unmap_tx_skb(tx_ring, tx_info);
+
+	return -EINVAL;
+}
+
+/* Called with netif_tx_lock. */
+static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ena_adapter *adapter = netdev_priv(dev);
+	struct ena_tx_buffer *tx_info;
+	struct ena_com_tx_ctx ena_tx_ctx;
+	struct ena_ring *tx_ring;
+	struct netdev_queue *txq;
+	void *push_hdr;
+	u16 next_to_use, req_id, header_len;
+	int qid, rc, nb_hw_desc;
+
+	netif_dbg(adapter, tx_queued, dev, "%s skb %p\n", __func__, skb);
+	/* Determine which tx ring we will be placed on */
+	qid = skb_get_queue_mapping(skb);
+	tx_ring = &adapter->tx_ring[qid];
+	txq = netdev_get_tx_queue(dev, qid);
+
+	rc = ena_check_and_linearize_skb(tx_ring, skb);
+	if (unlikely(rc))
+		goto error_drop_packet;
+
+	skb_tx_timestamp(skb);
+
+	next_to_use = tx_ring->next_to_use;
+	req_id = tx_ring->free_tx_ids[next_to_use];
+	tx_info = &tx_ring->tx_buffer_info[req_id];
+	tx_info->num_of_bufs = 0;
+
+	WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id);
+
+	rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len);
+	if (unlikely(rc))
+		goto error_drop_packet;
 
 	memset(&ena_tx_ctx, 0x0, sizeof(struct ena_com_tx_ctx));
 	ena_tx_ctx.ena_bufs = tx_info->bufs;
@@ -2082,14 +2147,22 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx,
 			&nb_hw_desc);
 
+	/* ena_com_prepare_tx() can't fail due to overflow of tx queue,
+	 * since the number of free descriptors in the queue is checked
+	 * after sending the previous packet. In case there isn't enough
+	 * space in the queue for the next packet, it is stopped
+	 * until there is again enough available space in the queue.
+	 * All other failure reasons of ena_com_prepare_tx() are fatal
+	 * and therefore require a device reset.
+	 */
 	if (unlikely(rc)) {
 		netif_err(adapter, tx_queued, dev,
 			  "failed to prepare tx bufs\n");
 		u64_stats_update_begin(&tx_ring->syncp);
-		tx_ring->tx_stats.queue_stop++;
 		tx_ring->tx_stats.prepare_ctx_err++;
 		u64_stats_update_end(&tx_ring->syncp);
-		netif_tx_stop_queue(txq);
+		adapter->reset_reason = ENA_REGS_RESET_DRIVER_INVALID_STATE;
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 		goto error_unmap_dma;
 	}
 
@@ -2152,35 +2225,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	return NETDEV_TX_OK;
 
-error_report_dma_error:
-	u64_stats_update_begin(&tx_ring->syncp);
-	tx_ring->tx_stats.dma_mapping_err++;
-	u64_stats_update_end(&tx_ring->syncp);
-	netdev_warn(adapter->netdev, "failed to map skb\n");
-
-	tx_info->skb = NULL;
-
 error_unmap_dma:
-	if (i >= 0) {
-		/* save value of frag that failed */
-		last_frag = i;
-
-		/* start back at beginning and unmap skb */
-		tx_info->skb = NULL;
-		ena_buf = tx_info->bufs;
-		dma_unmap_single(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
-				 dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-
-		/* unmap remaining mapped pages */
-		for (i = 0; i < last_frag; i++) {
-			ena_buf++;
-			dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr),
-				       dma_unmap_len(ena_buf, len), DMA_TO_DEVICE);
-		}
-	}
+	ena_unmap_tx_skb(tx_ring, tx_info);
+	tx_info->skb = NULL;
 
 error_drop_packet:
-
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -2616,7 +2665,9 @@ static int ena_restore_device(struct ena_adapter *adapter)
 
 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
-	dev_err(&pdev->dev, "Device reset completed successfully\n");
+	dev_err(&pdev->dev,
+		"Device reset completed successfully, Driver info: %s\n",
+		version);
 
 	return rc;
 err_disable_msix:
@@ -2979,18 +3030,52 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
 	return io_queue_num;
 }
 
-static void ena_set_push_mode(struct pci_dev *pdev, struct ena_com_dev *ena_dev,
-			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
+static int ena_set_queues_placement_policy(struct pci_dev *pdev,
+					   struct ena_com_dev *ena_dev,
+					   struct ena_admin_feature_llq_desc *llq,
+					   struct ena_llq_configurations *llq_default_configurations)
 {
 	bool has_mem_bar;
+	int rc;
+	u32 llq_feature_mask;
+
+	llq_feature_mask = 1 << ENA_ADMIN_LLQ;
+	if (!(ena_dev->supported_features & llq_feature_mask)) {
+		dev_err(&pdev->dev,
+			"LLQ is not supported Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
 
 	has_mem_bar = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(ENA_MEM_BAR);
 
-	/* Enable push mode if device supports LLQ */
-	if (has_mem_bar && get_feat_ctx->max_queues.max_legacy_llq_num > 0)
-		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
-	else
+	rc = ena_com_config_dev_mode(ena_dev, llq, llq_default_configurations);
+	if (unlikely(rc)) {
+		dev_err(&pdev->dev,
+			"Failed to configure the device mode. Fallback to host mode policy.\n");
+		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
+
+	/* Nothing to config, exit */
+	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+		return 0;
+
+	if (!has_mem_bar) {
+		dev_err(&pdev->dev,
+			"ENA device does not expose LLQ bar. Fallback to host mode policy.\n");
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+		return 0;
+	}
+
+	ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+					   pci_resource_start(pdev, ENA_MEM_BAR),
+					   pci_resource_len(pdev, ENA_MEM_BAR));
+
+	if (!ena_dev->mem_bar)
+		return -EFAULT;
+
+	return 0;
 }
 
 static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
@@ -3115,6 +3200,15 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 	pci_release_selected_regions(pdev, release_bars);
 }
 
+static inline void set_default_llq_configurations(struct ena_llq_configurations *llq_config)
+{
+	llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
+	llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+	llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+	llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+	llq_config->llq_ring_entry_size_value = 128;
+}
+
 static int ena_calc_queue_size(struct pci_dev *pdev,
 			       struct ena_com_dev *ena_dev,
 			       u16 *max_tx_sgl_size,
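A quick sanity check on these defaults, assuming the usual 16-byte ENA TX descriptor (an assumption about the device layout, not something this patch states): a 128B list entry with two descriptors placed before the header leaves 96 bytes for the inline header, which is what bounds tx_max_header_size in LLQ mode.

    #include <stdio.h>

    /* Back-of-envelope check of set_default_llq_configurations();
     * the 16-byte descriptor size is an assumption about the ENA
     * device layout, not stated in this patch.
     */
    int main(void)
    {
    	unsigned int entry_size = 128;		/* ENA_ADMIN_LIST_ENTRY_SIZE_128B */
    	unsigned int descs_before_header = 2;	/* ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2 */
    	unsigned int desc_size = 16;		/* assumed TX descriptor size */

    	printf("inline header budget: %u bytes\n",
    	       entry_size - descs_before_header * desc_size);	/* prints 96 */
    	return 0;
    }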
@@ -3163,7 +3257,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	static int version_printed;
 	struct net_device *netdev;
 	struct ena_adapter *adapter;
+	struct ena_llq_configurations llq_config;
 	struct ena_com_dev *ena_dev = NULL;
+	char *queue_type_str;
 	static int adapters_found;
 	int io_queue_num, bars, rc;
 	int queue_size;
@@ -3217,16 +3313,13 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_region;
 	}
 
-	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
+	set_default_llq_configurations(&llq_config);
 
-	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
-						   pci_resource_start(pdev, ENA_MEM_BAR),
-						   pci_resource_len(pdev, ENA_MEM_BAR));
-		if (!ena_dev->mem_bar) {
-			rc = -EFAULT;
-			goto err_device_destroy;
-		}
+	rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx.llq,
+					     &llq_config);
+	if (rc) {
+		dev_err(&pdev->dev, "ena device init failed\n");
+		goto err_device_destroy;
 	}
 
 	/* initial Tx interrupt delay, Assumes 1 usec granularity.
@@ -3241,8 +3334,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_device_destroy;
 	}
 
-	dev_info(&pdev->dev, "creating %d io queues. queue size: %d\n",
-		 io_queue_num, queue_size);
+	dev_info(&pdev->dev, "creating %d io queues. queue size: %d. LLQ is %s\n",
+		 io_queue_num, queue_size,
+		 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
+		 "ENABLED" : "DISABLED");
 
 	/* dev zeroed in init_etherdev */
 	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
@@ -3332,9 +3427,15 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	timer_setup(&adapter->timer_service, ena_timer_service, 0);
 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 
-	dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
+	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+		queue_type_str = "Regular";
+	else
+		queue_type_str = "Low Latency";
+
+	dev_info(&pdev->dev,
+		 "%s found at mem %lx, mac addr %pM Queues %d, Placement policy: %s\n",
 		 DEVICE_NAME, (long)pci_resource_start(pdev, 0),
-		 netdev->dev_addr, io_queue_num);
+		 netdev->dev_addr, io_queue_num, queue_type_str);
 
 	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 
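The subtlest part of the ena_netdev.c changes is the delta bookkeeping in ena_tx_map_skb(): when the pushed header is longer than the linear data, its tail came from the fragment list, and those delta = push_len - skb_head_len bytes must not be DMA-mapped a second time. A standalone model of that loop, using hypothetical fragment sizes and plain arrays instead of skb frags:

    #include <stdio.h>

    /* Standalone model of the delta logic in ena_tx_map_skb(): given
     * the number of pushed header bytes that came from the fragment
     * list (delta), compute which byte range of each fragment still
     * needs a DMA mapping. Sizes below are made up for illustration.
     */
    int main(void)
    {
    	unsigned int frag_len[] = { 16, 32, 1400 };	/* hypothetical frag sizes */
    	unsigned int delta = 40;	/* e.g. push_len 96, skb_head_len 56 */
    	unsigned int i;

    	for (i = 0; i < sizeof(frag_len) / sizeof(frag_len[0]); i++) {
    		if (delta >= frag_len[i]) {
    			/* whole frag was consumed by the pushed header */
    			delta -= frag_len[i];
    			printf("frag %u: skipped entirely\n", i);
    			continue;
    		}
    		/* map only the tail the push did not cover */
    		printf("frag %u: map offset %u, len %u\n",
    		       i, delta, frag_len[i] - delta);
    		delta = 0;
    	}
    	return 0;
    }

Because fully consumed frags are skipped, num_of_bufs is now incremented per mapped buffer rather than by nr_frags, and ena_unmap_tx_skb() later unmaps exactly the entries that were mapped.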
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 7c7ae56c52cf..4fa7d2fda475 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -151,6 +151,9 @@ struct ena_tx_buffer {
 	/* num of buffers used by this skb */
 	u32 num_of_bufs;
 
+	/* Indicate if bufs[0] map the linear data of the skb. */
+	u8 map_linear_data;
+
 	/* Used for detect missing tx packets to limit the number of prints */
 	u32 print_once;
 	/* Save the last jiffies to detect missing tx packets
@@ -186,6 +189,7 @@ struct ena_stats_tx {
 	u64 tx_poll;
 	u64 doorbells;
 	u64 bad_req_id;
+	u64 llq_buffer_copy;
 	u64 missed_tx;
 };
 
@@ -257,6 +261,8 @@ struct ena_ring {
 		struct ena_stats_tx tx_stats;
 		struct ena_stats_rx rx_stats;
 	};
+
+	u8 *push_buf_intermediate_buf;
 	int empty_rx_queue;
 } ____cacheline_aligned;
 