author	Emil Tantilov <emil.s.tantilov@intel.com>	2017-12-11 13:37:25 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-01-26 10:46:51 -0500
commit	6f3554548ecca3d836dd17ffef21d706aae8dd25 (patch)
tree	827836c42387c33ee49edddb7dc3cef8139d1961
parent	40b8178bc97dfcc688eb42d04df45e2f3c905830 (diff)
ixgbevf: improve performance and reduce size of ixgbevf_tx_map()
Based on commit ec718254cbfe ("ixgbe: Improve performance and reduce size
of ixgbe_tx_map")

This change is meant to both improve the performance and reduce the size of
ixgbevf_tx_map().

Expand the work done in the main loop by pushing first into tx_buffer. This
allows us to pull in the dma_mapping_error check, the tx_buffer value
assignment, and the initial DMA value assignment to the Tx descriptor.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
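As a reading aid, the sketch below condenses the restructured mapping loop that the patch introduces. It is a simplified illustration rather than the verbatim driver code: the splitting of buffers larger than IXGBE_MAX_DATA_PER_TXD, the write of cmd_type_len, and the descriptor/ring-index advancing are reduced to comments, but the identifiers are the ones used in the patch.

	tx_buffer = first;	/* seed the loop with the head buffer */

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		/* one mapping-error check now covers the head and every frag */
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length and DMA address for later unmap */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* ... split buffers larger than IXGBE_MAX_DATA_PER_TXD ... */

		if (likely(!data_len))
			break;		/* head and all frags are mapped */

		/* ... write cmd_type_len, advance tx_desc and wrap i ... */

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

Because the head buffer's bookkeeping is no longer open-coded ahead of the loop, the dma_mapping_error() check, the dma_unmap_len_set()/dma_unmap_addr_set() calls and the initial buffer_addr write each appear only once, which is where the size reduction and the performance gain come from.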
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	45
1 file changed, 20 insertions(+), 25 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a793f9ea05e7..d3415ee38597 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -3532,34 +3532,37 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			   struct ixgbevf_tx_buffer *first,
 			   const u8 hdr_len)
 {
-	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
 	struct ixgbevf_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
+	struct skb_frag_struct *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
-	__le32 cmd_type;
+	__le32 cmd_type = ixgbevf_tx_cmd_type(tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
 
-	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, paylen);
-	cmd_type = ixgbevf_tx_cmd_type(tx_flags);
+	ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;
 
-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
+	tx_buffer = first;
 
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
+
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
-	for (;;) {
 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
 				cmd_type | cpu_to_le32(IXGBE_MAX_DATA_PER_TXD);
@@ -3570,12 +3573,12 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+			tx_desc->read.olinfo_status = 0;
 
 			dma += IXGBE_MAX_DATA_PER_TXD;
 			size -= IXGBE_MAX_DATA_PER_TXD;
 
 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
-			tx_desc->read.olinfo_status = 0;
 		}
 
 		if (likely(!data_len))
@@ -3589,23 +3592,15 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 			i = 0;
 		}
+		tx_desc->read.olinfo_status = 0;
 
 		size = skb_frag_size(frag);
 		data_len -= size;
 
 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
 				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
 
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.olinfo_status = 0;
-
-		frag++;
 	}
 
 	/* write last descriptor with RS and EOP bits */