author		Alexander Duyck <alexander.h.duyck@intel.com>	2012-10-30 02:01:55 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2013-01-19 07:21:05 -0500
commit		ec718254cbfe2c311ee56a41af41877b7a51a556 (patch)
tree		0c69b6199828c8ae07a1dae8497fc89be4e62745 /drivers/net/ethernet/intel
parent		472148c320c0d11245932ba1315bc4ec0667ed57 (diff)
ixgbe: Improve performance and reduce size of ixgbe_tx_map
This change is meant to both improve the performance and reduce the size of
ixgbe_tx_map. To do this I have expanded the work done in the main loop by
pushing first into tx_buffer. This allows us to pull the dma_mapping_error
check, the tx_buffer value assignment, and the initial DMA value assignment
to the Tx descriptor into the loop. The net result is that the function
shrinks by a little over 100 bytes and is about 1% to 2% faster.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
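In outline, the patch folds the mapping of the skb head into the same loop that maps the paged fragments, so the dma_mapping_error check and the unmap length/address bookkeeping appear once instead of twice. A minimal before/after sketch of that restructuring (illustrative only: dev stands for tx_ring->dev, and record_mapping() is a hypothetical stand-in for the dma_unmap_len_set()/dma_unmap_addr_set() pair, not a real kernel helper):

	/* Before: head mapping checked and recorded ahead of the loop,
	 * with the same steps duplicated for each fragment inside it.
	 */
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto dma_error;
	record_mapping(first, dma, size);	/* hypothetical helper */
	for (;;) {
		/* ... write Tx descriptors for the current buffer ... */
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto dma_error;
		record_mapping(tx_buffer, dma, size);
		frag++;
	}

	/* After: tx_buffer starts out pointing at first, so a single
	 * check and a single bookkeeping site at the top of the loop
	 * cover the head and every fragment alike.
	 */
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	tx_buffer = first;
	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(dev, dma))
			goto dma_error;
		record_mapping(tx_buffer, dma, size);
		/* ... write Tx descriptors for the current buffer ... */
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}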
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--	drivers/net/ethernet/intel/ixgbe/ixgbe_main.c	| 42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f7a314f8b743..e7109de2204a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6091,21 +6091,22 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 			 struct ixgbe_tx_buffer *first,
 			 const u8 hdr_len)
 {
-	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
 	struct ixgbe_tx_buffer *tx_buffer;
 	union ixgbe_adv_tx_desc *tx_desc;
-	struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
-	unsigned int data_len = skb->data_len;
-	unsigned int size = skb_headlen(skb);
-	unsigned int paylen = skb->len - hdr_len;
+	struct skb_frag_struct *frag;
+	dma_addr_t dma;
+	unsigned int data_len, size;
 	u32 tx_flags = first->tx_flags;
 	u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags);
 	u16 i = tx_ring->next_to_use;
 
 	tx_desc = IXGBE_TX_DESC(tx_ring, i);
 
-	ixgbe_tx_olinfo_status(tx_desc, tx_flags, paylen);
+	ixgbe_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len);
+
+	size = skb_headlen(skb);
+	data_len = skb->data_len;
 
 #ifdef IXGBE_FCOE
 	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
@@ -6119,16 +6120,19 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
 #endif
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(tx_ring->dev, dma))
-		goto dma_error;
 
-	/* record length, and DMA address */
-	dma_unmap_len_set(first, len, size);
-	dma_unmap_addr_set(first, dma, dma);
+	tx_buffer = first;
 
-	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+		if (dma_mapping_error(tx_ring->dev, dma))
+			goto dma_error;
 
-	for (;;) {
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer, len, size);
+		dma_unmap_addr_set(tx_buffer, dma, dma);
+
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
 		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
 			tx_desc->read.cmd_type_len =
 				cpu_to_le32(cmd_type ^ IXGBE_MAX_DATA_PER_TXD);
@@ -6139,12 +6143,12 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 				tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 				i = 0;
 			}
+			tx_desc->read.olinfo_status = 0;
 
 			dma += IXGBE_MAX_DATA_PER_TXD;
 			size -= IXGBE_MAX_DATA_PER_TXD;
 
 			tx_desc->read.buffer_addr = cpu_to_le64(dma);
-			tx_desc->read.olinfo_status = 0;
 		}
 
 		if (likely(!data_len))
@@ -6158,6 +6162,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
 			i = 0;
 		}
+		tx_desc->read.olinfo_status = 0;
 
 #ifdef IXGBE_FCOE
 		size = min_t(unsigned int, data_len, skb_frag_size(frag));
@@ -6168,17 +6173,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
 
 		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
 				       DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, dma))
-			goto dma_error;
 
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		dma_unmap_len_set(tx_buffer, len, size);
-		dma_unmap_addr_set(tx_buffer, dma, dma);
-
-		tx_desc->read.buffer_addr = cpu_to_le64(dma);
-		tx_desc->read.olinfo_status = 0;
-
-		frag++;
 	}
 
 	/* write last descriptor with RS and EOP bits */
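Pieced together from the hunks above, the mapping loop after this change reads roughly as follows (a reading aid, not verbatim source: the FCoE-only branches and the lines that fall between hunks are elided or reconstructed from context):

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IXGBE_MAX_DATA_PER_TXD)) {
			/* ... split a buffer larger than one descriptor,
			 * zeroing olinfo_status on each wrapped descriptor ...
			 */
		}

		if (likely(!data_len))
			break;

		/* ... advance i/tx_desc, zero olinfo_status, and take the
		 * next frag's size ...
		 */

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}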