author	Emil Tantilov <emil.s.tantilov@intel.com>	2017-12-11 13:37:31 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-01-26 13:25:02 -0500
commit	865a4d987bce71a383079c1ec8e8b9fbd5431e45 (patch)
tree	98babed809ed6bff652ea6629ecbe23b07e81833
parent	6f3554548ecca3d836dd17ffef21d706aae8dd25 (diff)
ixgbevf: don't bother clearing tx_buffer_info in ixgbevf_clean_tx_ring()
In the case of the Tx rings, we only need to clear the Tx buffer_info when we are resetting the rings. Ideally we do this when we configure the ring to bring it back up, rather than when we are taking it down, in order to avoid dirtying pages we don't need to.

In addition, we don't need to clear the Tx descriptor ring, since we will fully repopulate it when we begin transmitting frames, and next_to_watch can be cleared to prevent the ring from being cleaned beyond that point instead of needing to touch anything in the Tx descriptor ring.

Finally, with these changes we can avoid having to reset the skb member of the Tx buffer_info structure in the cleanup path, since the skb will always be associated with the first buffer, which has next_to_watch set.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	115
1 file changed, 72 insertions(+), 43 deletions(-)
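Before the hunks themselves, a minimal sketch of the teardown walk this patch introduces. This is a simplified paraphrase of the ixgbevf_clean_tx_ring() hunk below, not the exact driver code; the DMA unmap calls are elided and only the control flow that next_to_watch bounds is shown.

/* Sketch only: walk the used region [next_to_clean, next_to_use) instead of
 * the whole ring, and rely on next_to_watch to bound cleanup per packet.
 */
static void clean_tx_ring_sketch(struct ixgbevf_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
		union ixgbe_adv_tx_desc *tx_desc = IXGBEVF_TX_DESC(tx_ring, i);

		/* the skb lives only on the packet's first buffer */
		dev_kfree_skb_any(tx_buffer->skb);
		/* ... dma_unmap_single() the header mapping here ... */

		/* walk the remaining buffers of this packet, bounded by the
		 * eop descriptor recorded in next_to_watch
		 */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			if (unlikely(++i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
			}
			/* ... dma_unmap_page() any paged fragment here ... */
		}

		/* step past the eop buffer to the start of the next packet */
		tx_buffer++;
		if (unlikely(++i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* no memset of the descriptor ring or buffer_info here: the
	 * buffer_info array is re-zeroed in ixgbevf_configure_tx_ring()
	 * when the ring is brought back up
	 */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}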
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d3415ee38597..9b3d43d28106 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -206,28 +206,6 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 	}
 }
 
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer *tx_buffer)
-{
-	if (tx_buffer->skb) {
-		dev_kfree_skb_any(tx_buffer->skb);
-		if (dma_unmap_len(tx_buffer, len))
-			dma_unmap_single(tx_ring->dev,
-					 dma_unmap_addr(tx_buffer, dma),
-					 dma_unmap_len(tx_buffer, len),
-					 DMA_TO_DEVICE);
-	} else if (dma_unmap_len(tx_buffer, len)) {
-		dma_unmap_page(tx_ring->dev,
-			       dma_unmap_addr(tx_buffer, dma),
-			       dma_unmap_len(tx_buffer, len),
-			       DMA_TO_DEVICE);
-	}
-	tx_buffer->next_to_watch = NULL;
-	tx_buffer->skb = NULL;
-	dma_unmap_len_set(tx_buffer, len, 0);
-	/* tx_buffer must be completely set up in the transmit path */
-}
-
 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring)
 {
 	return ring->stats.packets;
@@ -349,7 +327,6 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				 DMA_TO_DEVICE);
 
 		/* clear tx_buffer data */
-		tx_buffer->skb = NULL;
 		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* unmap remaining buffers */
@@ -1576,6 +1553,10 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
 	txdctl |= (1u << 8) |	/* HTHRESH = 1 */
 		  32;		/* PTHRESH = 32 */
 
+	/* reinitialize tx_buffer_info */
+	memset(ring->tx_buffer_info, 0,
+	       sizeof(struct ixgbevf_tx_buffer) * ring->count);
+
 	clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(reg_idx), txdctl);
@@ -2184,23 +2165,57 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
  **/
 static void ixgbevf_clean_tx_ring(struct ixgbevf_ring *tx_ring)
 {
-	struct ixgbevf_tx_buffer *tx_buffer_info;
-	unsigned long size;
-	unsigned int i;
+	u16 i = tx_ring->next_to_clean;
+	struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
 
-	if (!tx_ring->tx_buffer_info)
-		return;
+	while (i != tx_ring->next_to_use) {
+		union ixgbe_adv_tx_desc *eop_desc, *tx_desc;
+
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
+		/* check for eop_desc to determine the end of the packet */
+		eop_desc = tx_buffer->next_to_watch;
+		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
+
+		/* unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(i == tx_ring->count)) {
+				i = 0;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
+			}
+
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len))
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+		}
 
-	/* Free all the Tx ring sk_buffs */
-	for (i = 0; i < tx_ring->count; i++) {
-		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		i++;
+		if (unlikely(i == tx_ring->count)) {
+			i = 0;
+			tx_buffer = tx_ring->tx_buffer_info;
+		}
 	}
 
-	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	memset(tx_ring->tx_buffer_info, 0, size);
+	/* reset next_to_use and next_to_clean */
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
 
-	memset(tx_ring->desc, 0, tx_ring->size);
 }
 
 /**
@@ -3030,7 +3045,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vmalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
@@ -3634,18 +3649,32 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 	return;
 dma_error:
 	dev_err(tx_ring->dev, "TX DMA map failed\n");
+	tx_buffer = &tx_ring->tx_buffer_info[i];
 
 	/* clear dma mappings for failed tx_buffer_info map */
-	for (;;) {
+	while (tx_buffer != first) {
+		if (dma_unmap_len(tx_buffer, len))
+			dma_unmap_page(tx_ring->dev,
+				       dma_unmap_addr(tx_buffer, dma),
+				       dma_unmap_len(tx_buffer, len),
+				       DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
+
+		if (i-- == 0)
+			i += tx_ring->count;
 		tx_buffer = &tx_ring->tx_buffer_info[i];
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
-		if (tx_buffer == first)
-			break;
-		if (i == 0)
-			i = tx_ring->count;
-		i--;
 	}
 
+	if (dma_unmap_len(tx_buffer, len))
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+	dma_unmap_len_set(tx_buffer, len, 0);
+
+	dev_kfree_skb_any(tx_buffer->skb);
+	tx_buffer->skb = NULL;
+
 	tx_ring->next_to_use = i;
 }
 