author     Maciej Fijalkowski <maciej.fijalkowski@intel.com>   2019-02-13 13:51:04 -0500
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>          2019-03-25 12:33:13 -0400
commit     03c66a1376616015b04b6783feadfcf02ba37c3f
tree       0a7453ee71aa40c532b8756137387483db00910c   /drivers/net/ethernet/intel/ice/ice_txrx.c
parent     1857ca42a734d41261f4c30e5f625fa7e2b70b0d
ice: Introduce bulk update for page count
{get,put}_page() are the atomic operations we use for page refcount handling. The current logic increments the refcount when an skb carrying data from the first half of a page is passed up to the network stack, and recycles the second half of the page. This protects us from losing the page, since the network stack may drop the page's refcount through the skb.

Performance can be modestly improved by doing bulk refcount updates instead of adjusting the count one step at a time. During buffer initialization, maximize the page's refcount and do not allow the refcount to fall below two.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_txrx.c   26
1 file changed, 19 insertions(+), 7 deletions(-)
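
Before the diff itself, here is a minimal user-space sketch of the bias bookkeeping this patch introduces. Every name below (fake_page, fake_rx_buf and the fake_* helpers) is a hypothetical stand-in, and the kernel's struct page refcount is modeled as a plain counter so the arithmetic can be followed in isolation; this is not the driver's code.

/* Hypothetical user-space model of the pagecnt_bias scheme; not ice code. */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

struct fake_page {
	unsigned int refcount;		/* stands in for page_count() */
};

struct fake_rx_buf {
	struct fake_page *page;
	unsigned int pagecnt_bias;	/* references the driver has prepaid */
};

/* Allocation path: take USHRT_MAX references up front instead of one. */
static void fake_alloc_mapped_page(struct fake_rx_buf *bi, struct fake_page *page)
{
	page->refcount = 1;			/* a fresh page starts at one */
	page->refcount += USHRT_MAX - 1;	/* models page_ref_add() */
	bi->page = page;
	bi->pagecnt_bias = USHRT_MAX;
}

/* Handing a buffer to the stack spends one prepaid reference. */
static void fake_get_rx_buf(struct fake_rx_buf *rx_buf)
{
	rx_buf->pagecnt_bias--;
}

/* Reuse check: is anyone besides the driver (and the frame in flight)
 * still holding the page?  Restock the bias in bulk once it runs low.
 */
static int fake_can_reuse_rx_page(struct fake_rx_buf *rx_buf)
{
	if (rx_buf->page->refcount - rx_buf->pagecnt_bias > 1)
		return 0;
	if (rx_buf->pagecnt_bias == 1) {
		rx_buf->page->refcount += USHRT_MAX - 1;
		rx_buf->pagecnt_bias = USHRT_MAX;
	}
	return 1;
}

int main(void)
{
	struct fake_page page;
	struct fake_rx_buf buf;

	fake_alloc_mapped_page(&buf, &page);
	fake_get_rx_buf(&buf);			/* one frame handed to the stack */
	assert(fake_can_reuse_rx_page(&buf));	/* refcount - bias == 1 */
	printf("refcount=%u bias=%u\n", page.refcount, buf.pagecnt_bias);
	return 0;
}

The net effect is that the atomic refcount bump happens roughly once per USHRT_MAX reuses of a page rather than once per recycled buffer.
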
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index becee476002d..d003f4d49ae6 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -283,7 +283,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
 			continue;
 
 		dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
-		__free_pages(rx_buf->page, 0);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 
 		rx_buf->page = NULL;
 		rx_buf->page_offset = 0;
@@ -423,6 +423,8 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	page_ref_add(page, USHRT_MAX - 1);
+	bi->pagecnt_bias = USHRT_MAX;
 
 	return true;
 }
@@ -509,6 +511,7 @@ static bool ice_page_is_reserved(struct page *page)
 static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
 	struct page *page = rx_buf->page;
 
 	/* avoid re-using remote pages */
@@ -517,7 +520,7 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely((page_count(page) - pagecnt_bias) > 1))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -530,10 +533,14 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
 		return false;
 #endif /* PAGE_SIZE < 8192) */
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	get_page(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX - 1);
+		rx_buf->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
@@ -576,11 +583,12 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
 		/* page is not reserved, we can reuse buffer as-is */
-		if (likely(!ice_page_is_reserved(page)))
+		if (likely(!ice_page_is_reserved(page))) {
+			rx_buf->pagecnt_bias++;
 			return true;
+		}
 
 		/* this page cannot be reused so discard it */
-		__free_pages(page, 0);
 		return false;
 	}
 
@@ -650,6 +658,9 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size)
 				      rx_buf->page_offset, size,
 				      DMA_FROM_DEVICE);
 
+	/* We have pulled a buffer for use, so decrement pagecnt_bias */
+	rx_buf->pagecnt_bias--;
+
 	return rx_buf;
 }
 
@@ -703,6 +714,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 		/* we are not reusing the buffer so unmap it */
 		dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
 			       DMA_FROM_DEVICE);
+		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
 	}
 
 	/* clear contents of buffer_info */
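
The teardown paths above (ice_clean_rx_ring() and the non-reuse branch of ice_fetch_rx_buf()) hand the remaining bias to __page_frag_cache_drain(), which drops that many references at once and frees the page only if nothing else still holds it. A small self-contained sketch of that arithmetic, again with hypothetical names and a plain counter in place of the real page refcount:

/* Hypothetical model of the drain-time arithmetic; not kernel code. */
#include <assert.h>
#include <limits.h>
#include <stdbool.h>

static unsigned int refcount;	/* stands in for the page's refcount */
static bool freed;

/* Models __page_frag_cache_drain(page, count): subtract all remaining
 * driver-held references in one step; free only when the count hits zero.
 */
static void fake_drain(unsigned int count)
{
	refcount -= count;
	if (refcount == 0)
		freed = true;
}

int main(void)
{
	refcount = USHRT_MAX;		/* state after buffer initialization */

	/* Non-reuse path: one reference was already handed to the stack
	 * (pagecnt_bias is USHRT_MAX - 1), so draining the bias leaves
	 * exactly that reference behind and the page stays allocated.
	 */
	fake_drain(USHRT_MAX - 1);
	assert(refcount == 1 && !freed);

	/* The page is finally freed when the stack releases the skb frag. */
	fake_drain(1);
	assert(freed);
	return 0;
}
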