author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2019-02-13 13:51:02 -0500
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>         2019-03-25 11:00:14 -0400
commit    bbb97808a0eff71fd841d297dba8cd3ebc4d700d (patch)
tree      25e05a1642fab0c84f52703fda4c3652c47e3cc9 /drivers/net/ethernet/intel/ice/ice_txrx.c
parent    6c869cb7a8f02b0c5f5494bb37c29b6686711ec8 (diff)
ice: Pull out page reuse checks onto separate function
Introduce ice_can_reuse_rx_page which will verify whether the page can
be reused and return the boolean result to caller.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
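[Annotation, not part of the commit] The reuse check being factored out works as
the diff below shows: on systems with 4 KiB pages the driver splits each Rx page
into two 2048-byte halves and flips between them by XOR-ing the offset with
truesize, and a page may only be reused if it is local (not reserved) and the
driver is its sole owner (page_count == 1). The following is a minimal
hypothetical userspace model of that half-page flip; struct rx_buf_model,
MODEL_RXBUF_2048, and can_reuse_half_page are illustrative stand-ins, not the
driver's types:

    #include <stdbool.h>
    #include <stdio.h>

    #define MODEL_RXBUF_2048 2048u

    /* Hypothetical stand-in for struct ice_rx_buf: only the fields the
     * reuse check needs, not the driver's real type.
     */
    struct rx_buf_model {
            unsigned int page_offset;
            int refcount;           /* models page_count(page) */
    };

    /* Models the PAGE_SIZE < 8192 branch of the new helper: a 4 KiB page
     * holds two 2048-byte buffers, and XOR-ing the offset with truesize
     * flips between the two halves.
     */
    static bool can_reuse_half_page(struct rx_buf_model *buf,
                                    unsigned int truesize)
    {
            /* reuse only if the driver is the sole owner of the page */
            if (buf->refcount != 1)
                    return false;

            buf->page_offset ^= truesize;  /* 0 <-> 2048 */
            buf->refcount++;               /* models get_page() */
            return true;
    }

    int main(void)
    {
            struct rx_buf_model buf = { .page_offset = 0, .refcount = 1 };

            if (can_reuse_half_page(&buf, MODEL_RXBUF_2048))
                    printf("reused; next half at offset %u\n", buf.page_offset);
            return 0;
    }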
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--    drivers/net/ethernet/intel/ice/ice_txrx.c    80
1 file changed, 45 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 8c0a8b63670b..50321d39e463 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -497,6 +497,48 @@ static bool ice_page_is_reserved(struct page *page)
 }
 
 /**
+ * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buf: buffer containing the page
+ * @truesize: the offset that needs to be applied to page
+ *
+ * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and
+ * page freed
+ */
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
+                                  unsigned int truesize)
+{
+        struct page *page = rx_buf->page;
+
+        /* avoid re-using remote pages */
+        if (unlikely(ice_page_is_reserved(page)))
+                return false;
+
+#if (PAGE_SIZE < 8192)
+        /* if we are only owner of page we can reuse it */
+        if (unlikely(page_count(page) != 1))
+                return false;
+
+        /* flip page offset to other buffer */
+        rx_buf->page_offset ^= truesize;
+#else
+        /* move offset up to the next cache line */
+        rx_buf->page_offset += truesize;
+
+        if (rx_buf->page_offset > PAGE_SIZE - ICE_RXBUF_2048)
+                return false;
+#endif /* PAGE_SIZE < 8192) */
+
+        /* Even if we own the page, we are not allowed to use atomic_set()
+         * This would break get_page_unless_zero() users.
+         */
+        get_page(page);
+
+        return true;
+}
+
+/**
  * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_buf: buffer containing page to add
  * @skb: sk_buf to place the data into
@@ -517,17 +559,9 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 #if (PAGE_SIZE < 8192)
         unsigned int truesize = ICE_RXBUF_2048;
 #else
-        unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
-        unsigned int truesize;
+        unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
 #endif /* PAGE_SIZE < 8192) */
-
-        struct page *page;
-
-        page = rx_buf->page;
-
-#if (PAGE_SIZE >= 8192)
-        truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif /* PAGE_SIZE >= 8192) */
 
+        struct page *page = rx_buf->page;
 
         /* will the data fit in the skb we allocated? if so, just
          * copy it as it is pretty small anyway
@@ -549,31 +583,7 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                         rx_buf->page_offset, size, truesize);
 
-        /* avoid re-using remote pages */
-        if (unlikely(ice_page_is_reserved(page)))
-                return false;
-
-#if (PAGE_SIZE < 8192)
-        /* if we are only owner of page we can reuse it */
-        if (unlikely(page_count(page) != 1))
-                return false;
-
-        /* flip page offset to other buffer */
-        rx_buf->page_offset ^= truesize;
-#else
-        /* move offset up to the next cache line */
-        rx_buf->page_offset += truesize;
-
-        if (rx_buf->page_offset > last_offset)
-                return false;
-#endif /* PAGE_SIZE < 8192) */
-
-        /* Even if we own the page, we are not allowed to use atomic_set()
-         * This would break get_page_unless_zero() users.
-         */
-        get_page(rx_buf->page);
-
-        return true;
+        return ice_can_reuse_rx_page(rx_buf, truesize);
 }
 
 /**
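[Annotation, not part of the commit] On architectures where PAGE_SIZE is 8 KiB
or larger, the helper takes the other branch of the #if: the offset advances by
a cache-line-aligned truesize, and the page is retired once a full 2048-byte
buffer no longer fits at the new offset. A hypothetical standalone model of
that arithmetic follows; MODEL_ALIGN mirrors the kernel's ALIGN macro and the
sizes are illustrative, not taken from the driver:

    #include <stdbool.h>
    #include <stdio.h>

    /* Local stand-ins mirroring the kernel macros, for illustration only */
    #define MODEL_ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
    #define MODEL_L1_CACHE     64u
    #define MODEL_PAGE_SIZE    8192u
    #define MODEL_RXBUF_2048   2048u

    /* Models the PAGE_SIZE >= 8192 branch: walk the offset forward in
     * cache-line-aligned steps; reuse is allowed only while a 2048-byte
     * buffer still fits between the new offset and the end of the page.
     */
    static bool advance_offset(unsigned int *page_offset, unsigned int size)
    {
            unsigned int truesize = MODEL_ALIGN(size, MODEL_L1_CACHE);

            *page_offset += truesize;
            return *page_offset <= MODEL_PAGE_SIZE - MODEL_RXBUF_2048;
    }

    int main(void)
    {
            unsigned int offset = 0;

            /* e.g. a 1500-byte frame consumes ALIGN(1500, 64) = 1536 bytes,
             * so an 8 KiB page yields buffers at 1536, 3072, 4608, 6144
             */
            while (advance_offset(&offset, 1500))
                    printf("next buffer at offset %u\n", offset);
            return 0;
    }

The cache-line alignment in the real code (ALIGN(size, L1_CACHE_BYTES)) keeps
successive buffers within one page from sharing cache lines while wasting as
little of the page as possible.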