Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 76
1 file changed, 50 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index d003f4d49ae6..122a0af1ea52 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -499,18 +499,41 @@ static bool ice_page_is_reserved(struct page *page)
 }
 
 /**
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
+ *
+ * Update the offset within page so that Rx buf will be ready to be reused.
+ * For systems with PAGE_SIZE < 8192 this function will flip the page offset
+ * so the second half of page assigned to Rx buffer will be used, otherwise
+ * the offset is moved by the @size bytes
+ */
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
+{
+#if (PAGE_SIZE < 8192)
+        /* flip page offset to other buffer */
+        rx_buf->page_offset ^= size;
+#else
+        /* move offset up to the next cache line */
+        rx_buf->page_offset += size;
+#endif
+}
+
+/**
  * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
  * @rx_buf: buffer containing the page
- * @truesize: the offset that needs to be applied to page
  *
  * If page is reusable, we have a green light for calling ice_reuse_rx_page,
  * which will assign the current buffer to the buffer that next_to_alloc is
  * pointing to; otherwise, the DMA mapping needs to be destroyed and
  * page freed
  */
-static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
-                                  unsigned int truesize)
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
 {
+#if (PAGE_SIZE >= 8192)
+        unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
         unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
         struct page *page = rx_buf->page;
 
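
A note on the new helper: on systems where PAGE_SIZE < 8192 each page backs two 2048-byte Rx buffers, and since page_offset is then always either 0 or the buffer size, XOR-ing it with @size flips between the two halves; on larger pages the offset simply advances. A minimal userspace sketch of that arithmetic (illustration only, not driver code; 2048 stands in for ICE_RXBUF_2048):

    #include <assert.h>

    int main(void)
    {
            unsigned int page_offset = 0;   /* first half of a 4K page */

            page_offset ^= 2048;            /* flip to the second half */
            assert(page_offset == 2048);
            page_offset ^= 2048;            /* and back to the first */
            assert(page_offset == 0);
            return 0;
    }
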
@@ -522,14 +545,8 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
         /* if we are only owner of page we can reuse it */
         if (unlikely((page_count(page) - pagecnt_bias) > 1))
                 return false;
-
-        /* flip page offset to other buffer */
-        rx_buf->page_offset ^= truesize;
 #else
-        /* move offset up to the next cache line */
-        rx_buf->page_offset += truesize;
-
-        if (rx_buf->page_offset > PAGE_SIZE - ICE_RXBUF_2048)
+        if (rx_buf->page_offset > last_offset)
                 return false;
 #endif /* PAGE_SIZE < 8192) */
 
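
The sole-owner test works off page_count(page) - pagecnt_bias: pagecnt_bias tracks the share of the page's refcount that the driver itself holds, so a difference above 1 means the network stack still has an skb frag pointing into the page and the half-page must not be recycled. A standalone sketch of that predicate, with plain integers standing in for page_count() and the bias (illustrative values, not the driver's actual bookkeeping):

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the (page_count - pagecnt_bias) > 1 test above: reusable
     * iff at most one reference beyond the driver's share is live. */
    static bool can_reuse(unsigned int page_count, unsigned int pagecnt_bias)
    {
            return (page_count - pagecnt_bias) <= 1;
    }

    int main(void)
    {
            printf("%d\n", can_reuse(2, 1)); /* 1: only the ring's ref left */
            printf("%d\n", can_reuse(3, 1)); /* 0: an skb frag still holds on */
            return 0;
    }
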
@@ -556,10 +573,9 @@ static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf,
  * less than the skb header size, otherwise it will just attach the page as
  * a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset
  */
-static bool
+static void
 ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
                 unsigned int size)
 {
@@ -582,14 +598,8 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
         if (size <= ICE_RX_HDR_SIZE) {
                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-                /* page is not reserved, we can reuse buffer as-is */
-                if (likely(!ice_page_is_reserved(page))) {
-                        rx_buf->pagecnt_bias++;
-                        return true;
-                }
-
-                /* this page cannot be reused so discard it */
-                return false;
+                rx_buf->pagecnt_bias++;
+                return;
         }
 
         /* we need the header to contain the greater of either ETH_HLEN or
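
On this copybreak path small frames are copied into the skb's linear area, so the page keeps no outstanding user and the driver simply takes its reference back by bumping pagecnt_bias; the ice_page_is_reserved() branch can drop out here because reusability is now judged in one place, ice_can_reuse_rx_page(), when ice_put_rx_buf() later runs. A condensed sketch of the copybreak idea (RX_HDR_SIZE and the buffers are stand-ins, not the driver's definitions):

    #include <string.h>

    #define RX_HDR_SIZE 256 /* stand-in for ICE_RX_HDR_SIZE */

    /* Copy a small frame out of the page; returning 1 means the page
     * reference goes back to the caller via its bias counter. */
    static int rx_copybreak(unsigned char *linear, const unsigned char *va,
                            unsigned int size, unsigned int *pagecnt_bias)
    {
            if (size > RX_HDR_SIZE)
                    return 0;       /* too big: attach the page as a frag */
            memcpy(linear, va, size);
            (*pagecnt_bias)++;      /* skb keeps no pointer into the page */
            return 1;
    }

    int main(void)
    {
            unsigned char page[2048] = { 1, 2, 3 }, skb_linear[RX_HDR_SIZE];
            unsigned int bias = 0;

            return rx_copybreak(skb_linear, page, 3, &bias) ? 0 : 1;
    }
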
@@ -610,8 +620,7 @@ ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
 add_tail_frag:
         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
                         (unsigned long)va & ~PAGE_MASK, size, truesize);
-
-        return ice_can_reuse_rx_page(rx_buf, truesize);
+        ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
 }
 
 /**
@@ -697,6 +706,7 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
                                        GFP_ATOMIC | __GFP_NOWARN);
                 if (unlikely(!skb)) {
                         rx_ring->rx_stats.alloc_buf_failed++;
+                        rx_buf->pagecnt_bias++;
                         return NULL;
                 }
 
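
The new pagecnt_bias++ on the allocation-failure path keeps the reference accounting symmetric: the buffer stays on the ring and will be seen again, so the likely intent is to restore the driver's share of the page refcount here rather than leak one unit per failed skb allocation. Sketched as a pairing rule (hypothetical helper names, only to make the symmetry visible):

    /* Every reference handed toward an skb (bias--) must be returned
     * (bias++) on any path that abandons that skb. */
    struct rx_buf_acct {
            unsigned int pagecnt_bias;  /* driver-owned share of page refs */
    };

    void ref_toward_skb(struct rx_buf_acct *a)   { a->pagecnt_bias--; }
    void skb_alloc_failed(struct rx_buf_acct *a) { a->pagecnt_bias++; }
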
@@ -706,8 +716,23 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
         }
 
         /* pull page into skb */
-        if (ice_add_rx_frag(rx_buf, skb, size)) {
+        ice_add_rx_frag(rx_buf, skb, size);
+
+        return skb;
+}
+
+/**
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ *
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
+ */
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+{
         /* hand second half of page back to the ring */
+        if (ice_can_reuse_rx_page(rx_buf)) {
                 ice_reuse_rx_page(rx_ring, rx_buf);
                 rx_ring->rx_stats.page_reuse_count++;
         } else {
@@ -719,8 +744,6 @@ ice_fetch_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
 
         /* clear contents of buffer_info */
         rx_buf->page = NULL;
-
-        return skb;
 }
 
 /**
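
Read together, the two hunks above leave ice_put_rx_buf() looking roughly like this (assembled only from lines shown in this diff; the unmap-and-free branch sits in context the diff does not display, so it is marked rather than reproduced):

    static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
    {
            /* hand second half of page back to the ring */
            if (ice_can_reuse_rx_page(rx_buf)) {
                    ice_reuse_rx_page(rx_ring, rx_buf);
                    rx_ring->rx_stats.page_reuse_count++;
            } else {
                    /* unmap the page and drop the driver's references
                     * (not shown in this diff) */
            }

            /* clear contents of buffer_info */
            rx_buf->page = NULL;
    }
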
@@ -1007,6 +1030,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
                 if (!skb)
                         break;
 
+                ice_put_rx_buf(rx_ring, rx_buf);
                 cleaned_count++;
 
                 /* skip if it is NOP desc */
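
With this call in place the hot loop now separates the two concerns: ice_fetch_rx_buf() only builds or extends the skb, and ice_put_rx_buf() recycles or frees the backing page afterwards. A condensed view of the resulting per-descriptor flow (the real loop in ice_clean_rx_irq() carries more steps, and the fetch arguments are abbreviated here):

    /* inside the ice_clean_rx_irq() descriptor loop */
    skb = ice_fetch_rx_buf(rx_ring, rx_buf, /* size, ... */);
    if (!skb)
            break;  /* skb alloc failed; pagecnt_bias was already restored */

    ice_put_rx_buf(rx_ring, rx_buf);        /* recycle or free the page */
    cleaned_count++;
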