author		Emil Tantilov <emil.s.tantilov@intel.com>	2018-01-30 19:51:49 -0500
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2018-02-26 12:34:50 -0500
commit		925f5690ff5d5a1d9ec027e938d37b539e3fd186 (patch)
tree		00ad0f11b4d64c22170a7fb12006636f1073717f
parent		21c046e448616529a181a35445d9f6d60352e01f (diff)
ixgbevf: break out Rx buffer page management
Based on commit e014272672b9 ("igb: Break out Rx buffer page management")

Consolidate Rx code paths to reduce duplication when we expand them in
the future.

Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
-rw-r--r--	drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c	227
1 file changed, 114 insertions, 113 deletions
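For orientation, the hunks below reduce the per-packet loop in ixgbevf_clean_rx_irq() to roughly the shape sketched here (a condensed sketch assembled from this patch; budget accounting, EOP handling and most statistics updates are omitted):

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbevf_rx_buffer *rx_buffer;
		unsigned int size;

		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;
		rmb();	/* read the rest of the descriptor only after seeing its length */

		/* sync the buffer for CPU use and take a pagecnt_bias reference */
		rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);

		/* append to an in-progress skb, or build a new one */
		if (skb)
			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else
			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
						    rx_desc, size);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		/* recycle the half page back to the ring, or unmap and free it */
		ixgbevf_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;
	}

The DMA sync and pagecnt_bias accounting now live in ixgbevf_get_rx_buffer(), and the reuse-or-unmap decision in ixgbevf_put_rx_buffer(), so the skb-construction path (ixgbevf_construct_skb()) and the frag-append path (ixgbevf_add_rx_frag()) share the same page management.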
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 6219ab2e3f52..faeb426c2f0f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -130,6 +130,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
 static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
 static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+				  struct ixgbevf_rx_buffer *old_buff);
 
 static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
 {
@@ -527,6 +530,49 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
 	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 }
 
+static
+struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
+						const unsigned int size)
+{
+	struct ixgbevf_rx_buffer *rx_buffer;
+
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	prefetchw(rx_buffer->page);
+
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
+	rx_buffer->pagecnt_bias--;
+
+	return rx_buffer;
+}
+
+static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
+				  struct ixgbevf_rx_buffer *rx_buffer)
+{
+	if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
+		/* hand second half of page back to the ring */
+		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+	} else {
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     ixgbevf_rx_pg_size(rx_ring),
+				     DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+		__page_frag_cache_drain(rx_buffer->page,
+					rx_buffer->pagecnt_bias);
+	}
+
+	/* clear contents of rx_buffer */
+	rx_buffer->page = NULL;
+}
+
 /**
  * ixgbevf_is_non_eop - process handling of non-EOP buffers
  * @rx_ring: Rx ring being processed
530/** 576/**
531 * ixgbevf_is_non_eop - process handling of non-EOP buffers 577 * ixgbevf_is_non_eop - process handling of non-EOP buffers
532 * @rx_ring: Rx ring being processed 578 * @rx_ring: Rx ring being processed
@@ -740,11 +786,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
 	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
 }
 
-static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
-				      struct page *page,
-				      const unsigned int truesize)
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
 {
-	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+	struct page *page = rx_buffer->page;
 
 	/* avoid re-using remote pages */
 	if (unlikely(ixgbevf_page_is_reserved(page)))
@@ -752,16 +797,9 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_ref_count(page) != pagecnt_bias))
+	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
 		return false;
-
-	/* flip page offset to other buffer */
-	rx_buffer->page_offset ^= truesize;
-
 #else
-	/* move offset up to the next cache line */
-	rx_buffer->page_offset += truesize;
-
 #define IXGBEVF_LAST_OFFSET \
 	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
 
@@ -774,7 +812,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
 	 * the pagecnt_bias and page count so that we fully restock the
 	 * number of references the driver holds.
 	 */
-	if (unlikely(pagecnt_bias == 1)) {
+	if (unlikely(!pagecnt_bias)) {
 		page_ref_add(page, USHRT_MAX);
 		rx_buffer->pagecnt_bias = USHRT_MAX;
 	}
@@ -786,25 +824,16 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
  * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
  * @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
  *
  * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
  **/
-static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
-				struct ixgbevf_rx_buffer *rx_buffer,
-				u16 size,
-				union ixgbe_adv_rx_desc *rx_desc,
-				struct sk_buff *skb)
+static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+				struct ixgbevf_rx_buffer *rx_buffer,
+				struct sk_buff *skb,
+				unsigned int size)
 {
-	struct page *page = rx_buffer->page;
-	void *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
 	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
 #else
@@ -812,102 +841,64 @@ static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
 				SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
 				SKB_DATA_ALIGN(size);
 #endif
-	unsigned int pull_len;
-
-	if (unlikely(skb_is_nonlinear(skb)))
-		goto add_tail_frag;
-
-	if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
-		memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-		/* page is not reserved, we can reuse buffer as is */
-		if (likely(!ixgbevf_page_is_reserved(page)))
-			return true;
-
-		/* this page cannot be reused so discard it */
-		return false;
-	}
-
-	/* we need the header to contain the greater of either ETH_HLEN or
-	 * 60 bytes if the skb->len is less than 60 for skb_pad.
-	 */
-	pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
-	/* align pull length to size of long to optimize memcpy performance */
-	memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
-
-	/* update all of the pointers */
-	va += pull_len;
-	size -= pull_len;
-
-add_tail_frag:
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-			va - page_address(page), size, truesize);
-
-	return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+			rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+	rx_buffer->page_offset ^= truesize;
+#else
+	rx_buffer->page_offset += truesize;
+#endif
 }
 
-static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
-					       union ixgbe_adv_rx_desc *rx_desc,
-					       struct sk_buff *skb)
+static
+struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
+				      struct ixgbevf_rx_buffer *rx_buffer,
+				      union ixgbe_adv_rx_desc *rx_desc,
+				      unsigned int size)
 {
-	struct ixgbevf_rx_buffer *rx_buffer;
-	struct page *page;
-	u16 size = le16_to_cpu(rx_desc->wb.upper.length);
-
-	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-	page = rx_buffer->page;
-	prefetchw(page);
-
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
-	if (likely(!skb)) {
-		void *va = page_address(page) + rx_buffer->page_offset;
+	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+	unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+#else
+	unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+	unsigned int headlen;
+	struct sk_buff *skb;
 
-		/* prefetch first cache line of first page */
-		prefetch(va);
+	/* prefetch first cache line of first page */
+	prefetch(va);
 #if L1_CACHE_BYTES < 128
-		prefetch(va + L1_CACHE_BYTES);
+	prefetch(va + L1_CACHE_BYTES);
 #endif
 
-		/* allocate a skb to store the frags */
-		skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-						IXGBEVF_RX_HDR_SIZE);
-		if (unlikely(!skb)) {
-			rx_ring->rx_stats.alloc_rx_buff_failed++;
-			return NULL;
-		}
+	/* allocate a skb to store the frags */
+	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
+	if (unlikely(!skb))
+		return NULL;
 
-		/* we will be copying header into skb->data in
-		 * pskb_may_pull so it is in our interest to prefetch
-		 * it now to avoid a possible cache miss
-		 */
-		prefetchw(skb->data);
-	}
+	/* Determine available headroom for copy */
+	headlen = size;
+	if (headlen > IXGBEVF_RX_HDR_SIZE)
+		headlen = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
 
-	/* pull page into skb */
-	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
-		/* hand second half of page back to the ring */
-		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+	/* align pull length to size of long to optimize memcpy performance */
+	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+	/* update all of the pointers */
+	size -= headlen;
+	if (size) {
+		skb_add_rx_frag(skb, 0, rx_buffer->page,
+				(va + headlen) - page_address(rx_buffer->page),
+				size, truesize);
+#if (PAGE_SIZE < 8192)
+		rx_buffer->page_offset ^= truesize;
+#else
+		rx_buffer->page_offset += truesize;
+#endif
 	} else {
-		/* We are not reusing the buffer so unmap it and free
-		 * any references we are holding to it
-		 */
-		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
-				     ixgbevf_rx_pg_size(rx_ring),
-				     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
-		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+		rx_buffer->pagecnt_bias++;
 	}
 
-	/* clear contents of buffer_info */
-	rx_buffer->dma = 0;
-	rx_buffer->page = NULL;
-
 	return skb;
 }
 
@@ -929,6 +920,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 
 	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
+		struct ixgbevf_rx_buffer *rx_buffer;
+		unsigned int size;
 
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
@@ -937,8 +930,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		}
 
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
-		if (!rx_desc->wb.upper.length)
+		size = le16_to_cpu(rx_desc->wb.upper.length);
+		if (!size)
 			break;
 
 		/* This memory barrier is needed to keep us from reading
@@ -947,15 +940,23 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 		 */
 		rmb();
 
+		rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
+
 		/* retrieve a buffer from the ring */
-		skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+		if (skb)
+			ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+		else
+			skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
+						    rx_desc, size);
 
 		/* exit if we failed to retrieve a buffer */
 		if (!skb) {
 			rx_ring->rx_stats.alloc_rx_buff_failed++;
+			rx_buffer->pagecnt_bias++;
 			break;
 		}
 
+		ixgbevf_put_rx_buffer(rx_ring, rx_buffer);
 		cleaned_count++;
 
 		/* fetch next buffer in frame if non-eop */