Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')

 drivers/net/ethernet/intel/igb/igb_main.c | 77
 1 file changed, 50 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index cae24a8ccf47..a761001308dc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3947,11 +3947,23 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 		if (!buffer_info->page)
 			continue;
 
-		dma_unmap_page(rx_ring->dev,
-			       buffer_info->dma,
-			       PAGE_SIZE,
-			       DMA_FROM_DEVICE);
-		__free_page(buffer_info->page);
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      buffer_info->dma,
+					      buffer_info->page_offset,
+					      IGB_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     buffer_info->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(buffer_info->page, 0,
+				  buffer_info->pagecnt_bias);
 
 		buffer_info->page = NULL;
 	}
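
Note: DMA_ATTR_SKIP_CPU_SYNC shifts responsibility for CPU syncs from the
DMA core to the driver, which is why the teardown above now pairs an
explicit dma_sync_single_range_for_cpu() with the unmap. A minimal sketch
of that pairing, assuming generic dev/dma/offset/len bookkeeping rather
than igb's ring structures:

	#include <linux/dma-mapping.h>

	static void rx_buffer_teardown(struct device *dev, dma_addr_t dma,
				       unsigned int offset, unsigned int len)
	{
		/* CPU reclaims the slice the device may have written */
		dma_sync_single_range_for_cpu(dev, dma, offset, len,
					      DMA_FROM_DEVICE);
		/* unmap skips the sync the call above already did */
		dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
	}

Only the IGB_RX_BUFSZ slice needs the sync, since that is all the device
could have written, while the unmap still covers the full PAGE_SIZE
mapping.
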
@@ -6812,12 +6824,6 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 
 	/* transfer page from old buffer to new buffer */
 	*new_buff = *old_buff;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
-					 old_buff->page_offset,
-					 IGB_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool igb_page_is_reserved(struct page *page)
@@ -6829,13 +6835,15 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
+	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+
 	/* avoid re-using remote pages */
 	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
 	/* if we are only owner of page we can reuse it */
-	if (unlikely(page_count(page) != 1))
+	if (unlikely(page_ref_count(page) != pagecnt_bias))
 		return false;
 
 	/* flip page offset to other buffer */
@@ -6848,10 +6856,14 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 		return false;
 #endif
 
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
+	/* If we have drained the page fragment pool we need to update
+	 * the pagecnt_bias and page count so that we fully restock the
+	 * number of references the driver holds.
 	 */
-	page_ref_inc(page);
+	if (unlikely(pagecnt_bias == 1)) {
+		page_ref_add(page, USHRT_MAX);
+		rx_buffer->pagecnt_bias = USHRT_MAX;
+	}
 
 	return true;
 }
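
Note: the two hunks above replace per-frame page_ref_inc() calls with a
batched scheme. The driver "pre-pays" itself a block of page references
and tracks them in pagecnt_bias; a page may be reused only while
page_ref_count(page) == pagecnt_bias, i.e. every outstanding reference is
one the driver itself holds. For example, a fresh page starts with count 1
and bias 1; after a restock the count is 65536 and the bias 65535, the
difference being the one reference handed to the stack with the in-flight
skb. A condensed restatement of the logic (the same calls as the hunks,
reordered for readability):

	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;

	/* reuse only while the stack holds no reference of its own */
	if (page_ref_count(page) != pagecnt_bias)
		return false;

	/* bias exhausted: restock both counters in one bulk add */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}
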
@@ -6903,7 +6915,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 		return true;
 
 	/* this page cannot be reused so discard it */
-	__free_page(page);
 	return false;
 }
 
@@ -6938,6 +6949,13 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -6962,21 +6980,18 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
 	if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		igb_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
-		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		/* We are not reusing the buffer so unmap it and free
+		 * any references we are holding to it
+		 */
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     DMA_ATTR_SKIP_CPU_SYNC);
+		__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of rx_buffer */
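
Note: __page_frag_drain(page, order, count) is the bulk counterpart of
put_page(): it releases 'count' references at once and frees the page of
the given order when the refcount reaches zero. That is what lets the drop
path above return the driver's whole remaining pagecnt_bias in one call,
where the old code could __free_page() its single reference. Semantically
(though not atomically, and not the actual mm implementation) it behaves
like:

	/* illustrative equivalent for an order-0 page */
	while (count--)
		put_page(page);
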
@@ -7234,7 +7249,8 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
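
Note: skipping the CPU sync at map time avoids making the whole PAGE_SIZE
mapping coherent for every fresh page; the driver instead syncs just the
IGB_RX_BUFSZ slice it posts to hardware (see the igb_alloc_rx_buffers hunk
below). The failure handling keeps the usual shape; a hedged sketch of the
elided check, with the driver's stats bookkeeping omitted:

	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);	/* no point keeping an unmapped page */
		return false;
	}
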
@@ -7249,6 +7265,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
 	bi->dma = dma;
 	bi->page = page;
 	bi->page_offset = 0;
+	bi->pagecnt_bias = 1;
 
 	return true;
 }
@@ -7275,6 +7292,12 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 		if (!igb_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IGB_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
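
Taken together, the hunks leave the driver with a single per-buffer DMA
ownership protocol: map once per page without syncing, sync a buffer-sized
slice for the device before posting it, sync it back for the CPU before
reading, and unmap with the sync skipped because the previous step already
performed it. A self-contained sketch of that lifecycle under illustrative
names (not igb code):

	#include <linux/dma-mapping.h>

	static int rx_page_lifecycle(struct device *dev, struct page *page,
				     unsigned int offset, unsigned int len)
	{
		dma_addr_t dma;

		/* 1) map the whole page once, skipping the CPU sync */
		dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
					 DMA_FROM_DEVICE,
					 DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* 2) hand one buffer-sized slice to the device */
		dma_sync_single_range_for_device(dev, dma, offset, len,
						 DMA_FROM_DEVICE);

		/* ... device DMAs a received frame into the slice ... */

		/* 3) reclaim the slice before the CPU reads it */
		dma_sync_single_range_for_cpu(dev, dma, offset, len,
					      DMA_FROM_DEVICE);

		/* 4) unmap without repeating the sync from step 3 */
		dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		return 0;
	}
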