author      Emil Tantilov <emil.s.tantilov@intel.com>      2017-12-11 13:37:04 -0500
committer   Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2018-01-26 10:46:50 -0500
commit      16b359498b12e83b795be160f105d7c3dc4a8bb5 (patch)
tree        18bae71f71d56385452547f95a512089cbd22d56 /drivers/net/ethernet/intel/ixgbevf
parent      24bff091d7691c162580bcdb57bb00015471d34e (diff)
ixgbevf: add support for DMA_ATTR_SKIP_CPU_SYNC/WEAK_ORDERING
Based on commit 5be5955425c2 ("igb: update driver to make use of
DMA_ATTR_SKIP_CPU_SYNC") and commit 7bd175928280 ("igb: Add support
for DMA_ATTR_WEAK_ORDERING").
Convert the calls to dma_map/unmap_page() to the attributes version
and add DMA_ATTR_SKIP_CPU_SYNC/WEAK_ORDERING, which should help
improve performance on some platforms.
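
For reference, a minimal before/after sketch of the mapping change, distilled from the hunks below (error handling elided; `rx_ring` and `page` are the driver's own locals):

```c
/* Before: dma_map_page() implies a CPU sync at map and unmap time. */
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

/* After: skip the implicit sync and allow weakly ordered DMA; the
 * driver now syncs only the half-page region it actually hands to
 * the hardware or reads on the CPU.
 */
dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
			 DMA_FROM_DEVICE,
			 DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
```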
Move the sync_for_cpu call before the prefetch to avoid invalidating
the first 128 bytes of the packet on architectures where that call
may invalidate the cache.
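
To illustrate the ordering concern, a hypothetical simplification of the fetch path (function and variable names here are illustrative, not the driver's exact locals):

```c
/* Sketch of the receive path after the patch: sync before prefetch. */
static void *rx_buffer_for_cpu(struct device *dev, dma_addr_t dma,
			       struct page *page, unsigned int offset,
			       unsigned int size)
{
	void *page_addr = page_address(page) + offset;

	/* Sync first: on cache-incoherent architectures this invalidates
	 * the buffer's cache lines.  If the prefetch ran first, the sync
	 * would discard the prefetched lines, including the first
	 * 128 bytes of the packet that get copied into the skb header.
	 */
	dma_sync_single_range_for_cpu(dev, dma, offset, size,
				      DMA_FROM_DEVICE);
	prefetch(page_addr);

	return page_addr;
}
```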
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbevf')
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  3 +
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 57 ++++++++++-------
 2 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 581f44bbd7b3..b1da9f41c1dc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -260,6 +260,9 @@ static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
 #define MIN_MSIX_Q_VECTORS	1
 #define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
 
+#define IXGBEVF_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 /* board specific private data structure */
 struct ixgbevf_adapter {
 	/* this field must be first, see ixgbevf_process_skb_fields */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 725fe2dca868..fbd493efd14e 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -595,8 +595,8 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
 	}
 
 	/* map page for use */
-	dma = dma_map_page(rx_ring->dev, page, 0,
-			   PAGE_SIZE, DMA_FROM_DEVICE);
+	dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+				 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
 
 	/* if mapping failed free memory back to system since
 	 * there isn't much point in holding memory we can't use
@@ -639,6 +639,12 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
 		if (!ixgbevf_alloc_mapped_page(rx_ring, bi))
 			break;
 
+		/* sync the buffer for use by the device */
+		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+						 bi->page_offset,
+						 IXGBEVF_RX_BUFSZ,
+						 DMA_FROM_DEVICE);
+
 		/* Refresh the desc even if pkt_addr didn't change
 		 * because each write-back erases this info.
 		 */
@@ -741,12 +747,6 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
 	new_buff->page = old_buff->page;
 	new_buff->dma = old_buff->dma;
 	new_buff->page_offset = old_buff->page_offset;
-
-	/* sync the buffer for use by the device */
-	dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
-					 new_buff->page_offset,
-					 IXGBEVF_RX_BUFSZ,
-					 DMA_FROM_DEVICE);
 }
 
 static inline bool ixgbevf_page_is_reserved(struct page *page)
@@ -862,6 +862,13 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 	page = rx_buffer->page;
 	prefetchw(page);
 
+	/* we are reusing so sync this buffer for CPU use */
+	dma_sync_single_range_for_cpu(rx_ring->dev,
+				      rx_buffer->dma,
+				      rx_buffer->page_offset,
+				      size,
+				      DMA_FROM_DEVICE);
+
 	if (likely(!skb)) {
 		void *page_addr = page_address(page) +
 				  rx_buffer->page_offset;
@@ -887,21 +894,15 @@ static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
 		prefetchw(skb->data);
 	}
 
-	/* we are reusing so sync this buffer for CPU use */
-	dma_sync_single_range_for_cpu(rx_ring->dev,
-				      rx_buffer->dma,
-				      rx_buffer->page_offset,
-				      size,
-				      DMA_FROM_DEVICE);
-
 	/* pull page into skb */
 	if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
 		/* hand second half of page back to the ring */
 		ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
 	} else {
 		/* we are not reusing the buffer so unmap it */
-		dma_unmap_page(rx_ring->dev, rx_buffer->dma,
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+				     PAGE_SIZE, DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
 	}
 
 	/* clear contents of buffer_info */
@@ -2116,7 +2117,6 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
  **/
 static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 {
-	struct device *dev = rx_ring->dev;
 	unsigned long size;
 	unsigned int i;
 
@@ -2135,10 +2135,23 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
 		struct ixgbevf_rx_buffer *rx_buffer;
 
 		rx_buffer = &rx_ring->rx_buffer_info[i];
-		if (rx_buffer->dma)
-			dma_unmap_page(dev, rx_buffer->dma,
-				       PAGE_SIZE, DMA_FROM_DEVICE);
-		rx_buffer->dma = 0;
+
+		/* Invalidate cache lines that may have been written to by
+		 * device so that we avoid corrupting memory.
+		 */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_buffer->dma,
+					      rx_buffer->page_offset,
+					      IXGBEVF_RX_BUFSZ,
+					      DMA_FROM_DEVICE);
+
+		/* free resources associated with mapping */
+		dma_unmap_page_attrs(rx_ring->dev,
+				     rx_buffer->dma,
+				     PAGE_SIZE,
+				     DMA_FROM_DEVICE,
+				     IXGBEVF_RX_DMA_ATTR);
+
 		if (rx_buffer->page)
 			__free_page(rx_buffer->page);
 		rx_buffer->page = NULL;
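
Taken together, the RX page lifecycle after this patch looks roughly as follows. This is a condensed sketch assembled from the hunks above, with ring bookkeeping and error paths omitted:

```c
/* Map once per page; no implicit syncs from here on. */
dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
			 DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);

/* Hand a half-page buffer to the device (ixgbevf_alloc_rx_buffers). */
dma_sync_single_range_for_device(rx_ring->dev, dma, page_offset,
				 IXGBEVF_RX_BUFSZ, DMA_FROM_DEVICE);

/* Claim a received packet for the CPU (ixgbevf_fetch_rx_buffer). */
dma_sync_single_range_for_cpu(rx_ring->dev, dma, page_offset,
			      size, DMA_FROM_DEVICE);

/* Unmap only when the page is not reused, or on ring teardown. */
dma_unmap_page_attrs(rx_ring->dev, dma, PAGE_SIZE,
		     DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
```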
