| author | Alexander Duyck <alexander.h.duyck@redhat.com> | 2014-11-13 19:56:29 -0500 |
| --- | --- | --- |
| committer | Jeff Kirsher <jeffrey.t.kirsher@intel.com> | 2014-12-05 12:13:04 -0500 |
| commit | 18cb652a41ab2c9975e9b4d7ac69230d5a258f24 (patch) | |
| tree | 1d742ee9cc38a390f5d1e3ab974c41a362f05ffa /drivers/net/ethernet/intel | |
| parent | d8febb77b52ebddb9bd03ccaa5b61005e3a45a85 (diff) | |
ixgbe: Clean-up page reuse code
This patch cleans up the page reuse code, getting it into a state where all of the needed
workarounds are in place, and also fixes a few minor oversights, such as using __free_pages
instead of put_page to drop a locally allocated page.
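
(A minimal sketch of the alloc/free pairing this refers to, for illustration only: the
example_* names and the fixed order below are hypothetical stand-ins for the driver's
rx_ring and ixgbe_rx_pg_order(rx_ring), not driver code. The point is that a page obtained
from dev_alloc_pages() at a given order is handed back with the matching order-aware
__free_pages() on a local error or discard path, rather than with the generic put_page()
reference drop.)

```c
/*
 * Illustrative only: example_* names and the fixed order are hypothetical
 * stand-ins for the driver's rx_ring and ixgbe_rx_pg_order(rx_ring).
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>

#define EXAMPLE_RX_PG_ORDER	0	/* stand-in for ixgbe_rx_pg_order() */

/* allocate a receive page the way the driver's hot path does */
static struct page *example_alloc_rx_page(void)
{
	return dev_alloc_pages(EXAMPLE_RX_PG_ORDER);
}

/*
 * Drop a page that was allocated locally and never handed to the stack:
 * return it with the order it was allocated at, rather than relying on
 * the generic put_page() reference drop.
 */
static void example_drop_rx_page(struct page *page)
{
	if (page)
		__free_pages(page, EXAMPLE_RX_PG_ORDER);
}
```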
It also cleans up how we clear the descriptor status bits. Previously they were zeroed as
part of clearing the hdr_addr. However, the hdr_addr is a 64-bit field, and 64-bit writes
can be a bit more expensive on 32-bit systems. Since we are no longer using the header
split feature, the upper 32 bits of the address no longer need to be cleared. As a result
we can just clear the status bits and leave the length and VLAN fields as-is, which
should provide more information when debugging.
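
(For illustration, here is an abridged sketch of the advanced Rx descriptor layout this is
talking about; the names are hypothetical and the real, full definition lives in the
driver's headers. The read-format hdr_addr and the writeback-format
status_error/length/vlan overlay the same second 64-bit word, so zeroing only status_error
is a single 32-bit store that a 32-bit host does not have to split in two, and it leaves
the length and VLAN fields untouched.)

```c
/*
 * Abridged, illustrative sketch of the advanced Rx descriptor layout
 * (hypothetical names; the real definition is in the driver's headers).
 * The "read" and "wb" (writeback) views overlay the same 16 bytes.
 */
#include <linux/types.h>

union example_adv_rx_desc {
	struct {
		__le64 pkt_addr;		/* bytes 0..7  */
		__le64 hdr_addr;		/* bytes 8..15 */
	} read;
	struct {
		__le64 lower;			/* bytes 0..7: RSS/packet info (abridged) */
		struct {
			__le32 status_error;	/* bytes 8..11: DD/EOP/error bits */
			__le16 length;		/* bytes 12..13: packet length */
			__le16 vlan;		/* bytes 14..15: VLAN tag */
		} upper;
	} wb;
};

/*
 * Old: rx_desc->read.hdr_addr = 0;          64-bit store, also wipes length/vlan
 * New: rx_desc->wb.upper.status_error = 0;  32-bit store, length/vlan preserved
 */
```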
Cc: Don Skidmore <donald.c.skidmore@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r-- | drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 78 |
1 file changed, 36 insertions(+), 42 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9afa167d52a6..3c1d4ea47782 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1436,20 +1436,17 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
                                     struct ixgbe_rx_buffer *bi)
 {
         struct page *page = bi->page;
-        dma_addr_t dma = bi->dma;
+        dma_addr_t dma;
 
         /* since we are recycling buffers we should seldom need to alloc */
-        if (likely(dma))
+        if (likely(page))
                 return true;
 
         /* alloc new page for storage */
-        if (likely(!page)) {
-                page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
-                if (unlikely(!page)) {
-                        rx_ring->rx_stats.alloc_rx_page_failed++;
-                        return false;
-                }
-                bi->page = page;
+        page = dev_alloc_pages(ixgbe_rx_pg_order(rx_ring));
+        if (unlikely(!page)) {
+                rx_ring->rx_stats.alloc_rx_page_failed++;
+                return false;
         }
 
         /* map page for use */
@@ -1462,13 +1459,13 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
          */
         if (dma_mapping_error(rx_ring->dev, dma)) {
                 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
-                bi->page = NULL;
 
                 rx_ring->rx_stats.alloc_rx_page_failed++;
                 return false;
         }
 
         bi->dma = dma;
+        bi->page = page;
         bi->page_offset = 0;
 
         return true;
@@ -1512,8 +1509,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
                         i -= rx_ring->count;
                 }
 
-                /* clear the hdr_addr for the next_to_use descriptor */
-                rx_desc->read.hdr_addr = 0;
+                /* clear the status bits for the next_to_use descriptor */
+                rx_desc->wb.upper.status_error = 0;
 
                 cleaned_count--;
         } while (cleaned_count);
@@ -1798,9 +1795,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
         rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
         /* transfer page from old buffer to new buffer */
-        new_buff->page = old_buff->page;
-        new_buff->dma = old_buff->dma;
-        new_buff->page_offset = old_buff->page_offset;
+        *new_buff = *old_buff;
 
         /* sync the buffer for use by the device */
         dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma,
@@ -1809,6 +1804,11 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
                                          DMA_FROM_DEVICE);
 }
 
+static inline bool ixgbe_page_is_reserved(struct page *page)
+{
+        return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 /**
  * ixgbe_add_rx_frag - Add contents of Rx buffer to sk_buff
  * @rx_ring: rx descriptor ring to transact packets on
@@ -1844,12 +1844,12 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
                 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-                /* we can reuse buffer as-is, just make sure it is local */
-                if (likely(page_to_nid(page) == numa_node_id()))
+                /* page is not reserved, we can reuse buffer as-is */
+                if (likely(!ixgbe_page_is_reserved(page)))
                         return true;
 
                 /* this page cannot be reused so discard it */
-                put_page(page);
+                __free_pages(page, ixgbe_rx_pg_order(rx_ring));
                 return false;
         }
 
@@ -1857,7 +1857,7 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
                         rx_buffer->page_offset, size, truesize);
 
         /* avoid re-using remote pages */
-        if (unlikely(page_to_nid(page) != numa_node_id()))
+        if (unlikely(ixgbe_page_is_reserved(page)))
                 return false;
 
 #if (PAGE_SIZE < 8192)
@@ -1867,22 +1867,19 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 
         /* flip page offset to other buffer */
         rx_buffer->page_offset ^= truesize;
-
-        /* Even if we own the page, we are not allowed to use atomic_set()
-         * This would break get_page_unless_zero() users.
-         */
-        atomic_inc(&page->_count);
 #else
         /* move offset up to the next cache line */
         rx_buffer->page_offset += truesize;
 
         if (rx_buffer->page_offset > last_offset)
                 return false;
-
-        /* bump ref count on page before it is given to the stack */
-        get_page(page);
 #endif
 
+        /* Even if we own the page, we are not allowed to use atomic_set()
+         * This would break get_page_unless_zero() users.
+         */
+        atomic_inc(&page->_count);
+
         return true;
 }
 
@@ -1945,6 +1942,8 @@ dma_sync:
                                               rx_buffer->page_offset,
                                               ixgbe_rx_bufsz(rx_ring),
                                               DMA_FROM_DEVICE);
+
+                rx_buffer->skb = NULL;
         }
 
         /* pull page into skb */
@@ -1962,8 +1961,6 @@ dma_sync:
         }
 
         /* clear contents of buffer_info */
-        rx_buffer->skb = NULL;
-        rx_buffer->dma = 0;
         rx_buffer->page = NULL;
 
         return skb;
@@ -4344,29 +4341,26 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 
         /* Free all the Rx ring sk_buffs */
         for (i = 0; i < rx_ring->count; i++) {
-                struct ixgbe_rx_buffer *rx_buffer;
+                struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
 
-                rx_buffer = &rx_ring->rx_buffer_info[i];
                 if (rx_buffer->skb) {
                         struct sk_buff *skb = rx_buffer->skb;
-                        if (IXGBE_CB(skb)->page_released) {
+                        if (IXGBE_CB(skb)->page_released)
                                 dma_unmap_page(dev,
                                                IXGBE_CB(skb)->dma,
                                                ixgbe_rx_bufsz(rx_ring),
                                                DMA_FROM_DEVICE);
-                                IXGBE_CB(skb)->page_released = false;
-                        }
                         dev_kfree_skb(skb);
                         rx_buffer->skb = NULL;
                 }
-                if (rx_buffer->dma)
-                        dma_unmap_page(dev, rx_buffer->dma,
-                                       ixgbe_rx_pg_size(rx_ring),
-                                       DMA_FROM_DEVICE);
-                rx_buffer->dma = 0;
-                if (rx_buffer->page)
-                        __free_pages(rx_buffer->page,
-                                     ixgbe_rx_pg_order(rx_ring));
+
+                if (!rx_buffer->page)
+                        continue;
+
+                dma_unmap_page(dev, rx_buffer->dma,
+                               ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE);
+                __free_pages(rx_buffer->page, ixgbe_rx_pg_order(rx_ring));
+
                 rx_buffer->page = NULL;
         }
 