 drivers/net/ethernet/intel/fm10k/fm10k_main.c | 34 +++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index caa43f7c2931..c7a19a5e0ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_page(page);
-		bi->page = NULL;
 
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
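
The deleted "bi->page = NULL;" appears to be redundant rather than wrong: this
helper returns early when a page is already attached, and bi->page is only
written once the DMA mapping has succeeded, so the error path never has a
stale pointer to clear. A minimal sketch of that flow, assuming the usual
Intel-driver shape of fm10k_alloc_mapped_page() (the early return and the
assignments sit outside this hunk):

	if (likely(bi->page))	/* recycled buffer, already mapped */
		return true;

	page = dev_alloc_page();
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_page(page);	/* bi->page was never set on this path */
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;	/* first and only write to bi->page */
	bi->page_offset = 0;
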
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
 
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->q.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->d.staterr = 0;
 
 		cleaned_count--;
 	} while (cleaned_count);
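
Clearing d.staterr instead of the whole 64-bit q.hdr_addr narrows the write
to the one field the cleanup path actually polls: software treats a
descriptor as complete once hardware sets the DD status bit, so only the
status bits need to be zeroed before the descriptor is handed back to the
ring. A sketch of the consumer-side test, assuming fm10k's FM10K_RX_DESC()
and fm10k_test_staterr() helpers:

	union fm10k_rx_desc *rx_desc;

	rx_desc = FM10K_RX_DESC(rx_ring, rx_ring->next_to_clean);

	/* stop cleaning once hardware has not yet written this one back */
	if (!fm10k_test_staterr(rx_desc, FM10K_RXD_STATUS_DD))
		break;
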
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 
 	/* transfer page from old buffer to new buffer */
-	memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));
+	*new_buff = *old_buff;
 
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
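
Replacing memcpy() with a structure assignment copies the same
sizeof(struct fm10k_rx_buffer) bytes, but both operands are now type-checked
and the compiler is free to pick the copy strategy; a mismatched type or size
becomes a compile error instead of a silent mis-sized copy. A stand-alone
illustration with a hypothetical buffer type:

	struct rx_buf {
		dma_addr_t dma;
		struct page *page;
		u32 page_offset;
	};

	static void transfer(struct rx_buf *new_buff,
			     const struct rx_buf *old_buff)
	{
		/* same bytes as memcpy(new_buff, old_buff,
		 * sizeof(*new_buff)), minus the chance of passing the
		 * wrong size or the wrong pointer type
		 */
		*new_buff = *old_buff;
	}
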
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 					 DMA_FROM_DEVICE);
 }
 
+static inline bool fm10k_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 				    struct page *page,
 				    unsigned int truesize)
 {
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_mem_id()))
+	if (unlikely(fm10k_page_is_reserved(page)))
 		return false;
 
 #if (PAGE_SIZE < 8192)
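
The new fm10k_page_is_reserved() helper folds the two "never recycle this
page" conditions into one test: pages on a remote NUMA node (not worth
keeping for locality) and pages flagged pfmemalloc, which were allocated from
the emergency reserves under memory pressure and should be freed back
promptly rather than parked in the RX ring. On trees where the flag sits
behind an accessor (page_is_pfmemalloc() was added around v4.2, if memory
serves), the equivalent helper would read:

	static inline bool fm10k_page_is_reserved(struct page *page)
	{
		return (page_to_nid(page) != numa_mem_id()) ||
		       page_is_pfmemalloc(page);
	}
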
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 
 	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
 
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
 
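
Both compile-time branches used to take their own page reference, one through
atomic_inc(&page->_count) and one through get_page(); hoisting a single
atomic_inc() below the #endif removes the duplication and keeps the
atomic_set() warning in one place. On kernels from v4.6 onward the field was
renamed _refcount and the idiomatic spelling would be (an assumption about a
newer target tree, not part of this patch):

	/* single reference bump shared by both page-size paths */
	page_ref_inc(page);
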
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
 
 	memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
 
-	/* we can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(page) == numa_mem_id()))
+	/* page is not reserved, we can reuse buffer as-is */
+	if (likely(!fm10k_page_is_reserved(page)))
 		return true;
 
 	/* this page cannot be reused so discard it */
-	put_page(page);
+	__free_page(page);
 	return false;
 }
 
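
put_page() and __free_page() both drop a reference and free the page once the
count hits zero; the direct call merely skips put_page()'s compound-page
handling, which an order-0 RX buffer the driver still solely owns never
needs. The definition in include/linux/gfp.h makes the relationship plain:

	#define __free_page(page) __free_pages((page), 0)
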
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 	struct page *page;
 
 	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
-
 	page = rx_buffer->page;
 	prefetchw(page);
 
