Diffstat (limited to 'drivers/net/sfc/rx.c')

 drivers/net/sfc/rx.c | 39 ++++++++++++++++++++++---------------
 1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index a6413309c577..f15d33225342 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,21 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data)				\
-	(((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf)				\
-	RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx)				\
-	(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_page_offset(void *p)
+{
+	return (__force unsigned int)p & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+	/* Offset is always within one page, so we don't need to consider
+	 * the page order.
+	 */
+	return efx_page_offset(buf->data);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+	return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 
 /**************************************************************************
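
The new helpers are straight rewrites of the old macros: efx_page_offset() masks a pointer with (PAGE_SIZE - 1) to recover its offset within a page, and efx_rx_buf_size() computes the buffer size as PAGE_SIZE shifted left by the allocation order, the same value the old PAGE_SIZE * (1u << order) produced. A minimal userspace sketch of both calculations (hypothetical names page_offset/buf_size and a 4 KiB MY_PAGE_SIZE are assumptions; this is not the driver code itself):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096u	/* assumption: 4 KiB pages */

/* Within-page offset of a pointer, mirroring efx_page_offset(). */
static unsigned int page_offset(const void *p)
{
	return (unsigned int)((uintptr_t)p & (MY_PAGE_SIZE - 1));
}

/* Buffer size for a given page order, mirroring efx_rx_buf_size(). */
static unsigned int buf_size(unsigned int order)
{
	return MY_PAGE_SIZE << order;
}

int main(void)
{
	char *page = aligned_alloc(MY_PAGE_SIZE, MY_PAGE_SIZE);

	assert(page);
	/* The mask recovers a buffer's offset from a page-aligned base. */
	assert(page_offset(page) == 0);
	assert(page_offset(page + 0x123) == 0x123);

	/* PAGE_SIZE << n equals the old PAGE_SIZE * (1u << n). */
	for (unsigned int n = 0; n < 4; n++)
		assert(buf_size(n) == MY_PAGE_SIZE * (1u << n));

	free(page);
	return 0;
}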
@@ -269,7 +276,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 		return -ENOMEM;
 
 	dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-				0, RX_PAGE_SIZE(efx),
+				0, efx_rx_buf_size(efx),
 				PCI_DMA_FROMDEVICE);
 
 	if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -284,7 +291,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 			       EFX_PAGE_IP_ALIGN);
 	}
 
-	offset = RX_DATA_OFFSET(rx_queue->buf_data);
+	offset = efx_page_offset(rx_queue->buf_data);
 	rx_buf->len = bytes;
 	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 	rx_buf->data = rx_queue->buf_data;
@@ -295,7 +302,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 	rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
 	offset += ((bytes + 0x1ff) & ~0x1ff);
 
-	space = RX_PAGE_SIZE(efx) - offset;
+	space = efx_rx_buf_size(efx) - offset;
 	if (space >= bytes) {
 		/* Refs dropped on kernel releasing each skb */
 		get_page(rx_queue->buf_page);
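
The untouched context in this hunk also shows the buffer stride: (bytes + 0x1ff) & ~0x1ff rounds bytes up to the next multiple of 512, so consecutive buffers carved from the same page start on 512-byte boundaries. A small standalone sketch of the round-up idiom (the align512 name is hypothetical; nothing beyond standard C is assumed):

#include <assert.h>

/* Round x up to the next multiple of 512 (0x200).  Works because 512
 * is a power of two: add 0x1ff, then clear the low nine bits. */
static unsigned long align512(unsigned long x)
{
	return (x + 0x1ff) & ~0x1ffUL;
}

int main(void)
{
	assert(align512(0) == 0);
	assert(align512(1) == 512);
	assert(align512(512) == 512);
	assert(align512(513) == 1024);
	return 0;
}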
@@ -344,7 +351,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		if (rx_buf->unmap_addr) {
 			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-				       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+				       efx_rx_buf_size(efx),
+				       PCI_DMA_FROMDEVICE);
 			rx_buf->unmap_addr = 0;
 		}
 	} else if (likely(rx_buf->skb)) {
@@ -553,7 +561,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 		struct skb_frag_struct frags;
 
 		frags.page = rx_buf->page;
-		frags.page_offset = RX_BUF_OFFSET(rx_buf);
+		frags.page_offset = efx_rx_buf_offset(rx_buf);
 		frags.size = rx_buf->len;
 
 		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -598,7 +606,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
 	if (unlikely(rx_buf->len > hdr_len)) {
 		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
 		frag->page = rx_buf->page;
-		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
 		frag->size = skb->len - hdr_len;
 		skb_shinfo(skb)->nr_frags = 1;
 		skb->data_len = frag->size;
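
Both of the preceding hunks feed efx_rx_buf_offset() into a (page, page_offset, size) fragment descriptor. The masked offset only makes sense relative to a page-aligned base, which is why masking with (PAGE_SIZE - 1) suffices. A hedged userspace analog (struct frag is a hypothetical stand-in, not skb_frag_struct; 4 KiB pages are assumed):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define MY_PAGE_SIZE 4096u	/* assumption: 4 KiB pages */

/* Hypothetical analog of the (page, page_offset, size) triple that
 * skb frags and lro_receive_frags() consume. */
struct frag {
	char *page;		/* stands in for struct page * */
	unsigned int page_offset;
	unsigned int size;
};

int main(void)
{
	char *page = aligned_alloc(MY_PAGE_SIZE, MY_PAGE_SIZE);
	char *data = page + 0x40;	/* RX buffer placed inside the page */

	struct frag f = {
		.page = page,
		.page_offset = (unsigned int)((uintptr_t)data &
					      (MY_PAGE_SIZE - 1)),
		.size = 100,
	};

	/* page + page_offset must point back at the buffer data. */
	assert(f.page + f.page_offset == data);
	free(page);
	return 0;
}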
@@ -852,7 +860,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 	/* For a page that is part-way through splitting into RX buffers */
 	if (rx_queue->buf_page != NULL) {
 		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+			       efx_rx_buf_size(rx_queue->efx),
+			       PCI_DMA_FROMDEVICE);
 		__free_pages(rx_queue->buf_page,
 			     rx_queue->efx->rx_buffer_order);
 		rx_queue->buf_page = NULL;
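
A design note on the DMA hunks: the pci_map_page() call in efx_init_rx_buffer_page() and the pci_unmap_page() calls in efx_unmap_rx_buffer() and efx_fini_rx_queue() now all take their length from efx_rx_buf_size(), so the unmap length cannot drift from the mapped length if the size calculation ever changes. A minimal sketch of the single-source-of-truth pattern (struct nic and rx_buf_size are hypothetical stand-ins, not the driver's API):

#include <assert.h>

#define MY_PAGE_SIZE 4096u	/* assumption: 4 KiB pages */

/* Hypothetical stand-in for struct efx_nic: just the order field. */
struct nic {
	unsigned int rx_buffer_order;
};

/* Single source of truth for the mapped length. */
static unsigned int rx_buf_size(const struct nic *nic)
{
	return MY_PAGE_SIZE << nic->rx_buffer_order;
}

int main(void)
{
	struct nic nic = { .rx_buffer_order = 2 };

	/* Simulated map and unmap paths call the same helper,
	 * so their lengths agree by construction. */
	unsigned int map_len = rx_buf_size(&nic);
	unsigned int unmap_len = rx_buf_size(&nic);

	assert(map_len == unmap_len);
	assert(map_len == 4 * MY_PAGE_SIZE);
	return 0;
}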