Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--  drivers/net/sfc/rx.c  48
1 file changed, 27 insertions, 21 deletions
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 670622373ddf..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-	(((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-	RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
-	(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+	/* Offset is always within one page, so we don't need to consider
+	 * the page order.
+	 */
+	return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+	return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 
 /**************************************************************************
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
 			       void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-	struct efx_channel *channel = (struct efx_channel *)priv;
+	struct efx_channel *channel = priv;
 	struct iphdr *iph;
 	struct tcphdr *th;
 
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
 			    void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
 			    void *priv)
 {
-	struct efx_channel *channel = (struct efx_channel *)priv;
+	struct efx_channel *channel = priv;
 	struct ethhdr *eh;
 	struct iphdr *iph;
 
 	/* We support EtherII and VLAN encapsulated IPv4 */
-	eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+	eh = page_address(frag->page) + frag->page_offset;
 	*mac_hdr = eh;
 
 	if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 		return -ENOMEM;
 
 	dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-				0, RX_PAGE_SIZE(efx),
+				0, efx_rx_buf_size(efx),
 				PCI_DMA_FROMDEVICE);
 
 	if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 
 		rx_queue->buf_page = rx_buf->page;
 		rx_queue->buf_dma_addr = dma_addr;
-		rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+		rx_queue->buf_data = (page_address(rx_buf->page) +
 				      EFX_PAGE_IP_ALIGN);
 	}
 
-	offset = RX_DATA_OFFSET(rx_queue->buf_data);
 	rx_buf->len = bytes;
-	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 	rx_buf->data = rx_queue->buf_data;
+	offset = efx_rx_buf_offset(rx_buf);
+	rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 
 	/* Try to pack multiple buffers per page */
 	if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 		rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
 		offset += ((bytes + 0x1ff) & ~0x1ff);
 
-		space = RX_PAGE_SIZE(efx) - offset;
+		space = efx_rx_buf_size(efx) - offset;
 		if (space >= bytes) {
 			/* Refs dropped on kernel releasing each skb */
 			get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 		EFX_BUG_ON_PARANOID(rx_buf->skb);
 		if (rx_buf->unmap_addr) {
 			pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-				       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+				       efx_rx_buf_size(efx),
+				       PCI_DMA_FROMDEVICE);
 			rx_buf->unmap_addr = 0;
 		}
 	} else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		return 0;
 
 	/* Record minimum fill level */
-	if (unlikely(fill_level < rx_queue->min_fill))
+	if (unlikely(fill_level < rx_queue->min_fill)) {
 		if (fill_level)
 			rx_queue->min_fill = fill_level;
+	}
 
 	/* Acquire RX add lock. If this lock is contended, then a fast
 	 * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 		struct skb_frag_struct frags;
 
 		frags.page = rx_buf->page;
-		frags.page_offset = RX_BUF_OFFSET(rx_buf);
+		frags.page_offset = efx_rx_buf_offset(rx_buf);
 		frags.size = rx_buf->len;
 
 		lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
 	if (unlikely(rx_buf->len > hdr_len)) {
 		struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
 		frag->page = rx_buf->page;
-		frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+		frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
 		frag->size = skb->len - hdr_len;
 		skb_shinfo(skb)->nr_frags = 1;
 		skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 	/* For a page that is part-way through splitting into RX buffers */
 	if (rx_queue->buf_page != NULL) {
 		pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-			       RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+			       efx_rx_buf_size(rx_queue->efx),
+			       PCI_DMA_FROMDEVICE);
 		__free_pages(rx_queue->buf_page,
 			     rx_queue->efx->rx_buffer_order);
 		rx_queue->buf_page = NULL;
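
The patch above replaces the RX_DATA_OFFSET/RX_BUF_OFFSET/RX_PAGE_SIZE macros with the static inline helpers efx_rx_buf_offset() and efx_rx_buf_size(). As a rough illustration of why that conversion is worthwhile (argument type checking, one place for the page-order arithmetic), here is a minimal user-space sketch of the same pattern; the struct definitions, fixed PAGE_SIZE, and main() are hypothetical stand-ins for illustration, not the driver's real types, and the kernel's __force annotation is dropped.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Hypothetical stand-ins for the driver structures (illustration only) */
struct efx_rx_buffer { void *data; };
struct efx_nic { unsigned int rx_buffer_order; };

/* Old style: a macro performs textual substitution and checks no types */
#define RX_PAGE_SIZE(_efx) (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))

/* New style, mirroring the patch: type-checked inline helpers */
static inline unsigned long efx_rx_buf_offset(struct efx_rx_buffer *buf)
{
	/* Offset is always within one page, regardless of page order */
	return (unsigned long)buf->data & (PAGE_SIZE - 1);
}

static inline unsigned long efx_rx_buf_size(struct efx_nic *efx)
{
	return PAGE_SIZE << efx->rx_buffer_order;
}

int main(void)
{
	struct efx_nic nic = { .rx_buffer_order = 1 };   /* order-1 allocation: 2 pages */
	static char page[2 * PAGE_SIZE];
	struct efx_rx_buffer buf = { .data = page + 100 };

	printf("rx buffer size:   %lu\n", efx_rx_buf_size(&nic));   /* 8192 */
	printf("rx buffer offset: %lu\n", efx_rx_buf_offset(&buf)); /* depends on where 'page' lands */

	/* Passing the wrong struct to either helper is now a compile-time
	 * type error, which the old macros could not catch. */
	return 0;
}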
