Diffstat (limited to 'drivers/net/sfc/rx.c')
-rw-r--r--  drivers/net/sfc/rx.c | 59 +++++++++++++++++++++++++++++++++++++----------------------
1 file changed, 37 insertions(+), 22 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 551299b462ae..601b001437c0 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -19,6 +19,7 @@
 #include "rx.h"
 #include "efx.h"
 #include "falcon.h"
+#include "selftest.h"
 #include "workarounds.h"
 
 /* Number of RX descriptors pushed at once. */
@@ -85,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-        (((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-        RX_DATA_OFFSET((_rx_buf)->data)
-
-#define RX_PAGE_SIZE(_efx) \
-        (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+        /* Offset is always within one page, so we don't need to consider
+         * the page order.
+         */
+        return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+        return PAGE_SIZE << efx->rx_buffer_order;
+}
 
 
 /**************************************************************************
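
The two inline helpers above replace the old macros one for one. efx_rx_buf_size() computes the same value as RX_PAGE_SIZE() did, since multiplying by (1u << order) is just a left shift, but with the type checking a macro cannot provide. A standalone sketch (stand-in PAGE_SIZE, not kernel code) confirming the equivalence:

/* Standalone check (not kernel code): PAGE_SIZE and rx_buffer_order are
 * stand-ins; RX_PAGE_SIZE mirrors the macro deleted above.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define RX_PAGE_SIZE(order) (PAGE_SIZE * (1u << (order)))

static unsigned int efx_rx_buf_size(unsigned int order)
{
        return PAGE_SIZE << order;      /* same value, computed as a shift */
}

int main(void)
{
        for (unsigned int order = 0; order < 4; order++)
                assert(RX_PAGE_SIZE(order) == efx_rx_buf_size(order));
        printf("shift and multiply agree for orders 0-3\n");
        return 0;
}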
@@ -105,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
                               void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-        struct efx_channel *channel = (struct efx_channel *)priv;
+        struct efx_channel *channel = priv;
         struct iphdr *iph;
         struct tcphdr *th;
 
@@ -130,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
                            void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
                            void *priv)
 {
-        struct efx_channel *channel = (struct efx_channel *)priv;
+        struct efx_channel *channel = priv;
         struct ethhdr *eh;
         struct iphdr *iph;
 
         /* We support EtherII and VLAN encapsulated IPv4 */
-        eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+        eh = page_address(frag->page) + frag->page_offset;
         *mac_hdr = eh;
 
         if (eh->h_proto == htons(ETH_P_IP)) {
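
Both cast removals above (here and in efx_lro_get_skb_hdr()) rely on the same C rule: a void * converts implicitly to any object pointer type, and page_address() already returns void *, so the casts were pure noise. A minimal userspace illustration (not from the patch):

/* Not from the patch: void * converts implicitly to any object pointer
 * type in C, so the casts removed above added nothing.
 */
#include <stdio.h>

struct efx_channel { int channo; };     /* stand-in for the real struct */

static void handler(void *priv)
{
        struct efx_channel *channel = priv;     /* no cast needed */
        printf("channel %d\n", channel->channo);
}

int main(void)
{
        struct efx_channel ch = { .channo = 3 };
        handler(&ch);
        return 0;
}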
@@ -268,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                         return -ENOMEM;
 
                 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-                                        0, RX_PAGE_SIZE(efx),
+                                        0, efx_rx_buf_size(efx),
                                         PCI_DMA_FROMDEVICE);
 
                 if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -279,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 
                 rx_queue->buf_page = rx_buf->page;
                 rx_queue->buf_dma_addr = dma_addr;
-                rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+                rx_queue->buf_data = (page_address(rx_buf->page) +
                                       EFX_PAGE_IP_ALIGN);
         }
 
-        offset = RX_DATA_OFFSET(rx_queue->buf_data);
         rx_buf->len = bytes;
-        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
         rx_buf->data = rx_queue->buf_data;
+        offset = efx_rx_buf_offset(rx_buf);
+        rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 
         /* Try to pack multiple buffers per page */
         if (efx->rx_buffer_order == 0) {
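
Note the reordering: offset is now derived from rx_buf->data via efx_rx_buf_offset() after that pointer has been assigned, so the DMA address is always computed from the pointer the buffer actually carries. The helper works by masking the address with PAGE_SIZE - 1. A userspace sketch of that mask (aligned_alloc() stands in for a real page allocation):

/* Userspace sketch (not kernel code): masking a page-aligned pointer's
 * address with PAGE_SIZE - 1 recovers its offset within the page.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u

static unsigned int buf_offset(const void *data)
{
        return (uintptr_t)data & (PAGE_SIZE - 1);
}

int main(void)
{
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE); /* stand-in page */
        if (!page)
                return 1;
        printf("%u\n", buf_offset(page + 2050));        /* prints 2050 */
        free(page);
        return 0;
}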
@@ -294,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
                 offset += ((bytes + 0x1ff) & ~0x1ff);
 
-                space = RX_PAGE_SIZE(efx) - offset;
+                space = efx_rx_buf_size(efx) - offset;
                 if (space >= bytes) {
                         /* Refs dropped on kernel releasing each skb */
                         get_page(rx_queue->buf_page);
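
The (bytes + 0x1ff) & ~0x1ff expression in the context above rounds bytes up to the next multiple of 512, so each packed buffer starts on a 512-byte boundary. The idiom generalizes to any power-of-two alignment; a small sketch (not from the patch):

/* Not from the patch: the align-up idiom. Adding (align - 1) and masking
 * off the low bits rounds n up to the next multiple of align, for any
 * power-of-two align.
 */
#include <stdio.h>

static unsigned int align_up(unsigned int n, unsigned int align)
{
        return (n + align - 1) & ~(align - 1);
}

int main(void)
{
        printf("%u\n", align_up(1600, 512));    /* prints 2048 */
        printf("%u\n", align_up(2048, 512));    /* already aligned: 2048 */
        return 0;
}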
@@ -343,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
                 EFX_BUG_ON_PARANOID(rx_buf->skb);
                 if (rx_buf->unmap_addr) {
                         pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-                                       RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+                                       efx_rx_buf_size(efx),
+                                       PCI_DMA_FROMDEVICE);
                         rx_buf->unmap_addr = 0;
                 }
         } else if (likely(rx_buf->skb)) {
@@ -399,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
                 return 0;
 
         /* Record minimum fill level */
-        if (unlikely(fill_level < rx_queue->min_fill))
+        if (unlikely(fill_level < rx_queue->min_fill)) {
                 if (fill_level)
                         rx_queue->min_fill = fill_level;
+        }
 
         /* Acquire RX add lock.  If this lock is contended, then a fast
          * fill must already be in progress (e.g. in the refill
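
The added braces change no behaviour today, but they remove a dangling-else trap: without them, any else added later would silently bind to the inner if. A userspace illustration of the hazard (not from the patch; names are stand-ins for the queue fields):

/* Not from the patch: without the outer braces, an else added later
 * binds to the nearest if, changing the logic silently.
 */
#include <stdio.h>

int main(void)
{
        int fill_level = 0, min_fill = 10;

        if (fill_level < min_fill)
                if (fill_level)
                        min_fill = fill_level;
                else    /* binds to "if (fill_level)", not the outer if */
                        printf("queue empty, min_fill untouched\n");

        printf("min_fill = %d\n", min_fill);
        return 0;
}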
@@ -551,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
                 struct skb_frag_struct frags;
 
                 frags.page = rx_buf->page;
-                frags.page_offset = RX_BUF_OFFSET(rx_buf);
+                frags.page_offset = efx_rx_buf_offset(rx_buf);
                 frags.size = rx_buf->len;
 
                 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -596,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
         if (unlikely(rx_buf->len > hdr_len)) {
                 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
                 frag->page = rx_buf->page;
-                frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+                frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
                 frag->size = skb->len - hdr_len;
                 skb_shinfo(skb)->nr_frags = 1;
                 skb->data_len = frag->size;
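
This path builds an skb whose linear area holds only the first hdr_len bytes, with the remainder of the packet attached as a page fragment; skb->data_len counts just the paged bytes while skb->len counts both. A userspace analogue of that accounting (illustrative only, not kernel code):

/* Userspace analogue (not kernel code) of the split above: hdr_len bytes
 * live in the linear area, the rest is referenced as a page fragment.
 */
#include <assert.h>
#include <stdio.h>

struct frag { unsigned int page_offset, size; };

int main(void)
{
        unsigned int buf_offset = 2048; /* packet's offset within its page */
        unsigned int pkt_len = 1514, hdr_len = 64;

        struct frag f = {
                .page_offset = buf_offset + hdr_len, /* frag starts after the header */
                .size = pkt_len - hdr_len,
        };
        unsigned int data_len = f.size;         /* cf. skb->data_len */
        unsigned int len = hdr_len + data_len;  /* cf. skb->len */

        assert(len == pkt_len);
        printf("linear %u + frag %u = %u bytes\n", hdr_len, data_len, len);
        return 0;
}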
@@ -683,6 +689,15 @@ void __efx_rx_packet(struct efx_channel *channel,
         struct sk_buff *skb;
         int lro = efx->net_dev->features & NETIF_F_LRO;
 
+        /* If we're in loopback test, then pass the packet directly to the
+         * loopback layer, and free the rx_buf here
+         */
+        if (unlikely(efx->loopback_selftest)) {
+                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+                efx_free_rx_buffer(efx, rx_buf);
+                goto done;
+        }
+
         if (rx_buf->skb) {
                 prefetch(skb_shinfo(rx_buf->skb));
 
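
The new block diverts every received packet to the selftest's loopback checker and frees the buffer before any of the normal skb/LRO processing runs, which is why selftest.h is now included at the top of the file. A sketch of the early-dispatch pattern (stub names, not the kernel API):

/* Stub sketch (not the kernel API): a diagnostic path checked first
 * consumes the packet and returns before normal processing.
 */
#include <stdbool.h>
#include <stdio.h>

static bool loopback_selftest;          /* cf. efx->loopback_selftest */

static void loopback_rx_packet(const char *data, int len)
{
        (void)data;     /* a real checker would verify the payload */
        printf("selftest consumed %d bytes\n", len);
}

static void rx_packet(const char *data, int len)
{
        if (loopback_selftest) {        /* diagnostic path: consume and bail */
                loopback_rx_packet(data, len);
                return;
        }
        printf("normal delivery of %d bytes\n", len);
}

int main(void)
{
        char pkt[64] = { 0 };

        rx_packet(pkt, sizeof(pkt));    /* normal path */
        loopback_selftest = true;
        rx_packet(pkt, sizeof(pkt));    /* selftest path */
        return 0;
}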
@@ -736,7 +751,6 @@ void __efx_rx_packet(struct efx_channel *channel,
         /* Update allocation strategy method */
         channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
 
-        /* fall-thru */
 done:
         efx->net_dev->last_rx = jiffies;
 }
@@ -842,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
         /* For a page that is part-way through splitting into RX buffers */
         if (rx_queue->buf_page != NULL) {
                 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-                               RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+                               efx_rx_buf_size(rx_queue->efx),
+                               PCI_DMA_FROMDEVICE);
                 __free_pages(rx_queue->buf_page,
                              rx_queue->efx->rx_buffer_order);
                 rx_queue->buf_page = NULL;