author     Steve Hodgson <shodgson@solarflare.com>    2011-02-24 18:45:16 -0500
committer  Ben Hutchings <bhutchings@solarflare.com>  2011-02-28 18:57:23 -0500
commit     a526f140b22131376b0e49577210e6af73e2b89f
tree       e669975e1d635b629f8df2f00de2adc28dee858f /drivers/net/sfc/rx.c
parent     8ba5366adacef220b6ce16dca777600433a22a42

sfc: Reduce size of efx_rx_buffer further by removing data member

Instead calculate the KVA of receive data. It's not like it's a hard sum.

[bwh: Fixed to work with GRO.]

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Diffstat (limited to 'drivers/net/sfc/rx.c')
 drivers/net/sfc/rx.c | 50
 1 file changed, 28 insertions(+), 22 deletions(-)
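The sum the commit message alludes to is the one performed by the new efx_rx_buf_offset()/efx_rx_buf_eh() helpers below: a receive buffer never crosses a page boundary, so the low bits of its DMA address are also its offset within the page, and the kernel virtual address of the ethernet header falls out of page_address() plus that offset plus the length of the hash prefix the NIC writes ahead of the frame. A minimal userspace sketch of that arithmetic, with made-up values standing in for buf->dma_addr, page_address(buf->u.page) and efx->type->rx_buffer_hash_size:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        /* Hypothetical stand-ins: a page's kernel virtual address and the
         * bus address the NIC was given for a buffer inside that page. */
        static unsigned char page[PAGE_SIZE];
        uintptr_t page_base = (uintptr_t)page;  /* page_address(buf->u.page) */
        uintptr_t dma_addr = 0x3c0010;          /* buf->dma_addr */
        unsigned int hash_size = 4;             /* rx_buffer_hash_size (illustrative) */

        /* The buffer is within one page, so only the low bits of the DMA
         * address matter; the hash prefix sits before the ethernet header. */
        unsigned int offset = (dma_addr & (PAGE_SIZE - 1)) + hash_size;
        unsigned char *eh = (unsigned char *)(page_base + offset);

        printf("ethernet header at page offset %u (%p)\n", offset, (void *)eh);
        return 0;
}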
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index bcbd2ec2d92a..81bec873e9d3 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -89,24 +89,37 @@ static unsigned int rx_refill_limit = 95;
  */
 #define EFX_RXD_HEAD_ROOM 2
 
-static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+/* Offset of ethernet header within page */
+static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+                                             struct efx_rx_buffer *buf)
 {
         /* Offset is always within one page, so we don't need to consider
          * the page order.
          */
-        return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+        return (((__force unsigned long) buf->dma_addr & (PAGE_SIZE - 1)) +
+                efx->type->rx_buffer_hash_size);
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
         return PAGE_SIZE << efx->rx_buffer_order;
 }
 
-static inline u32 efx_rx_buf_hash(struct efx_rx_buffer *buf)
+static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
 {
+        if (buf->is_page)
+                return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
+        else
+                return ((u8 *)buf->u.skb->data +
+                        efx->type->rx_buffer_hash_size);
+}
+
+static inline u32 efx_rx_buf_hash(const u8 *eh)
+{
+        /* The ethernet header is always directly after any hash. */
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
-        return __le32_to_cpup((const __le32 *)(buf->data - 4));
+        return __le32_to_cpup((const __le32 *)(eh - 4));
 #else
-        const u8 *data = (const u8 *)(buf->data - 4);
+        const u8 *data = eh - 4;
         return ((u32)data[0] |
                 (u32)data[1] << 8 |
                 (u32)data[2] << 16 |
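For reference, the flow hash read here is a little-endian u32 stored directly before the ethernet header, which is why both branches load from eh - 4. The #else branch covers platforms where an unaligned 32-bit load is costly or disallowed. A standalone userspace check (an analogue only: le32toh() stands in for the kernel's __le32_to_cpup()) that the byte-wise form matches the direct load:

#include <endian.h>    /* le32toh(); glibc extension, assumed available */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* A 4-byte little-endian hash followed by the start of a frame */
        const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12, 0xff, 0xff };
        const uint8_t *eh = buf + 4;     /* what efx_rx_buf_eh() returns */
        const uint8_t *data = eh - 4;

        /* Fast path: a single (possibly unaligned) little-endian load */
        uint32_t direct;
        memcpy(&direct, data, sizeof(direct));
        direct = le32toh(direct);

        /* Portable path: assemble byte by byte, as in the #else branch */
        uint32_t bytewise = (uint32_t)data[0] |
                            (uint32_t)data[1] << 8 |
                            (uint32_t)data[2] << 16 |
                            (uint32_t)data[3] << 24;

        /* Both print 0x12345678 */
        printf("0x%08" PRIx32 " == 0x%08" PRIx32 "\n", direct, bytewise);
        return 0;
}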
@@ -143,13 +156,12 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 
                 /* Adjust the SKB for padding and checksum */
                 skb_reserve(skb, NET_IP_ALIGN);
-                rx_buf->data = (char *)skb->data;
                 rx_buf->len = skb_len - NET_IP_ALIGN;
                 rx_buf->is_page = false;
                 skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                 rx_buf->dma_addr = pci_map_single(efx->pci_dev,
-                                                  rx_buf->data, rx_buf->len,
+                                                  skb->data, rx_buf->len,
                                                   PCI_DMA_FROMDEVICE);
                 if (unlikely(pci_dma_mapping_error(efx->pci_dev,
                                                    rx_buf->dma_addr))) {
@@ -213,7 +225,6 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                 rx_buf = efx_rx_buffer(rx_queue, index);
                 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                 rx_buf->u.page = page;
-                rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
                 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                 rx_buf->is_page = true;
                 ++rx_queue->added_count;
@@ -297,8 +308,6 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
         new_buf = efx_rx_buffer(rx_queue, index);
         new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
         new_buf->u.page = rx_buf->u.page;
-        new_buf->data = (void *)
-                ((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
         new_buf->len = rx_buf->len;
         new_buf->is_page = true;
         ++rx_queue->added_count;
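This recycle path gets simpler because, as the page refill path above suggests, a page can hold two half-page buffers: XOR-ing the DMA address with PAGE_SIZE >> 1 flips between the two halves, and with the data pointer removed that single XOR is the only address that needs recomputing. A small standalone sketch of the flip, using a hypothetical bus address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        /* Hypothetical bus address of a buffer in the first half of a page */
        uint64_t dma_addr = 0x7f2010;

        /* Toggle the PAGE_SIZE/2 bit to reach the page's other buffer... */
        uint64_t other = dma_addr ^ (PAGE_SIZE >> 1);
        /* ...and toggle it again to get back. */
        uint64_t back = other ^ (PAGE_SIZE >> 1);

        printf("half A 0x%llx, half B 0x%llx, round trip 0x%llx\n",
               (unsigned long long)dma_addr,
               (unsigned long long)other,
               (unsigned long long)back);
        return 0;
}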
@@ -446,7 +455,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
  */
 static void efx_rx_packet_gro(struct efx_channel *channel,
                               struct efx_rx_buffer *rx_buf,
-                              bool checksummed)
+                              const u8 *eh, bool checksummed)
 {
         struct napi_struct *napi = &channel->napi_str;
         gro_result_t gro_result;
@@ -466,11 +475,11 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
                 }
 
                 if (efx->net_dev->features & NETIF_F_RXHASH)
-                        skb->rxhash = efx_rx_buf_hash(rx_buf);
+                        skb->rxhash = efx_rx_buf_hash(eh);
 
                 skb_shinfo(skb)->frags[0].page = page;
                 skb_shinfo(skb)->frags[0].page_offset =
-                        efx_rx_buf_offset(rx_buf);
+                        efx_rx_buf_offset(efx, rx_buf);
                 skb_shinfo(skb)->frags[0].size = rx_buf->len;
                 skb_shinfo(skb)->nr_frags = 1;
 
@@ -509,7 +518,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
         bool leak_packet = false;
 
         rx_buf = efx_rx_buffer(rx_queue, index);
-        EFX_BUG_ON_PARANOID(!rx_buf->data);
 
         /* This allows the refill path to post another buffer.
          * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -548,12 +556,12 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
         /* Prefetch nice and early so data will (hopefully) be in cache by
          * the time we look at it.
          */
-        prefetch(rx_buf->data);
+        prefetch(efx_rx_buf_eh(efx, rx_buf));
 
         /* Pipeline receives so that we give time for packet headers to be
          * prefetched into cache.
          */
-        rx_buf->len = len;
+        rx_buf->len = len - efx->type->rx_buffer_hash_size;
 out:
         if (channel->rx_pkt)
                 __efx_rx_packet(channel,
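Note the final change in this hunk: __efx_rx_packet() used to hide the hash prefix after the fact by advancing rx_buf->data and shrinking rx_buf->len (removed in the next hunk); with no data pointer left to advance, the prefix is subtracted once, when the descriptor length is stored, and the eh helpers fold it into their offset sums instead. The accounting, with hypothetical numbers:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values: descriptor length reported by the NIC and
         * the per-NIC hash prefix size (efx->type->rx_buffer_hash_size). */
        unsigned int len = 64;         /* hash prefix + frame */
        unsigned int hash_size = 4;    /* illustrative prefix length */

        /* Old scheme: rx_buf->len = len, then later
         *     rx_buf->data += hash_size; rx_buf->len -= hash_size;
         * New scheme: account for the prefix once, up front. */
        unsigned int buf_len = len - hash_size;

        printf("rx_buf->len = %u bytes of frame data\n", buf_len);
        return 0;
}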
@@ -568,15 +576,13 @@ void __efx_rx_packet(struct efx_channel *channel,
 {
         struct efx_nic *efx = channel->efx;
         struct sk_buff *skb;
-
-        rx_buf->data += efx->type->rx_buffer_hash_size;
-        rx_buf->len -= efx->type->rx_buffer_hash_size;
+        u8 *eh = efx_rx_buf_eh(efx, rx_buf);
 
         /* If we're in loopback test, then pass the packet directly to the
          * loopback layer, and free the rx_buf here
          */
         if (unlikely(efx->loopback_selftest)) {
-                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
+                efx_loopback_rx_packet(efx, eh, rx_buf->len);
                 efx_free_rx_buffer(efx, rx_buf);
                 return;
         }
@@ -590,7 +596,7 @@ void __efx_rx_packet(struct efx_channel *channel,
                 skb_put(skb, rx_buf->len);
 
                 if (efx->net_dev->features & NETIF_F_RXHASH)
-                        skb->rxhash = efx_rx_buf_hash(rx_buf);
+                        skb->rxhash = efx_rx_buf_hash(eh);
 
                 /* Move past the ethernet header. rx_buf->data still points
                  * at the ethernet header */
@@ -600,7 +606,7 @@ void __efx_rx_packet(struct efx_channel *channel,
         }
 
         if (likely(checksummed || rx_buf->is_page)) {
-                efx_rx_packet_gro(channel, rx_buf, checksummed);
+                efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
                 return;
         }
 