author     Steve Hodgson <shodgson@solarflare.com>      2011-02-24 18:36:01 -0500
committer  Ben Hutchings <bhutchings@solarflare.com>    2011-02-28 18:57:23 -0500
commit     8ba5366adacef220b6ce16dca777600433a22a42 (patch)
tree       5eaa58616988d2e08e65ab4175671d74782d2548
parent     5b2c4dd2ec12cf0e53b2bd2926f0fe2d1fbb4eda (diff)
sfc: Reduce size of efx_rx_buffer by unionising skb and page
[bwh: Forward-ported to net-next-2.6.]
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
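
The subject line names the whole change: an RX buffer's skb and page pointers are never valid at the same time, so folding them into a union plus a small is_page discriminator shrinks every struct efx_rx_buffer. The following is an illustrative, self-contained C sketch of that size saving (not part of the commit; the void * members stand in for the kernel's struct sk_buff * and struct page *, and the field layout is simplified):

#include <stdbool.h>
#include <stdio.h>

/* Before: two mutually exclusive pointers, each taking its own slot. */
struct old_rx_buffer {
	unsigned long long dma_addr;	/* stand-in for dma_addr_t */
	void *skb;			/* valid for skb-mode buffers, else NULL */
	void *page;			/* valid for page-mode buffers, else NULL */
	char *data;
	unsigned int len;
};

/* After: the two pointers share storage; is_page says which member is live. */
struct new_rx_buffer {
	unsigned long long dma_addr;
	union {
		void *skb;
		void *page;
	} u;
	char *data;
	unsigned int len;
	bool is_page;
};

int main(void)
{
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct old_rx_buffer), sizeof(struct new_rx_buffer));
	return 0;
}

On a typical LP64 build this prints old: 40 bytes, new: 32 bytes -- roughly the one-pointer-per-buffer saving the patch aims for in the real structure.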
-rw-r--r--   drivers/net/sfc/net_driver.h    8
-rw-r--r--   drivers/net/sfc/rx.c           96
2 files changed, 51 insertions(+), 53 deletions(-)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 15b9068e5b87..59ff32ac7ec6 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -216,13 +216,17 @@ struct efx_tx_queue {
  *	If both this and skb are %NULL, the buffer slot is currently free.
  * @data: Pointer to ethernet header
  * @len: Buffer length, in bytes.
+ * @is_page: Indicates if @page is valid. If false, @skb is valid.
  */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
-	struct sk_buff *skb;
-	struct page *page;
+	union {
+		struct sk_buff *skb;
+		struct page *page;
+	} u;
 	char *data;
 	unsigned int len;
+	bool is_page;
 };
 
 /**
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 3925fd621177..bcbd2ec2d92a 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -129,6 +129,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	struct net_device *net_dev = efx->net_dev;
 	struct efx_rx_buffer *rx_buf;
+	struct sk_buff *skb;
 	int skb_len = efx->rx_buffer_len;
 	unsigned index, count;
 
@@ -136,24 +137,24 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 
-		rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
-		if (unlikely(!rx_buf->skb))
+		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
+		if (unlikely(!skb))
 			return -ENOMEM;
-		rx_buf->page = NULL;
 
 		/* Adjust the SKB for padding and checksum */
-		skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+		skb_reserve(skb, NET_IP_ALIGN);
+		rx_buf->data = (char *)skb->data;
 		rx_buf->len = skb_len - NET_IP_ALIGN;
-		rx_buf->data = (char *)rx_buf->skb->data;
-		rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rx_buf->is_page = false;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
 		rx_buf->dma_addr = pci_map_single(efx->pci_dev,
 						  rx_buf->data, rx_buf->len,
 						  PCI_DMA_FROMDEVICE);
 		if (unlikely(pci_dma_mapping_error(efx->pci_dev,
 						   rx_buf->dma_addr))) {
-			dev_kfree_skb_any(rx_buf->skb);
-			rx_buf->skb = NULL;
+			dev_kfree_skb_any(skb);
+			rx_buf->u.skb = NULL;
 			return -EIO;
 		}
 
@@ -211,10 +212,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 		index = rx_queue->added_count & rx_queue->ptr_mask;
 		rx_buf = efx_rx_buffer(rx_queue, index);
 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-		rx_buf->skb = NULL;
-		rx_buf->page = page;
+		rx_buf->u.page = page;
 		rx_buf->data = page_addr + EFX_PAGE_IP_ALIGN;
 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+		rx_buf->is_page = true;
 		++rx_queue->added_count;
 		++rx_queue->alloc_page_count;
 		++state->refcnt;
@@ -235,19 +236,17 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
 				struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->page) {
+	if (rx_buf->is_page && rx_buf->u.page) {
 		struct efx_rx_page_state *state;
 
-		EFX_BUG_ON_PARANOID(rx_buf->skb);
-
-		state = page_address(rx_buf->page);
+		state = page_address(rx_buf->u.page);
 		if (--state->refcnt == 0) {
 			pci_unmap_page(efx->pci_dev,
 				       state->dma_addr,
 				       efx_rx_buf_size(efx),
 				       PCI_DMA_FROMDEVICE);
 		}
-	} else if (likely(rx_buf->skb)) {
+	} else if (!rx_buf->is_page && rx_buf->u.skb) {
 		pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
 				 rx_buf->len, PCI_DMA_FROMDEVICE);
 	}
@@ -256,12 +255,12 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
 static void efx_free_rx_buffer(struct efx_nic *efx,
 			       struct efx_rx_buffer *rx_buf)
 {
-	if (rx_buf->page) {
-		__free_pages(rx_buf->page, efx->rx_buffer_order);
-		rx_buf->page = NULL;
-	} else if (likely(rx_buf->skb)) {
-		dev_kfree_skb_any(rx_buf->skb);
-		rx_buf->skb = NULL;
+	if (rx_buf->is_page && rx_buf->u.page) {
+		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
+		rx_buf->u.page = NULL;
+	} else if (!rx_buf->is_page && rx_buf->u.skb) {
+		dev_kfree_skb_any(rx_buf->u.skb);
+		rx_buf->u.skb = NULL;
 	}
 }
 
@@ -277,7 +276,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 				    struct efx_rx_buffer *rx_buf)
 {
-	struct efx_rx_page_state *state = page_address(rx_buf->page);
+	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
 	struct efx_rx_buffer *new_buf;
 	unsigned fill_level, index;
 
@@ -292,16 +291,16 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
 	}
 
 	++state->refcnt;
-	get_page(rx_buf->page);
+	get_page(rx_buf->u.page);
 
 	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
-	new_buf->skb = NULL;
-	new_buf->page = rx_buf->page;
+	new_buf->u.page = rx_buf->u.page;
 	new_buf->data = (void *)
 		((__force unsigned long)rx_buf->data ^ (PAGE_SIZE >> 1));
 	new_buf->len = rx_buf->len;
+	new_buf->is_page = true;
 	++rx_queue->added_count;
 }
 
@@ -315,16 +314,15 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
 	struct efx_rx_buffer *new_buf;
 	unsigned index;
 
-	if (rx_buf->page != NULL && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
-	    page_count(rx_buf->page) == 1)
+	if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
+	    page_count(rx_buf->u.page) == 1)
 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
 
 	index = rx_queue->added_count & rx_queue->ptr_mask;
 	new_buf = efx_rx_buffer(rx_queue, index);
 
 	memcpy(new_buf, rx_buf, sizeof(*new_buf));
-	rx_buf->page = NULL;
-	rx_buf->skb = NULL;
+	rx_buf->u.page = NULL;
 	++rx_queue->added_count;
 }
 
@@ -428,7 +426,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
 		 * data at the end of the skb will be trashed. So
 		 * we have no choice but to leak the fragment.
 		 */
-		*leak_packet = (rx_buf->skb != NULL);
+		*leak_packet = !rx_buf->is_page;
 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
 	} else {
 		if (net_ratelimit())
@@ -454,13 +452,12 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 	gro_result_t gro_result;
 
 	/* Pass the skb/page into the GRO engine */
-	if (rx_buf->page) {
+	if (rx_buf->is_page) {
 		struct efx_nic *efx = channel->efx;
-		struct page *page = rx_buf->page;
+		struct page *page = rx_buf->u.page;
 		struct sk_buff *skb;
 
-		EFX_BUG_ON_PARANOID(rx_buf->skb);
-		rx_buf->page = NULL;
+		rx_buf->u.page = NULL;
 
 		skb = napi_get_frags(napi);
 		if (!skb) {
@@ -487,11 +484,10 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 
 		gro_result = napi_gro_frags(napi);
 	} else {
-		struct sk_buff *skb = rx_buf->skb;
+		struct sk_buff *skb = rx_buf->u.skb;
 
-		EFX_BUG_ON_PARANOID(!skb);
 		EFX_BUG_ON_PARANOID(!checksummed);
-		rx_buf->skb = NULL;
+		rx_buf->u.skb = NULL;
 
 		gro_result = napi_gro_receive(napi, skb);
 	}
@@ -514,8 +510,6 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 
 	rx_buf = efx_rx_buffer(rx_queue, index);
 	EFX_BUG_ON_PARANOID(!rx_buf->data);
-	EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
-	EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
 
 	/* This allows the refill path to post another buffer.
 	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
@@ -587,32 +581,32 @@ void __efx_rx_packet(struct efx_channel *channel,
 		return;
 	}
 
-	if (rx_buf->skb) {
-		prefetch(skb_shinfo(rx_buf->skb));
+	if (!rx_buf->is_page) {
+		skb = rx_buf->u.skb;
+
+		prefetch(skb_shinfo(skb));
 
-		skb_reserve(rx_buf->skb, efx->type->rx_buffer_hash_size);
-		skb_put(rx_buf->skb, rx_buf->len);
+		skb_reserve(skb, efx->type->rx_buffer_hash_size);
+		skb_put(skb, rx_buf->len);
 
 		if (efx->net_dev->features & NETIF_F_RXHASH)
-			rx_buf->skb->rxhash = efx_rx_buf_hash(rx_buf);
+			skb->rxhash = efx_rx_buf_hash(rx_buf);
 
 		/* Move past the ethernet header. rx_buf->data still points
 		 * at the ethernet header */
-		rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
-						       efx->net_dev);
+		skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-		skb_record_rx_queue(rx_buf->skb, channel->channel);
+		skb_record_rx_queue(skb, channel->channel);
 	}
 
-	if (likely(checksummed || rx_buf->page)) {
+	if (likely(checksummed || rx_buf->is_page)) {
 		efx_rx_packet_gro(channel, rx_buf, checksummed);
 		return;
 	}
 
 	/* We now own the SKB */
-	skb = rx_buf->skb;
-	rx_buf->skb = NULL;
-	EFX_BUG_ON_PARANOID(!skb);
+	skb = rx_buf->u.skb;
+	rx_buf->u.skb = NULL;
 
 	/* Set the SKB flags */
 	skb_checksum_none_assert(skb);
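
Every rx.c hunk above follows the same discipline for the new discriminated union: test is_page first, then touch only the matching union member. Below is a small, purely illustrative user-space sketch of that free-path check (not driver code; plain malloc/free stand in for dev_kfree_skb_any() and __free_pages(), and the struct is simplified):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative buffer mirroring the union + is_page discriminator above;
 * the void * members stand in for struct sk_buff * and struct page *. */
struct rx_buffer {
	union {
		void *skb;
		void *page;
	} u;
	bool is_page;
};

/* Free path: the discriminator, not a NULL test on each pointer, decides
 * which union member may be dereferenced or freed. */
static void free_rx_buffer(struct rx_buffer *buf)
{
	if (buf->is_page && buf->u.page) {
		free(buf->u.page);		/* stands in for __free_pages() */
		buf->u.page = NULL;
	} else if (!buf->is_page && buf->u.skb) {
		free(buf->u.skb);		/* stands in for dev_kfree_skb_any() */
		buf->u.skb = NULL;
	}
}

int main(void)
{
	struct rx_buffer buf = { .is_page = false };

	buf.u.skb = malloc(64);
	free_rx_buffer(&buf);
	printf("skb member after free: %p\n", buf.u.skb);
	return 0;
}

The reason for checking the flag rather than both pointers is that, once the members share storage, u.skb and u.page alias the same bytes, so a non-NULL value alone no longer says which kind of buffer is present.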