diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2011-08-26 13:05:11 -0400 |
---|---|---|
committer | Ben Hutchings <bhutchings@solarflare.com> | 2012-01-26 19:11:00 -0500 |
commit | db3395697cad6e9dff8d21249e0b59dc9bb83b48 (patch) | |
tree | f3cfd937eda7e0a88733c4c49e1512fa382253e5 /drivers/net/ethernet/sfc/rx.c | |
parent | 1ddceb4c69463e09b6929c750046c59589d45d82 (diff) |
sfc: Replace efx_rx_buffer::is_page and other booleans with a flags field
Replace checksummed and discard booleans from efx_handle_rx_event()
with a bitmask, added to the flags field.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r-- | drivers/net/ethernet/sfc/rx.c | 65 |
1 file changed, 32 insertions, 33 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index d97c6ebcf06d..a33aef25ead9 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c | |||
@@ -108,7 +108,7 @@ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) | |||
108 | 108 | ||
109 | static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) | 109 | static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf) |
110 | { | 110 | { |
111 | if (buf->is_page) | 111 | if (buf->flags & EFX_RX_BUF_PAGE) |
112 | return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); | 112 | return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf); |
113 | else | 113 | else |
114 | return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size; | 114 | return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size; |
@@ -158,7 +158,7 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue) | |||
158 | /* Adjust the SKB for padding and checksum */ | 158 | /* Adjust the SKB for padding and checksum */ |
159 | skb_reserve(skb, NET_IP_ALIGN); | 159 | skb_reserve(skb, NET_IP_ALIGN); |
160 | rx_buf->len = skb_len - NET_IP_ALIGN; | 160 | rx_buf->len = skb_len - NET_IP_ALIGN; |
161 | rx_buf->is_page = false; | 161 | rx_buf->flags = 0; |
162 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 162 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
163 | 163 | ||
164 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, | 164 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, |
@@ -227,7 +227,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) | |||
227 | rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; | 227 | rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; |
228 | rx_buf->u.page = page; | 228 | rx_buf->u.page = page; |
229 | rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; | 229 | rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; |
230 | rx_buf->is_page = true; | 230 | rx_buf->flags = EFX_RX_BUF_PAGE; |
231 | ++rx_queue->added_count; | 231 | ++rx_queue->added_count; |
232 | ++rx_queue->alloc_page_count; | 232 | ++rx_queue->alloc_page_count; |
233 | ++state->refcnt; | 233 | ++state->refcnt; |
@@ -248,7 +248,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue) | |||
248 | static void efx_unmap_rx_buffer(struct efx_nic *efx, | 248 | static void efx_unmap_rx_buffer(struct efx_nic *efx, |
249 | struct efx_rx_buffer *rx_buf) | 249 | struct efx_rx_buffer *rx_buf) |
250 | { | 250 | { |
251 | if (rx_buf->is_page && rx_buf->u.page) { | 251 | if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { |
252 | struct efx_rx_page_state *state; | 252 | struct efx_rx_page_state *state; |
253 | 253 | ||
254 | state = page_address(rx_buf->u.page); | 254 | state = page_address(rx_buf->u.page); |
@@ -258,7 +258,7 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
258 | efx_rx_buf_size(efx), | 258 | efx_rx_buf_size(efx), |
259 | PCI_DMA_FROMDEVICE); | 259 | PCI_DMA_FROMDEVICE); |
260 | } | 260 | } |
261 | } else if (!rx_buf->is_page && rx_buf->u.skb) { | 261 | } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { |
262 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, | 262 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, |
263 | rx_buf->len, PCI_DMA_FROMDEVICE); | 263 | rx_buf->len, PCI_DMA_FROMDEVICE); |
264 | } | 264 | } |
@@ -267,10 +267,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
267 | static void efx_free_rx_buffer(struct efx_nic *efx, | 267 | static void efx_free_rx_buffer(struct efx_nic *efx, |
268 | struct efx_rx_buffer *rx_buf) | 268 | struct efx_rx_buffer *rx_buf) |
269 | { | 269 | { |
270 | if (rx_buf->is_page && rx_buf->u.page) { | 270 | if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { |
271 | __free_pages(rx_buf->u.page, efx->rx_buffer_order); | 271 | __free_pages(rx_buf->u.page, efx->rx_buffer_order); |
272 | rx_buf->u.page = NULL; | 272 | rx_buf->u.page = NULL; |
273 | } else if (!rx_buf->is_page && rx_buf->u.skb) { | 273 | } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) { |
274 | dev_kfree_skb_any(rx_buf->u.skb); | 274 | dev_kfree_skb_any(rx_buf->u.skb); |
275 | rx_buf->u.skb = NULL; | 275 | rx_buf->u.skb = NULL; |
276 | } | 276 | } |
@@ -310,7 +310,7 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, | |||
310 | new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); | 310 | new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); |
311 | new_buf->u.page = rx_buf->u.page; | 311 | new_buf->u.page = rx_buf->u.page; |
312 | new_buf->len = rx_buf->len; | 312 | new_buf->len = rx_buf->len; |
313 | new_buf->is_page = true; | 313 | new_buf->flags = EFX_RX_BUF_PAGE; |
314 | ++rx_queue->added_count; | 314 | ++rx_queue->added_count; |
315 | } | 315 | } |
316 | 316 | ||
@@ -324,7 +324,10 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel, | |||
324 | struct efx_rx_buffer *new_buf; | 324 | struct efx_rx_buffer *new_buf; |
325 | unsigned index; | 325 | unsigned index; |
326 | 326 | ||
327 | if (rx_buf->is_page && efx->rx_buffer_len <= EFX_RX_HALF_PAGE && | 327 | rx_buf->flags &= EFX_RX_BUF_PAGE; |
328 | |||
329 | if ((rx_buf->flags & EFX_RX_BUF_PAGE) && | ||
330 | efx->rx_buffer_len <= EFX_RX_HALF_PAGE && | ||
328 | page_count(rx_buf->u.page) == 1) | 331 | page_count(rx_buf->u.page) == 1) |
329 | efx_resurrect_rx_buffer(rx_queue, rx_buf); | 332 | efx_resurrect_rx_buffer(rx_queue, rx_buf); |
330 | 333 | ||
@@ -411,8 +414,7 @@ void efx_rx_slow_fill(unsigned long context) | |||
411 | 414 | ||
412 | static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | 415 | static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, |
413 | struct efx_rx_buffer *rx_buf, | 416 | struct efx_rx_buffer *rx_buf, |
414 | int len, bool *discard, | 417 | int len, bool *leak_packet) |
415 | bool *leak_packet) | ||
416 | { | 418 | { |
417 | struct efx_nic *efx = rx_queue->efx; | 419 | struct efx_nic *efx = rx_queue->efx; |
418 | unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; | 420 | unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; |
@@ -423,7 +425,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
423 | /* The packet must be discarded, but this is only a fatal error | 425 | /* The packet must be discarded, but this is only a fatal error |
424 | * if the caller indicated it was | 426 | * if the caller indicated it was |
425 | */ | 427 | */ |
426 | *discard = true; | 428 | rx_buf->flags |= EFX_RX_PKT_DISCARD; |
427 | 429 | ||
428 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { | 430 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { |
429 | if (net_ratelimit()) | 431 | if (net_ratelimit()) |
@@ -436,7 +438,7 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
436 | * data at the end of the skb will be trashed. So | 438 | * data at the end of the skb will be trashed. So |
437 | * we have no choice but to leak the fragment. | 439 | * we have no choice but to leak the fragment. |
438 | */ | 440 | */ |
439 | *leak_packet = !rx_buf->is_page; | 441 | *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE); |
440 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); | 442 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); |
441 | } else { | 443 | } else { |
442 | if (net_ratelimit()) | 444 | if (net_ratelimit()) |
@@ -456,13 +458,13 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | |||
456 | */ | 458 | */ |
457 | static void efx_rx_packet_gro(struct efx_channel *channel, | 459 | static void efx_rx_packet_gro(struct efx_channel *channel, |
458 | struct efx_rx_buffer *rx_buf, | 460 | struct efx_rx_buffer *rx_buf, |
459 | const u8 *eh, bool checksummed) | 461 | const u8 *eh) |
460 | { | 462 | { |
461 | struct napi_struct *napi = &channel->napi_str; | 463 | struct napi_struct *napi = &channel->napi_str; |
462 | gro_result_t gro_result; | 464 | gro_result_t gro_result; |
463 | 465 | ||
464 | /* Pass the skb/page into the GRO engine */ | 466 | /* Pass the skb/page into the GRO engine */ |
465 | if (rx_buf->is_page) { | 467 | if (rx_buf->flags & EFX_RX_BUF_PAGE) { |
466 | struct efx_nic *efx = channel->efx; | 468 | struct efx_nic *efx = channel->efx; |
467 | struct page *page = rx_buf->u.page; | 469 | struct page *page = rx_buf->u.page; |
468 | struct sk_buff *skb; | 470 | struct sk_buff *skb; |
@@ -484,8 +486,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel, | |||
484 | skb->len = rx_buf->len; | 486 | skb->len = rx_buf->len; |
485 | skb->data_len = rx_buf->len; | 487 | skb->data_len = rx_buf->len; |
486 | skb->truesize += rx_buf->len; | 488 | skb->truesize += rx_buf->len; |
487 | skb->ip_summed = | 489 | skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ? |
488 | checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; | 490 | CHECKSUM_UNNECESSARY : CHECKSUM_NONE); |
489 | 491 | ||
490 | skb_record_rx_queue(skb, channel->channel); | 492 | skb_record_rx_queue(skb, channel->channel); |
491 | 493 | ||
@@ -493,7 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel, | |||
493 | } else { | 495 | } else { |
494 | struct sk_buff *skb = rx_buf->u.skb; | 496 | struct sk_buff *skb = rx_buf->u.skb; |
495 | 497 | ||
496 | EFX_BUG_ON_PARANOID(!checksummed); | 498 | EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED)); |
497 | rx_buf->u.skb = NULL; | 499 | rx_buf->u.skb = NULL; |
498 | 500 | ||
499 | gro_result = napi_gro_receive(napi, skb); | 501 | gro_result = napi_gro_receive(napi, skb); |
@@ -508,7 +510,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel, | |||
508 | } | 510 | } |
509 | 511 | ||
510 | void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | 512 | void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, |
511 | unsigned int len, bool checksummed, bool discard) | 513 | unsigned int len, u16 flags) |
512 | { | 514 | { |
513 | struct efx_nic *efx = rx_queue->efx; | 515 | struct efx_nic *efx = rx_queue->efx; |
514 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | 516 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
@@ -516,6 +518,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
516 | bool leak_packet = false; | 518 | bool leak_packet = false; |
517 | 519 | ||
518 | rx_buf = efx_rx_buffer(rx_queue, index); | 520 | rx_buf = efx_rx_buffer(rx_queue, index); |
521 | rx_buf->flags |= flags; | ||
519 | 522 | ||
520 | /* This allows the refill path to post another buffer. | 523 | /* This allows the refill path to post another buffer. |
521 | * EFX_RXD_HEAD_ROOM ensures that the slot we are using | 524 | * EFX_RXD_HEAD_ROOM ensures that the slot we are using |
@@ -524,18 +527,17 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
524 | rx_queue->removed_count++; | 527 | rx_queue->removed_count++; |
525 | 528 | ||
526 | /* Validate the length encoded in the event vs the descriptor pushed */ | 529 | /* Validate the length encoded in the event vs the descriptor pushed */ |
527 | efx_rx_packet__check_len(rx_queue, rx_buf, len, | 530 | efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet); |
528 | &discard, &leak_packet); | ||
529 | 531 | ||
530 | netif_vdbg(efx, rx_status, efx->net_dev, | 532 | netif_vdbg(efx, rx_status, efx->net_dev, |
531 | "RX queue %d received id %x at %llx+%x %s%s\n", | 533 | "RX queue %d received id %x at %llx+%x %s%s\n", |
532 | efx_rx_queue_index(rx_queue), index, | 534 | efx_rx_queue_index(rx_queue), index, |
533 | (unsigned long long)rx_buf->dma_addr, len, | 535 | (unsigned long long)rx_buf->dma_addr, len, |
534 | (checksummed ? " [SUMMED]" : ""), | 536 | (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", |
535 | (discard ? " [DISCARD]" : "")); | 537 | (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); |
536 | 538 | ||
537 | /* Discard packet, if instructed to do so */ | 539 | /* Discard packet, if instructed to do so */ |
538 | if (unlikely(discard)) { | 540 | if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { |
539 | if (unlikely(leak_packet)) | 541 | if (unlikely(leak_packet)) |
540 | channel->n_skbuff_leaks++; | 542 | channel->n_skbuff_leaks++; |
541 | else | 543 | else |
@@ -562,10 +564,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | |||
562 | rx_buf->len = len - efx->type->rx_buffer_hash_size; | 564 | rx_buf->len = len - efx->type->rx_buffer_hash_size; |
563 | out: | 565 | out: |
564 | if (channel->rx_pkt) | 566 | if (channel->rx_pkt) |
565 | __efx_rx_packet(channel, | 567 | __efx_rx_packet(channel, channel->rx_pkt); |
566 | channel->rx_pkt, channel->rx_pkt_csummed); | ||
567 | channel->rx_pkt = rx_buf; | 568 | channel->rx_pkt = rx_buf; |
568 | channel->rx_pkt_csummed = checksummed; | ||
569 | } | 569 | } |
570 | 570 | ||
571 | static void efx_rx_deliver(struct efx_channel *channel, | 571 | static void efx_rx_deliver(struct efx_channel *channel, |
@@ -588,8 +588,7 @@ static void efx_rx_deliver(struct efx_channel *channel, | |||
588 | } | 588 | } |
589 | 589 | ||
590 | /* Handle a received packet. Second half: Touches packet payload. */ | 590 | /* Handle a received packet. Second half: Touches packet payload. */ |
591 | void __efx_rx_packet(struct efx_channel *channel, | 591 | void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) |
592 | struct efx_rx_buffer *rx_buf, bool checksummed) | ||
593 | { | 592 | { |
594 | struct efx_nic *efx = channel->efx; | 593 | struct efx_nic *efx = channel->efx; |
595 | u8 *eh = efx_rx_buf_eh(efx, rx_buf); | 594 | u8 *eh = efx_rx_buf_eh(efx, rx_buf); |
@@ -603,7 +602,7 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
603 | return; | 602 | return; |
604 | } | 603 | } |
605 | 604 | ||
606 | if (!rx_buf->is_page) { | 605 | if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) { |
607 | struct sk_buff *skb = rx_buf->u.skb; | 606 | struct sk_buff *skb = rx_buf->u.skb; |
608 | 607 | ||
609 | prefetch(skb_shinfo(skb)); | 608 | prefetch(skb_shinfo(skb)); |
@@ -622,10 +621,10 @@ void __efx_rx_packet(struct efx_channel *channel, | |||
622 | } | 621 | } |
623 | 622 | ||
624 | if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) | 623 | if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) |
625 | checksummed = false; | 624 | rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; |
626 | 625 | ||
627 | if (likely(checksummed || rx_buf->is_page)) | 626 | if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))) |
628 | efx_rx_packet_gro(channel, rx_buf, eh, checksummed); | 627 | efx_rx_packet_gro(channel, rx_buf, eh); |
629 | else | 628 | else |
630 | efx_rx_deliver(channel, rx_buf); | 629 | efx_rx_deliver(channel, rx_buf); |
631 | } | 630 | } |