Diffstat (limited to 'drivers/net/sfc/rx.c')

 drivers/net/sfc/rx.c | 82 ++++++++++++++++++++++++++++++++++++++++++----------------------------------------
 1 file changed, 42 insertions(+), 40 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 98bff5ada09a..a97c923b560c 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -1,7 +1,7 @@
 /****************************************************************************
  * Driver for Solarflare Solarstorm network controllers and boards
  * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2008 Solarflare Communications Inc.
+ * Copyright 2005-2009 Solarflare Communications Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
@@ -16,9 +16,8 @@
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
-#include "rx.h"
 #include "efx.h"
-#include "falcon.h"
+#include "nic.h"
 #include "selftest.h"
 #include "workarounds.h"
 
@@ -61,7 +60,7 @@
  * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
  *                    RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
-static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
+static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
 #define RX_ALLOC_LEVEL_LRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +292,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
         * fill anyway.
         */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
-       EFX_BUG_ON_PARANOID(fill_level >
-                           rx_queue->efx->type->rxd_ring_mask + 1);
+       EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 
        /* Don't fill if we don't need to */
        if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +314,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  retry:
        /* Recalculate current fill level now that we have the lock */
        fill_level = (rx_queue->added_count - rx_queue->removed_count);
-       EFX_BUG_ON_PARANOID(fill_level >
-                           rx_queue->efx->type->rxd_ring_mask + 1);
+       EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
        space = rx_queue->fast_fill_limit - fill_level;
        if (space < EFX_RX_BATCH)
                goto out_unlock;
@@ -329,8 +326,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
        do {
                for (i = 0; i < EFX_RX_BATCH; ++i) {
-                       index = (rx_queue->added_count &
-                                rx_queue->efx->type->rxd_ring_mask);
+                       index = rx_queue->added_count & EFX_RXQ_MASK;
                        rx_buf = efx_rx_buffer(rx_queue, index);
                        rc = efx_init_rx_buffer(rx_queue, rx_buf);
                        if (unlikely(rc))
@@ -345,7 +341,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 
  out:
        /* Send write pointer to card. */
-       falcon_notify_rx_desc(rx_queue);
+       efx_nic_notify_rx_desc(rx_queue);
 
        /* If the fast fill is running inside from the refill tasklet, then
         * for SMP systems it may be running on a different CPU to
@@ -448,17 +444,23 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
                              bool checksummed)
 {
        struct napi_struct *napi = &channel->napi_str;
+       gro_result_t gro_result;
 
        /* Pass the skb/page into the LRO engine */
        if (rx_buf->page) {
-               struct sk_buff *skb = napi_get_frags(napi);
+               struct page *page = rx_buf->page;
+               struct sk_buff *skb;
 
+               EFX_BUG_ON_PARANOID(rx_buf->skb);
+               rx_buf->page = NULL;
+
+               skb = napi_get_frags(napi);
                if (!skb) {
-                       put_page(rx_buf->page);
-                       goto out;
+                       put_page(page);
+                       return;
                }
 
-               skb_shinfo(skb)->frags[0].page = rx_buf->page;
+               skb_shinfo(skb)->frags[0].page = page;
                skb_shinfo(skb)->frags[0].page_offset =
                        efx_rx_buf_offset(rx_buf);
                skb_shinfo(skb)->frags[0].size = rx_buf->len;
@@ -470,17 +472,24 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
                skb->ip_summed =
                        checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
 
-               napi_gro_frags(napi);
+               skb_record_rx_queue(skb, channel->channel);
 
- out:
-               EFX_BUG_ON_PARANOID(rx_buf->skb);
-               rx_buf->page = NULL;
+               gro_result = napi_gro_frags(napi);
        } else {
-               EFX_BUG_ON_PARANOID(!rx_buf->skb);
-               EFX_BUG_ON_PARANOID(!checksummed);
+               struct sk_buff *skb = rx_buf->skb;
 
-               napi_gro_receive(napi, rx_buf->skb);
+               EFX_BUG_ON_PARANOID(!skb);
+               EFX_BUG_ON_PARANOID(!checksummed);
                rx_buf->skb = NULL;
+
+               gro_result = napi_gro_receive(napi, skb);
+       }
+
+       if (gro_result == GRO_NORMAL) {
+               channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+       } else if (gro_result != GRO_DROP) {
+               channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+               channel->irq_mod_score += 2;
        }
 }
 
@@ -558,7 +567,7 @@ void __efx_rx_packet(struct efx_channel *channel,
        if (unlikely(efx->loopback_selftest)) {
                efx_loopback_rx_packet(efx, rx_buf->data, rx_buf->len);
                efx_free_rx_buffer(efx, rx_buf);
-               goto done;
+               return;
        }
 
        if (rx_buf->skb) {
@@ -570,34 +579,28 @@ void __efx_rx_packet(struct efx_channel *channel,
                 * at the ethernet header */
                rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
                                                       efx->net_dev);
+
+               skb_record_rx_queue(rx_buf->skb, channel->channel);
        }
 
        if (likely(checksummed || rx_buf->page)) {
                efx_rx_packet_lro(channel, rx_buf, checksummed);
-               goto done;
+               return;
        }
 
        /* We now own the SKB */
        skb = rx_buf->skb;
        rx_buf->skb = NULL;
-
-       EFX_BUG_ON_PARANOID(rx_buf->page);
-       EFX_BUG_ON_PARANOID(rx_buf->skb);
        EFX_BUG_ON_PARANOID(!skb);
 
        /* Set the SKB flags */
        skb->ip_summed = CHECKSUM_NONE;
 
-       skb_record_rx_queue(skb, channel->channel);
-
        /* Pass the packet up */
        netif_receive_skb(skb);
 
        /* Update allocation strategy method */
        channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
-
- done:
-       ;
 }
 
 void efx_rx_strategy(struct efx_channel *channel)
@@ -632,12 +635,12 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
        EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
 
        /* Allocate RX buffers */
-       rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+       rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
        rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
        if (!rx_queue->buffer)
                return -ENOMEM;
 
-       rc = falcon_probe_rx(rx_queue);
+       rc = efx_nic_probe_rx(rx_queue);
        if (rc) {
                kfree(rx_queue->buffer);
                rx_queue->buffer = NULL;
@@ -647,7 +650,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
-       struct efx_nic *efx = rx_queue->efx;
        unsigned int max_fill, trigger, limit;
 
        EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -660,7 +662,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->min_overfill = -1U;
 
        /* Initialise limit fields */
-       max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+       max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
        limit = max_fill * min(rx_refill_limit, 100U) / 100U;
 
@@ -669,7 +671,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
        rx_queue->fast_fill_limit = limit;
 
        /* Set up RX descriptor ring */
-       falcon_init_rx(rx_queue);
+       efx_nic_init_rx(rx_queue);
 }
 
 void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
@@ -679,11 +681,11 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
        EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
-       falcon_fini_rx(rx_queue);
+       efx_nic_fini_rx(rx_queue);
 
        /* Release RX buffers NB start at index 0 not current HW ptr */
        if (rx_queue->buffer) {
-               for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+               for (i = 0; i <= EFX_RXQ_MASK; i++) {
                        rx_buf = efx_rx_buffer(rx_queue, i);
                        efx_fini_rx_buffer(rx_queue, rx_buf);
                }
@@ -704,7 +706,7 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
 {
        EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
 
-       falcon_remove_rx(rx_queue);
+       efx_nic_remove_rx(rx_queue);
 
        kfree(rx_queue->buffer);
        rx_queue->buffer = NULL;
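
Two of the changes above lend themselves to a short illustration. First, the
new EFX_RXQ_MASK indexing (index = rx_queue->added_count & EFX_RXQ_MASK) only
works because the ring size is a power of two: a free-running counter maps to
a ring slot with a single AND instead of a modulo, and the fill level is just
the difference of two such counters, which stays correct even across unsigned
wrap-around. A minimal standalone sketch of the idiom, using an illustrative
RXQ_SIZE rather than the driver's actual constant:

        #include <assert.h>
        #include <stdio.h>

        /* Illustrative ring size; it must be a power of two so that
         * MASK = SIZE - 1 turns a free-running counter into a ring index. */
        #define RXQ_SIZE 4096U
        #define RXQ_MASK (RXQ_SIZE - 1)

        int main(void)
        {
                unsigned int added_count = 0, removed_count = 0;

                /* Counters are never reset; only their masked value is
                 * used as a descriptor slot. */
                for (int i = 0; i < 10000; i++) {
                        unsigned int index = added_count & RXQ_MASK;
                        assert(index < RXQ_SIZE);
                        added_count++;
                }
                removed_count = added_count - 100;

                /* Unsigned subtraction is modulo 2^32, so the fill level
                 * stays correct even after the counters wrap. */
                printf("fill level = %u\n", added_count - removed_count);
                return 0;
        }

Second, efx_rx_packet_lro() now keys its allocation feedback off the
gro_result_t returned by napi_gro_frags()/napi_gro_receive(): GRO_NORMAL means
the packet went up the stack unmerged (a vote for skb-based buffers), while
any result other than GRO_DROP means GRO consumed it (a vote for page-based
buffers, plus a bump to the adaptive interrupt moderation score). A mock of
that feedback loop; the enum values and weights below are illustrative
stand-ins, not the kernel's definitions:

        /* Stand-in for the kernel's gro_result_t; values are illustrative. */
        enum gro_result { GRO_MERGED, GRO_HELD, GRO_NORMAL, GRO_DROP };

        /* Assumed weights: unmerged packets pull the level toward skb
         * allocation, merged ones push it toward page allocation. */
        #define RX_ALLOC_FACTOR_SKB (-2)
        #define RX_ALLOC_FACTOR_LRO 1

        struct mock_channel {
                int rx_alloc_level;     /* steers page vs. skb allocation */
                int irq_mod_score;      /* feeds interrupt moderation */
        };

        static void record_gro_result(struct mock_channel *ch,
                                      enum gro_result r)
        {
                if (r == GRO_NORMAL) {
                        /* GRO handed the packet up unmerged. */
                        ch->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
                } else if (r != GRO_DROP) {
                        /* GRO merged or held the packet. */
                        ch->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
                        ch->irq_mod_score += 2;
                }
        }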