author      Stephen Hemminger <shemminger@vyatta.com>    2010-12-06 07:33:01 -0500
committer   David S. Miller <davem@davemloft.net>        2010-12-10 18:03:24 -0500
commit      4afb7527ac8cc7bd8f03570e12f6eed0eca03363 (patch)
tree        d2106630e16f7495ac09f230382d85df5182da8c /drivers/net/sfc
parent      376d940ee91318cc6becefbb9454bb4454d7473f (diff)
sfc: convert references to LRO to GRO
This driver now uses Generic Receive Offload, not the older LRO.
Change references to LRO in names and comments.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
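For background: with in-driver LRO the driver merged received TCP segments itself before handing them to the stack, whereas with GRO it simply feeds each completed RX buffer to the NAPI layer, which decides whether and how to merge. A minimal sketch of that hand-off, with hypothetical names (my_poll, my_rx_to_skb) standing in for a driver's real helpers; this is an illustration of the general pattern, not sfc's code:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Driver-specific: turn the next completed RX descriptor into an skb
 * (hypothetical helper, declared here only to keep the sketch whole). */
static struct sk_buff *my_rx_to_skb(struct napi_struct *napi);

/* Hypothetical NAPI poll handler: each completed RX buffer is handed to
 * the generic GRO layer with napi_gro_receive(); the stack, not the
 * driver, performs any merging. */
static int my_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        while (done < budget) {
                struct sk_buff *skb = my_rx_to_skb(napi);

                if (!skb)
                        break;
                napi_gro_receive(napi, skb);
                done++;
        }
        if (done < budget)
                napi_complete(napi);
        return done;
}

Unlike LRO's in-driver merging, aggregates built this way can be resegmented by the stack (via GSO) when forwarding, which is why GRO is safe to leave enabled on bridging and routing setups where LRO had to be disabled.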
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--    drivers/net/sfc/rx.c    30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158e..3925fd621177 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
  * This driver supports two methods for allocating and using RX buffers:
  * each RX buffer may be backed by an skb or by an order-n page.
  *
- * When LRO is in use then the second method has a lower overhead,
+ * When GRO is in use then the second method has a lower overhead,
  * since we don't have to allocate then free skbs on reassembled frames.
  *
  * Values:
@@ -50,25 +50,25 @@
  *
  * - Since pushing and popping descriptors are separated by the rx_queue
  *   size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for LRO is less
- *   than the performance hit of using page-based allocation of non-LRO,
+ * - The performance win by using page-based allocation for GRO is less
+ *   than the performance hit of using page-based allocation of non-GRO,
  *   so the watermarks should reflect this.
  *
  * Per channel we maintain a single variable, updated by each channel:
  *
- *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
  *                      RX_ALLOC_FACTOR_SKB)
  * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
  * limits the hysteresis), and update the allocation strategy:
  *
- *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
 static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
-#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_GRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_GRO 1
 #define RX_ALLOC_FACTOR_SKB (-2)
 
 /* This is the percentage fill level below which new RX descriptors
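The comment block in this hunk fully specifies the adaptive strategy: the channel keeps a signed score, each frame that GRO merged adds RX_ALLOC_FACTOR_GRO (+1), each frame that went up as a plain skb adds RX_ALLOC_FACTOR_SKB (-2), and once per NAPI poll the score is clamped to 0..RX_ALLOC_LEVEL_MAX before being compared against RX_ALLOC_LEVEL_GRO. A self-contained userspace sketch of that hysteresis, using the constants from the hunk; the simulation harness in main() is illustrative only:

#include <stdio.h>
#include <stdbool.h>

#define RX_ALLOC_LEVEL_GRO  0x2000
#define RX_ALLOC_LEVEL_MAX  0x3000
#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

/* Mirror of the per-channel accumulator described in the comment block. */
static int rx_alloc_level;

/* Per received frame: credit GRO merges, debit pass-throughs. */
static void account_rx(bool gro_performed)
{
        rx_alloc_level += gro_performed ? RX_ALLOC_FACTOR_GRO
                                        : RX_ALLOC_FACTOR_SKB;
}

/* Once per NAPI poll: clamp the level, then pick the strategy. */
static const char *strategy(void)
{
        if (rx_alloc_level < 0)
                rx_alloc_level = 0;
        else if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
                rx_alloc_level = RX_ALLOC_LEVEL_MAX;
        return rx_alloc_level > RX_ALLOC_LEVEL_GRO ? "page" : "skb";
}

int main(void)
{
        int i;

        for (i = 0; i < 0x2800; i++)    /* sustained GRO merging */
                account_rx(true);
        printf("after GRO burst: %s\n", strategy());   /* -> page */
        for (i = 0; i < 0x1000; i++)    /* stack stops merging */
                account_rx(false);
        printf("after skb burst: %s\n", strategy());   /* -> skb */
        return 0;
}

The +1 : -2 asymmetry means a channel abandons page-based allocation quickly once GRO stops merging, but needs sustained merging before committing to the more expensive page-based setup.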
@@ -441,19 +441,19 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
         efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
 }
 
-/* Pass a received packet up through the generic LRO stack
+/* Pass a received packet up through the generic GRO stack
  *
  * Handles driverlink veto, and passes the fragment up via
- * the appropriate LRO method
+ * the appropriate GRO method
  */
-static void efx_rx_packet_lro(struct efx_channel *channel,
+static void efx_rx_packet_gro(struct efx_channel *channel,
                               struct efx_rx_buffer *rx_buf,
                               bool checksummed)
 {
         struct napi_struct *napi = &channel->napi_str;
         gro_result_t gro_result;
 
-        /* Pass the skb/page into the LRO engine */
+        /* Pass the skb/page into the GRO engine */
         if (rx_buf->page) {
                 struct efx_nic *efx = channel->efx;
                 struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
         if (gro_result == GRO_NORMAL) {
                 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
         } else if (gro_result != GRO_DROP) {
-                channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+                channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
                 channel->irq_mod_score += 2;
         }
 }
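This hunk is where the accumulator gets its input: napi_gro_receive() and napi_gro_frags() return a gro_result_t, and GRO_NORMAL means the packet was passed through unmerged, i.e. GRO bought nothing for that frame. A sketch of the feedback pattern, assuming a cut-down hypothetical channel struct (my_channel and my_rx_to_gro are illustrative, not sfc's types):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_ALLOC_FACTOR_GRO 1
#define RX_ALLOC_FACTOR_SKB (-2)

struct my_channel {
        struct napi_struct napi;
        int rx_alloc_level;
        unsigned int irq_mod_score;
};

/* Score one GRO submission: a pass-through (GRO_NORMAL) argues for
 * cheap skb buffers, any merged/held result argues for page-based
 * buffers, and a drop counts for neither. */
static void my_rx_to_gro(struct my_channel *ch, struct sk_buff *skb)
{
        gro_result_t res = napi_gro_receive(&ch->napi, skb);

        if (res == GRO_NORMAL) {
                ch->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
        } else if (res != GRO_DROP) {
                ch->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
                /* merging is working, so heavier IRQ moderation pays off */
                ch->irq_mod_score += 2;
        }
}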
@@ -605,7 +605,7 @@ void __efx_rx_packet(struct efx_channel *channel,
         }
 
         if (likely(checksummed || rx_buf->page)) {
-                efx_rx_packet_lro(channel, rx_buf, checksummed);
+                efx_rx_packet_gro(channel, rx_buf, checksummed);
                 return;
         }
 
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
 {
         enum efx_rx_alloc_method method = rx_alloc_method;
 
-        /* Only makes sense to use page based allocation if LRO is enabled */
+        /* Only makes sense to use page based allocation if GRO is enabled */
         if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
                 method = RX_ALLOC_METHOD_SKB;
         } else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
                 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
 
         /* Decide on the allocation method */
-        method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+        method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
                   RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
 }
 