Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
 drivers/net/ethernet/sfc/rx.c | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 719319b89d7a..9e0ad1b75c33 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -479,7 +479,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
 		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
 				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
 
-		skb_record_rx_queue(skb, channel->channel);
+		skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
 		gro_result = napi_gro_frags(napi);
 	} else {
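skb_record_rx_queue() tags the skb with the index of the receive queue it arrived on, which the stack can read back later (e.g. via skb_get_rx_queue()). The hunk above stops using the driver-internal channel number as that tag; once a driver registers channels that are not stack-visible RX queues, the two numberings diverge. A before/after sketch of the call, with illustrative comments that are not part of the patch:

	/* Before: the driver-internal channel number.  Correct only
	 * while every channel is also a core RX queue.
	 */
	skb_record_rx_queue(skb, channel->channel);

	/* After: the queue index as the network stack numbers it,
	 * kept in the channel's RX queue state.
	 */
	skb_record_rx_queue(skb, channel->rx_queue.core_index);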
@@ -571,8 +571,14 @@ static void efx_rx_deliver(struct efx_channel *channel,
 	/* Set the SKB flags */
 	skb_checksum_none_assert(skb);
 
+	/* Record the rx_queue */
+	skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
 	/* Pass the packet up */
-	netif_receive_skb(skb);
+	if (channel->type->receive_skb)
+		channel->type->receive_skb(channel, skb);
+	else
+		netif_receive_skb(skb);
 
 	/* Update allocation strategy method */
 	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
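This is the core of the change: efx_rx_deliver() now dispatches through an optional per-channel-type hook before falling back to netif_receive_skb(). The call site fixes the hook's signature as taking the channel and the skb and returning nothing. A minimal sketch of a channel type installing such a handler; everything named example_* is hypothetical, and only the receive_skb member is established by this diff:

	/* Hypothetical consumer of the new hook.  A channel type that
	 * sets ->receive_skb takes ownership of completed receive skbs
	 * instead of letting them flow into the regular stack path.
	 */
	static void example_receive_skb(struct efx_channel *channel,
					struct sk_buff *skb)
	{
		/* Process the packet privately, then release it. */
		kfree_skb(skb);
	}

	static const struct efx_channel_type example_channel_type = {
		.receive_skb	= example_receive_skb,
	};

Channel types that leave receive_skb NULL are unaffected: the else branch preserves the old netif_receive_skb() path.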
@@ -608,13 +614,14 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
 		 * at the ethernet header */
 		skb->protocol = eth_type_trans(skb, efx->net_dev);
 
-		skb_record_rx_queue(skb, channel->channel);
+		skb_record_rx_queue(skb, channel->rx_queue.core_index);
 	}
 
 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)))
+	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
+	    !channel->type->receive_skb)
 		efx_rx_packet_gro(channel, rx_buf, eh);
 	else
 		efx_rx_deliver(channel, rx_buf);
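The widened condition keeps such channels off the GRO path as well: napi_gro_frags() in efx_rx_packet_gro() hands packets straight to the stack, bypassing the hook, so a channel with a private receive_skb handler must always go through efx_rx_deliver(). The resulting dispatch rule, restated as a standalone predicate (a readability sketch, not driver code):

	/* Sketch of the post-patch delivery decision: GRO only for
	 * page-based or checksummed buffers on channels without a
	 * private receive handler.
	 */
	static bool example_can_gro(const struct efx_channel *channel,
				    const struct efx_rx_buffer *rx_buf)
	{
		return (rx_buf->flags &
			(EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
		       !channel->type->receive_skb;
	}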
@@ -624,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
 {
 	enum efx_rx_alloc_method method = rx_alloc_method;
 
+	if (channel->type->receive_skb) {
+		channel->rx_alloc_push_pages = false;
+		return;
+	}
+
 	/* Only makes sense to use page based allocation if GRO is enabled */
 	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
 		method = RX_ALLOC_METHOD_SKB;
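Finally, efx_rx_strategy() gets an early exit so the allocation-method heuristics below it never run for receive_skb channels. The guard, annotated (the comment is a hedged reading of this hunk; the rest of the function is cut off in this excerpt):

	/* Channels with a private receive handler always use SKB
	 * allocation: skip the page/SKB heuristics and do not push
	 * page-based buffers for them.  (That rx_alloc_push_pages is
	 * otherwise derived from the chosen method is an assumption
	 * about the code not shown here.)
	 */
	if (channel->type->receive_skb) {
		channel->rx_alloc_push_pages = false;
		return;
	}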