aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/sfc/rx.c
diff options
context:
space:
mode:
authorStuart Hodgson <smhodgson@solarflare.com>2012-07-18 04:52:11 -0400
committerBen Hutchings <bhutchings@solarflare.com>2012-09-07 16:13:39 -0400
commitc31e5f9f97fba32103c8bdd286eee8e3aefbee31 (patch)
tree33fd1ed248a3f4812686af9a2b749c6e6f4b4090 /drivers/net/ethernet/sfc/rx.c
parent79d68b370074044d7a9dd789ee103ffe5ef00bda (diff)
sfc: Add channel specific receive_skb handler and post_remove callback
Allows an extra channel to override the standard receive_skb handler and also for extra non-generic operations to be performed on remove. Also sets the default rx strategy so only skbs can be delivered to the PTP receive function. Signed-off-by: Stuart Hodgson <smhodgson@solarflare.com> Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Diffstat (limited to 'drivers/net/ethernet/sfc/rx.c')
-rw-r--r--drivers/net/ethernet/sfc/rx.c13
1 file changed, 11 insertions, 2 deletions
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index e997f83f14f5..9e0ad1b75c33 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -575,7 +575,10 @@ static void efx_rx_deliver(struct efx_channel *channel,
575 skb_record_rx_queue(skb, channel->rx_queue.core_index); 575 skb_record_rx_queue(skb, channel->rx_queue.core_index);
576 576
577 /* Pass the packet up */ 577 /* Pass the packet up */
578 netif_receive_skb(skb); 578 if (channel->type->receive_skb)
579 channel->type->receive_skb(channel, skb);
580 else
581 netif_receive_skb(skb);
579 582
580 /* Update allocation strategy method */ 583 /* Update allocation strategy method */
581 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 584 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
@@ -617,7 +620,8 @@ void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
617 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 620 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
618 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 621 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
619 622
620 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED))) 623 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
624 !channel->type->receive_skb)
621 efx_rx_packet_gro(channel, rx_buf, eh); 625 efx_rx_packet_gro(channel, rx_buf, eh);
622 else 626 else
623 efx_rx_deliver(channel, rx_buf); 627 efx_rx_deliver(channel, rx_buf);
@@ -627,6 +631,11 @@ void efx_rx_strategy(struct efx_channel *channel)
627{ 631{
628 enum efx_rx_alloc_method method = rx_alloc_method; 632 enum efx_rx_alloc_method method = rx_alloc_method;
629 633
634 if (channel->type->receive_skb) {
635 channel->rx_alloc_push_pages = false;
636 return;
637 }
638
630 /* Only makes sense to use page based allocation if GRO is enabled */ 639 /* Only makes sense to use page based allocation if GRO is enabled */
631 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 640 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
632 method = RX_ALLOC_METHOD_SKB; 641 method = RX_ALLOC_METHOD_SKB;