author		Eric Dumazet <edumazet@google.com>	2016-11-23 11:44:56 -0500
committer	David S. Miller <davem@davemloft.net>	2016-11-25 19:37:49 -0500
commit		f52dffe049ee11ecc02588a118fbe4092672fbaa (patch)
tree		a5532c4e1afd9e1a48a3c4a6f17f69af91344fe4 /net/core/dev.c
parent		ca89fa77b4488ecf2e3f72096386e8f3a58fe2fc (diff)
net: properly flush delay-freed skbs
Typical NAPI drivers use napi_consume_skb(skb) at TX completion time. This puts the skb in a special percpu queue, napi_alloc_cache, to get bulk frees.

It turns out the queue is not flushed and hits the NAPI_SKB_CACHE_SIZE limit quite often, with skbs that were queued hundreds of usec earlier. I measured that a single flush can take ~6000 nsec.

__kfree_skb_flush() can be called from two points right now:

1) From net_tx_action(), but only for skbs that were queued to sd->completion_queue.
   -> Irrelevant for NAPI drivers in normal operation.

2) From net_rx_action(), but only under high stress or if RPS/RFS has a pending action.

This patch changes net_rx_action() to perform the flush in all cases and after more urgent operations have happened (like kicking remote CPUs for RPS/RFS).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
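To illustrate the bulk-free pattern the commit message describes, below is a minimal, self-contained userspace sketch of a deferred-free cache. It only models the idea behind napi_consume_skb()/__kfree_skb_flush(); the names skb_cache, cache_free(), cache_flush() and the single-threaded setup are illustrative assumptions, not the kernel's actual code:

#include <stdlib.h>

#define SKB_CACHE_SIZE 64	/* plays the role of NAPI_SKB_CACHE_SIZE */

static void *skb_cache[SKB_CACHE_SIZE];	/* per-CPU in the kernel; one CPU here */
static int skb_cache_count;

/* Release every parked buffer in one pass (the role of __kfree_skb_flush()). */
static void cache_flush(void)
{
	for (int i = 0; i < skb_cache_count; i++)
		free(skb_cache[i]);
	skb_cache_count = 0;
}

/* Defer-free one buffer (the role of napi_consume_skb()): park it and
 * only pay the cost of real frees when the cache fills up. */
static void cache_free(void *skb)
{
	skb_cache[skb_cache_count++] = skb;
	if (skb_cache_count == SKB_CACHE_SIZE)
		cache_flush();
}

int main(void)
{
	/* Simulate TX completions. Without the explicit flush below, up to
	 * SKB_CACHE_SIZE - 1 buffers could sit in the cache indefinitely,
	 * which is the staleness this patch addresses by flushing
	 * unconditionally at the end of net_rx_action(). */
	for (int i = 0; i < 100; i++)
		cache_free(malloc(256));

	cache_flush();	/* the unconditional flush point */
	return 0;
}

The pattern amortizes allocator work across many frees; the patch does not change the caching itself, only where the flush happens.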
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--	net/core/dev.c	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index f71b34ab57a5..048b46b7c92a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5260,7 +5260,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 
 		if (list_empty(&list)) {
 			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
-				return;
+				goto out;
 			break;
 		}
 
@@ -5278,7 +5278,6 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 		}
 	}
 
-	__kfree_skb_flush();
 	local_irq_disable();
 
 	list_splice_tail_init(&sd->poll_list, &list);
@@ -5288,6 +5287,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
 		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
 
 	net_rps_action_and_irq_enable(sd);
+out:
+	__kfree_skb_flush();
 }
 
 struct netdev_adjacent {