diff options
author | Eric Dumazet <edumazet@google.com> | 2013-12-05 07:45:08 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2013-12-06 15:24:02 -0500 |
commit | e6247027e5173c00efb2084d688d06ff835bc3b0 (patch) | |
tree | 44c1fc4da5358f778fc82a1056ef9a2a6e7eea0e /net | |
parent | f96eb74c84ebb09a03031f7f1e51789a928f9de0 (diff) |
net: introduce dev_consume_skb_any()
Some network drivers use dev_kfree_skb_any() and dev_kfree_skb_irq()
helpers to free skbs, both for dropped packets and TX completed ones.
We need to separate the two causes to get better diagnostics
given by dropwatch or "perf record -e skb:kfree_skb".
This patch provides two new helpers, dev_consume_skb_any() and
dev_consume_skb_irq() to be used for consumed skbs.
__dev_kfree_skb_irq() is slightly optimized to remove one
atomic_dec_and_test() in fast path, and use this_cpu_{r|w} accessors.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- | net/core/dev.c | 45 |
1 file changed, 30 insertions, 15 deletions
diff --git a/net/core/dev.c b/net/core/dev.c index c98052487e98..6cc98dd49c7a 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2145,30 +2145,42 @@ void __netif_schedule(struct Qdisc *q) | |||
2145 | } | 2145 | } |
2146 | EXPORT_SYMBOL(__netif_schedule); | 2146 | EXPORT_SYMBOL(__netif_schedule); |
2147 | 2147 | ||
/*
 * Overlay stored in skb->cb that records why an skb was handed to the
 * deferred-free path, so net_tx_action() can later emit the matching
 * tracepoint (consumed vs. dropped).  Only meaningful while the skb sits
 * on a per-cpu completion queue.
 */
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

/* Reinterpret the skb control block as the free-reason record above. */
static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
/*
 * Free an skb from hardirq context or with IRQs disabled: drop our
 * reference and, if it was the last one, queue the skb on this cpu's
 * softnet completion queue to be actually freed from NET_TX softirq
 * (net_tx_action()), where sleeping/tracing is safe.
 *
 * @skb:    buffer to release
 * @reason: why it is freed (consumed vs. dropped) — recorded in skb->cb
 *          so net_tx_action() fires the right tracepoint.
 */
void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		/* Fast path: we hold the only reference, so no atomic RMW is
		 * needed.  The smp_rmb() orders the read of skb->users above
		 * against later accesses — NOTE(review): presumably standing
		 * in for the full barrier atomic_dec_and_test() would imply.
		 */
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		/* Other references remain; nothing more to do. */
		return;
	}
	/* Last reference gone: remember the free reason, then push the skb
	 * onto this cpu's completion queue with IRQs off (the queue is also
	 * manipulated from softirq/hardirq context) and kick NET_TX softirq.
	 */
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
2163 | 2175 | ||
2164 | void dev_kfree_skb_any(struct sk_buff *skb) | 2176 | void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason) |
2165 | { | 2177 | { |
2166 | if (in_irq() || irqs_disabled()) | 2178 | if (in_irq() || irqs_disabled()) |
2167 | dev_kfree_skb_irq(skb); | 2179 | __dev_kfree_skb_irq(skb, reason); |
2168 | else | 2180 | else |
2169 | dev_kfree_skb(skb); | 2181 | dev_kfree_skb(skb); |
2170 | } | 2182 | } |
2171 | EXPORT_SYMBOL(dev_kfree_skb_any); | 2183 | EXPORT_SYMBOL(__dev_kfree_skb_any); |
2172 | 2184 | ||
2173 | 2185 | ||
2174 | /** | 2186 | /** |
@@ -3306,7 +3318,10 @@ static void net_tx_action(struct softirq_action *h) | |||
3306 | clist = clist->next; | 3318 | clist = clist->next; |
3307 | 3319 | ||
3308 | WARN_ON(atomic_read(&skb->users)); | 3320 | WARN_ON(atomic_read(&skb->users)); |
3309 | trace_kfree_skb(skb, net_tx_action); | 3321 | if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED)) |
3322 | trace_consume_skb(skb); | ||
3323 | else | ||
3324 | trace_kfree_skb(skb, net_tx_action); | ||
3310 | __kfree_skb(skb); | 3325 | __kfree_skb(skb); |
3311 | } | 3326 | } |
3312 | } | 3327 | } |