 include/linux/netdevice.h | 53
 net/core/dev.c            | 45
 2 files changed, 74 insertions(+), 24 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f0ed423a360..9d55e5188b96 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2368,17 +2368,52 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
 #define DEFAULT_MAX_NUM_RSS_QUEUES	(8)
 int netif_get_num_default_rss_queues(void);
 
-/* Use this variant when it is known for sure that it
- * is executing from hardware interrupt context or with hardware interrupts
- * disabled.
- */
-void dev_kfree_skb_irq(struct sk_buff *skb);
+enum skb_free_reason {
+	SKB_REASON_CONSUMED,
+	SKB_REASON_DROPPED,
+};
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason);
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason);
 
-/* Use this variant in places where it could be invoked
- * from either hardware interrupt or other context, with hardware interrupts
- * either disabled or enabled.
+/*
+ * It is not allowed to call kfree_skb() or consume_skb() from hardware
+ * interrupt context or with hardware interrupts being disabled.
+ * (in_irq() || irqs_disabled())
+ *
+ * We provide four helpers that can be used in following contexts :
+ *
+ * dev_kfree_skb_irq(skb) when caller drops a packet from irq context,
+ *  replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_irq(skb) when caller consumes a packet from irq context.
+ *  Typically used in place of consume_skb(skb) in TX completion path
+ *
+ * dev_kfree_skb_any(skb) when caller doesn't know its current irq context,
+ *  replacing kfree_skb(skb)
+ *
+ * dev_consume_skb_any(skb) when caller doesn't know its current irq context,
+ *  and consumed a packet. Used in place of consume_skb(skb)
  */
-void dev_kfree_skb_any(struct sk_buff *skb);
+static inline void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+	__dev_kfree_skb_irq(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_irq(struct sk_buff *skb)
+{
+	__dev_kfree_skb_irq(skb, SKB_REASON_CONSUMED);
+}
+
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+	__dev_kfree_skb_any(skb, SKB_REASON_DROPPED);
+}
+
+static inline void dev_consume_skb_any(struct sk_buff *skb)
+{
+	__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
+}
 
 int netif_rx(struct sk_buff *skb);
 int netif_rx_ni(struct sk_buff *skb);
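
The comment block added above spells out which helper fits which context. As a rough illustration of the hard-irq case, here is a sketch of a TX-completion interrupt handler for a hypothetical "foo" NIC; struct foo_priv and its completion queue are invented for the example, only the dev_consume_skb_irq()/dev_kfree_skb_irq() calls come from this patch.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct sk_buff_head completed;	/* skbs whose TX descriptors are done */
};

static irqreturn_t foo_tx_complete_irq(int irq, void *dev_id)
{
	struct foo_priv *priv = dev_id;
	struct sk_buff *skb;

	/* Hard-irq context: plain consume_skb()/kfree_skb() are not allowed here. */
	while ((skb = skb_dequeue(&priv->completed)) != NULL) {
		if (skb->len)			/* stand-in for a real "sent OK" check */
			dev_consume_skb_irq(skb);	/* normal completion, not a drop */
		else
			dev_kfree_skb_irq(skb);		/* counted and traced as a drop */
	}
	return IRQ_HANDLED;
}

Both calls queue the skb on the per-cpu completion list (see the net/core/dev.c changes below); the only difference is the reason recorded in skb->cb, which later decides whether net_tx_action() fires trace_consume_skb() or trace_kfree_skb().
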
diff --git a/net/core/dev.c b/net/core/dev.c
index c98052487e98..6cc98dd49c7a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2145,30 +2145,42 @@ void __netif_schedule(struct Qdisc *q)
 }
 EXPORT_SYMBOL(__netif_schedule);
 
-void dev_kfree_skb_irq(struct sk_buff *skb)
+struct dev_kfree_skb_cb {
+	enum skb_free_reason reason;
+};
+
+static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
 {
-	if (atomic_dec_and_test(&skb->users)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	return (struct dev_kfree_skb_cb *)skb->cb;
+}
+
+void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
+{
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		skb->next = sd->completion_queue;
-		sd->completion_queue = skb;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
+	if (likely(atomic_read(&skb->users) == 1)) {
+		smp_rmb();
+		atomic_set(&skb->users, 0);
+	} else if (likely(!atomic_dec_and_test(&skb->users))) {
+		return;
 	}
+	get_kfree_skb_cb(skb)->reason = reason;
+	local_irq_save(flags);
+	skb->next = __this_cpu_read(softnet_data.completion_queue);
+	__this_cpu_write(softnet_data.completion_queue, skb);
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
 }
-EXPORT_SYMBOL(dev_kfree_skb_irq);
+EXPORT_SYMBOL(__dev_kfree_skb_irq);
 
-void dev_kfree_skb_any(struct sk_buff *skb)
+void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
 {
 	if (in_irq() || irqs_disabled())
-		dev_kfree_skb_irq(skb);
+		__dev_kfree_skb_irq(skb, reason);
 	else
 		dev_kfree_skb(skb);
 }
-EXPORT_SYMBOL(dev_kfree_skb_any);
+EXPORT_SYMBOL(__dev_kfree_skb_any);
 
 
 /**
@@ -3306,7 +3318,10 @@ static void net_tx_action(struct softirq_action *h)
 			clist = clist->next;
 
 			WARN_ON(atomic_read(&skb->users));
-			trace_kfree_skb(skb, net_tx_action);
+			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
+				trace_consume_skb(skb);
+			else
+				trace_kfree_skb(skb, net_tx_action);
 			__kfree_skb(skb);
 		}
 	}
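
For the "don't know the current context" case covered by the _any variants, a similar sketch is shown below; the foo_ name and the link_up test are again invented for illustration. Routing successfully transmitted frames through dev_consume_skb_any() means drop-monitoring tools that hook the kfree_skb tracepoint stop counting ordinary TX completions as losses, which is exactly the distinction the trace_consume_skb()/trace_kfree_skb() split in net_tx_action() above provides.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Cleanup that may run from process context, from an irq handler, or with
 * interrupts disabled; the _any helpers pick the safe free path at run time.
 */
static void foo_clean_tx_queue(struct sk_buff_head *queue, bool link_up)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(queue)) != NULL) {
		if (link_up)
			dev_consume_skb_any(skb);	/* frame was transmitted */
		else
			dev_kfree_skb_any(skb);		/* frame is being dropped */
	}
}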