Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  |  54
1 file changed, 35 insertions(+), 19 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2e..e719ed29310f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-		struct softnet_data *sd;
-		unsigned long flags;
+	struct softnet_data *sd;
+	unsigned long flags;
 
-		local_irq_save(flags);
-		sd = &__get_cpu_var(softnet_data);
-		q->next_sched = sd->output_queue;
-		sd->output_queue = q;
-		raise_softirq_irqoff(NET_TX_SOFTIRQ);
-		local_irq_restore(flags);
-	}
+	local_irq_save(flags);
+	sd = &__get_cpu_var(softnet_data);
+	q->next_sched = sd->output_queue;
+	sd->output_queue = q;
+	raise_softirq_irqoff(NET_TX_SOFTIRQ);
+	local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+		__netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
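For reference, a sketch of how the scheduling helpers read once this hunk is applied (reconstructed from the new side of the hunk above; the comments are explanatory additions, not taken from the kernel source):

/* Unconditionally queue the qdisc on this CPU's softnet output_queue
 * and raise NET_TX_SOFTIRQ; the caller is expected to already own the
 * __QDISC_STATE_SCHED bit. */
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = sd->output_queue;
	sd->output_queue = q;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

/* Public entry point: schedule only if this caller is the one that
 * actually set the SCHED bit. */
void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);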
@@ -1800,9 +1804,13 @@ gso:
 
 	spin_lock(root_lock);
 
-	rc = qdisc_enqueue_root(skb, q);
-	qdisc_run(q);
-
+	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+		kfree_skb(skb);
+		rc = NET_XMIT_DROP;
+	} else {
+		rc = qdisc_enqueue_root(skb, q);
+		qdisc_run(q);
+	}
 	spin_unlock(root_lock);
 
 	goto out;
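With this hunk applied, the locked enqueue section below the gso: label in the transmit path reads roughly as follows (reconstructed from the new side of the hunk; the comment is an explanatory addition):

	spin_lock(root_lock);

	/* If the qdisc was marked deactivated (e.g. during device
	 * shutdown) before we took the root lock, drop the packet
	 * instead of enqueueing into a dying queue. */
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);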
@@ -1974,15 +1982,22 @@ static void net_tx_action(struct softirq_action *h)
 
 			head = head->next_sched;
 
-			smp_mb__before_clear_bit();
-			clear_bit(__QDISC_STATE_SCHED, &q->state);
-
 			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
+				smp_mb__before_clear_bit();
+				clear_bit(__QDISC_STATE_SCHED,
+					  &q->state);
 				qdisc_run(q);
 				spin_unlock(root_lock);
 			} else {
-				__netif_schedule(q);
+				if (!test_bit(__QDISC_STATE_DEACTIVATED,
+					      &q->state)) {
+					__netif_reschedule(q);
+				} else {
+					smp_mb__before_clear_bit();
+					clear_bit(__QDISC_STATE_SCHED,
+						  &q->state);
+				}
 			}
 		}
 	}
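The per-qdisc body of the net_tx_action() loop, as it stands after this hunk (again reconstructed from the new side, with explanatory comments added):

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				/* Root lock acquired: clear SCHED only now,
				 * then run the qdisc. */
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				/* Lock contended: keep SCHED set and put the
				 * qdisc back on the output queue, unless it
				 * has been deactivated, in which case just
				 * release the SCHED claim. */
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}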
@@ -2084,7 +2099,8 @@ static int ing_filter(struct sk_buff *skb)
 	q = rxq->qdisc;
 	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
-		result = qdisc_enqueue_root(skb, q);
+		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+			result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
 	}
 
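Finally, the ingress enqueue in ing_filter() after this last hunk, showing the same deactivation guard used on the transmit side (reconstructed from the new side of the hunk; the comment is an explanatory addition):

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		/* Never enqueue into a qdisc that has already been
		 * marked deactivated. */
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}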