about summary refs log tree commit diff stats
path: root/net/core/dev.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/core/dev.c')
-rw-r--r--  net/core/dev.c  49
1 files changed, 30 insertions, 19 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2..60c51f76588 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1339} 1339}
1340 1340
1341 1341
1342void __netif_schedule(struct Qdisc *q) 1342static inline void __netif_reschedule(struct Qdisc *q)
1343{ 1343{
1344 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) { 1344 struct softnet_data *sd;
1345 struct softnet_data *sd; 1345 unsigned long flags;
1346 unsigned long flags;
1347 1346
1348 local_irq_save(flags); 1347 local_irq_save(flags);
1349 sd = &__get_cpu_var(softnet_data); 1348 sd = &__get_cpu_var(softnet_data);
1350 q->next_sched = sd->output_queue; 1349 q->next_sched = sd->output_queue;
1351 sd->output_queue = q; 1350 sd->output_queue = q;
1352 raise_softirq_irqoff(NET_TX_SOFTIRQ); 1351 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1353 local_irq_restore(flags); 1352 local_irq_restore(flags);
1354 } 1353}
1354
1355void __netif_schedule(struct Qdisc *q)
1356{
1357 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1358 __netif_reschedule(q);
1355} 1359}
1356EXPORT_SYMBOL(__netif_schedule); 1360EXPORT_SYMBOL(__netif_schedule);
1357 1361
@@ -1800,9 +1804,13 @@ gso:
1800 1804
1801 spin_lock(root_lock); 1805 spin_lock(root_lock);
1802 1806
1803 rc = qdisc_enqueue_root(skb, q); 1807 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
1804 qdisc_run(q); 1808 kfree_skb(skb);
1805 1809 rc = NET_XMIT_DROP;
1810 } else {
1811 rc = qdisc_enqueue_root(skb, q);
1812 qdisc_run(q);
1813 }
1806 spin_unlock(root_lock); 1814 spin_unlock(root_lock);
1807 1815
1808 goto out; 1816 goto out;
@@ -1974,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
1974 1982
1975 head = head->next_sched; 1983 head = head->next_sched;
1976 1984
1977 smp_mb__before_clear_bit();
1978 clear_bit(__QDISC_STATE_SCHED, &q->state);
1979
1980 root_lock = qdisc_lock(q); 1985 root_lock = qdisc_lock(q);
1981 if (spin_trylock(root_lock)) { 1986 if (spin_trylock(root_lock)) {
1987 smp_mb__before_clear_bit();
1988 clear_bit(__QDISC_STATE_SCHED,
1989 &q->state);
1982 qdisc_run(q); 1990 qdisc_run(q);
1983 spin_unlock(root_lock); 1991 spin_unlock(root_lock);
1984 } else { 1992 } else {
1985 __netif_schedule(q); 1993 if (!test_bit(__QDISC_STATE_DEACTIVATED,
1994 &q->state))
1995 __netif_reschedule(q);
1986 } 1996 }
1987 } 1997 }
1988 } 1998 }
@@ -2084,7 +2094,8 @@ static int ing_filter(struct sk_buff *skb)
2084 q = rxq->qdisc; 2094 q = rxq->qdisc;
2085 if (q != &noop_qdisc) { 2095 if (q != &noop_qdisc) {
2086 spin_lock(qdisc_lock(q)); 2096 spin_lock(qdisc_lock(q));
2087 result = qdisc_enqueue_root(skb, q); 2097 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
2098 result = qdisc_enqueue_root(skb, q);
2088 spin_unlock(qdisc_lock(q)); 2099 spin_unlock(qdisc_lock(q));
2089 } 2100 }
2090 2101