 include/linux/netdevice.h | 41
 net/core/dev.c            |  4
 net/core/netpoll.c        |  4
 net/core/pktgen.c         |  2
 net/sched/sch_generic.c   |  8
 net/sched/sch_multiq.c    |  6
 net/sched/sch_teql.c      |  6
 7 files changed, 46 insertions(+), 25 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ac9a4b9344c..d19f93265ca 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -517,11 +517,23 @@ static inline void napi_synchronize(const struct napi_struct *n)
 #endif
 
 enum netdev_queue_state_t {
-	__QUEUE_STATE_XOFF,
+	__QUEUE_STATE_DRV_XOFF,
+	__QUEUE_STATE_STACK_XOFF,
 	__QUEUE_STATE_FROZEN,
-#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
-				    (1 << __QUEUE_STATE_FROZEN))
+#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
+			      (1 << __QUEUE_STATE_STACK_XOFF))
+#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
+					(1 << __QUEUE_STATE_FROZEN))
 };
+/*
+ * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
+ * netif_tx_* functions below are used to manipulate this flag. The
+ * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
+ * queue independently. The netif_xmit_*stopped functions below are called
+ * to check if the queue has been stopped by the driver or stack (either
+ * of the XOFF bits are set in the state). Drivers should not need to call
+ * netif_xmit*stopped functions, they should only be using netif_tx_*.
+ */
 
 struct netdev_queue {
 /*
@@ -1718,7 +1730,7 @@ extern void __netif_schedule(struct Qdisc *q);
 
 static inline void netif_schedule_queue(struct netdev_queue *txq)
 {
-	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
+	if (!(txq->state & QUEUE_STATE_ANY_XOFF))
 		__netif_schedule(txq->qdisc);
 }
 
@@ -1732,7 +1744,7 @@ static inline void netif_tx_schedule_all(struct net_device *dev)
 
 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
 {
-	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1764,7 +1776,7 @@ static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 		return;
 	}
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 		__netif_schedule(dev_queue->qdisc);
 }
 
@@ -1796,7 +1808,7 @@ static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
 		return;
 	}
-	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1823,7 +1835,7 @@ static inline void netif_tx_stop_all_queues(struct net_device *dev)
 
 static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
+	return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
 }
 
 /**
@@ -1837,9 +1849,16 @@ static inline int netif_queue_stopped(const struct net_device *dev)
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
+static inline int netif_xmit_stopped(const struct netdev_queue *dev_queue)
 {
-	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
+	return dev_queue->state & QUEUE_STATE_ANY_XOFF;
+}
+
+static inline int netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
+{
+	return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
+}
+
 }
 
 /**
@@ -1926,7 +1945,7 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 	if (netpoll_trap())
 		return;
 #endif
-	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
+	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 		__netif_schedule(txq->qdisc);
 }
 
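
The comment block added to netdevice.h above is the heart of the change: the driver owns __QUEUE_STATE_DRV_XOFF via netif_tx_*, the stack owns __QUEUE_STATE_STACK_XOFF, and the netif_xmit_*stopped() helpers fold both together. A minimal standalone sketch of those bit semantics (userspace C for illustration only; it borrows the definitions from this patch but is not kernel code):

/* Standalone model of the queue-state bits introduced above.
 * Illustration only -- not kernel code. */
#include <stdio.h>

enum {
	__QUEUE_STATE_DRV_XOFF,		/* set/cleared by the driver (netif_tx_*) */
	__QUEUE_STATE_STACK_XOFF,	/* set/cleared by the stack */
	__QUEUE_STATE_FROZEN,		/* queue frozen, e.g. while the stack holds it */
};

#define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
			      (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
					(1 << __QUEUE_STATE_FROZEN))

int main(void)
{
	unsigned long state = 0;

	/* Driver ring fills up: netif_tx_stop_queue() sets only DRV_XOFF. */
	state |= 1UL << __QUEUE_STATE_DRV_XOFF;
	printf("xmit stopped: %d\n", !!(state & QUEUE_STATE_ANY_XOFF));	/* 1 */

	/* Stack also throttles the queue via its own bit. */
	state |= 1UL << __QUEUE_STATE_STACK_XOFF;

	/* netif_tx_wake_queue() clears only the driver bit; the queue
	 * still reads as stopped because the stack's bit remains set. */
	state &= ~(1UL << __QUEUE_STATE_DRV_XOFF);
	printf("xmit stopped after driver wake: %d\n",
	       !!(state & QUEUE_STATE_ANY_XOFF));			/* 1 */
	return 0;
}

The point of the split shows in the last step: a driver wake no longer unconditionally re-enables transmission once the stack has asserted its own XOFF bit.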
diff --git a/net/core/dev.c b/net/core/dev.c
index c7ef6c5d378..cb8f753b423 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2270,7 +2270,7 @@ gso:
 			return rc;
 		}
 		txq_trans_update(txq);
-		if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+		if (unlikely(netif_xmit_stopped(txq) && skb->next))
 			return NETDEV_TX_BUSY;
 	} while (skb->next);
 
@@ -2558,7 +2558,7 @@ int dev_queue_xmit(struct sk_buff *skb)
 
 			HARD_TX_LOCK(dev, txq, cpu);
 
-			if (!netif_tx_queue_stopped(txq)) {
+			if (!netif_xmit_stopped(txq)) {
 				__this_cpu_inc(xmit_recursion);
 				rc = dev_hard_start_xmit(skb, dev, txq);
 				__this_cpu_dec(xmit_recursion);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 1a7d8e2c976..0d38808a230 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,7 +76,7 @@ static void queue_process(struct work_struct *work)
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_frozen_or_stopped(txq) ||
+		if (netif_xmit_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
@@ -317,7 +317,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
-				if (!netif_tx_queue_stopped(txq)) {
+				if (!netif_xmit_stopped(txq)) {
 					status = ops->ndo_start_xmit(skb, dev);
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aa53a35a631..449fe0f068f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3342,7 +3342,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
+	if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 79ac1458c2b..67fc573e013 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_frozen_or_stopped(txq)) {
+		if (!netif_xmit_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_frozen_or_stopped(txq))
+	if (!netif_xmit_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && netif_tx_queue_frozen_or_stopped(txq))
+	if (ret && netif_xmit_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -242,7 +242,7 @@ static void dev_watchdog(unsigned long arg)
 			 * old device drivers set dev->trans_start
 			 */
 			trans_start = txq->trans_start ? : dev->trans_start;
-			if (netif_tx_queue_stopped(txq) &&
+			if (netif_xmit_stopped(txq) &&
 			    time_after(jiffies, (trans_start +
 						 dev->watchdog_timeo))) {
 				some_queue_timedout = 1;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index edc1950e0e7..49131d7a744 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
 			qdisc = q->queues[curband];
 			skb = qdisc->ops->peek(qdisc);
 			if (skb)
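
The two sch_multiq hunks above keep the head-of-line-blocking guard but widen it: with netif_xmit_stopped() the dequeue path now also skips a band whose tx queue was stopped by the stack, not only by the driver. A rough standalone model of that band-skipping round-robin (hypothetical helper names and stub state instead of real qdiscs/txqs; illustration only):

/* Round-robin over bands, never pulling from a band whose
 * tx queue reports stopped -- a sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define BANDS 4

static bool band_xmit_stopped[BANDS];	/* stands in for netif_xmit_stopped(txq) */
static int band_qlen[BANDS];		/* stands in for each band's qdisc length */

/* Return the next band after 'cur' that is neither stopped nor empty,
 * or -1 if every band must be skipped this round. */
static int next_band(int cur)
{
	for (int i = 0; i < BANDS; i++) {
		int band = (cur + 1 + i) % BANDS;

		if (!band_xmit_stopped[band] && band_qlen[band] > 0)
			return band;
	}
	return -1;
}

int main(void)
{
	band_qlen[0] = 1;
	band_qlen[2] = 3;
	band_xmit_stopped[0] = true;	/* band 0's txq is stopped: skip it */

	/* Starting after the last band: band 0 is skipped (stopped),
	 * band 1 is skipped (empty), so band 2 is chosen. */
	printf("dequeue from band %d\n", next_band(BANDS - 1));
	return 0;
}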
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a3b7120fcc7..283bfe3de59 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -301,7 +301,7 @@ restart:
 
 			if (slave_txq->qdisc_sleeping != q)
 				continue;
-			if (__netif_subqueue_stopped(slave, subq) ||
+			if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
 			    !netif_running(slave)) {
 				busy = 1;
 				continue;
@@ -312,7 +312,7 @@ restart:
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
 
-				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+				if (!netif_xmit_frozen_or_stopped(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 					txq_trans_update(slave_txq);
 					__netif_tx_unlock(slave_txq);
@@ -324,7 +324,7 @@ restart:
 				}
 				__netif_tx_unlock(slave_txq);
 			}
-			if (netif_queue_stopped(dev))
+			if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
 				busy = 1;
 			break;
 		case 1: