path: root/net/sched
author	Tom Herbert <therbert@google.com>	2011-11-28 11:32:44 -0500
committer	David S. Miller <davem@davemloft.net>	2011-11-29 12:46:19 -0500
commit	7346649826382b769cfadf4a2fe8a84d060c55e9 (patch)
tree	0241cce453992881f61d3fbc4f9baf7eb0578135 /net/sched
parent	75957ba36c05b979701e9ec64b37819adc12f830 (diff)
net: Add queue state xoff flag for stack
Create separate queue state flags so that either the stack or drivers can turn on XOFF. Added a set of functions used in the stack to determine if a queue is really stopped (either by stack or driver).

Signed-off-by: Tom Herbert <therbert@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
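The netif_xmit_stopped() and netif_xmit_frozen_or_stopped() helpers that the hunks below switch to come from the include/linux/netdevice.h side of this patch, which a diff limited to net/sched does not show. A rough sketch of the scheme, paraphrased rather than copied from the header (names and exact types may differ slightly from the real hunk): the driver and the stack each own a separate XOFF bit in the per-queue state word, and the helpers treat the queue as stopped when either bit is set.

/*
 * Sketch of the queue-state flags and helpers this patch introduces.
 * Paraphrased from include/linux/netdevice.h; not the verbatim hunk.
 */
#include <stdbool.h>

enum netdev_queue_state_t {
	__QUEUE_STATE_DRV_XOFF,		/* queue stopped by the driver */
	__QUEUE_STATE_STACK_XOFF,	/* queue stopped by the stack (the new flag) */
	__QUEUE_STATE_FROZEN,		/* queue frozen, e.g. during device reconfig */
};

#define QUEUE_STATE_ANY_XOFF		((1 << __QUEUE_STATE_DRV_XOFF) | \
					 (1 << __QUEUE_STATE_STACK_XOFF))
#define QUEUE_STATE_ANY_XOFF_OR_FROZEN	(QUEUE_STATE_ANY_XOFF | \
					 (1 << __QUEUE_STATE_FROZEN))

struct netdev_queue {
	unsigned long state;		/* only the field this sketch needs */
};

/* True if either the driver or the stack has turned on XOFF. */
static inline bool netif_xmit_stopped(const struct netdev_queue *txq)
{
	return txq->state & QUEUE_STATE_ANY_XOFF;
}

/*
 * As above, but also true while the queue is frozen; this is what replaces
 * netif_tx_queue_frozen_or_stopped() in the qdisc paths below.
 */
static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *txq)
{
	return txq->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
}

With that in place, the net/sched changes below are mostly mechanical: call sites that only care about queue availability move to netif_xmit_stopped(), and the frozen-aware checks move to netif_xmit_frozen_or_stopped().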
Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_generic.c	8
-rw-r--r--	net/sched/sch_multiq.c	6
-rw-r--r--	net/sched/sch_teql.c	6
3 files changed, 11 insertions, 9 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 79ac1458c2ba..67fc573e013a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,7 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 	/* check the reason of requeuing without tx lock first */
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-	if (!netif_tx_queue_frozen_or_stopped(txq)) {
+	if (!netif_xmit_frozen_or_stopped(txq)) {
 		q->gso_skb = NULL;
 		q->q.qlen--;
 	} else
@@ -121,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_frozen_or_stopped(txq))
+	if (!netif_xmit_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -143,7 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && netif_tx_queue_frozen_or_stopped(txq))
+	if (ret && netif_xmit_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -242,7 +242,7 @@ static void dev_watchdog(unsigned long arg)
 			 * old device drivers set dev->trans_start
 			 */
 			trans_start = txq->trans_start ? : dev->trans_start;
-			if (netif_tx_queue_stopped(txq) &&
+			if (netif_xmit_stopped(txq) &&
 			    time_after(jiffies, (trans_start +
 					 dev->watchdog_timeo))) {
 				some_queue_timedout = 1;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index edc1950e0e77..49131d7a7446 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -107,7 +107,8 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), q->curband))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -138,7 +139,8 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
 		/* Check that target subqueue is available before
 		 * pulling an skb to avoid head-of-line blocking.
 		 */
-		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+		if (!netif_xmit_stopped(
+		    netdev_get_tx_queue(qdisc_dev(sch), curband))) {
 			qdisc = q->queues[curband];
 			skb = qdisc->ops->peek(qdisc);
 			if (skb)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index a3b7120fcc74..283bfe3de59d 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -301,7 +301,7 @@ restart:
 
 			if (slave_txq->qdisc_sleeping != q)
 				continue;
-			if (__netif_subqueue_stopped(slave, subq) ||
+			if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
 			    !netif_running(slave)) {
 				busy = 1;
 				continue;
@@ -312,7 +312,7 @@ restart:
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
 
-				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
+				if (!netif_xmit_frozen_or_stopped(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 					txq_trans_update(slave_txq);
 					__netif_tx_unlock(slave_txq);
@@ -324,7 +324,7 @@ restart:
 			}
 			__netif_tx_unlock(slave_txq);
 		}
-		if (netif_queue_stopped(dev))
+		if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
 			busy = 1;
 		break;
 	case 1: