author     Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>  2007-11-13 23:40:55 -0500
committer  David S. Miller <davem@davemloft.net>  2007-11-13 23:40:55 -0500
commit     5f1a485d5905aa641f33009019b3699076666a4c
tree       8bdcb81a47090cc3b34b1de7159b981e1c55b88b
parent     9418d5dc9ba40b88737580457bf3b7c63c60ec43
[PKT_SCHED]: Check subqueue status before calling hard_start_xmit
The only qdiscs that check subqueue state before dequeueing are PRIO and RR. The other qdiscs, including the default pfifo_fast qdisc, will allow traffic bound for subqueue 0 through to hard_start_xmit. The check for netif_queue_stopped() is done above in pkt_sched.h, so it is unnecessary in qdisc_restart(). However, if the underlying driver is multiqueue capable and only sets queue states on subqueues, packets can enter the driver while it is unable to process them, resulting in expensive requeues and driver entries.

This patch re-adds the check of the subqueue status before calling hard_start_xmit, so we can try to avoid entering the driver when the queues are stopped.

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
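For context, the driver-side pattern this check anticipates looks roughly like the sketch below. This is a minimal, hypothetical example, not code from this patch: the example_* names are invented, while netif_stop_subqueue(), netif_wake_subqueue(), __netif_subqueue_stopped() and skb_get_queue_mapping() are the in-tree multiqueue helpers of this period. Such a driver stops only the subqueue whose TX ring has filled, so netif_queue_stopped() on the device stays false and the subqueue check in qdisc_restart() is what keeps packets out of the driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical per-subqueue TX state; real drivers track descriptor rings. */
struct example_tx_ring { unsigned int used, size; };
struct example_priv { struct example_tx_ring *tx_ring; /* one per subqueue */ };

static bool example_tx_ring_full(struct example_tx_ring *ring)
{
	return ring->used == ring->size;
}

static int example_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	struct example_tx_ring *ring = &priv->tx_ring[queue];

	if (example_tx_ring_full(ring)) {
		/* Stop just this subqueue; other subqueues keep transmitting. */
		netif_stop_subqueue(dev, queue);
		return NETDEV_TX_BUSY;
	}

	/* ... post skb to the hardware ring ... */
	ring->used++;
	return NETDEV_TX_OK;
}

/* TX completion path: reopen the subqueue once ring space frees up. */
static void example_tx_clean(struct net_device *dev, u16 queue)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->tx_ring[queue].used = 0;
	if (__netif_subqueue_stopped(dev, queue))
		netif_wake_subqueue(dev, queue);
}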
Diffstat (limited to 'net')
 net/sched/sch_generic.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fa1a6f45dc41..e595e6570ce0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
 {
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
-	int ret;
+	int ret = NETDEV_TX_BUSY;
 
 	/* Dequeue packet */
 	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
 	spin_unlock(&dev->queue_lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
-	ret = dev_hard_start_xmit(skb, dev);
+	if (!netif_subqueue_stopped(dev, skb))
+		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
 	spin_lock(&dev->queue_lock);
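Note how this hunk interacts with the pre-initialized return value in the first hunk: because ret now starts out as NETDEV_TX_BUSY, a stopped subqueue skips dev_hard_start_xmit() entirely and qdisc_restart() falls through to its NETDEV_TX_BUSY handling, so the skb is requeued instead of being handed to a driver that cannot currently accept it.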