Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	41
1 files changed, 21 insertions, 20 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3385ee592541..f28bb2dc58d0 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -71,12 +71,9 @@ void qdisc_unlock_tree(struct net_device *dev)
 
 
 /* Kick device.
-   Note, that this procedure can be called by a watchdog timer, so that
-   we do not check dev->tbusy flag here.
 
-   Returns:  0 - queue is empty.
-	    >0 - queue is not empty, but throttled.
-	    <0 - queue is not empty. Device is throttled, if dev->tbusy != 0.
+   Returns:  0 - queue is empty or throttled.
+	    >0 - queue is not empty.
 
    NOTE: Called under dev->queue_lock with locally disabled BH.
 */
@@ -115,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
 					kfree_skb(skb);
 					if (net_ratelimit())
 						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
-					return -1;
+					goto out;
 				}
 				__get_cpu_var(netdev_rx_stat).cpu_collision++;
 				goto requeue;
@@ -135,10 +132,12 @@ static inline int qdisc_restart(struct net_device *dev)
 					netif_tx_unlock(dev);
 				}
 				spin_lock(&dev->queue_lock);
-				return -1;
+				q = dev->qdisc;
+				goto out;
 			}
 			if (ret == NETDEV_TX_LOCKED && nolock) {
 				spin_lock(&dev->queue_lock);
+				q = dev->qdisc;
 				goto collision;
 			}
 		}
@@ -163,26 +162,28 @@ static inline int qdisc_restart(struct net_device *dev)
 	 */
 
 requeue:
-	if (skb->next)
+	if (unlikely(q == &noop_qdisc))
+		kfree_skb(skb);
+	else if (skb->next)
 		dev->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 	netif_schedule(dev);
-	return 1;
+	return 0;
 	}
+
+out:
 	BUG_ON((int) q->q.qlen < 0);
 	return q->q.qlen;
 }
 
 void __qdisc_run(struct net_device *dev)
 {
-	if (unlikely(dev->qdisc == &noop_qdisc))
-		goto out;
-
-	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
-		/* NOTHING */;
+	do {
+		if (!qdisc_restart(dev))
+			break;
+	} while (!netif_queue_stopped(dev));
 
-out:
 	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
 }
 
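For reference, this is how __qdisc_run() reads once the hunk above is applied, reassembled from the context and '+' lines; an illustrative rendering of the end state, not an authoritative copy of the file:

void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))	/* 0: queue empty or throttled */
			break;
	} while (!netif_queue_stopped(dev));	/* >0: keep draining until the driver stops the queue */

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

With qdisc_restart() now returning the remaining queue length, the empty and throttled cases collapse into the single 0 return, so the caller no longer needs the old <0 / >0 distinction or the noop_qdisc early exit.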
@@ -544,6 +545,7 @@ void dev_activate(struct net_device *dev)
 void dev_deactivate(struct net_device *dev)
 {
 	struct Qdisc *qdisc;
+	struct sk_buff *skb;
 
 	spin_lock_bh(&dev->queue_lock);
 	qdisc = dev->qdisc;
@@ -551,8 +553,12 @@ void dev_deactivate(struct net_device *dev)
 
 	qdisc_reset(qdisc);
 
+	skb = dev->gso_skb;
+	dev->gso_skb = NULL;
 	spin_unlock_bh(&dev->queue_lock);
 
+	kfree_skb(skb);
+
 	dev_watchdog_down(dev);
 
 	/* Wait for outstanding dev_queue_xmit calls. */
@@ -561,11 +567,6 @@ void dev_deactivate(struct net_device *dev)
 	/* Wait for outstanding qdisc_run calls. */
 	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
 		yield();
-
-	if (dev->gso_skb) {
-		kfree_skb(dev->gso_skb);
-		dev->gso_skb = NULL;
-	}
 }
 
 void dev_init_scheduler(struct net_device *dev)
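For reference, the gso_skb teardown in dev_deactivate() after the last three hunks, reassembled from the context and '+' lines (illustrative excerpt only, not the whole function):

	qdisc_reset(qdisc);

	skb = dev->gso_skb;		/* detach the pending GSO skb under dev->queue_lock */
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);			/* free it only after the lock is dropped */

Detaching dev->gso_skb while dev->queue_lock is held and calling kfree_skb() only after the unlock replaces the old unlocked check that previously sat at the end of the function.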