author     Herbert Xu <herbert@gondor.apana.org.au>      2007-05-10 07:55:14 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-05-11 02:47:40 -0400
commit     d90df3ad07a20cd93921e05ff2b12ca7030b4fd7
tree       5af931c25932cfad852acb8ddc2ad16a7fa45525
parent     5830725f8a36908111ecccf2899d06d6dcf54d45
[NET_SCHED]: Rationalise return value of qdisc_restart
The current return value scheme and its associated comment were invented
back in the 20th century when we still had that tbusy flag. Things
have changed quite a bit since then (even Tony Blair is moving on
now, not to mention the new French president).
All we need to indicate now is whether the caller should continue
processing the queue. It is therefore sufficient to return 0 when we
want to stop and non-zero otherwise.
This is based on a patch by Krishna Kumar.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
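The new contract is easy to model outside the kernel. The sketch below is a
hypothetical userspace analogue, not kernel code: restart_queue() and
queue_stopped() merely stand in for qdisc_restart() and netif_queue_stopped(),
with the return value meaning exactly what this patch introduces, 0 for "stop"
(queue empty or throttled) and non-zero for "more work to do". The driving
loop mirrors the reworked __qdisc_run() in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for qdisc_restart(): "transmit" one packet and
 * return the remaining queue length.  Under the new scheme, 0 means
 * "stop processing" (queue empty or throttled), non-zero means there
 * is more work to do. */
static int restart_queue(int *qlen)
{
	if (*qlen > 0)
		(*qlen)--;
	return *qlen;
}

/* Hypothetical stand-in for netif_queue_stopped(). */
static bool queue_stopped(void)
{
	return false;
}

int main(void)
{
	int qlen = 5;

	/* Mirrors the reworked loop in __qdisc_run(): keep kicking the
	 * queue until the restart routine says stop or the device
	 * queue is stopped. */
	do {
		if (!restart_queue(&qlen))
			break;
	} while (!queue_stopped());

	printf("queue drained, qlen = %d\n", qlen);
	return 0;
}

Because 0 now uniformly means "stop", the caller no longer has to tell
"empty" apart from "throttled", which is what lets __qdisc_run() collapse
into the simple do/while loop seen in the last hunk.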
 net/sched/sch_generic.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a8240c578772..07200bfebf00 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -71,12 +71,9 @@ void qdisc_unlock_tree(struct net_device *dev)
 
 
 /* Kick device.
-   Note, that this procedure can be called by a watchdog timer, so that
-   we do not check dev->tbusy flag here.
 
-   Returns:  0  - queue is empty.
-            >0  - queue is not empty, but throttled.
-            <0  - queue is not empty.  Device is throttled, if dev->tbusy != 0.
+   Returns:  0  - queue is empty or throttled.
+            >0  - queue is not empty.
 
    NOTE: Called under dev->queue_lock with locally disabled BH.
 */
@@ -115,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
 				kfree_skb(skb);
 				if (net_ratelimit())
 					printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
-				return -1;
+				goto out;
 			}
 			__get_cpu_var(netdev_rx_stat).cpu_collision++;
 			goto requeue;
@@ -135,7 +132,7 @@ static inline int qdisc_restart(struct net_device *dev)
 					netif_tx_unlock(dev);
 				}
 				spin_lock(&dev->queue_lock);
-				return -1;
+				goto out;
 			}
 			if (ret == NETDEV_TX_LOCKED && nolock) {
 				spin_lock(&dev->queue_lock);
@@ -169,8 +166,10 @@ requeue:
 		else
 			q->ops->requeue(skb, q);
 		netif_schedule(dev);
-		return 1;
+		return 0;
 	}
+
+out:
 	BUG_ON((int) q->q.qlen < 0);
 	return q->q.qlen;
 }
@@ -180,8 +179,10 @@ void __qdisc_run(struct net_device *dev)
 	if (unlikely(dev->qdisc == &noop_qdisc))
 		goto out;
 
-	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
-		/* NOTHING */;
+	do {
+		if (!qdisc_restart(dev))
+			break;
+	} while (!netif_queue_stopped(dev));
 
 out:
 	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);