Diffstat (limited to 'net/sched')
 -rw-r--r--  net/sched/sch_gred.c  |  32
 1 file changed, 9 insertions(+), 23 deletions(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index f7c6c0359ce5..95c5f2cf3fdf 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -230,22 +230,15 @@ gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	if (q->backlog + skb->len <= q->limit) {
 		q->backlog += skb->len;
 do_enqueue:
-		__skb_queue_tail(&sch->q, skb);
-		sch->qstats.backlog += skb->len;
-		sch->bstats.bytes += skb->len;
-		sch->bstats.packets++;
-		return 0;
+		return qdisc_enqueue_tail(skb, sch);
 	}
 
 	q->stats.pdrop++;
 drop:
-	kfree_skb(skb);
-	sch->qstats.drops++;
-	return NET_XMIT_DROP;
+	return qdisc_drop(skb, sch);
 
 congestion_drop:
-	kfree_skb(skb);
-	sch->qstats.drops++;
+	qdisc_drop(skb, sch);
 	return NET_XMIT_CN;
 }
 
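
For context, the bookkeeping removed in gred_enqueue() above suggests roughly what the new generic helpers do. The sketch below is inferred from the deleted lines, not taken from the qdisc core, so the real definitions may differ in detail:

/* Assumed behaviour of the generic enqueue/drop helpers (sketch only);
 * it mirrors the open-coded statistics updates removed above. */
static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_tail(&sch->q, skb);		/* append to the qdisc queue */
	sch->qstats.backlog += skb->len;	/* account queued bytes */
	sch->bstats.bytes += skb->len;		/* transmit statistics */
	sch->bstats.packets++;
	return NET_XMIT_SUCCESS;		/* matches the old "return 0" */
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);				/* free the packet */
	sch->qstats.drops++;			/* count the drop */
	return NET_XMIT_DROP;
}

Note that in the congestion_drop path the return value of qdisc_drop() is ignored and NET_XMIT_CN is returned instead, preserving the old behaviour.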
@@ -260,11 +253,8 @@ gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	if (red_is_idling(&q->parms))
 		red_end_of_idle_period(&q->parms);
 
-	__skb_queue_head(&sch->q, skb);
-	sch->qstats.backlog += skb->len;
-	sch->qstats.requeues++;
 	q->backlog += skb->len;
-	return 0;
+	return qdisc_requeue(skb, sch);
 }
 
 static struct sk_buff *
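
The removed requeue bookkeeping suggests the following rough equivalent for qdisc_requeue(); again a sketch inferred from the deleted lines rather than the helper's actual definition:

/* Assumed behaviour of qdisc_requeue() (sketch): put the packet back at
 * the head of the queue and restore the backlog/requeue counters. */
static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	return NET_XMIT_SUCCESS;	/* matches the old "return 0" */
}

Note that gred_requeue() still updates its own per-virtual-queue q->backlog before calling the helper.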
@@ -274,9 +264,9 @@ gred_dequeue(struct Qdisc* sch)
 	struct gred_sched_data *q;
 	struct gred_sched *t= qdisc_priv(sch);
 
-	skb = __skb_dequeue(&sch->q);
+	skb = qdisc_dequeue_head(sch);
+
 	if (skb) {
-		sch->qstats.backlog -= skb->len;
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
 			q->backlog -= skb->len;
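
The dequeue side follows the same pattern: the per-qdisc backlog update moves into the helper, while GRED keeps maintaining its per-virtual-queue backlog. A sketch of the assumed qdisc_dequeue_head(), based on the lines it replaces:

/* Assumed behaviour of qdisc_dequeue_head() (sketch): pop from the head
 * of the qdisc queue and keep sch->qstats.backlog in sync. */
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue(&sch->q);

	if (skb)
		sch->qstats.backlog -= skb->len;
	return skb;
}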
@@ -307,11 +297,9 @@ static unsigned int gred_drop(struct Qdisc* sch)
 	struct gred_sched_data *q;
 	struct gred_sched *t= qdisc_priv(sch);
 
-	skb = __skb_dequeue_tail(&sch->q);
+	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
 		unsigned int len = skb->len;
-		sch->qstats.backlog -= len;
-		sch->qstats.drops++;
 		q= t->tab[(skb->tc_index&0xf)];
 		if (q) {
 			q->backlog -= len;
@@ -322,7 +310,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 		D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
 	}
 
-	kfree_skb(skb);
+	qdisc_drop(skb, sch);
 	return len;
 }
 
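
In gred_drop() the tail dequeue and the drop accounting are likewise delegated: qdisc_dequeue_tail() is assumed to undo the backlog accounting, and the final qdisc_drop() frees the skb and bumps the drop counter that used to be incremented inline. A sketch under those assumptions:

/* Assumed behaviour of qdisc_dequeue_tail() (sketch): remove the packet
 * at the tail and subtract it from the qdisc backlog; freeing the skb and
 * counting the drop are left to qdisc_drop() further down. */
static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	struct sk_buff *skb = __skb_dequeue_tail(&sch->q);

	if (skb)
		sch->qstats.backlog -= skb->len;
	return skb;
}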
@@ -343,9 +331,7 @@ static void gred_reset(struct Qdisc* sch)
 	struct gred_sched_data *q;
 	struct gred_sched *t= qdisc_priv(sch);
 
-	__skb_queue_purge(&sch->q);
-
-	sch->qstats.backlog = 0;
+	qdisc_reset_queue(sch);
 
 	for (i=0;i<t->DPs;i++) {
 		q= t->tab[i];
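
Finally, gred_reset() replaces the open-coded purge with qdisc_reset_queue(), which, judging from the two removed lines, amounts to something like:

/* Assumed behaviour of qdisc_reset_queue() (sketch): drop every queued
 * packet and clear the per-qdisc backlog counter. */
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__skb_queue_purge(&sch->q);
	sch->qstats.backlog = 0;
}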