author		Jarek Poplawski <jarkao2@gmail.com>	2008-11-14 01:56:30 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-14 01:56:30 -0500
commit		f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce (patch)
tree		271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net
parent		38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858 (diff)
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and changing sch_netem into a
classless qdisc, there are no remaining users of qdisc->ops->requeue().
This patch removes the method together with its wrappers (qdisc_requeue())
and the now-unused qdisc->requeue structure. A few warnings (htb_enqueue())
and stale comments are fixed along the way.
The idea to kill ->requeue() and a similar patch were first developed
by David S. Miller.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
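For reference, the mechanism that makes ->requeue() unnecessary: a child
qdisc's next packet can be inspected without being committed to, by
dequeuing once and stashing the skb. A reconstructed sketch of the
qdisc_peek_dequeued()/qdisc_dequeue_peeked() helpers this series relies on
(they live in include/net/sch_generic.h, outside this 'net'-limited diff;
details may differ from the tree):

/* Reconstructed sketch, not part of this diff: peek built on dequeue.
 * ->gso_skb doubles as a one-packet stash; peek is not used on root
 * qdiscs, which are the only users of gso_skb proper.
 */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			sch->q.qlen++;	/* still counted as queued */
	}
	return sch->gso_skb;
}

static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;	/* hand out the stashed skb */
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}
	return skb;
}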
Diffstat (limited to 'net')
-rw-r--r--	net/sched/sch_api.c	7
-rw-r--r--	net/sched/sch_atm.c	20
-rw-r--r--	net/sched/sch_cbq.c	35
-rw-r--r--	net/sched/sch_dsmark.c	21
-rw-r--r--	net/sched/sch_fifo.c	2
-rw-r--r--	net/sched/sch_generic.c	23
-rw-r--r--	net/sched/sch_gred.c	21
-rw-r--r--	net/sched/sch_hfsc.c	19
-rw-r--r--	net/sched/sch_htb.c	44
-rw-r--r--	net/sched/sch_multiq.c	39
-rw-r--r--	net/sched/sch_netem.c	16
-rw-r--r--	net/sched/sch_prio.c	28
-rw-r--r--	net/sched/sch_red.c	18
-rw-r--r--	net/sched/sch_sfq.c	63
-rw-r--r--	net/sched/sch_tbf.c	14
-rw-r--r--	net/sched/sch_teql.c	11
16 files changed, 4 insertions(+), 377 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index e5646614e88d..5bcef13408c8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -97,11 +97,6 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
 
    Auxiliary routines:
 
-   ---requeue
-
-   requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if netif_queue_stopped()=0.
-
    ---peek
 
    like dequeue but without removing a packet from the queue
@@ -151,8 +146,6 @@ int register_qdisc(struct Qdisc_ops *qops)
 
 	if (qops->enqueue == NULL)
 		qops->enqueue = noop_qdisc_ops.enqueue;
-	if (qops->requeue == NULL)
-		qops->requeue = noop_qdisc_ops.requeue;
 	if (qops->peek == NULL) {
 		if (qops->dequeue == NULL) {
 			qops->peek = noop_qdisc_ops.peek;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6eb9a650b63d..ca90f6e59aee 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -62,7 +62,7 @@ struct atm_qdisc_data {
 	struct atm_flow_data	link;		/* unclassified skbs go here */
 	struct atm_flow_data	*flows;		/* NB: "link" is also on this
						   list */
-	struct tasklet_struct	task;		/* requeue tasklet */
+	struct tasklet_struct	task;		/* dequeue tasklet */
 };
 
 /* ------------------------- Class/flow operations ------------------------- */
@@ -534,23 +534,6 @@ static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
 	return p->link.q->ops->peek(p->link.q);
 }
 
-static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct atm_qdisc_data *p = qdisc_priv(sch);
-	int ret;
-
-	pr_debug("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
-	ret = p->link.q->ops->requeue(skb, p->link.q);
-	if (!ret) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-	} else if (net_xmit_drop_count(ret)) {
-		sch->qstats.drops++;
-		p->link.qstats.drops++;
-	}
-	return ret;
-}
-
 static unsigned int atm_tc_drop(struct Qdisc *sch)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -707,7 +690,6 @@ static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
 	.enqueue	= atm_tc_enqueue,
 	.dequeue	= atm_tc_dequeue,
 	.peek		= atm_tc_peek,
-	.requeue	= atm_tc_requeue,
 	.drop		= atm_tc_drop,
 	.init		= atm_tc_init,
 	.reset		= atm_tc_reset,
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 63efa70abbea..a99e37e9e6f1 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -405,40 +405,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-static int
-cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct cbq_sched_data *q = qdisc_priv(sch);
-	struct cbq_class *cl;
-	int ret;
-
-	if ((cl = q->tx_class) == NULL) {
-		kfree_skb(skb);
-		sch->qstats.drops++;
-		return NET_XMIT_CN;
-	}
-	q->tx_class = NULL;
-
-	cbq_mark_toplevel(q, cl);
-
-#ifdef CONFIG_NET_CLS_ACT
-	q->rx_class = cl;
-	cl->q->__parent = sch;
-#endif
-	if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-		if (!cl->next_alive)
-			cbq_activate_class(cl);
-		return 0;
-	}
-	if (net_xmit_drop_count(ret)) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
-	}
-	return ret;
-}
-
 /* Overlimit actions */
 
 /* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */
@@ -2067,7 +2033,6 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
 	.enqueue	=	cbq_enqueue,
 	.dequeue	=	cbq_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.requeue	=	cbq_requeue,
 	.drop		=	cbq_drop,
 	.init		=	cbq_init,
 	.reset		=	cbq_reset,
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 3e491479ea86..3f9427a4b757 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -322,26 +322,6 @@ static struct sk_buff *dsmark_peek(struct Qdisc *sch)
 	return p->q->ops->peek(p->q);
 }
 
-static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct dsmark_qdisc_data *p = qdisc_priv(sch);
-	int err;
-
-	pr_debug("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
-
-	err = p->q->ops->requeue(skb, p->q);
-	if (err != NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(err))
-			sch->qstats.drops++;
-		return err;
-	}
-
-	sch->q.qlen++;
-	sch->qstats.requeues++;
-
-	return NET_XMIT_SUCCESS;
-}
-
 static unsigned int dsmark_drop(struct Qdisc *sch)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
@@ -506,7 +486,6 @@ static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
 	.enqueue	=	dsmark_enqueue,
 	.dequeue	=	dsmark_dequeue,
 	.peek		=	dsmark_peek,
-	.requeue	=	dsmark_requeue,
 	.drop		=	dsmark_drop,
 	.init		=	dsmark_init,
 	.reset		=	dsmark_reset,
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 8825e8806f41..92cfc9d7e3b9 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -84,7 +84,6 @@ struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
 	.enqueue	=	pfifo_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
-	.requeue	=	qdisc_requeue,
 	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
 	.reset		=	qdisc_reset_queue,
@@ -100,7 +99,6 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
 	.enqueue	=	bfifo_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
-	.requeue	=	qdisc_requeue,
 	.drop		=	qdisc_queue_drop,
 	.init		=	fifo_init,
 	.reset		=	qdisc_reset_queue,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1192da229835..80c8f3dbbea1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -306,22 +306,12 @@ static struct sk_buff *noop_dequeue(struct Qdisc * qdisc)
 	return NULL;
 }
 
-static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	if (net_ratelimit())
-		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
-		       skb->dev->name);
-	kfree_skb(skb);
-	return NET_XMIT_CN;
-}
-
 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 	.id		=	"noop",
 	.priv_size	=	0,
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
-	.requeue	=	noop_requeue,
 	.owner		=	THIS_MODULE,
 };
 
@@ -336,7 +326,6 @@ struct Qdisc noop_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
 };
@@ -348,7 +337,6 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.peek		=	noop_dequeue,
-	.requeue	=	noop_requeue,
 	.owner		=	THIS_MODULE,
 };
 
@@ -364,7 +352,6 @@ static struct Qdisc noqueue_qdisc = {
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
-	.requeue.lock	=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
 	.dev_queue	=	&noqueue_netdev_queue,
 };
@@ -426,12 +413,6 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 	return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
-}
-
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
@@ -473,7 +454,6 @@ static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
-	.requeue	=	pfifo_fast_requeue,
 	.init		=	pfifo_fast_init,
 	.reset		=	pfifo_fast_reset,
 	.dump		=	pfifo_fast_dump,
@@ -499,7 +479,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->padded = (char *) sch - (char *) p;
 
 	INIT_LIST_HEAD(&sch->list);
-	skb_queue_head_init(&sch->requeue);
 	skb_queue_head_init(&sch->q);
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
@@ -571,8 +550,6 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	dev_put(qdisc_dev(qdisc));
 
 	kfree_skb(qdisc->gso_skb);
-	__skb_queue_purge(&qdisc->requeue);
-
 	kfree((char *) qdisc - qdisc->padded);
 }
 EXPORT_SYMBOL(qdisc_destroy);
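With sch->requeue and ->ops->requeue() both gone, the only requeuing left
is what the core does when a busy driver refuses a packet. A simplified,
reconstructed sketch of that path in sch_generic.c (it predates this patch
and is untouched by it; details may differ from the tree):

/* Simplified, reconstructed sketch: on driver rejection the core
 * stashes the skb in qdisc->gso_skb and reschedules, instead of
 * calling a per-qdisc ->requeue() method.
 */
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	q->qstats.requeues++;
	__netif_schedule(q);
	return 0;
}

static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb = q->gso_skb;

	if (skb)
		q->gso_skb = NULL;	/* retry the stashed packet first */
	else
		skb = q->dequeue(q);
	return skb;
}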
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index cb20ee3b9fc2..40408d595c08 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -240,26 +240,6 @@ congestion_drop:
 	return NET_XMIT_CN;
 }
 
-static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	struct gred_sched *t = qdisc_priv(sch);
-	struct gred_sched_data *q;
-	u16 dp = tc_index_to_dp(skb);
-
-	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
-		if (net_ratelimit())
-			printk(KERN_WARNING "GRED: Unable to relocate VQ 0x%x "
-			       "for requeue, screwing up backlog.\n",
-			       tc_index_to_dp(skb));
-	} else {
-		if (red_is_idling(&q->parms))
-			red_end_of_idle_period(&q->parms);
-		q->backlog += qdisc_pkt_len(skb);
-	}
-
-	return qdisc_requeue(skb, sch);
-}
-
 static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
@@ -603,7 +583,6 @@ static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
 	.enqueue	=	gred_enqueue,
 	.dequeue	=	gred_dequeue,
 	.peek		=	qdisc_peek_head,
-	.requeue	=	gred_requeue,
 	.drop		=	gred_drop,
 	.init		=	gred_init,
 	.reset		=	gred_reset,
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d90b1652f2af..071c4749a12b 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -184,7 +184,6 @@ struct hfsc_sched
 	struct rb_root eligible;		/* eligible tree */
 	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
-	struct sk_buff_head requeue;		/* requeued packet */
 	struct qdisc_watchdog watchdog;		/* watchdog timer */
 };
 
@@ -1432,7 +1431,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 		return err;
 	q->eligible = RB_ROOT;
 	INIT_LIST_HEAD(&q->droplist);
-	skb_queue_head_init(&q->requeue);
 
 	q->root.cl_common.classid = sch->handle;
 	q->root.refcnt = 1;
@@ -1517,7 +1515,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
 		hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
 			hfsc_reset_class(cl);
 	}
-	__skb_queue_purge(&q->requeue);
 	q->eligible = RB_ROOT;
 	INIT_LIST_HEAD(&q->droplist);
 	qdisc_watchdog_cancel(&q->watchdog);
@@ -1542,7 +1539,6 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
 			hfsc_destroy_class(sch, cl);
 	}
 	qdisc_class_hash_destroy(&q->clhash);
-	__skb_queue_purge(&q->requeue);
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -1609,8 +1605,6 @@ hfsc_dequeue(struct Qdisc *sch)
 
 	if (sch->q.qlen == 0)
 		return NULL;
-	if ((skb = __skb_dequeue(&q->requeue)))
-		goto out;
 
 	cur_time = psched_get_time();
 
@@ -1659,24 +1653,12 @@ hfsc_dequeue(struct Qdisc *sch)
 		set_passive(cl);
 	}
 
- out:
 	sch->flags &= ~TCQ_F_THROTTLED;
 	sch->q.qlen--;
 
 	return skb;
 }
 
-static int
-hfsc_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct hfsc_sched *q = qdisc_priv(sch);
-
-	__skb_queue_head(&q->requeue, skb);
-	sch->q.qlen++;
-	sch->qstats.requeues++;
-	return NET_XMIT_SUCCESS;
-}
-
 static unsigned int
 hfsc_drop(struct Qdisc *sch)
 {
@@ -1728,7 +1710,6 @@ static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
 	.enqueue	= hfsc_enqueue,
 	.dequeue	= hfsc_dequeue,
 	.peek		= qdisc_peek_dequeued,
-	.requeue	= hfsc_requeue,
 	.drop		= hfsc_drop,
 	.cl_ops		= &hfsc_class_ops,
 	.priv_size	= sizeof(struct hfsc_sched),
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3fda8199713d..83f5e69243c1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -551,7 +551,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	int ret;
+	int uninitialized_var(ret);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb, sch, &ret);
 
@@ -591,47 +591,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-/* TODO: requeuing packet charges it to policers again !! */
-static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	int ret;
-	struct htb_sched *q = qdisc_priv(sch);
-	struct htb_class *cl = htb_classify(skb, sch, &ret);
-	struct sk_buff *tskb;
-
-	if (cl == HTB_DIRECT) {
-		/* enqueue to helper queue */
-		if (q->direct_queue.qlen < q->direct_qlen) {
-			__skb_queue_head(&q->direct_queue, skb);
-		} else {
-			__skb_queue_head(&q->direct_queue, skb);
-			tskb = __skb_dequeue_tail(&q->direct_queue);
-			kfree_skb(tskb);
-			sch->qstats.drops++;
-			return NET_XMIT_CN;
-		}
-#ifdef CONFIG_NET_CLS_ACT
-	} else if (!cl) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-#endif
-	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
-		   NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			cl->qstats.drops++;
-		}
-		return ret;
-	} else
-		htb_activate(q, cl);
-
-	sch->q.qlen++;
-	sch->qstats.requeues++;
-	return NET_XMIT_SUCCESS;
-}
-
 /**
  * htb_charge_class - charges amount "bytes" to leaf and ancestors
  *
@@ -1566,7 +1525,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
 	.enqueue	=	htb_enqueue,
 	.dequeue	=	htb_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.requeue	=	htb_requeue,
 	.drop		=	htb_drop,
 	.init		=	htb_init,
 	.reset		=	htb_reset,
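About the htb_enqueue() hunk above, the warning fix mentioned in the commit
message: htb_classify() only writes ret on the paths that later read it,
which GCC cannot prove, so it warns that ret "may be used uninitialized".
uninitialized_var() (which expands to "ret = ret") silences the false
positive without changing behaviour. A hypothetical, trimmed illustration:

/* Hypothetical, trimmed illustration of the silenced warning. */
int uninitialized_var(ret);	/* plain "int ret;" makes GCC warn */
struct htb_class *cl = htb_classify(skb, sch, &ret);

if (cl == NULL) {
	kfree_skb(skb);
	return ret;		/* ret was in fact set on this path */
}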
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 155648d23b7c..f645ac55a1a1 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -92,40 +92,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-
-static int
-multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct Qdisc *qdisc;
-	struct multiq_sched_data *q = qdisc_priv(sch);
-	int ret;
-
-	qdisc = multiq_classify(skb, sch, &ret);
-#ifdef CONFIG_NET_CLS_ACT
-	if (qdisc == NULL) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-#endif
-
-	ret = qdisc->ops->requeue(skb, qdisc);
-	if (ret == NET_XMIT_SUCCESS) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-		if (q->curband)
-			q->curband--;
-		else
-			q->curband = q->bands - 1;
-		return NET_XMIT_SUCCESS;
-	}
-	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
-	return ret;
-}
-
-
 static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
@@ -140,7 +106,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 			q->curband = 0;
 
 		/* Check that target subqueue is available before
-		 * pulling an skb to avoid excessive requeues
+		 * pulling an skb to avoid head-of-line blocking.
 		 */
 		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
 			qdisc = q->queues[q->curband];
@@ -170,7 +136,7 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
 			curband = 0;
 
 		/* Check that target subqueue is available before
-		 * pulling an skb to avoid excessive requeues
+		 * pulling an skb to avoid head-of-line blocking.
 		 */
 		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
 			qdisc = q->queues[curband];
@@ -480,7 +446,6 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
 	.enqueue	=	multiq_enqueue,
 	.dequeue	=	multiq_dequeue,
 	.peek		=	multiq_peek,
-	.requeue	=	multiq_requeue,
 	.drop		=	multiq_drop,
 	.init		=	multiq_init,
 	.reset		=	multiq_reset,
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index f69698ff88d9..3cbc3ff7b5bc 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -252,20 +252,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-/* Requeue packets but don't change time stamp */
-static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct netem_sched_data *q = qdisc_priv(sch);
-	int ret;
-
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-	}
-
-	return ret;
-}
-
 static unsigned int netem_drop(struct Qdisc* sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -531,7 +517,6 @@ static struct Qdisc_ops tfifo_qdisc_ops __read_mostly = {
 	.enqueue	=	tfifo_enqueue,
 	.dequeue	=	qdisc_dequeue_head,
 	.peek		=	qdisc_peek_head,
-	.requeue	=	qdisc_requeue,
 	.drop		=	qdisc_queue_drop,
 	.init		=	tfifo_init,
 	.reset		=	qdisc_reset_queue,
@@ -620,7 +605,6 @@ static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
 	.enqueue	=	netem_enqueue,
 	.dequeue	=	netem_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.requeue	=	netem_requeue,
 	.drop		=	netem_drop,
 	.init		=	netem_init,
 	.reset		=	netem_reset,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 3651da3e2802..ea65a87ec22c 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -93,33 +93,6 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-
-static int
-prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	struct Qdisc *qdisc;
-	int ret;
-
-	qdisc = prio_classify(skb, sch, &ret);
-#ifdef CONFIG_NET_CLS_ACT
-	if (qdisc == NULL) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-#endif
-
-	if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-		return NET_XMIT_SUCCESS;
-	}
-	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
-	return ret;
-}
-
 static struct sk_buff *prio_peek(struct Qdisc *sch)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
@@ -435,7 +408,6 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
 	.enqueue	=	prio_enqueue,
 	.dequeue	=	prio_dequeue,
 	.peek		=	prio_peek,
-	.requeue	=	prio_requeue,
 	.drop		=	prio_drop,
 	.init		=	prio_init,
 	.reset		=	prio_reset,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 7abc51454c2d..6a0371c22643 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -108,23 +108,6 @@ congestion_drop:
 	return NET_XMIT_CN;
 }
 
-static int red_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	struct red_sched_data *q = qdisc_priv(sch);
-	struct Qdisc *child = q->qdisc;
-	int ret;
-
-	if (red_is_idling(&q->parms))
-		red_end_of_idle_period(&q->parms);
-
-	ret = child->ops->requeue(skb, child);
-	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->qstats.requeues++;
-		sch->q.qlen++;
-	}
-	return ret;
-}
-
 static struct sk_buff * red_dequeue(struct Qdisc* sch)
 {
 	struct sk_buff *skb;
@@ -370,7 +353,6 @@ static struct Qdisc_ops red_qdisc_ops __read_mostly = {
 	.enqueue	=	red_enqueue,
 	.dequeue	=	red_dequeue,
 	.peek		=	red_peek,
-	.requeue	=	red_requeue,
 	.drop		=	red_drop,
 	.init		=	red_init,
 	.reset		=	red_reset,
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 198b83d42ba8..ab8cfee3c9ce 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -329,68 +329,6 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_CN;
 }
 
-static int
-sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct sfq_sched_data *q = qdisc_priv(sch);
-	unsigned int hash;
-	sfq_index x;
-	int ret;
-
-	hash = sfq_classify(skb, sch, &ret);
-	if (hash == 0) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-	hash--;
-
-	x = q->ht[hash];
-	if (x == SFQ_DEPTH) {
-		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
-		q->hash[x] = hash;
-	}
-
-	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__skb_queue_head(&q->qs[x], skb);
-	/* If selected queue has length q->limit+1, this means that
-	 * all another queues are empty and we do simple tail drop.
-	 * This packet is still requeued at head of queue, tail packet
-	 * is dropped.
-	 */
-	if (q->qs[x].qlen > q->limit) {
-		skb = q->qs[x].prev;
-		__skb_unlink(skb, &q->qs[x]);
-		sch->qstats.drops++;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
-		kfree_skb(skb);
-		return NET_XMIT_CN;
-	}
-
-	sfq_inc(q, x);
-	if (q->qs[x].qlen == 1) {		/* The flow is new */
-		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
-			q->tail = x;
-			q->next[x] = x;
-			q->allot[x] = q->quantum;
-		} else {
-			q->next[x] = q->next[q->tail];
-			q->next[q->tail] = x;
-			q->tail = x;
-		}
-	}
-
-	if (++sch->q.qlen <= q->limit) {
-		sch->qstats.requeues++;
-		return 0;
-	}
-
-	sch->qstats.drops++;
-	sfq_drop(sch);
-	return NET_XMIT_CN;
-}
-
 static struct sk_buff *
 sfq_peek(struct Qdisc *sch)
 {
@@ -636,7 +574,6 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
 	.enqueue	=	sfq_enqueue,
 	.dequeue	=	sfq_dequeue,
 	.peek		=	sfq_peek,
-	.requeue	=	sfq_requeue,
 	.drop		=	sfq_drop,
 	.init		=	sfq_init,
 	.reset		=	sfq_reset,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 435076cf620e..bb7783d584bb 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -139,19 +139,6 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	return 0;
 }
 
-static int tbf_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	struct tbf_sched_data *q = qdisc_priv(sch);
-	int ret;
-
-	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-	}
-
-	return ret;
-}
-
 static unsigned int tbf_drop(struct Qdisc* sch)
 {
 	struct tbf_sched_data *q = qdisc_priv(sch);
@@ -468,7 +455,6 @@ static struct Qdisc_ops tbf_qdisc_ops __read_mostly = {
 	.enqueue	=	tbf_enqueue,
 	.dequeue	=	tbf_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.requeue	=	tbf_requeue,
 	.drop		=	tbf_drop,
 	.init		=	tbf_init,
 	.reset		=	tbf_reset,
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index bf03e7fa1849..cfc8e7caba62 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -93,16 +93,6 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	return NET_XMIT_DROP;
 }
 
-static int
-teql_requeue(struct sk_buff *skb, struct Qdisc* sch)
-{
-	struct teql_sched_data *q = qdisc_priv(sch);
-
-	__skb_queue_head(&q->q, skb);
-	sch->qstats.requeues++;
-	return 0;
-}
-
 static struct sk_buff *
 teql_dequeue(struct Qdisc* sch)
 {
@@ -441,7 +431,6 @@ static __init void teql_master_setup(struct net_device *dev)
 	ops->enqueue	=	teql_enqueue;
 	ops->dequeue	=	teql_dequeue;
 	ops->peek	=	teql_peek;
-	ops->requeue	=	teql_requeue;
 	ops->init	=	teql_qdisc_init;
 	ops->reset	=	teql_reset;
 	ops->destroy	=	teql_destroy;