diff options
author | Jarek Poplawski <jarkao2@gmail.com> | 2008-11-14 01:56:30 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-11-14 01:56:30 -0500 |
commit | f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce (patch) | |
tree | 271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net/sched/sch_cbq.c | |
parent | 38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858 (diff) |
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and changing sch_netem into
classless qdisc there are no more qdisc->ops->requeue() users. This
patch removes this method with its wrappers (qdisc_requeue()), and
also unused qdisc->requeue structure. There are also a few minor fixes of
warnings (htb_enqueue()) and comments.
The idea to kill ->requeue() and a similar patch were first developed
by David S. Miller.
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r-- | net/sched/sch_cbq.c | 35 |
1 file changed, 0 insertions, 35 deletions
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 63efa70abbea..a99e37e9e6f1 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -405,40 +405,6 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
405 | return ret; | 405 | return ret; |
406 | } | 406 | } |
407 | 407 | ||
408 | static int | ||
409 | cbq_requeue(struct sk_buff *skb, struct Qdisc *sch) | ||
410 | { | ||
411 | struct cbq_sched_data *q = qdisc_priv(sch); | ||
412 | struct cbq_class *cl; | ||
413 | int ret; | ||
414 | |||
415 | if ((cl = q->tx_class) == NULL) { | ||
416 | kfree_skb(skb); | ||
417 | sch->qstats.drops++; | ||
418 | return NET_XMIT_CN; | ||
419 | } | ||
420 | q->tx_class = NULL; | ||
421 | |||
422 | cbq_mark_toplevel(q, cl); | ||
423 | |||
424 | #ifdef CONFIG_NET_CLS_ACT | ||
425 | q->rx_class = cl; | ||
426 | cl->q->__parent = sch; | ||
427 | #endif | ||
428 | if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) { | ||
429 | sch->q.qlen++; | ||
430 | sch->qstats.requeues++; | ||
431 | if (!cl->next_alive) | ||
432 | cbq_activate_class(cl); | ||
433 | return 0; | ||
434 | } | ||
435 | if (net_xmit_drop_count(ret)) { | ||
436 | sch->qstats.drops++; | ||
437 | cl->qstats.drops++; | ||
438 | } | ||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | /* Overlimit actions */ | 408 | /* Overlimit actions */ |
443 | 409 | ||
444 | /* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */ | 410 | /* TC_CBQ_OVL_CLASSIC: (default) penalize leaf class by adding offtime */ |
@@ -2067,7 +2033,6 @@ static struct Qdisc_ops cbq_qdisc_ops __read_mostly = { | |||
2067 | .enqueue = cbq_enqueue, | 2033 | .enqueue = cbq_enqueue, |
2068 | .dequeue = cbq_dequeue, | 2034 | .dequeue = cbq_dequeue, |
2069 | .peek = qdisc_peek_dequeued, | 2035 | .peek = qdisc_peek_dequeued, |
2070 | .requeue = cbq_requeue, | ||
2071 | .drop = cbq_drop, | 2036 | .drop = cbq_drop, |
2072 | .init = cbq_init, | 2037 | .init = cbq_init, |
2073 | .reset = cbq_reset, | 2038 | .reset = cbq_reset, |