author		Jarek Poplawski <jarkao2@gmail.com>	2008-11-14 01:56:30 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-14 01:56:30 -0500
commit		f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce (patch)
tree		271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net/sched/sch_htb.c
parent		38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858 (diff)
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and changing sch_netem into a classless qdisc, there are no more users of qdisc->ops->requeue(). This patch removes the method together with its wrappers (qdisc_requeue()), and also the now-unused qdisc->requeue structure. There are a few minor fixes of warnings (htb_enqueue()) and comments along the way.

The idea to kill ->requeue() and a similar patch were first developed by David S. Miller.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
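The reason ->requeue() can be removed outright is the protocol change the message describes: with ->peek(), a parent qdisc inspects the next packet without removing it from the child, and only dequeues once the packet can actually be sent, so there is never anything to put back. The userspace toy model below is a minimal sketch of that protocol; every name in it (toy_qdisc, toy_peek, and so on) is illustrative and not the kernel's actual API.

#include <stdio.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct toy_qdisc {
	struct pkt *head;	/* singly linked packet queue */
	struct pkt *peeked;	/* packet handed out by peek, still "queued" */
};

/* Remove the head packet from the queue. */
static struct pkt *toy_dequeue(struct toy_qdisc *q)
{
	struct pkt *p = q->head;

	if (p)
		q->head = p->next;
	return p;
}

/* Peek: pull one packet at most once, then keep returning that packet. */
static struct pkt *toy_peek(struct toy_qdisc *q)
{
	if (!q->peeked)
		q->peeked = toy_dequeue(q);
	return q->peeked;
}

/* Consume the packet that peek handed out. */
static struct pkt *toy_dequeue_peeked(struct toy_qdisc *q)
{
	struct pkt *p = q->peeked;

	q->peeked = NULL;
	return p;
}

int main(void)
{
	struct pkt a = { 1, NULL };
	struct toy_qdisc q = { &a, NULL };
	struct pkt *p;

	p = toy_peek(&q);		/* look at the head, don't remove it */
	if (p) {			/* pretend a rate limiter says "ok" */
		p = toy_dequeue_peeked(&q);
		printf("sent pkt %d\n", p->id);
	}
	/*
	 * Had the rate limiter said "not yet", the packet would simply
	 * have stayed where peek parked it -- no ->requeue() call, and
	 * no double-charging of policers on the way back in.
	 */
	return 0;
}

Compare the TODO comment on the htb_requeue() removed in the diff below, which notes exactly that double-charging problem with the old requeue path.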
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c	44
1 file changed, 1 insertion, 43 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3fda8199713d..83f5e69243c1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -551,7 +551,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 
 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
-	int ret;
+	int uninitialized_var(ret);
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = htb_classify(skb, sch, &ret);
 
@@ -591,47 +591,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-/* TODO: requeuing packet charges it to policers again !! */
-static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	int ret;
-	struct htb_sched *q = qdisc_priv(sch);
-	struct htb_class *cl = htb_classify(skb, sch, &ret);
-	struct sk_buff *tskb;
-
-	if (cl == HTB_DIRECT) {
-		/* enqueue to helper queue */
-		if (q->direct_queue.qlen < q->direct_qlen) {
-			__skb_queue_head(&q->direct_queue, skb);
-		} else {
-			__skb_queue_head(&q->direct_queue, skb);
-			tskb = __skb_dequeue_tail(&q->direct_queue);
-			kfree_skb(tskb);
-			sch->qstats.drops++;
-			return NET_XMIT_CN;
-		}
-#ifdef CONFIG_NET_CLS_ACT
-	} else if (!cl) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-#endif
-	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
-		   NET_XMIT_SUCCESS) {
-		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
-			cl->qstats.drops++;
-		}
-		return ret;
-	} else
-		htb_activate(q, cl);
-
-	sch->q.qlen++;
-	sch->qstats.requeues++;
-	return NET_XMIT_SUCCESS;
-}
-
 /**
  * htb_charge_class - charges amount "bytes" to leaf and ancestors
  *
@@ -1566,7 +1525,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
 	.enqueue	=	htb_enqueue,
 	.dequeue	=	htb_dequeue,
 	.peek		=	qdisc_peek_dequeued,
-	.requeue	=	htb_requeue,
 	.drop		=	htb_drop,
 	.init		=	htb_init,
 	.reset		=	htb_reset,
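
For context on the .peek = qdisc_peek_dequeued line kept in the last hunk: HTB implements no peek of its own but registers the generic helper from include/net/sch_generic.h. The sketch below paraphrases that helper's 2.6.28-era shape from memory, so treat it as an approximation rather than verbatim kernel source: it dequeues at most one skb, parks it in sch->gso_skb, and keeps counting it as queued until a later dequeue consumes it.

/* Approximate sketch of the generic peek helper; kernel context assumed,
 * paraphrased from memory, not guaranteed verbatim. */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* ->gso_skb can double as the peek cache because peek is not
	 * called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			sch->q.qlen++;	/* still part of the queue */
	}
	return sch->gso_skb;
}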