| author | Jarek Poplawski <jarkao2@gmail.com> | 2008-02-10 02:44:00 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-02-10 02:44:00 -0500 |
| commit | 21347456abfbf5bc7fcace7327476736bbb28abe (patch) | |
| tree | 79863d638a3fad6bf7efc9e6f280b6ab22a0c317 /net | |
| parent | 238fc7eac8e74681da7a6cb6748afb5422afc1be (diff) | |
[NET_SCHED] sch_htb: htb_requeue fix
htb_requeue() enqueues skbs for which htb_classify() returns NULL.
This is wrong because such skbs could have been handled by NET_CLS_ACT
code, and the classification decision on requeue could differ from the
one made earlier in htb_enqueue(). So htb_requeue() is changed to work
and look more like htb_enqueue().
Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
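
For illustration only, here is a minimal userspace sketch of the classify-then-branch pattern that htb_enqueue() follows and that this patch brings to htb_requeue(). Everything in it (the `classify()` and `requeue()` functions, the `pkt`/`cls` types, and the `VERDICT_*` constants) is a made-up stand-in for the kernel's htb_classify(), HTB_DIRECT and NET_XMIT_* values, not kernel code. It only models the point of the fix: when classification returns NULL (the NET_CLS_ACT action case), the packet must be discarded and the classifier's verdict returned, instead of being pushed into the direct queue as the old requeue path did.

```c
/* Minimal userspace sketch (NOT kernel code) of the classify-and-branch
 * pattern shared by htb_enqueue() and, after this patch, htb_requeue().
 * All names below are illustrative stand-ins for the kernel symbols. */
#include <stdio.h>

struct pkt { int id; };
struct cls { const char *name; };

#define CLS_DIRECT      ((struct cls *)-1)  /* stands in for HTB_DIRECT */
#define VERDICT_SUCCESS 0                   /* stands in for NET_XMIT_SUCCESS */
#define VERDICT_BYPASS  2                   /* stands in for NET_XMIT_BYPASS */

static struct cls leaf_class = { "leaf" };
static int drops;

/* Models htb_classify(): may return a class, the "direct" marker, or
 * NULL when a classifier action (the NET_CLS_ACT case) consumed the
 * packet; for NULL, *verdict tells the caller what to return. */
static struct cls *classify(const struct pkt *p, int *verdict)
{
        switch (p->id % 3) {
        case 0:
                *verdict = VERDICT_BYPASS;  /* pretend an action dropped it */
                return NULL;
        case 1:
                return CLS_DIRECT;
        default:
                return &leaf_class;
        }
}

/* The requeue path after the fix: a NULL result is no longer treated
 * like HTB_DIRECT; the packet is discarded (kfree_skb() in the kernel)
 * and the classifier's verdict is propagated, mirroring htb_enqueue(). */
static int requeue(const struct pkt *p)
{
        int verdict = VERDICT_SUCCESS;
        struct cls *cl = classify(p, &verdict);

        if (cl == CLS_DIRECT) {
                printf("pkt %d -> direct queue\n", p->id);
        } else if (!cl) {
                if (verdict == VERDICT_BYPASS)
                        drops++;
                printf("pkt %d -> dropped by action, verdict %d\n",
                       p->id, verdict);
                return verdict;
        } else {
                printf("pkt %d -> class %s\n", p->id, cl->name);
        }
        return VERDICT_SUCCESS;
}

int main(void)
{
        struct pkt pkts[6];

        for (int i = 0; i < 6; i++) {
                pkts[i].id = i;
                requeue(&pkts[i]);
        }
        printf("drops: %d\n", drops);
        return 0;
}
```

Built with e.g. `gcc -std=c99 sketch.c`, packets 0 and 3 take the NULL branch and are counted as drops, which is the behaviour the patched htb_requeue() now has under CONFIG_NET_CLS_ACT; the unpatched code would instead have queued them to the direct queue.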
Diffstat (limited to 'net')
| -rw-r--r-- | net/sched/sch_htb.c | 13 |
1 file changed, 10 insertions, 3 deletions
```diff
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index e1a579efc215..795c761ad99f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -609,14 +609,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 /* TODO: requeuing packet charges it to policers again !! */
 static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 {
+        int ret;
         struct htb_sched *q = qdisc_priv(sch);
-        int ret = NET_XMIT_SUCCESS;
         struct htb_class *cl = htb_classify(skb, sch, &ret);
         struct sk_buff *tskb;
 
-        if (cl == HTB_DIRECT || !cl) {
+        if (cl == HTB_DIRECT) {
                 /* enqueue to helper queue */
-                if (q->direct_queue.qlen < q->direct_qlen && cl) {
+                if (q->direct_queue.qlen < q->direct_qlen) {
                         __skb_queue_head(&q->direct_queue, skb);
                 } else {
                         __skb_queue_head(&q->direct_queue, skb);
@@ -625,6 +625,13 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                         sch->qstats.drops++;
                         return NET_XMIT_CN;
                 }
+#ifdef CONFIG_NET_CLS_ACT
+        } else if (!cl) {
+                if (ret == NET_XMIT_BYPASS)
+                        sch->qstats.drops++;
+                kfree_skb(skb);
+                return ret;
+#endif
         } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
                    NET_XMIT_SUCCESS) {
                 sch->qstats.drops++;
```