author		Eric Dumazet <eric.dumazet@gmail.com>	2011-01-21 02:31:33 -0500
committer	David S. Miller <davem@davemloft.net>	2011-01-21 02:31:33 -0500
commit		9190b3b3208d052d98cb601fcc192f3f71a5658b (patch)
tree		b642a00320a1b35e33741fcd162072724f228fbf /net/sched/sch_htb.c
parent		b30532515f0a62bfe17207ab00883dd262497006 (diff)
net_sched: accurate bytes/packets stats/rates
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed
a problem with pfifo_head drops that incorrectly decreased
sch->bstats.bytes and sch->bstats.packets.
Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a
previously enqueued packet, and bstats cannot be adjusted to account for
the drop, so bstats/rates are not accurate (overestimated).
This patch changes the qdisc_bstats updates to be done at dequeue() time
instead of enqueue() time. bstats counters no longer account for dropped
frames, and rates are more accurate, since enqueue() bursts don't have an
effect on the dequeue() rate.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
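
For context, qdisc_bstats_update() charges one frame and its length to the
qdisc's sch->bstats counters; after this patch it runs at dequeue() time, so
only frames that actually leave the qdisc are counted. The sketch below is a
paraphrase of that accounting under stated assumptions: the _sketch helper
name and the exact field updates are illustrative only (the real helper in
include/net/sch_generic.h may also account for GSO segments), not the
kernel's verbatim definition.

#include <net/sch_generic.h>	/* struct Qdisc, qdisc_pkt_len() */

/* Illustrative paraphrase (not the kernel's verbatim helper): charge one
 * dequeued frame and its length to the qdisc's byte/packet counters.
 * Called from dequeue() after this patch, so frames dropped while still
 * queued never inflate bstats or the rate estimator sampling them. */
static inline void qdisc_bstats_update_sketch(struct Qdisc *sch,
					      const struct sk_buff *skb)
{
	sch->bstats.bytes   += qdisc_pkt_len(skb);
	sch->bstats.packets += 1;
}

With the counters bumped only in dequeue(), the rate estimator fed by
sch->bstats reports the rate at which HTB actually releases packets, not the
rate at which they were offered to enqueue().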
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--	net/sched/sch_htb.c	12
1 file changed, 5 insertions, 7 deletions
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 984c1b0c6836..fc12fe6f5597 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -574,7 +574,6 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	sch->q.qlen++;
-	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
@@ -842,7 +841,7 @@ next:
 
 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 	struct htb_sched *q = qdisc_priv(sch);
 	int level;
 	psched_time_t next_event;
@@ -851,6 +850,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
 	skb = __skb_dequeue(&q->direct_queue);
 	if (skb != NULL) {
+ok:
+		qdisc_bstats_update(sch, skb);
 		sch->flags &= ~TCQ_F_THROTTLED;
 		sch->q.qlen--;
 		return skb;
@@ -884,11 +885,8 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 			int prio = ffz(m);
 			m |= 1 << prio;
 			skb = htb_dequeue_tree(q, prio, level);
-			if (likely(skb != NULL)) {
-				sch->q.qlen--;
-				sch->flags &= ~TCQ_F_THROTTLED;
-				goto fin;
-			}
+			if (likely(skb != NULL))
+				goto ok;
 		}
 	}
 	sch->qstats.overlimits++;