path: root/include/net/sch_generic.h
author	Eric Dumazet <eric.dumazet@gmail.com>	2011-01-21 02:31:33 -0500
committer	David S. Miller <davem@davemloft.net>	2011-01-21 02:31:33 -0500
commit	9190b3b3208d052d98cb601fcc192f3f71a5658b (patch)
tree	b642a00320a1b35e33741fcd162072724f228fbf	/include/net/sch_generic.h
parent	b30532515f0a62bfe17207ab00883dd262497006 (diff)
net_sched: accurate bytes/packets stats/rates
In commit 44b8288308ac9d (net_sched: pfifo_head_drop problem), we fixed a problem with pfifo_head drops that incorrectly decreased sch->bstats.bytes and sch->bstats.packets.

Several qdiscs (CHOKe, SFQ, pfifo_head, ...) are able to drop a previously enqueued packet, and bstats cannot be changed, so bstats/rates are not accurate (over-estimated).

This patch changes the qdisc_bstats updates to be done at dequeue() time instead of enqueue() time. bstats counters no longer account for dropped frames, and rates are more correct, since enqueue() bursts don't have an effect on dequeue() rate.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
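For context, qdisc_bstats_update() bumps the qdisc's byte and packet counters for one skb, so moving the call from enqueue to dequeue means a packet that is enqueued and later dropped by the qdisc never reaches the counters. The following standalone user-space model (not kernel code; the queue, packet lengths and helper name are purely illustrative) shows the effect for a head-dropping queue:

/* Toy model: stats are bumped only when a packet actually leaves the queue,
 * so a packet dropped while still queued (pfifo_head_drop style) is never
 * counted, matching the behaviour this patch gives the real helpers. */
#include <stdio.h>

struct bstats { unsigned long bytes, packets; };

static void bstats_update(struct bstats *b, unsigned int pkt_len)
{
	b->bytes += pkt_len;
	b->packets++;
}

int main(void)
{
	struct bstats b = { 0, 0 };
	unsigned int queue[3] = { 1500, 1500, 60 };	/* enqueued packet lengths */
	unsigned int head = 0, tail = 3;

	head++;			/* head-drop discards the oldest queued packet */

	while (head < tail)	/* dequeue the rest; only these hit the stats */
		bstats_update(&b, queue[head++]);

	printf("bytes=%lu packets=%lu\n", b.bytes, b.packets);	/* 1560 / 2 */
	return 0;
}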
Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r--	include/net/sch_generic.h	8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e9eee99d8b1f..160a407c1963 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -445,7 +445,6 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
@@ -460,8 +459,10 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 {
 	struct sk_buff *skb = __skb_dequeue(list);
 
-	if (likely(skb != NULL))
+	if (likely(skb != NULL)) {
 		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
+	}
 
 	return skb;
 }
@@ -474,10 +475,11 @@ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 						   struct sk_buff_head *list)
 {
-	struct sk_buff *skb = __qdisc_dequeue_head(sch, list);
+	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
+		sch->qstats.backlog -= len;
 		kfree_skb(skb);
 		return len;
 	}
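Worth noting in the last hunk: __qdisc_queue_drop_head() switches from __qdisc_dequeue_head() to a bare __skb_dequeue() and adjusts qstats.backlog itself, because __qdisc_dequeue_head() now updates bstats and a head-dropped packet must not be counted as transmitted. A simplified sketch of how the helper reads after this patch (the trailing empty-queue return is assumed; it falls outside the quoted hunk):

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);	/* not __qdisc_dequeue_head(): */
							/* drops must stay out of bstats */
	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		sch->qstats.backlog -= len;	/* backlog accounted for here instead */
		kfree_skb(skb);
		return len;
	}

	return 0;	/* assumed empty-queue path, outside the quoted hunk */
}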