author     Eric Dumazet <eric.dumazet@gmail.com>    2011-01-09 03:30:54 -0500
committer  David S. Miller <davem@davemloft.net>    2011-01-10 19:07:54 -0500
commit     bfe0d0298f2a67d94d58c39ea904a999aeeb7c3c
tree       5a6f966ebabe8d88d6d6c78c61411e325150927f /include
parent     f1593d2298acca8b6680100d622911827edb8b0a
net_sched: factorize qdisc stats handling
HTB already accounts for segmented (GSO) skbs in its stats updates: a
GSO skb carrying ten segments counts as ten packets, not one.
Generalize this to all schedulers. They should use the
qdisc_bstats_update() helper instead of manipulating bstats.bytes and
bstats.packets directly.

Also add a bstats_update() helper for classes that keep their own
gnet_stats_basic_packed fields.

Note: right now, the TCQ_F_CAN_BYPASS shortcut can be taken only if no
stab (size table) is set up on the qdisc.
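The per-scheduler conversions themselves are not visible here (the
diffstat below is limited to 'include'), but each one amounts to a
substitution of this shape; an illustrative fragment, not a hunk taken
from this page:

-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);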
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
 include/net/sch_generic.h | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0af57ebae762..e9eee99d8b1f 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
+{
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
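For classes that keep their own gnet_stats_basic_packed counters
(HTB's per-class stats are the motivating case), the new
bstats_update() helper is called on those counters directly. A minimal
sketch of the intended usage, where my_class and
my_class_update_stats are made-up names, not code from this commit:

#include <net/sch_generic.h>

/* Hypothetical class embedding the counters bstats_update() expects. */
struct my_class {
	struct gnet_stats_basic_packed bstats;	/* bytes/packets totals */
	/* ... other per-class state ... */
};

static void my_class_update_stats(struct my_class *cl,
				  const struct sk_buff *skb)
{
	/* Adds qdisc_pkt_len(skb) bytes; counts skb_shinfo(skb)->gso_segs
	 * packets when the skb is GSO, otherwise one packet.
	 */
	bstats_update(&cl->bstats, skb);
}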