author:    Ranjit Manomohan <ranjitm@google.com>  2007-07-11 01:43:16 -0400
committer: David S. Miller <davem@davemloft.net>  2007-07-11 01:43:16 -0400
commit:    c9726d6890f7f3a892c879e067c3ed839f61e745
tree:      aaae7b138f7c409d62a1223742df38d79130acb4 /net/sched/sch_htb.c
parent:    c6c6e3e05c0b4349824efcdd36650e7be9d5c7c3
[NET_SCHED]: Make HTB scheduler work with TSO.
Currently the HTB scheduler does not correctly account for TSO packets, which causes large inaccuracies in its bandwidth control when TSO is in use. This patch allows the HTB scheduler to work correctly with TSO-enabled devices.

Signed-off-by: Ranjit Manomohan <ranjitm@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
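The inaccuracy comes from HTB counting a TSO/GSO super-packet as a single packet, even though the NIC later segments it into many MSS-sized frames before they hit the wire. A minimal userspace sketch of that mismatch (the payload size, MSS, and function name below are illustrative, not part of the patch):

#include <stdio.h>

/* Illustrative model only: how many wire packets one TSO/GSO
 * super-packet represents. In the kernel this count is kept in
 * skb_shinfo(skb)->gso_segs; here it is derived from the payload
 * length and the MSS (gso_size) purely for demonstration. */
static unsigned int wire_packets(unsigned int payload_len, unsigned int mss)
{
	if (mss == 0 || payload_len <= mss)
		return 1;                       /* not segmented */
	return (payload_len + mss - 1) / mss;   /* ceiling division */
}

int main(void)
{
	/* A 64 KB TSO burst at an MSS of 1448 bytes leaves the NIC as
	 * 46 packets, but pre-patch HTB accounted for it as one. */
	printf("wire packets: %u\n", wire_packets(65536, 1448));
	return 0;
}

Byte counters were never the problem, since skb->len already reflects the whole GSO payload; only the packet counters and the rate-table lookup needed to learn about oversized skbs, which is exactly what the hunks below change.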
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--  net/sched/sch_htb.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c031486b675f..b417a95df322 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -129,15 +129,12 @@ struct htb_class {
 	/* of un.leaf originals should be done. */
 };
 
-/* TODO: maybe compute rate when size is too large .. or drop ? */
 static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
 			   int size)
 {
 	int slot = size >> rate->rate.cell_log;
-	if (slot > 255) {
-		cl->xstats.giants++;
-		slot = 255;
-	}
+	if (slot > 255)
+		return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
 	return rate->data[slot];
 }
 
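This first hunk changes how the rate table is consulted for oversized packets. The table built by tc has 256 slots indexed by packet size in cells; previously any packet larger than 255 cells was clamped to slot 255 and counted as a giant (cl->xstats.giants), so a 64 KB TSO skb was charged as if it were roughly MTU-sized. The new code instead approximates the transmit time of a large packet from full-table passes plus the remainder slot. A minimal stand-alone model of the new lookup, assuming a 256-entry table where data[i] is the time to send i cells (names are illustrative, not the kernel function):

/* Minimal model of the new L2T() behaviour. The real function reads
 * cell_log and data[] from struct qdisc_rate_table. */
static long rate_lookup(const long data[256], int cell_log, int size)
{
	int slot = size >> cell_log;    /* packet size in cells */

	if (slot > 255)
		/* Oversized (e.g. TSO) packet: charge (slot >> 8) chunks at
		 * the largest table entry plus the remaining slot, instead
		 * of clamping to slot 255 as the old code did. */
		return data[255] * (slot >> 8) + data[slot & 0xFF];
	return data[slot];
}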
@@ -606,13 +603,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->qstats.drops++;
 		return NET_XMIT_DROP;
 	} else {
-		cl->bstats.packets++;
+		cl->bstats.packets +=
+			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
 		cl->bstats.bytes += skb->len;
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets++;
+	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
 	sch->bstats.bytes += skb->len;
 	return NET_XMIT_SUCCESS;
 }
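The enqueue path now counts a GSO skb as gso_segs packets in both the class and qdisc bstats, while the byte counters keep using skb->len, which already covers the full super-packet. The open-coded ternary could equally live in a small helper; a sketch, with a hypothetical name not present in the patch:

#include <linux/skbuff.h>

/* Hypothetical helper (the patch open-codes this expression):
 * a GSO skb contributes gso_segs packets to the statistics,
 * an ordinary skb contributes exactly one. */
static inline unsigned int htb_skb_packets(struct sk_buff *skb)
{
	return skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

With this accounting, the per-class byte and packet counters reported by 'tc -s class show dev <dev>' line up with what actually leaves the interface when TSO is enabled.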
@@ -661,8 +659,9 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
  * In such case we remove class from event queue first.
  */
 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
-			     int level, int bytes)
+			     int level, struct sk_buff *skb)
 {
+	int bytes = skb->len;
 	long toks, diff;
 	enum htb_cmode old_mode;
 
@@ -698,7 +697,8 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 		/* update byte stats except for leaves which are already updated */
 		if (cl->level) {
 			cl->bstats.bytes += bytes;
-			cl->bstats.packets++;
+			cl->bstats.packets += skb_is_gso(skb)?
+					skb_shinfo(skb)->gso_segs:1;
 		}
 		cl = cl->parent;
 	}
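htb_charge_class() now receives the skb rather than a byte count because inner (non-leaf) classes update their own bstats while tokens are charged up the tree, and that packet increment now depends on gso_segs just as in the leaf case. A toy model of the walk (the structure and names below are simplified stand-ins, not the real struct htb_class):

struct toy_class {
	int level;                      /* 0 for leaves, >0 for inner classes */
	struct toy_class *parent;
	unsigned long bytes;
	unsigned long packets;
};

/* Walk from the charged class up to the root. Leaves were already
 * counted in the enqueue path, so only inner classes are updated here;
 * the token-bucket charging itself is elided. */
static void toy_charge(struct toy_class *cl, unsigned int len,
		       unsigned int segs)
{
	for (; cl; cl = cl->parent) {
		if (cl->level) {
			cl->bytes += len;
			cl->packets += segs;
		}
	}
}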
@@ -882,7 +882,7 @@ next:
 		   gives us slightly better performance */
 		if (!cl->un.leaf.q->q.qlen)
 			htb_deactivate(q, cl);
-		htb_charge_class(q, cl, level, skb->len);
+		htb_charge_class(q, cl, level, skb);
 	}
 	return skb;
 }