about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
author WANG Cong <xiyou.wangcong@gmail.com> 2016-06-01 19:15:15 -0400
committer David S. Miller <davem@davemloft.net> 2016-06-03 19:24:04 -0400
commit 357cc9b4a8a7a0cd0e662537b76e6fa4670b6798 (patch)
tree dbf7ce98bdb63a465539a8b2bade8a4da4fc651d /net
parent 4116def2337991b39919f3b448326e21c40e0dbb (diff)
sch_hfsc: always keep backlog updated
hfsc updates backlog lazily, that is only when we dump the stats. This is problematic after we begin to update backlog in qdisc_tree_reduce_backlog().

Reported-by: Stas Nichiporovich <stasn77@gmail.com>
Tested-by: Stas Nichiporovich <stasn77@gmail.com>
Fixes: 2ccccf5fb43f ("net_sched: update hierarchical backlog too")
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r-- net/sched/sch_hfsc.c | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index d783d7cc3348..1ac9f9f03fe3 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1529 q->eligible = RB_ROOT; 1529 q->eligible = RB_ROOT;
1530 INIT_LIST_HEAD(&q->droplist); 1530 INIT_LIST_HEAD(&q->droplist);
1531 qdisc_watchdog_cancel(&q->watchdog); 1531 qdisc_watchdog_cancel(&q->watchdog);
1532 sch->qstats.backlog = 0;
1532 sch->q.qlen = 0; 1533 sch->q.qlen = 0;
1533} 1534}
1534 1535
@@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1559 struct hfsc_sched *q = qdisc_priv(sch); 1560 struct hfsc_sched *q = qdisc_priv(sch);
1560 unsigned char *b = skb_tail_pointer(skb); 1561 unsigned char *b = skb_tail_pointer(skb);
1561 struct tc_hfsc_qopt qopt; 1562 struct tc_hfsc_qopt qopt;
1562 struct hfsc_class *cl;
1563 unsigned int i;
1564
1565 sch->qstats.backlog = 0;
1566 for (i = 0; i < q->clhash.hashsize; i++) {
1567 hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
1568 sch->qstats.backlog += cl->qdisc->qstats.backlog;
1569 }
1570 1563
1571 qopt.defcls = q->defcls; 1564 qopt.defcls = q->defcls;
1572 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) 1565 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
@@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1604 if (cl->qdisc->q.qlen == 1) 1597 if (cl->qdisc->q.qlen == 1)
1605 set_active(cl, qdisc_pkt_len(skb)); 1598 set_active(cl, qdisc_pkt_len(skb));
1606 1599
1600 qdisc_qstats_backlog_inc(sch, skb);
1607 sch->q.qlen++; 1601 sch->q.qlen++;
1608 1602
1609 return NET_XMIT_SUCCESS; 1603 return NET_XMIT_SUCCESS;
@@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch)
1672 1666
1673 qdisc_unthrottled(sch); 1667 qdisc_unthrottled(sch);
1674 qdisc_bstats_update(sch, skb); 1668 qdisc_bstats_update(sch, skb);
1669 qdisc_qstats_backlog_dec(sch, skb);
1675 sch->q.qlen--; 1670 sch->q.qlen--;
1676 1671
1677 return skb; 1672 return skb;
@@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch)
1695 } 1690 }
1696 cl->qstats.drops++; 1691 cl->qstats.drops++;
1697 qdisc_qstats_drop(sch); 1692 qdisc_qstats_drop(sch);
1693 sch->qstats.backlog -= len;
1698 sch->q.qlen--; 1694 sch->q.qlen--;
1699 return len; 1695 return len;
1700 } 1696 }