aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/sch_fq_codel.c
diff options
context:
space:
mode:
authorWANG Cong <xiyou.wangcong@gmail.com>2016-02-25 17:55:01 -0500
committerDavid S. Miller <davem@davemloft.net>2016-02-29 17:02:33 -0500
commit2ccccf5fb43ff62b2b96cc58d95fc0b3596516e4 (patch)
treed73ec90b0c0cfd1191ef35a4aa0eaea78af1f100 /net/sched/sch_fq_codel.c
parent86a7996cc8a078793670d82ed97d5a99bb4e8496 (diff)
net_sched: update hierarchical backlog too
When the bottom qdisc decides to, for example, drop some packet, it calls qdisc_tree_decrease_qlen() to update the queue length for all its ancestors; we need to update the backlog too to keep the stats on the root qdisc accurate. Cc: Jamal Hadi Salim <jhs@mojatatu.com> | Acked-by: Jamal Hadi Salim <jhs@mojatatu.com> | Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com> | Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_fq_codel.c')
-rw-r--r--net/sched/sch_fq_codel.c17
1 file changed, 12 insertions, 5 deletions
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4c834e93dafb..d3fc8f9dd3d4 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -175,7 +175,7 @@ static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) 175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
176{ 176{
177 struct fq_codel_sched_data *q = qdisc_priv(sch); 177 struct fq_codel_sched_data *q = qdisc_priv(sch);
178 unsigned int idx; 178 unsigned int idx, prev_backlog;
179 struct fq_codel_flow *flow; 179 struct fq_codel_flow *flow;
180 int uninitialized_var(ret); 180 int uninitialized_var(ret);
181 181
@@ -203,6 +203,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
203 if (++sch->q.qlen <= sch->limit) 203 if (++sch->q.qlen <= sch->limit)
204 return NET_XMIT_SUCCESS; 204 return NET_XMIT_SUCCESS;
205 205
206 prev_backlog = sch->qstats.backlog;
206 q->drop_overlimit++; 207 q->drop_overlimit++;
207 /* Return Congestion Notification only if we dropped a packet 208 /* Return Congestion Notification only if we dropped a packet
208 * from this flow. 209 * from this flow.
@@ -211,7 +212,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
211 return NET_XMIT_CN; 212 return NET_XMIT_CN;
212 213
213 /* As we dropped a packet, better let upper stack know this */ 214 /* As we dropped a packet, better let upper stack know this */
214 qdisc_tree_decrease_qlen(sch, 1); 215 qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);
215 return NET_XMIT_SUCCESS; 216 return NET_XMIT_SUCCESS;
216} 217}
217 218
@@ -241,6 +242,7 @@ static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
241 struct fq_codel_flow *flow; 242 struct fq_codel_flow *flow;
242 struct list_head *head; 243 struct list_head *head;
243 u32 prev_drop_count, prev_ecn_mark; 244 u32 prev_drop_count, prev_ecn_mark;
245 unsigned int prev_backlog;
244 246
245begin: 247begin:
246 head = &q->new_flows; 248 head = &q->new_flows;
@@ -259,6 +261,7 @@ begin:
259 261
260 prev_drop_count = q->cstats.drop_count; 262 prev_drop_count = q->cstats.drop_count;
261 prev_ecn_mark = q->cstats.ecn_mark; 263 prev_ecn_mark = q->cstats.ecn_mark;
264 prev_backlog = sch->qstats.backlog;
262 265
263 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats, 266 skb = codel_dequeue(sch, &q->cparams, &flow->cvars, &q->cstats,
264 dequeue); 267 dequeue);
@@ -276,12 +279,14 @@ begin:
276 } 279 }
277 qdisc_bstats_update(sch, skb); 280 qdisc_bstats_update(sch, skb);
278 flow->deficit -= qdisc_pkt_len(skb); 281 flow->deficit -= qdisc_pkt_len(skb);
279 /* We cant call qdisc_tree_decrease_qlen() if our qlen is 0, 282 /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
280 * or HTB crashes. Defer it for next round. 283 * or HTB crashes. Defer it for next round.
281 */ 284 */
282 if (q->cstats.drop_count && sch->q.qlen) { 285 if (q->cstats.drop_count && sch->q.qlen) {
283 qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); 286 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
287 q->cstats.drop_len);
284 q->cstats.drop_count = 0; 288 q->cstats.drop_count = 0;
289 q->cstats.drop_len = 0;
285 } 290 }
286 return skb; 291 return skb;
287} 292}
@@ -372,11 +377,13 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt)
372 while (sch->q.qlen > sch->limit) { 377 while (sch->q.qlen > sch->limit) {
373 struct sk_buff *skb = fq_codel_dequeue(sch); 378 struct sk_buff *skb = fq_codel_dequeue(sch);
374 379
380 q->cstats.drop_len += qdisc_pkt_len(skb);
375 kfree_skb(skb); 381 kfree_skb(skb);
376 q->cstats.drop_count++; 382 q->cstats.drop_count++;
377 } 383 }
378 qdisc_tree_decrease_qlen(sch, q->cstats.drop_count); 384 qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
379 q->cstats.drop_count = 0; 385 q->cstats.drop_count = 0;
386 q->cstats.drop_len = 0;
380 387
381 sch_tree_unlock(sch); 388 sch_tree_unlock(sch);
382 return 0; 389 return 0;