-rw-r--r--  include/net/sch_generic.h  39
-rw-r--r--  net/sched/sch_api.c         2
-rw-r--r--  net/sched/sch_atm.c         2
-rw-r--r--  net/sched/sch_cbq.c        10
-rw-r--r--  net/sched/sch_choke.c      14
-rw-r--r--  net/sched/sch_codel.c       2
-rw-r--r--  net/sched/sch_drr.c         4
-rw-r--r--  net/sched/sch_dsmark.c      2
-rw-r--r--  net/sched/sch_fifo.c        2
-rw-r--r--  net/sched/sch_fq.c          4
-rw-r--r--  net/sched/sch_fq_codel.c    8
-rw-r--r--  net/sched/sch_gred.c        4
-rw-r--r--  net/sched/sch_hfsc.c        8
-rw-r--r--  net/sched/sch_hhf.c         8
-rw-r--r--  net/sched/sch_htb.c         6
-rw-r--r--  net/sched/sch_ingress.c     2
-rw-r--r--  net/sched/sch_multiq.c      4
-rw-r--r--  net/sched/sch_netem.c      15
-rw-r--r--  net/sched/sch_pie.c         2
-rw-r--r--  net/sched/sch_prio.c        4
-rw-r--r--  net/sched/sch_qfq.c         4
-rw-r--r--  net/sched/sch_red.c         8
-rw-r--r--  net/sched/sch_sfb.c        10
-rw-r--r--  net/sched/sch_sfq.c        17
-rw-r--r--  net/sched/sch_tbf.c         8

25 files changed, 108 insertions(+), 81 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 4b9351120fd8..23a0f0fc83d8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -521,11 +521,38 @@ static inline void qdisc_bstats_update(struct Qdisc *sch,
 	bstats_update(&sch->bstats, skb);
 }
 
+static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
+					    const struct sk_buff *skb)
+{
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
+}
+
+static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
+					    const struct sk_buff *skb)
+{
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+}
+
+static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
+{
+	sch->qstats.drops += count;
+}
+
+static inline void qdisc_qstats_drop(struct Qdisc *sch)
+{
+	sch->qstats.drops++;
+}
+
+static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
+{
+	sch->qstats.overlimits++;
+}
+
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 				       struct sk_buff_head *list)
 {
 	__skb_queue_tail(list, skb);
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
@@ -541,7 +568,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
 	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL)) {
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_bstats_update(sch, skb);
 	}
 
@@ -560,7 +587,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 
 	if (likely(skb != NULL)) {
 		unsigned int len = qdisc_pkt_len(skb);
-		sch->qstats.backlog -= len;
+		qdisc_qstats_backlog_dec(sch, skb);
 		kfree_skb(skb);
 		return len;
 	}
@@ -579,7 +606,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
 	struct sk_buff *skb = __skb_dequeue_tail(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 
 	return skb;
 }
@@ -661,14 +688,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
 static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
 {
 	kfree_skb(skb);
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 
 	return NET_XMIT_DROP;
 }
 
 static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
 {
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 
 #ifdef CONFIG_NET_CLS_ACT
 	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
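For context, a minimal sketch of how a leaf qdisc's enqueue path reads once built on these helpers. The sketch is illustrative only and not part of the commit: toy_enqueue is a hypothetical function, and it simply leans on the converted __qdisc_enqueue_tail() and qdisc_drop() shown above.

/* Illustrative sketch only -- not part of this patch. */
static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (unlikely(sch->q.qlen >= sch->limit)) {
		/* was: sch->qstats.overlimits++ */
		qdisc_qstats_overlimit(sch);
		/* qdisc_drop() now counts the drop via qdisc_qstats_drop() */
		return qdisc_drop(skb, sch);
	}

	/* __qdisc_enqueue_tail() now updates the backlog via
	 * qdisc_qstats_backlog_inc()
	 */
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}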
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index a95e3b48fa51..2862bc61a358 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -763,7 +763,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 			cops->put(sch, cl);
 		}
 		sch->q.qlen -= n;
-		sch->qstats.drops += drops;
+		__qdisc_qstats_drop(sch, drops);
 	}
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 01017663e5d8..040212cab988 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -417,7 +417,7 @@ done:
 	if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
 		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 			if (flow)
 				flow->qstats.drops++;
 		}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 22a3a029a911..60432c3d3cd4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	if (cl == NULL) {
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 	}
@@ -395,7 +395,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	}
 
 	if (net_xmit_drop_count(ret)) {
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 		cbq_mark_toplevel(q, cl);
 		cl->qstats.drops++;
 	}
@@ -650,11 +650,11 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 			return 0;
 		}
 		if (net_xmit_drop_count(ret))
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		return 0;
 	}
 
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 	return -1;
 }
 #endif
@@ -995,7 +995,7 @@ cbq_dequeue(struct Qdisc *sch)
 	 */
 
 	if (sch->q.qlen) {
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (q->wd_expires)
 			qdisc_watchdog_schedule(&q->watchdog,
 						now + q->wd_expires);
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 8abc2625c3a1..c009eb9045ce 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -127,7 +127,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
 	if (idx == q->tail)
 		choke_zap_tail_holes(q);
 
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_drop(skb, sch);
 	qdisc_tree_decrease_qlen(sch, 1);
 	--sch->q.qlen;
@@ -302,7 +302,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		if (q->vars.qavg > p->qth_max) {
 			q->vars.qcount = -1;
 
-			sch->qstats.overlimits++;
+			qdisc_qstats_overlimit(sch);
 			if (use_harddrop(q) || !use_ecn(q) ||
 			    !INET_ECN_set_ce(skb)) {
 				q->stats.forced_drop++;
@@ -315,7 +315,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 			q->vars.qcount = 0;
 			q->vars.qR = red_random(p);
 
-			sch->qstats.overlimits++;
+			qdisc_qstats_overlimit(sch);
 			if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
 				q->stats.prob_drop++;
 				goto congestion_drop;
@@ -332,7 +332,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		q->tab[q->tail] = skb;
 		q->tail = (q->tail + 1) & q->tab_mask;
 		++sch->q.qlen;
-		sch->qstats.backlog += qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_inc(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -345,7 +345,7 @@ congestion_drop:
 
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	kfree_skb(skb);
 	return ret;
 }
@@ -365,7 +365,7 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
 	q->tab[q->head] = NULL;
 	choke_zap_head_holes(q);
 	--sch->q.qlen;
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	qdisc_bstats_update(sch, skb);
 
 	return skb;
@@ -460,7 +460,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
 				ntab[tail++] = skb;
 				continue;
 			}
-			sch->qstats.backlog -= qdisc_pkt_len(skb);
+			qdisc_qstats_backlog_dec(sch, skb);
 			--sch->q.qlen;
 			qdisc_drop(skb, sch);
 		}
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 2f9ab17db85a..de28f8e968e8 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -149,7 +149,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = __skb_dequeue(&sch->q);
 
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_drop(skb, sch);
 	}
 	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7a6243c5d270..907b12fd6825 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -360,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	cl = drr_classify(skb, sch, &err);
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return err;
 	}
@@ -369,7 +369,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		}
 		return err;
 	}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 485e456c8139..227114f27f94 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -258,7 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(err))
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		return err;
 	}
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index e15a9eb29087..2e2398cfc694 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	/* queue full, remove one skb to fulfill the limit */
 	__qdisc_queue_drop_head(sch, &sch->q);
-	sch->qstats.drops++;
+	qdisc_qstats_drop(sch);
 	qdisc_enqueue_tail(skb, sch);
 
 	return NET_XMIT_CN;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index e12f997e1b4c..c9b9fcb53206 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -290,7 +290,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
 		flow->head = skb->next;
 		skb->next = NULL;
 		flow->qlen--;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		sch->q.qlen--;
 	}
 	return skb;
@@ -371,7 +371,7 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	f->qlen++;
 	if (skb_is_retransmit(skb))
 		q->stat_tcp_retrans++;
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 	if (fq_flow_is_detached(f)) {
 		fq_flow_add_tail(&q->new_flows, f);
 		if (time_after(jiffies, f->age + q->flow_refill_delay))
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 105cf5557630..9270e1b2f25d 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -164,8 +164,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
 	q->backlogs[idx] -= len;
 	kfree_skb(skb);
 	sch->q.qlen--;
-	sch->qstats.drops++;
-	sch->qstats.backlog -= len;
+	qdisc_qstats_drop(sch);
+	qdisc_qstats_backlog_dec(sch, skb);
 	flow->dropped++;
 	return idx;
 }
@@ -180,7 +180,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	idx = fq_codel_classify(skb, sch, &ret);
 	if (idx == 0) {
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 	}
@@ -190,7 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	flow = &q->flows[idx];
 	flow_queue_add(flow, skb);
 	q->backlogs[idx] += qdisc_pkt_len(skb);
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 
 	if (list_empty(&flow->flowchain)) {
 		list_add_tail(&flow->flowchain, &q->new_flows);
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 12cbc09157fc..a4ca4517cdc8 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -209,7 +209,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 
 	case RED_PROB_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
@@ -219,7 +219,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 
 	case RED_HARD_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
 		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 209b966b2eed..ad278251d811 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1591,7 +1591,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return err;
 	}
@@ -1600,7 +1600,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		}
 		return err;
 	}
@@ -1643,7 +1643,7 @@ hfsc_dequeue(struct Qdisc *sch)
 	 */
 	cl = vttree_get_minvt(&q->root, cur_time);
 	if (cl == NULL) {
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		hfsc_schedule_watchdog(sch);
 		return NULL;
 	}
@@ -1698,7 +1698,7 @@ hfsc_drop(struct Qdisc *sch)
 				list_move_tail(&cl->dlist, &q->droplist);
 			}
 			cl->qstats.drops++;
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 			sch->q.qlen--;
 			return len;
 		}
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d85b6812a7d4..15d3aabfe250 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -376,8 +376,8 @@ static unsigned int hhf_drop(struct Qdisc *sch)
 		struct sk_buff *skb = dequeue_head(bucket);
 
 		sch->q.qlen--;
-		sch->qstats.drops++;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_drop(sch);
+		qdisc_qstats_backlog_dec(sch, skb);
 		kfree_skb(skb);
 	}
 
@@ -395,7 +395,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	bucket = &q->buckets[idx];
 	bucket_add(bucket, skb);
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 
 	if (list_empty(&bucket->bucketchain)) {
 		unsigned int weight;
@@ -457,7 +457,7 @@ begin:
 	if (bucket->head) {
 		skb = dequeue_head(bucket);
 		sch->q.qlen--;
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 	}
 
 	if (!skb) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0256dee69bd6..c40ab7a98c50 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -586,13 +586,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 #endif
 	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret)) {
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 			cl->qstats.drops++;
 		}
 		return ret;
@@ -925,7 +925,7 @@ ok:
 				goto ok;
 		}
 	}
-	sch->qstats.overlimits++;
+	qdisc_qstats_overlimit(sch);
 	if (likely(next_event > q->now)) {
 		if (!test_bit(__QDISC_STATE_DEACTIVATED,
 			      &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index b351125f3849..eb5b8445fef9 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -69,7 +69,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 		break;
 	case TC_ACT_STOLEN:
 	case TC_ACT_QUEUED:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 4adbf7fefc09..7f4e1d8504b0 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -75,7 +75,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (qdisc == NULL) {
 
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 	}
@@ -87,7 +87,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
 	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	return ret;
 }
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 111d70fddaea..b34331967e02 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -429,12 +429,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	/* Drop packet? */
 	if (loss_event(q)) {
 		if (q->ecn && INET_ECN_set_ce(skb))
-			sch->qstats.drops++; /* mark packet */
+			qdisc_qstats_drop(sch); /* mark packet */
 		else
 			--count;
 	}
 	if (count == 0) {
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
@@ -478,7 +478,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
 		return qdisc_reshape_fail(skb, sch);
 
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 
 	cb = netem_skb_cb(skb);
 	if (q->gap == 0 ||		/* not doing reordering */
@@ -549,15 +549,14 @@ static unsigned int netem_drop(struct Qdisc *sch)
 			sch->q.qlen--;
 			skb->next = NULL;
 			skb->prev = NULL;
-			len = qdisc_pkt_len(skb);
-			sch->qstats.backlog -= len;
+			qdisc_qstats_backlog_dec(sch, skb);
 			kfree_skb(skb);
 		}
 	}
 	if (!len && q->qdisc && q->qdisc->ops->drop)
 		len = q->qdisc->ops->drop(q->qdisc);
 	if (len)
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 
 	return len;
 }
@@ -575,7 +574,7 @@ tfifo_dequeue:
 	skb = __skb_dequeue(&sch->q);
 	if (skb) {
 deliver:
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_unthrottled(sch);
 		qdisc_bstats_update(sch, skb);
 		return skb;
@@ -610,7 +609,7 @@ deliver:
 
 			if (unlikely(err != NET_XMIT_SUCCESS)) {
 				if (net_xmit_drop_count(err)) {
-					sch->qstats.drops++;
+					qdisc_qstats_drop(sch);
 					qdisc_tree_decrease_qlen(sch, 1);
 				}
 			}
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index fefeeb73f15f..33d7a98a7a97 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -232,7 +232,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = __skb_dequeue(&sch->q);
 
-		sch->qstats.backlog -= qdisc_pkt_len(skb);
+		qdisc_qstats_backlog_dec(sch, skb);
 		qdisc_drop(skb, sch);
 	}
 	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 68a8f25e30c3..b411e78a02fc 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -77,7 +77,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (qdisc == NULL) {
 
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 	}
@@ -89,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return NET_XMIT_SUCCESS;
 	}
 	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	return ret;
 }
 
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index d59f8574540a..3fb26555c79b 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1229,7 +1229,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	cl = qfq_classify(skb, sch, &err);
 	if (cl == NULL) {
 		if (err & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return err;
 	}
@@ -1249,7 +1249,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
 		if (net_xmit_drop_count(err)) {
 			cl->qstats.drops++;
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		}
 		return err;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 633e32defdcc..6c0534cc7758 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -74,7 +74,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 
 	case RED_PROB_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
 			q->stats.prob_drop++;
 			goto congestion_drop;
@@ -84,7 +84,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 
 	case RED_HARD_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (red_use_harddrop(q) || !red_use_ecn(q) ||
 		    !INET_ECN_set_ce(skb)) {
 			q->stats.forced_drop++;
@@ -100,7 +100,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	}
 	return ret;
 
@@ -142,7 +142,7 @@ static unsigned int red_drop(struct Qdisc *sch)
 
 	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
 		q->stats.other++;
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 		sch->q.qlen--;
 		return len;
 	}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 1562fb2b3f46..5819dd82630d 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -290,7 +290,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	struct flow_keys keys;
 
 	if (unlikely(sch->q.qlen >= q->limit)) {
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		q->stats.queuedrop++;
 		goto drop;
 	}
@@ -348,7 +348,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sfb_skb_cb(skb)->hashes[slot] = 0;
 
 		if (unlikely(minqlen >= q->max)) {
-			sch->qstats.overlimits++;
+			qdisc_qstats_overlimit(sch);
 			q->stats.bucketdrop++;
 			goto drop;
 		}
@@ -376,7 +376,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 	if (sfb_rate_limit(skb, q)) {
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		q->stats.penaltydrop++;
 		goto drop;
 	}
@@ -411,7 +411,7 @@ enqueue:
 		increment_qlen(skb, q);
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.childdrop++;
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	}
 	return ret;
 
@@ -420,7 +420,7 @@ drop:
 	return NET_XMIT_CN;
 other_drop:
 	if (ret & __NET_XMIT_BYPASS)
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	kfree_skb(skb);
 	return ret;
 }
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 80c36bd54abc..158dfa641d18 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -331,8 +331,8 @@ drop:
 		sfq_dec(q, x);
 		kfree_skb(skb);
 		sch->q.qlen--;
-		sch->qstats.drops++;
-		sch->qstats.backlog -= len;
+		qdisc_qstats_drop(sch);
+		qdisc_qstats_backlog_dec(sch, skb);
 		return len;
 	}
 
@@ -379,7 +379,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
 		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		kfree_skb(skb);
 		return ret;
 	}
@@ -409,7 +409,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		break;
 
 	case RED_PROB_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (sfq_prob_mark(q)) {
 			/* We know we have at least one packet in queue */
 			if (sfq_headdrop(q) &&
@@ -426,7 +426,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		goto congestion_drop;
 
 	case RED_HARD_MARK:
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 		if (sfq_hard_mark(q)) {
 			/* We know we have at least one packet in queue */
 			if (sfq_headdrop(q) &&
@@ -461,7 +461,7 @@ congestion_drop:
 	}
 
 enqueue:
-	sch->qstats.backlog += qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_inc(sch, skb);
 	slot->backlog += qdisc_pkt_len(skb);
 	slot_queue_add(slot, skb);
 	sfq_inc(q, x);
@@ -520,7 +520,7 @@ next_slot:
 	sfq_dec(q, a);
 	qdisc_bstats_update(sch, skb);
 	sch->q.qlen--;
-	sch->qstats.backlog -= qdisc_pkt_len(skb);
+	qdisc_qstats_backlog_dec(sch, skb);
 	slot->backlog -= qdisc_pkt_len(skb);
 	/* Is the slot empty? */
 	if (slot->qlen == 0) {
@@ -586,7 +586,8 @@ static void sfq_rehash(struct Qdisc *sch)
 		if (x == SFQ_EMPTY_SLOT) {
 			x = q->dep[0].next; /* get a free slot */
 			if (x >= SFQ_MAX_FLOWS) {
-drop:				sch->qstats.backlog -= qdisc_pkt_len(skb);
+drop:
+				qdisc_qstats_backlog_dec(sch, skb);
 				kfree_skb(skb);
 				dropped++;
 				continue;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0c39b754083b..77edffe329c4 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -175,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
 		ret = qdisc_enqueue(segs, q->qdisc);
 		if (ret != NET_XMIT_SUCCESS) {
 			if (net_xmit_drop_count(ret))
-				sch->qstats.drops++;
+				qdisc_qstats_drop(sch);
 		} else {
 			nb++;
 		}
@@ -201,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != NET_XMIT_SUCCESS) {
 		if (net_xmit_drop_count(ret))
-			sch->qstats.drops++;
+			qdisc_qstats_drop(sch);
 		return ret;
 	}
 
@@ -216,7 +216,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
 
 	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
 		sch->q.qlen--;
-		sch->qstats.drops++;
+		qdisc_qstats_drop(sch);
 	}
 	return len;
 }
@@ -281,7 +281,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
 		   (cf. CSZ, HPFQ, HFSC)
 		 */
 
-		sch->qstats.overlimits++;
+		qdisc_qstats_overlimit(sch);
 	}
 	return NULL;
 }