Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_atm.c	14
-rw-r--r--	net/sched/sch_cbq.c	27
-rw-r--r--	net/sched/sch_dsmark.c	10
-rw-r--r--	net/sched/sch_generic.c	12
-rw-r--r--	net/sched/sch_hfsc.c	12
-rw-r--r--	net/sched/sch_htb.c	24
-rw-r--r--	net/sched/sch_netem.c	5
-rw-r--r--	net/sched/sch_prio.c	14
-rw-r--r--	net/sched/sch_red.c	2
-rw-r--r--	net/sched/sch_sfq.c	8
-rw-r--r--	net/sched/sch_tbf.c	3
11 files changed, 76 insertions(+), 55 deletions(-)
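The hunks below all apply one pattern: classifier "bypass" and "stolen" outcomes are no longer reported as a bare NET_XMIT_BYPASS return code but as flag bits carried above NET_XMIT_MASK, and a qdisc only bumps its drop counters when net_xmit_drop_count() says the packet was really lost rather than consumed by a tc action. As an orientation sketch only (not part of this diff), the helper and flags the hunks rely on are assumed to look roughly like the definitions in include/net/sch_generic.h; the exact values should be checked against the tree this patch applies to:

/* Orientation sketch only -- assumed to mirror include/net/sch_generic.h. */
#define __NET_XMIT_STOLEN	0x00010000	/* skb consumed by a tc action */
#define __NET_XMIT_BYPASS	0x00020000	/* packet rejected before enqueue */

#ifdef CONFIG_NET_CLS_ACT
/* Count a drop only when the packet was not stolen by an action. */
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif

The expectation is that these high bits never leave the qdisc layer; callers above it mask the return value with NET_XMIT_MASK, so externally visible return codes stay the same.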
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5b..43d37256c15e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	case TC_ACT_QUEUED:
 	case TC_ACT_STOLEN:
 		kfree_skb(skb);
-		return NET_XMIT_SUCCESS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 	case TC_ACT_SHOT:
 		kfree_skb(skb);
 		goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a683..4e261ce62f48 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e2..edd1298f85f6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9c9cd4d94890..7cf83b37459d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
@@ -507,7 +507,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -543,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -659,7 +659,7 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
 
 		dev_queue = netdev_get_tx_queue(dev, i);
 		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
@@ -735,7 +735,7 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+		spinlock_t *root_lock = qdisc_lock(qdisc);
 
 		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba8..c2b8d9cce3d2 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f2..be35422711a3 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
 		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a59085700678..fb0294d0b55e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb095..eac197610edf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bbd..5da05839e225 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da666568..6e041d10dbdb 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f7632..7d3b7ff3bf07 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 