author     Ingo Molnar <mingo@elte.hu>    2008-08-15 12:15:17 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-08-15 12:15:17 -0400
commit     f3efbe582b5396d134024c03a5fa253f2a85d9a6 (patch)
tree       e4e15b7567b82d24cb1e7327398286a2b88df04c /net/sched
parent     05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660 (diff)
parent     b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff)
Merge branch 'linus' into x86/gart
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c       20
-rw-r--r--  net/sched/act_police.c     2
-rw-r--r--  net/sched/cls_u32.c       10
-rw-r--r--  net/sched/sch_api.c      115
-rw-r--r--  net/sched/sch_atm.c       18
-rw-r--r--  net/sched/sch_cbq.c       31
-rw-r--r--  net/sched/sch_dsmark.c    10
-rw-r--r--  net/sched/sch_generic.c   43
-rw-r--r--  net/sched/sch_hfsc.c      12
-rw-r--r--  net/sched/sch_htb.c       43
-rw-r--r--  net/sched/sch_netem.c      5
-rw-r--r--  net/sched/sch_prio.c      14
-rw-r--r--  net/sched/sch_red.c        2
-rw-r--r--  net/sched/sch_sfq.c        8
-rw-r--r--  net/sched/sch_tbf.c        3
-rw-r--r--  net/sched/sch_teql.c       9
16 files changed, 181 insertions, 164 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 74e662cbb2c5..9974b3f04f05 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -41,7 +41,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
 			return;
 		}
 	}
-	BUG_TRAP(0);
+	WARN_ON(1);
 }
 EXPORT_SYMBOL(tcf_hash_destroy);
 
@@ -205,10 +205,9 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
 {
 	struct tcf_common *p = NULL;
 	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
-		if (bind) {
+		if (bind)
 			p->tcfc_bindcnt++;
-			p->tcfc_refcnt++;
-		}
+		p->tcfc_refcnt++;
 		a->priv = p;
 	}
 	return p;
@@ -752,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	struct nlattr *tb[TCA_ACT_MAX+1];
 	struct nlattr *kind;
 	struct tc_action *a = create_a(0);
-	int err = -EINVAL;
+	int err = -ENOMEM;
 
 	if (a == NULL) {
 		printk("tca_action_flush: couldnt create tc_action\n");
@@ -763,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	if (!skb) {
 		printk("tca_action_flush: failed skb alloc\n");
 		kfree(a);
-		return -ENOBUFS;
+		return err;
 	}
 
 	b = skb_tail_pointer(skb);
@@ -791,6 +790,8 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
 	err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
 	if (err < 0)
 		goto nla_put_failure;
+	if (err == 0)
+		goto noflush_out;
 
 	nla_nest_end(skb, nest);
 
@@ -808,6 +809,7 @@ nla_put_failure:
 nlmsg_failure:
 	module_put(a->ops->owner);
 err_out:
+noflush_out:
 	kfree_skb(skb);
 	kfree(a);
 	return err;
@@ -825,8 +827,10 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
 		return ret;
 
 	if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
-		if (tb[0] != NULL && tb[1] == NULL)
-			return tca_action_flush(tb[0], n, pid);
+		if (tb[1] != NULL)
+			return tca_action_flush(tb[1], n, pid);
+		else
+			return -EINVAL;
 	}
 
 	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
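
The BUG_TRAP() to WARN_ON() conversions above recur through most files in this diff and follow one mechanical rule. A rough sketch of the relationship, assuming the historical BUG_TRAP() definition (reconstructed from memory; the macro itself is not part of this commit):

	/* The old BUG_TRAP(x) logged when its assertion x was FALSE, roughly:
	 *
	 *	#define BUG_TRAP(x) do { \
	 *		if (!(x)) \
	 *			printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
	 *			       #x, __FILE__, __LINE__); \
	 *	} while (0)
	 *
	 * WARN_ON(cond) instead fires when cond is TRUE (and also dumps a
	 * backtrace), so each converted call site negates its condition:
	 *
	 *	BUG_TRAP(0);                ->  WARN_ON(1);
	 *	BUG_TRAP(!ht->refcnt);      ->  WARN_ON(ht->refcnt);
	 *	BUG_TRAP(root_ht != NULL);  ->  WARN_ON(root_ht == NULL);
	 */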
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 32c3f9d9fb7a..38015b493947 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -116,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p)
 			return;
 		}
 	}
-	BUG_TRAP(0);
+	WARN_ON(1);
 }
 
 static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 527db2559dd2..246f9065ce34 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -345,7 +345,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
 			}
 		}
 	}
-	BUG_TRAP(0);
+	WARN_ON(1);
 	return 0;
 }
 
@@ -368,7 +368,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode **hn;
 
-	BUG_TRAP(!ht->refcnt);
+	WARN_ON(ht->refcnt);
 
 	u32_clear_hnode(tp, ht);
 
@@ -380,7 +380,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
 		}
 	}
 
-	BUG_TRAP(0);
+	WARN_ON(1);
 	return -ENOENT;
 }
 
@@ -389,7 +389,7 @@ static void u32_destroy(struct tcf_proto *tp)
 	struct tc_u_common *tp_c = tp->data;
 	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
 
-	BUG_TRAP(root_ht != NULL);
+	WARN_ON(root_ht == NULL);
 
 	if (root_ht && --root_ht->refcnt == 0)
 		u32_destroy_hnode(tp, root_ht);
@@ -407,7 +407,7 @@ static void u32_destroy(struct tcf_proto *tp)
 		while ((ht = tp_c->hlist) != NULL) {
 			tp_c->hlist = ht->next;
 
-			BUG_TRAP(ht->refcnt == 0);
+			WARN_ON(ht->refcnt != 0);
 
 			kfree(ht);
 		}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b0601642e227..c25465e5607a 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -183,24 +183,34 @@ EXPORT_SYMBOL(unregister_qdisc);
    (root qdisc, all its children, children of children etc.)
  */
 
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+{
+	struct Qdisc *q;
+
+	if (!(root->flags & TCQ_F_BUILTIN) &&
+	    root->handle == handle)
+		return root;
+
+	list_for_each_entry(q, &root->list, list) {
+		if (q->handle == handle)
+			return q;
+	}
+	return NULL;
+}
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc;
+		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
 
-		if (!(txq_root->flags & TCQ_F_BUILTIN) &&
-		    txq_root->handle == handle)
-			return txq_root;
-
-		list_for_each_entry(q, &txq_root->list, list) {
-			if (q->handle == handle)
-				return q;
-		}
+		q = qdisc_match_from_root(txq_root, handle);
+		if (q)
+			return q;
 	}
-	return NULL;
+	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -321,7 +331,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 	if (!s || tsize != s->tsize || (!tab && tsize > 0))
 		return ERR_PTR(-EINVAL);
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 
 	list_for_each_entry(stab, &qdisc_stab_list, list) {
 		if (memcmp(&stab->szopts, s, sizeof(*s)))
@@ -329,11 +339,11 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
 			continue;
 		stab->refcnt++;
-		spin_unlock(&qdisc_stab_lock);
+		spin_unlock_bh(&qdisc_stab_lock);
 		return stab;
 	}
 
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 
 	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
 	if (!stab)
@@ -344,9 +354,9 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
 	if (tsize > 0)
 		memcpy(stab->data, tab, tsize * sizeof(u16));
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 	list_add_tail(&stab->list, &qdisc_stab_list);
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 
 	return stab;
 }
@@ -356,14 +366,14 @@ void qdisc_put_stab(struct qdisc_size_table *tab)
 	if (!tab)
 		return;
 
-	spin_lock(&qdisc_stab_lock);
+	spin_lock_bh(&qdisc_stab_lock);
 
 	if (--tab->refcnt == 0) {
 		list_del(&tab->list);
 		kfree(tab);
 	}
 
-	spin_unlock(&qdisc_stab_lock);
+	spin_unlock_bh(&qdisc_stab_lock);
 }
 EXPORT_SYMBOL(qdisc_put_stab);
 
@@ -572,44 +582,21 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 				     struct Qdisc *qdisc)
 {
+	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
 	spinlock_t *root_lock;
-	struct Qdisc *oqdisc;
-	int ingress;
-
-	ingress = 0;
-	if (qdisc && qdisc->flags&TCQ_F_INGRESS)
-		ingress = 1;
-
-	if (ingress) {
-		oqdisc = dev_queue->qdisc;
-	} else {
-		oqdisc = dev_queue->qdisc_sleeping;
-	}
 
 	root_lock = qdisc_root_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	if (ingress) {
-		/* Prune old scheduler */
-		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
-			/* delete */
-			qdisc_reset(oqdisc);
-			dev_queue->qdisc = NULL;
-		} else {  /* new */
-			dev_queue->qdisc = qdisc;
-		}
+	/* Prune old scheduler */
+	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+		qdisc_reset(oqdisc);
 
-	} else {
-		/* Prune old scheduler */
-		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
-			qdisc_reset(oqdisc);
-
-		/* ... and graft new one */
-		if (qdisc == NULL)
-			qdisc = &noop_qdisc;
-		dev_queue->qdisc_sleeping = qdisc;
-		dev_queue->qdisc = &noop_qdisc;
-	}
+	/* ... and graft new one */
+	if (qdisc == NULL)
+		qdisc = &noop_qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	dev_queue->qdisc = &noop_qdisc;
 
 	spin_unlock_bh(root_lock);
 
@@ -678,7 +665,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 
 	ingress = 0;
 	num_q = dev->num_tx_queues;
-	if (q && q->flags & TCQ_F_INGRESS) {
+	if ((q && q->flags & TCQ_F_INGRESS) ||
+	    (new && new->flags & TCQ_F_INGRESS)) {
 		num_q = 1;
 		ingress = 1;
 	}
@@ -692,13 +680,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (!ingress)
 			dev_queue = netdev_get_tx_queue(dev, i);
 
-		if (ingress) {
-			old = dev_graft_qdisc(dev_queue, q);
-		} else {
-			old = dev_graft_qdisc(dev_queue, new);
-			if (new && i > 0)
-				atomic_inc(&new->refcnt);
-		}
+		old = dev_graft_qdisc(dev_queue, new);
+		if (new && i > 0)
+			atomic_inc(&new->refcnt);
+
 		notify_and_destroy(skb, n, classid, old, new);
 	}
 
@@ -817,8 +802,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 				goto err_out3;
 			}
 		}
-		if (parent)
-			list_add_tail(&sch->list, &dev_queue->qdisc->list);
+		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
+			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
 
 		return sch;
 	}
@@ -933,7 +918,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /* ingress */
-				q = dev->rx_queue.qdisc;
+				q = dev->rx_queue.qdisc_sleeping;
 			}
 		} else {
 			struct netdev_queue *dev_queue;
@@ -1003,7 +988,7 @@ replay:
 					return -ENOENT;
 				q = qdisc_leaf(p, clid);
 			} else { /*ingress */
-				q = dev->rx_queue.qdisc;
+				q = dev->rx_queue.qdisc_sleeping;
 			}
 		} else {
 			struct netdev_queue *dev_queue;
@@ -1261,11 +1246,11 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 		q_idx = 0;
 
 		dev_queue = netdev_get_tx_queue(dev, 0);
-		if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
 			goto done;
 
 		dev_queue = &dev->rx_queue;
-		if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+		if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
 			goto done;
 
 cont:
@@ -1554,11 +1539,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	t = 0;
 
 	dev_queue = netdev_get_tx_queue(dev, 0);
-	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
 		goto done;
 
 	dev_queue = &dev->rx_queue;
-	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+	if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
 		goto done;
 
 done:
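
Many sch_api.c hunks above switch reads from dev_queue->qdisc to dev_queue->qdisc_sleeping. A hedged sketch of the distinction these hunks rely on (2.6.27-era semantics, described from memory; not code from this commit):

	/*
	 * dev_queue->qdisc          - what the transmit path runs; swapped to
	 *                             &noop_qdisc while the device is deactivated
	 * dev_queue->qdisc_sleeping - the administratively configured qdisc;
	 *                             stable across dev_deactivate()/dev_activate()
	 *
	 * Configuration-side code (handle lookup, dump, graft) must therefore
	 * read the sleeping pointer, as qdisc_lookup() now does via
	 * qdisc_match_from_root(); otherwise a dump racing a device down/up
	 * cycle would only ever see noop_qdisc.
	 */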
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 04faa835be17..43d37256c15e 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -162,7 +162,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
 	qdisc_destroy(flow->q);
 	tcf_destroy_chain(&flow->filter_list);
 	if (flow->sock) {
-		pr_debug("atm_tc_put: f_count %d\n",
+		pr_debug("atm_tc_put: f_count %ld\n",
 			 file_count(flow->sock->file));
 		flow->vcc->pop = flow->old_pop;
 		sockfd_put(flow->sock);
@@ -259,7 +259,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 	sock = sockfd_lookup(fd, &error);
 	if (!sock)
 		return error;	/* f_count++ */
-	pr_debug("atm_tc_change: f_count %d\n", file_count(sock->file));
+	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
 	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
 		error = -EPROTOTYPE;
 		goto err_out;
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			kfree_skb(skb);
 			goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
-		sch->qstats.drops++;
-		if (flow)
-			flow->qstats.drops++;
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			if (flow)
+				flow->qstats.drops++;
+		}
 		return ret;
 	}
 	sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -455,7 +457,7 @@ drop: __maybe_unused
 		return 0;
 	}
 	tasklet_schedule(&p->task);
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 /*
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	if (!ret) {
 		sch->q.qlen++;
 		sch->qstats.requeues++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 		p->link.qstats.drops++;
 	}
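
The NET_XMIT_SUCCESS | __NET_XMIT_STOLEN / __NET_XMIT_BYPASS return values and the net_xmit_drop_count() tests seen here recur in every qdisc below. A sketch of the helper definitions these hunks assume (approximately as in include/net/sch_generic.h of this kernel series, quoted from memory, so treat as an assumption):

	/* The flags live above the low NET_XMIT_* codes, so core callers that
	 * only check for NET_XMIT_SUCCESS still see success, while qdisc
	 * internals can tell a real drop from a stolen or bypassed packet. */
	enum net_xmit_qdisc_t {
		__NET_XMIT_STOLEN = 0x00010000,
		__NET_XMIT_BYPASS = 0x00020000,
	};

	#ifdef CONFIG_NET_CLS_ACT
	/* A stolen packet was consumed by an action, not lost: not a drop. */
	#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
	#else
	#define net_xmit_drop_count(e)	(1)
	#endif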
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index f1d2f8ec8b4c..4e261ce62f48 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -230,7 +230,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	    (cl = cbq_class_lookup(q, prio)) != NULL)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	for (;;) {
 		int result = 0;
 		defmap = head->defaults;
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		case TC_ACT_RECLASSIFY:
@@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	q->rx_class = cl;
 #endif
 	if (cl == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return ret;
 	}
 
-	sch->qstats.drops++;
-	cbq_mark_toplevel(q, cl);
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cbq_mark_toplevel(q, cl);
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 			cbq_activate_class(cl);
 		return 0;
 	}
-	sch->qstats.drops++;
-	cl->qstats.drops++;
+	if (net_xmit_drop_count(ret)) {
+		sch->qstats.drops++;
+		cl->qstats.drops++;
+	}
 	return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = NULL;
 
 	if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+		int ret;
 
 		cbq_mark_toplevel(q, cl);
 
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (qdisc_enqueue(skb, cl->q) == 0) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
 			sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 				cbq_activate_class(cl);
 			return 0;
 		}
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return 0;
 	}
 
@@ -1175,7 +1182,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 				this->tparent->children = NULL;
 		}
 	} else {
-		BUG_TRAP(this->sibling == this);
+		WARN_ON(this->sibling != this);
 	}
 }
 
@@ -1699,7 +1706,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 
-	BUG_TRAP(!cl->filters);
+	WARN_ON(cl->filters);
 
 	tcf_destroy_chain(&cl->filter_list);
 	qdisc_destroy(cl->q);
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e2..edd1298f85f6 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
 			kfree_skb(skb);
-			return NET_XMIT_SUCCESS;
+			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
 		case TC_ACT_SHOT:
 			goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
@@ -267,7 +268,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 drop:
 	kfree_skb(skb);
 	sch->qstats.drops++;
-	return NET_XMIT_BYPASS;
+	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 }
 
 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = p->q->ops->requeue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err))
+			sch->qstats.drops++;
 		return err;
 	}
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4ac7e3a8c253..468574682caa 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,7 +29,7 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * qdisc_root_lock(qdisc) spinlock.
+ * qdisc_lock(qdisc) spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via qdisc root lock
@@ -126,7 +126,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
-	root_lock = qdisc_root_lock(q);
+	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
@@ -135,7 +135,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_subqueue_stopped(dev, skb))
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 	HARD_TX_UNLOCK(dev, txq);
 
@@ -162,7 +163,8 @@ static inline int qdisc_restart(struct Qdisc *q)
 		break;
 	}
 
-	if (ret && netif_tx_queue_stopped(txq))
+	if (ret && (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq)))
 		ret = 0;
 
 	return ret;
@@ -505,7 +507,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -541,7 +543,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under qdisc_root_lock(qdisc) and BH! */
+/* Under qdisc_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -596,7 +598,7 @@ static void transition_one_qdisc(struct net_device *dev,
 	int *need_watchdog_p = _need_watchdog;
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (new_qdisc != &noqueue_qdisc)
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
 		*need_watchdog_p = 1;
 }
 
@@ -619,6 +621,7 @@ void dev_activate(struct net_device *dev)
 
 	need_watchdog = 0;
 	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+	transition_one_qdisc(dev, &dev->rx_queue, NULL);
 
 	if (need_watchdog) {
 		dev->trans_start = jiffies;
@@ -644,7 +647,7 @@ static void dev_deactivate_queue(struct net_device *dev,
 	}
 }
 
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev, int lock)
 {
 	unsigned int i;
 
@@ -655,13 +658,14 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
 		int val;
 
 		dev_queue = netdev_get_tx_queue(dev, i);
-		q = dev_queue->qdisc;
-		root_lock = qdisc_root_lock(q);
+		q = dev_queue->qdisc_sleeping;
+		root_lock = qdisc_lock(q);
 
 		if (lock)
 			spin_lock_bh(root_lock);
 
-		val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+		val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+		       test_bit(__QDISC_STATE_SCHED, &q->state));
 
 		if (lock)
 			spin_unlock_bh(root_lock);
@@ -677,6 +681,7 @@ void dev_deactivate(struct net_device *dev)
 	bool running;
 
 	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
+	dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -685,14 +690,14 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (some_qdisc_is_running(dev, 0))
+		while (some_qdisc_is_busy(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		running = some_qdisc_is_running(dev, 1);
+		running = some_qdisc_is_busy(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
@@ -718,7 +723,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 void dev_init_scheduler(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
-	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
@@ -731,20 +736,20 @@ static void shutdown_scheduler_queue(struct net_device *dev,
 	struct Qdisc *qdisc_default = _qdisc_default;
 
 	if (qdisc) {
-		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+		spinlock_t *root_lock = qdisc_lock(qdisc);
 
 		dev_queue->qdisc = qdisc_default;
 		dev_queue->qdisc_sleeping = qdisc_default;
 
-		spin_lock(root_lock);
+		spin_lock_bh(root_lock);
 		qdisc_destroy(qdisc);
-		spin_unlock(root_lock);
+		spin_unlock_bh(root_lock);
 	}
 }
 
 void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
-	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
-	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
+	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
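
sch_generic.c above consistently replaces qdisc_root_lock(q) with qdisc_lock(q) on paths that already hold a specific qdisc. A hedged sketch of the two helpers (2.6.27-era definitions, quoted from memory; not part of this diff):

	static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
	{
		return &qdisc->q.lock;		/* lock embedded in this qdisc */
	}

	static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
	{
		struct Qdisc *root = qdisc_root(qdisc);

		return qdisc_lock(root);	/* lock of the root qdisc */
	}

qdisc_restart() and the destroy/shutdown paths operate on the qdisc they were handed, so taking that qdisc's own lock is sufficient and avoids chasing the root pointer.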
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba8..c2b8d9cce3d2 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1159,14 +1159,14 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		if (cl->level == 0)
 			return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->root.filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -1578,7 +1578,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	cl = hfsc_classify(skb, sch, &err);
 	if (cl == NULL) {
-		if (err == NET_XMIT_BYPASS)
+		if (err & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return err;
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
-		cl->qstats.drops++;
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(err)) {
+			cl->qstats.drops++;
+			sch->qstats.drops++;
+		}
 		return err;
 	}
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30c999c61b01..6febd245e62b 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -214,14 +214,14 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
 		return cl;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	tcf = q->filter_list;
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_QUEUED:
 		case TC_ACT_STOLEN:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -524,7 +524,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
  */
 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
 {
-	BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
+	WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
 
 	if (!cl->prio_activity) {
 		cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
@@ -542,7 +542,7 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
  */
 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
 {
-	BUG_TRAP(cl->prio_activity);
+	WARN_ON(!cl->prio_activity);
 
 	htb_deactivate_prios(q, cl);
 	cl->prio_activity = 0;
@@ -567,14 +567,16 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else {
 		cl->bstats.packets +=
@@ -610,15 +612,17 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 #ifdef CONFIG_NET_CLS_ACT
 	} else if (!cl) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
 #endif
-	} else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
-		   NET_XMIT_SUCCESS) {
-		sch->qstats.drops++;
-		cl->qstats.drops++;
+	} else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
+		   NET_XMIT_SUCCESS) {
+		if (net_xmit_drop_count(ret)) {
+			sch->qstats.drops++;
+			cl->qstats.drops++;
+		}
 		return NET_XMIT_DROP;
 	} else
 		htb_activate(q, cl);
@@ -757,7 +761,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 		u32 *pid;
 	} stk[TC_HTB_MAXDEPTH], *sp = stk;
 
-	BUG_TRAP(tree->rb_node);
+	WARN_ON(!tree->rb_node);
 	sp->root = tree->rb_node;
 	sp->pptr = pptr;
 	sp->pid = pid;
@@ -777,7 +781,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 					*sp->pptr = (*sp->pptr)->rb_left;
 				if (sp > stk) {
 					sp--;
-					BUG_TRAP(*sp->pptr);
+					WARN_ON(!*sp->pptr);
 					if (!*sp->pptr)
 						return NULL;
 					htb_next_rb_node(sp->pptr);
@@ -792,7 +796,7 @@ static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
 			sp->pid = cl->un.inner.last_ptr_id + prio;
 		}
 	}
-	BUG_TRAP(0);
+	WARN_ON(1);
 	return NULL;
 }
 
@@ -810,7 +814,7 @@ static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
 
 	do {
 next:
-		BUG_TRAP(cl);
+		WARN_ON(!cl);
 		if (!cl)
 			return NULL;
 
@@ -1185,7 +1189,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 {
 	struct htb_class *parent = cl->parent;
 
-	BUG_TRAP(!cl->level && cl->un.leaf.q && !cl->prio_activity);
+	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
 
 	if (parent->cmode != HTB_CAN_SEND)
 		htb_safe_rb_erase(&parent->pq_node, q->wait_pq + parent->level);
@@ -1205,7 +1209,7 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 {
 	if (!cl->level) {
-		BUG_TRAP(cl->un.leaf.q);
+		WARN_ON(!cl->un.leaf.q);
 		qdisc_destroy(cl->un.leaf.q);
 	}
 	gen_kill_estimator(&cl->bstats, &cl->rate_est);
@@ -1275,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
-	cl->parent->children--;
+	if (cl->parent)
+		cl->parent->children--;
 
 	if (cl->prio_activity)
 		htb_deactivate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a59085700678..fb0294d0b55e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -176,7 +176,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (count == 0) {
 		sch->qstats.drops++;
 		kfree_skb(skb);
-		return NET_XMIT_BYPASS;
+		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	}
 
 	skb_orphan(skb);
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
-	} else
+	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
+	}
 
 	pr_debug("netem: enqueue ret %d\n", ret);
 	return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb095..eac197610edf 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -38,14 +38,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct tcf_result res;
 	int err;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
 		err = tc_classify(skb, q->filter_list, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return NULL;
 		}
@@ -74,7 +74,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
 
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return ret;
 }
 
@@ -102,7 +103,7 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	qdisc = prio_classify(skb, sch, &ret);
 #ifdef CONFIG_NET_CLS_ACT
 	if (qdisc == NULL) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->qstats.requeues++;
 		return 0;
 	}
-	sch->qstats.drops++;
+	if (net_xmit_drop_count(ret))
+		sch->qstats.drops++;
 	return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bbd..5da05839e225 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
-	} else {
+	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
 		sch->qstats.drops++;
 	}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da666568..6e041d10dbdb 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -171,14 +171,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	if (!q->filter_list)
 		return sfq_hash(q, skb) + 1;
 
-	*qerr = NET_XMIT_BYPASS;
+	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	result = tc_classify(skb, q->filter_list, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
 		case TC_ACT_STOLEN:
 		case TC_ACT_QUEUED:
-			*qerr = NET_XMIT_SUCCESS;
+			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 		case TC_ACT_SHOT:
 			return 0;
 		}
@@ -285,7 +285,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
@@ -339,7 +339,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
 	hash = sfq_classify(skb, sch, &ret);
 	if (hash == 0) {
-		if (ret == NET_XMIT_BYPASS)
+		if (ret & __NET_XMIT_BYPASS)
 			sch->qstats.drops++;
 		kfree_skb(skb);
 		return ret;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f7632..7d3b7ff3bf07 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, q->qdisc);
 	if (ret != 0) {
-		sch->qstats.drops++;
+		if (net_xmit_drop_count(ret))
+			sch->qstats.drops++;
 		return ret;
 	}
 
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 537223642b6e..2c35c678563b 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -305,10 +305,11 @@ restart:
 
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
-			if (netif_tx_trylock(slave)) {
-				if (!__netif_subqueue_stopped(slave, subq) &&
+			if (__netif_tx_trylock(slave_txq)) {
+				if (!netif_tx_queue_stopped(slave_txq) &&
+				    !netif_tx_queue_frozen(slave_txq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
-					netif_tx_unlock(slave);
+					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
@@ -316,7 +317,7 @@ restart:
 						qdisc_pkt_len(skb);
 					return 0;
 				}
-				netif_tx_unlock(slave);
+				__netif_tx_unlock(slave_txq);
 			}
 			if (netif_queue_stopped(dev))
 				busy = 1;
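
The teql changes track the move to per-queue TX locking: on multiqueue devices the xmit lock lives in each struct netdev_queue rather than in struct net_device, so teql try-locks the slave's specific TX queue and, mirroring qdisc_restart() above, also refuses to transmit while that queue is stopped or frozen. A sketch of the assumed helper (described from memory; not code from this commit):

	/* Rough shape of the per-queue trylock (assumption):
	 *
	 *	static inline int __netif_tx_trylock(struct netdev_queue *txq)
	 *	{
	 *		int ok = spin_trylock(&txq->_xmit_lock);
	 *		if (likely(ok))
	 *			txq->xmit_lock_owner = smp_processor_id();
	 *		return ok;
	 *	}
	 *
	 * whereas the old netif_tx_trylock(dev) serialized the whole device.
	 */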