Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/Makefile       |   2
-rw-r--r--  net/sched/act_api.c      |   2
-rw-r--r--  net/sched/cls_api.c      |  12
-rw-r--r--  net/sched/sch_api.c      | 139
-rw-r--r--  net/sched/sch_cbq.c      |  38
-rw-r--r--  net/sched/sch_generic.c  | 223
-rw-r--r--  net/sched/sch_hfsc.c     |   4
-rw-r--r--  net/sched/sch_htb.c      |  35
-rw-r--r--  net/sched/sch_ingress.c  |  14
-rw-r--r--  net/sched/sch_mq.c       | 235
-rw-r--r--  net/sched/sch_multiq.c   |  33
-rw-r--r--  net/sched/sch_prio.c     |  32
-rw-r--r--  net/sched/sch_red.c      |  21
-rw-r--r--  net/sched/sch_sfq.c      |   7
-rw-r--r--  net/sched/sch_tbf.c      |  22
-rw-r--r--  net/sched/sch_teql.c     |  10
16 files changed, 504 insertions, 325 deletions
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 54d950cd4b8d..f14e71bfa58f 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -2,7 +2,7 @@
2 | # Makefile for the Linux Traffic Control Unit. | 2 | # Makefile for the Linux Traffic Control Unit. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := sch_generic.o | 5 | obj-y := sch_generic.o sch_mq.o |
6 | 6 | ||
7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o | 7 | obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o |
8 | obj-$(CONFIG_NET_CLS) += cls_api.o | 8 | obj-$(CONFIG_NET_CLS) += cls_api.o |
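sch_mq.o sits next to sch_generic.o in obj-y rather than under CONFIG_NET_SCHED because sch_generic.c (see attach_default_qdiscs() further down) now instantiates mq as the default root for multiqueue devices, so the scheduler has to be built in even when the traffic-control configuration interface is compiled out.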
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9d03cc33b6cc..2dfb3e7a040d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1011,7 +1011,7 @@ replay:
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | static struct nlattr * | 1013 | static struct nlattr * |
1014 | find_dump_kind(struct nlmsghdr *n) | 1014 | find_dump_kind(const struct nlmsghdr *n) |
1015 | { | 1015 | { |
1016 | struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; | 1016 | struct nlattr *tb1, *tb2[TCA_ACT_MAX+1]; |
1017 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; | 1017 | struct nlattr *tb[TCA_ACT_MAX_PRIO + 1]; |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 09cdcdfe7e91..6a536949cdc0 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -168,8 +168,7 @@ replay:
168 | 168 | ||
169 | /* Find qdisc */ | 169 | /* Find qdisc */ |
170 | if (!parent) { | 170 | if (!parent) { |
171 | struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); | 171 | q = dev->qdisc; |
172 | q = dev_queue->qdisc_sleeping; | ||
173 | parent = q->handle; | 172 | parent = q->handle; |
174 | } else { | 173 | } else { |
175 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); | 174 | q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent)); |
@@ -181,6 +180,9 @@ replay:
181 | if ((cops = q->ops->cl_ops) == NULL) | 180 | if ((cops = q->ops->cl_ops) == NULL) |
182 | return -EINVAL; | 181 | return -EINVAL; |
183 | 182 | ||
183 | if (cops->tcf_chain == NULL) | ||
184 | return -EOPNOTSUPP; | ||
185 | |||
184 | /* Do we search for filter, attached to class? */ | 186 | /* Do we search for filter, attached to class? */ |
185 | if (TC_H_MIN(parent)) { | 187 | if (TC_H_MIN(parent)) { |
186 | cl = cops->get(q, parent); | 188 | cl = cops->get(q, parent); |
@@ -405,7 +407,6 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
405 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) | 407 | static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb) |
406 | { | 408 | { |
407 | struct net *net = sock_net(skb->sk); | 409 | struct net *net = sock_net(skb->sk); |
408 | struct netdev_queue *dev_queue; | ||
409 | int t; | 410 | int t; |
410 | int s_t; | 411 | int s_t; |
411 | struct net_device *dev; | 412 | struct net_device *dev; |
@@ -424,15 +425,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
424 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) | 425 | if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) |
425 | return skb->len; | 426 | return skb->len; |
426 | 427 | ||
427 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
428 | if (!tcm->tcm_parent) | 428 | if (!tcm->tcm_parent) |
429 | q = dev_queue->qdisc_sleeping; | 429 | q = dev->qdisc; |
430 | else | 430 | else |
431 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); | 431 | q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent)); |
432 | if (!q) | 432 | if (!q) |
433 | goto out; | 433 | goto out; |
434 | if ((cops = q->ops->cl_ops) == NULL) | 434 | if ((cops = q->ops->cl_ops) == NULL) |
435 | goto errout; | 435 | goto errout; |
436 | if (cops->tcf_chain == NULL) | ||
437 | goto errout; | ||
436 | if (TC_H_MIN(tcm->tcm_parent)) { | 438 | if (TC_H_MIN(tcm->tcm_parent)) { |
437 | cl = cops->get(q, tcm->tcm_parent); | 439 | cl = cops->get(q, tcm->tcm_parent); |
438 | if (cl == 0) | 440 | if (cl == 0) |
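Both filter entry points above now bail out before touching a classful qdisc that owns no filter chains; the new mq scheduler is exactly such a qdisc, providing classes but leaving the tcf_chain hook NULL. A minimal userspace sketch of the guard (illustrative names, not kernel-verbatim):

#include <stdio.h>
#include <errno.h>

struct tcf_proto;                               /* opaque in this sketch */

struct toy_class_ops {
        struct tcf_proto **(*tcf_chain)(void *q, unsigned long cl);
};

/* Mirrors the shape of the checks added to tc_ctl_tfilter()
 * and tc_dump_tfilter(). */
static int filter_request(const struct toy_class_ops *cops)
{
        if (cops == NULL)
                return -EINVAL;         /* qdisc is not classful */
        if (cops->tcf_chain == NULL)
                return -EOPNOTSUPP;     /* classful, but no filter lists */
        return 0;                       /* safe to call cops->tcf_chain() */
}

int main(void)
{
        struct toy_class_ops mq_like = { .tcf_chain = NULL };

        printf("filter request on mq-like root: %d\n",
               filter_request(&mq_like));
        return 0;
}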
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index fdb694e9f759..692d9a41cd23 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -207,7 +207,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
207 | static void qdisc_list_add(struct Qdisc *q) | 207 | static void qdisc_list_add(struct Qdisc *q) |
208 | { | 208 | { |
209 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) | 209 | if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) |
210 | list_add_tail(&q->list, &qdisc_root_sleeping(q)->list); | 210 | list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list); |
211 | } | 211 | } |
212 | 212 | ||
213 | void qdisc_list_del(struct Qdisc *q) | 213 | void qdisc_list_del(struct Qdisc *q) |
@@ -219,17 +219,11 @@ EXPORT_SYMBOL(qdisc_list_del);
219 | 219 | ||
220 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | 220 | struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) |
221 | { | 221 | { |
222 | unsigned int i; | ||
223 | struct Qdisc *q; | 222 | struct Qdisc *q; |
224 | 223 | ||
225 | for (i = 0; i < dev->num_tx_queues; i++) { | 224 | q = qdisc_match_from_root(dev->qdisc, handle); |
226 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 225 | if (q) |
227 | struct Qdisc *txq_root = txq->qdisc_sleeping; | 226 | goto out; |
228 | |||
229 | q = qdisc_match_from_root(txq_root, handle); | ||
230 | if (q) | ||
231 | goto out; | ||
232 | } | ||
233 | 227 | ||
234 | q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); | 228 | q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle); |
235 | out: | 229 | out: |
@@ -616,32 +610,6 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
616 | return i>0 ? autohandle : 0; | 610 | return i>0 ? autohandle : 0; |
617 | } | 611 | } |
618 | 612 | ||
619 | /* Attach toplevel qdisc to device queue. */ | ||
620 | |||
621 | static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, | ||
622 | struct Qdisc *qdisc) | ||
623 | { | ||
624 | struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; | ||
625 | spinlock_t *root_lock; | ||
626 | |||
627 | root_lock = qdisc_lock(oqdisc); | ||
628 | spin_lock_bh(root_lock); | ||
629 | |||
630 | /* Prune old scheduler */ | ||
631 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) | ||
632 | qdisc_reset(oqdisc); | ||
633 | |||
634 | /* ... and graft new one */ | ||
635 | if (qdisc == NULL) | ||
636 | qdisc = &noop_qdisc; | ||
637 | dev_queue->qdisc_sleeping = qdisc; | ||
638 | rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); | ||
639 | |||
640 | spin_unlock_bh(root_lock); | ||
641 | |||
642 | return oqdisc; | ||
643 | } | ||
644 | |||
645 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) | 613 | void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n) |
646 | { | 614 | { |
647 | const struct Qdisc_class_ops *cops; | 615 | const struct Qdisc_class_ops *cops; |
@@ -710,6 +678,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
710 | if (dev->flags & IFF_UP) | 678 | if (dev->flags & IFF_UP) |
711 | dev_deactivate(dev); | 679 | dev_deactivate(dev); |
712 | 680 | ||
681 | if (new && new->ops->attach) { | ||
682 | new->ops->attach(new); | ||
683 | num_q = 0; | ||
684 | } | ||
685 | |||
713 | for (i = 0; i < num_q; i++) { | 686 | for (i = 0; i < num_q; i++) { |
714 | struct netdev_queue *dev_queue = &dev->rx_queue; | 687 | struct netdev_queue *dev_queue = &dev->rx_queue; |
715 | 688 | ||
@@ -720,22 +693,27 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
720 | if (new && i > 0) | 693 | if (new && i > 0) |
721 | atomic_inc(&new->refcnt); | 694 | atomic_inc(&new->refcnt); |
722 | 695 | ||
723 | notify_and_destroy(skb, n, classid, old, new); | 696 | qdisc_destroy(old); |
724 | } | 697 | } |
725 | 698 | ||
699 | notify_and_destroy(skb, n, classid, dev->qdisc, new); | ||
700 | if (new && !new->ops->attach) | ||
701 | atomic_inc(&new->refcnt); | ||
702 | dev->qdisc = new ? : &noop_qdisc; | ||
703 | |||
726 | if (dev->flags & IFF_UP) | 704 | if (dev->flags & IFF_UP) |
727 | dev_activate(dev); | 705 | dev_activate(dev); |
728 | } else { | 706 | } else { |
729 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; | 707 | const struct Qdisc_class_ops *cops = parent->ops->cl_ops; |
730 | 708 | ||
731 | err = -EINVAL; | 709 | err = -EOPNOTSUPP; |
732 | 710 | if (cops && cops->graft) { | |
733 | if (cops) { | ||
734 | unsigned long cl = cops->get(parent, classid); | 711 | unsigned long cl = cops->get(parent, classid); |
735 | if (cl) { | 712 | if (cl) { |
736 | err = cops->graft(parent, cl, new, &old); | 713 | err = cops->graft(parent, cl, new, &old); |
737 | cops->put(parent, cl); | 714 | cops->put(parent, cl); |
738 | } | 715 | } else |
716 | err = -ENOENT; | ||
739 | } | 717 | } |
740 | if (!err) | 718 | if (!err) |
741 | notify_and_destroy(skb, n, classid, old, new); | 719 | notify_and_destroy(skb, n, classid, old, new); |
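Pulled out of the diff, the reworked root-graft rule is: a new root that implements ->attach() (mq does) places its per-queue children itself, the generic one-root-for-every-queue loop is skipped, and dev->qdisc afterwards tracks whichever root is current. A toy userspace model of just that dispatch (assumed, heavily simplified names):

#include <stdio.h>

struct toy_qdisc {
        const char *id;
        void (*attach)(struct toy_qdisc *);
};

static void mq_style_attach(struct toy_qdisc *q)
{
        printf("%s: grafting one child per tx queue itself\n", q->id);
}

static void graft_root(struct toy_qdisc *new, unsigned int num_tx_queues)
{
        unsigned int num_q = num_tx_queues, i;

        if (new->attach) {
                new->attach(new);
                num_q = 0;              /* generic loop has nothing to do */
        }
        for (i = 0; i < num_q; i++)     /* classic single-root case */
                printf("%s shared onto tx queue %u\n", new->id, i);
}

int main(void)
{
        struct toy_qdisc mq  = { "mq",  mq_style_attach };
        struct toy_qdisc tbf = { "tbf", NULL };

        graft_root(&mq, 4);
        graft_root(&tbf, 4);
        return 0;
}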
@@ -755,7 +733,8 @@ static struct lock_class_key qdisc_rx_lock;
755 | 733 | ||
756 | static struct Qdisc * | 734 | static struct Qdisc * |
757 | qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, | 735 | qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, |
758 | u32 parent, u32 handle, struct nlattr **tca, int *errp) | 736 | struct Qdisc *p, u32 parent, u32 handle, |
737 | struct nlattr **tca, int *errp) | ||
759 | { | 738 | { |
760 | int err; | 739 | int err; |
761 | struct nlattr *kind = tca[TCA_KIND]; | 740 | struct nlattr *kind = tca[TCA_KIND]; |
@@ -832,24 +811,21 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
832 | if (tca[TCA_RATE]) { | 811 | if (tca[TCA_RATE]) { |
833 | spinlock_t *root_lock; | 812 | spinlock_t *root_lock; |
834 | 813 | ||
814 | err = -EOPNOTSUPP; | ||
815 | if (sch->flags & TCQ_F_MQROOT) | ||
816 | goto err_out4; | ||
817 | |||
835 | if ((sch->parent != TC_H_ROOT) && | 818 | if ((sch->parent != TC_H_ROOT) && |
836 | !(sch->flags & TCQ_F_INGRESS)) | 819 | !(sch->flags & TCQ_F_INGRESS) && |
820 | (!p || !(p->flags & TCQ_F_MQROOT))) | ||
837 | root_lock = qdisc_root_sleeping_lock(sch); | 821 | root_lock = qdisc_root_sleeping_lock(sch); |
838 | else | 822 | else |
839 | root_lock = qdisc_lock(sch); | 823 | root_lock = qdisc_lock(sch); |
840 | 824 | ||
841 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, | 825 | err = gen_new_estimator(&sch->bstats, &sch->rate_est, |
842 | root_lock, tca[TCA_RATE]); | 826 | root_lock, tca[TCA_RATE]); |
843 | if (err) { | 827 | if (err) |
844 | /* | 828 | goto err_out4; |
845 | * Any broken qdiscs that would require | ||
846 | * a ops->reset() here? The qdisc was never | ||
847 | * in action so it shouldn't be necessary. | ||
848 | */ | ||
849 | if (ops->destroy) | ||
850 | ops->destroy(sch); | ||
851 | goto err_out3; | ||
852 | } | ||
853 | } | 829 | } |
854 | 830 | ||
855 | qdisc_list_add(sch); | 831 | qdisc_list_add(sch); |
@@ -865,6 +841,15 @@ err_out2:
865 | err_out: | 841 | err_out: |
866 | *errp = err; | 842 | *errp = err; |
867 | return NULL; | 843 | return NULL; |
844 | |||
845 | err_out4: | ||
846 | /* | ||
847 | * Any broken qdiscs that would require a ops->reset() here? | ||
848 | * The qdisc was never in action so it shouldn't be necessary. | ||
849 | */ | ||
850 | if (ops->destroy) | ||
851 | ops->destroy(sch); | ||
852 | goto err_out3; | ||
868 | } | 853 | } |
869 | 854 | ||
870 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) | 855 | static int qdisc_change(struct Qdisc *sch, struct nlattr **tca) |
@@ -889,13 +874,16 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
889 | qdisc_put_stab(sch->stab); | 874 | qdisc_put_stab(sch->stab); |
890 | sch->stab = stab; | 875 | sch->stab = stab; |
891 | 876 | ||
892 | if (tca[TCA_RATE]) | 877 | if (tca[TCA_RATE]) { |
893 | /* NB: ignores errors from replace_estimator | 878 | /* NB: ignores errors from replace_estimator |
894 | because change can't be undone. */ | 879 | because change can't be undone. */ |
880 | if (sch->flags & TCQ_F_MQROOT) | ||
881 | goto out; | ||
895 | gen_replace_estimator(&sch->bstats, &sch->rate_est, | 882 | gen_replace_estimator(&sch->bstats, &sch->rate_est, |
896 | qdisc_root_sleeping_lock(sch), | 883 | qdisc_root_sleeping_lock(sch), |
897 | tca[TCA_RATE]); | 884 | tca[TCA_RATE]); |
898 | 885 | } | |
886 | out: | ||
899 | return 0; | 887 | return 0; |
900 | } | 888 | } |
901 | 889 | ||
@@ -974,9 +962,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
974 | q = dev->rx_queue.qdisc_sleeping; | 962 | q = dev->rx_queue.qdisc_sleeping; |
975 | } | 963 | } |
976 | } else { | 964 | } else { |
977 | struct netdev_queue *dev_queue; | 965 | q = dev->qdisc; |
978 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
979 | q = dev_queue->qdisc_sleeping; | ||
980 | } | 966 | } |
981 | if (!q) | 967 | if (!q) |
982 | return -ENOENT; | 968 | return -ENOENT; |
@@ -1044,9 +1030,7 @@ replay:
1044 | q = dev->rx_queue.qdisc_sleeping; | 1030 | q = dev->rx_queue.qdisc_sleeping; |
1045 | } | 1031 | } |
1046 | } else { | 1032 | } else { |
1047 | struct netdev_queue *dev_queue; | 1033 | q = dev->qdisc; |
1048 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
1049 | q = dev_queue->qdisc_sleeping; | ||
1050 | } | 1034 | } |
1051 | 1035 | ||
1052 | /* It may be default qdisc, ignore it */ | 1036 | /* It may be default qdisc, ignore it */ |
@@ -1123,13 +1107,19 @@ create_n_graft:
1123 | if (!(n->nlmsg_flags&NLM_F_CREATE)) | 1107 | if (!(n->nlmsg_flags&NLM_F_CREATE)) |
1124 | return -ENOENT; | 1108 | return -ENOENT; |
1125 | if (clid == TC_H_INGRESS) | 1109 | if (clid == TC_H_INGRESS) |
1126 | q = qdisc_create(dev, &dev->rx_queue, | 1110 | q = qdisc_create(dev, &dev->rx_queue, p, |
1127 | tcm->tcm_parent, tcm->tcm_parent, | 1111 | tcm->tcm_parent, tcm->tcm_parent, |
1128 | tca, &err); | 1112 | tca, &err); |
1129 | else | 1113 | else { |
1130 | q = qdisc_create(dev, netdev_get_tx_queue(dev, 0), | 1114 | unsigned int ntx = 0; |
1115 | |||
1116 | if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue) | ||
1117 | ntx = p->ops->cl_ops->select_queue(p, tcm); | ||
1118 | |||
1119 | q = qdisc_create(dev, netdev_get_tx_queue(dev, ntx), p, | ||
1131 | tcm->tcm_parent, tcm->tcm_handle, | 1120 | tcm->tcm_parent, tcm->tcm_handle, |
1132 | tca, &err); | 1121 | tca, &err); |
1122 | } | ||
1133 | if (q == NULL) { | 1123 | if (q == NULL) { |
1134 | if (err == -EAGAIN) | 1124 | if (err == -EAGAIN) |
1135 | goto replay; | 1125 | goto replay; |
@@ -1291,8 +1281,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1291 | s_q_idx = 0; | 1281 | s_q_idx = 0; |
1292 | q_idx = 0; | 1282 | q_idx = 0; |
1293 | 1283 | ||
1294 | dev_queue = netdev_get_tx_queue(dev, 0); | 1284 | if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0) |
1295 | if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0) | ||
1296 | goto done; | 1285 | goto done; |
1297 | 1286 | ||
1298 | dev_queue = &dev->rx_queue; | 1287 | dev_queue = &dev->rx_queue; |
@@ -1323,7 +1312,6 @@ done:
1323 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | 1312 | static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) |
1324 | { | 1313 | { |
1325 | struct net *net = sock_net(skb->sk); | 1314 | struct net *net = sock_net(skb->sk); |
1326 | struct netdev_queue *dev_queue; | ||
1327 | struct tcmsg *tcm = NLMSG_DATA(n); | 1315 | struct tcmsg *tcm = NLMSG_DATA(n); |
1328 | struct nlattr *tca[TCA_MAX + 1]; | 1316 | struct nlattr *tca[TCA_MAX + 1]; |
1329 | struct net_device *dev; | 1317 | struct net_device *dev; |
@@ -1361,7 +1349,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1361 | 1349 | ||
1362 | /* Step 1. Determine qdisc handle X:0 */ | 1350 | /* Step 1. Determine qdisc handle X:0 */ |
1363 | 1351 | ||
1364 | dev_queue = netdev_get_tx_queue(dev, 0); | ||
1365 | if (pid != TC_H_ROOT) { | 1352 | if (pid != TC_H_ROOT) { |
1366 | u32 qid1 = TC_H_MAJ(pid); | 1353 | u32 qid1 = TC_H_MAJ(pid); |
1367 | 1354 | ||
@@ -1372,7 +1359,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1372 | } else if (qid1) { | 1359 | } else if (qid1) { |
1373 | qid = qid1; | 1360 | qid = qid1; |
1374 | } else if (qid == 0) | 1361 | } else if (qid == 0) |
1375 | qid = dev_queue->qdisc_sleeping->handle; | 1362 | qid = dev->qdisc->handle; |
1376 | 1363 | ||
1377 | /* Now qid is genuine qdisc handle consistent | 1364 | /* Now qid is genuine qdisc handle consistent |
1378 | both with parent and child. | 1365 | both with parent and child. |
@@ -1383,7 +1370,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1383 | pid = TC_H_MAKE(qid, pid); | 1370 | pid = TC_H_MAKE(qid, pid); |
1384 | } else { | 1371 | } else { |
1385 | if (qid == 0) | 1372 | if (qid == 0) |
1386 | qid = dev_queue->qdisc_sleeping->handle; | 1373 | qid = dev->qdisc->handle; |
1387 | } | 1374 | } |
1388 | 1375 | ||
1389 | /* OK. Locate qdisc */ | 1376 | /* OK. Locate qdisc */ |
@@ -1417,7 +1404,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1417 | goto out; | 1404 | goto out; |
1418 | break; | 1405 | break; |
1419 | case RTM_DELTCLASS: | 1406 | case RTM_DELTCLASS: |
1420 | err = cops->delete(q, cl); | 1407 | err = -EOPNOTSUPP; |
1408 | if (cops->delete) | ||
1409 | err = cops->delete(q, cl); | ||
1421 | if (err == 0) | 1410 | if (err == 0) |
1422 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); | 1411 | tclass_notify(skb, n, q, cl, RTM_DELTCLASS); |
1423 | goto out; | 1412 | goto out; |
@@ -1431,7 +1420,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1431 | } | 1420 | } |
1432 | 1421 | ||
1433 | new_cl = cl; | 1422 | new_cl = cl; |
1434 | err = cops->change(q, clid, pid, tca, &new_cl); | 1423 | err = -EOPNOTSUPP; |
1424 | if (cops->change) | ||
1425 | err = cops->change(q, clid, pid, tca, &new_cl); | ||
1435 | if (err == 0) | 1426 | if (err == 0) |
1436 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); | 1427 | tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); |
1437 | 1428 | ||
@@ -1586,8 +1577,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1586 | s_t = cb->args[0]; | 1577 | s_t = cb->args[0]; |
1587 | t = 0; | 1578 | t = 0; |
1588 | 1579 | ||
1589 | dev_queue = netdev_get_tx_queue(dev, 0); | 1580 | if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0) |
1590 | if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0) | ||
1591 | goto done; | 1581 | goto done; |
1592 | 1582 | ||
1593 | dev_queue = &dev->rx_queue; | 1583 | dev_queue = &dev->rx_queue; |
@@ -1707,6 +1697,7 @@ static int __init pktsched_init(void)
1707 | { | 1697 | { |
1708 | register_qdisc(&pfifo_qdisc_ops); | 1698 | register_qdisc(&pfifo_qdisc_ops); |
1709 | register_qdisc(&bfifo_qdisc_ops); | 1699 | register_qdisc(&bfifo_qdisc_ops); |
1700 | register_qdisc(&mq_qdisc_ops); | ||
1710 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); | 1701 | proc_net_fops_create(&init_net, "psched", 0, &psched_fops); |
1711 | 1702 | ||
1712 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); | 1703 | rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); |
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d5798e17a832..5b132c473264 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1621,29 +1621,25 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1621 | { | 1621 | { |
1622 | struct cbq_class *cl = (struct cbq_class*)arg; | 1622 | struct cbq_class *cl = (struct cbq_class*)arg; |
1623 | 1623 | ||
1624 | if (cl) { | 1624 | if (new == NULL) { |
1625 | if (new == NULL) { | 1625 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1626 | new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, | 1626 | &pfifo_qdisc_ops, cl->common.classid); |
1627 | &pfifo_qdisc_ops, | 1627 | if (new == NULL) |
1628 | cl->common.classid); | 1628 | return -ENOBUFS; |
1629 | if (new == NULL) | 1629 | } else { |
1630 | return -ENOBUFS; | ||
1631 | } else { | ||
1632 | #ifdef CONFIG_NET_CLS_ACT | 1630 | #ifdef CONFIG_NET_CLS_ACT |
1633 | if (cl->police == TC_POLICE_RECLASSIFY) | 1631 | if (cl->police == TC_POLICE_RECLASSIFY) |
1634 | new->reshape_fail = cbq_reshape_fail; | 1632 | new->reshape_fail = cbq_reshape_fail; |
1635 | #endif | 1633 | #endif |
1636 | } | ||
1637 | sch_tree_lock(sch); | ||
1638 | *old = cl->q; | ||
1639 | cl->q = new; | ||
1640 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
1641 | qdisc_reset(*old); | ||
1642 | sch_tree_unlock(sch); | ||
1643 | |||
1644 | return 0; | ||
1645 | } | 1634 | } |
1646 | return -ENOENT; | 1635 | sch_tree_lock(sch); |
1636 | *old = cl->q; | ||
1637 | cl->q = new; | ||
1638 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | ||
1639 | qdisc_reset(*old); | ||
1640 | sch_tree_unlock(sch); | ||
1641 | |||
1642 | return 0; | ||
1647 | } | 1643 | } |
1648 | 1644 | ||
1649 | static struct Qdisc * | 1645 | static struct Qdisc * |
@@ -1651,7 +1647,7 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg)
1651 | { | 1647 | { |
1652 | struct cbq_class *cl = (struct cbq_class*)arg; | 1648 | struct cbq_class *cl = (struct cbq_class*)arg; |
1653 | 1649 | ||
1654 | return cl ? cl->q : NULL; | 1650 | return cl->q; |
1655 | } | 1651 | } |
1656 | 1652 | ||
1657 | static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1653 | static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg) |
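cbq_graft() and cbq_leaf() lose their NULL-class branches because the core now resolves the classid through cops->get() and fails with -ENOENT before any class operation runs; hfsc, htb, multiq, and prio below are flattened the same way. A toy model of the tightened contract (illustrative names only):

#include <stdio.h>

struct toy_class { const char *name; };

/* Stand-in for cops->get(): unknown classids yield NULL. */
static struct toy_class *toy_get(unsigned int classid)
{
        static struct toy_class one = { "1:1" };
        return classid == 1 ? &one : NULL;
}

/* Stand-in for cops->leaf(): may now assume cl is valid. */
static const char *toy_leaf(struct toy_class *cl)
{
        return cl->name;
}

int main(void)
{
        struct toy_class *cl = toy_get(1);

        if (cl == NULL)         /* the single, central check */
                return 1;       /* kernel: -ENOENT */
        printf("leaf of %s\n", toy_leaf(cl));
        return 0;
}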
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 27d03816ec3e..4ae6aa562f2b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -37,15 +37,11 @@
37 | * - updates to tree and tree walking are only done under the rtnl mutex. | 37 | * - updates to tree and tree walking are only done under the rtnl mutex. |
38 | */ | 38 | */ |
39 | 39 | ||
40 | static inline int qdisc_qlen(struct Qdisc *q) | ||
41 | { | ||
42 | return q->q.qlen; | ||
43 | } | ||
44 | |||
45 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | 40 | static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) |
46 | { | 41 | { |
47 | q->gso_skb = skb; | 42 | q->gso_skb = skb; |
48 | q->qstats.requeues++; | 43 | q->qstats.requeues++; |
44 | q->q.qlen++; /* it's still part of the queue */ | ||
49 | __netif_schedule(q); | 45 | __netif_schedule(q); |
50 | 46 | ||
51 | return 0; | 47 | return 0; |
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
61 | 57 | ||
62 | /* check the reason of requeuing without tx lock first */ | 58 | /* check the reason of requeuing without tx lock first */ |
63 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | 59 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); |
64 | if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq)) | 60 | if (!netif_tx_queue_stopped(txq) && |
61 | !netif_tx_queue_frozen(txq)) { | ||
65 | q->gso_skb = NULL; | 62 | q->gso_skb = NULL; |
66 | else | 63 | q->q.qlen--; |
64 | } else | ||
67 | skb = NULL; | 65 | skb = NULL; |
68 | } else { | 66 | } else { |
69 | skb = q->dequeue(q); | 67 | skb = q->dequeue(q); |
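These two hunks close an accounting hole: a packet parked in q->gso_skb after the driver reported busy used to disappear from q->q.qlen even though it still had to go out, understating the backlog. A toy counter model of the corrected invariant (not kernel code):

#include <stdio.h>

struct toyq {
        int qlen;               /* what the qdisc reports as its length */
        int gso_parked;         /* models q->gso_skb != NULL */
};

/* Driver said busy: park the skb but keep counting it as queued. */
static void requeue(struct toyq *q)
{
        q->gso_parked = 1;
        q->qlen++;
}

/* Queue woke up: unpark and only now account for the departure. */
static void dequeue(struct toyq *q)
{
        if (q->gso_parked) {
                q->gso_parked = 0;
                q->qlen--;
        }
}

int main(void)
{
        struct toyq q = { 0, 0 };

        requeue(&q);
        printf("qlen while parked: %d\n", q.qlen);      /* 1, not 0 */
        dequeue(&q);
        printf("qlen after resend: %d\n", q.qlen);      /* 0 */
        return 0;
}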
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
103 | } | 101 | } |
104 | 102 | ||
105 | /* | 103 | /* |
106 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. | 104 | * Transmit one skb, and handle the return status as required. Holding the |
107 | * | 105 | * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this |
108 | * __QDISC_STATE_RUNNING guarantees only one CPU can process | 106 | * function. |
109 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for | ||
110 | * this queue. | ||
111 | * | ||
112 | * netif_tx_lock serializes accesses to device driver. | ||
113 | * | ||
114 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, | ||
115 | * if one is grabbed, another must be free. | ||
116 | * | ||
117 | * Note, that this procedure can be called by a watchdog timer | ||
118 | * | 107 | * |
119 | * Returns to the caller: | 108 | * Returns to the caller: |
120 | * 0 - queue is empty or throttled. | 109 | * 0 - queue is empty or throttled. |
121 | * >0 - queue is not empty. | 110 | * >0 - queue is not empty. |
122 | * | ||
123 | */ | 111 | */ |
124 | static inline int qdisc_restart(struct Qdisc *q) | 112 | int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, |
113 | struct net_device *dev, struct netdev_queue *txq, | ||
114 | spinlock_t *root_lock) | ||
125 | { | 115 | { |
126 | struct netdev_queue *txq; | ||
127 | int ret = NETDEV_TX_BUSY; | 116 | int ret = NETDEV_TX_BUSY; |
128 | struct net_device *dev; | ||
129 | spinlock_t *root_lock; | ||
130 | struct sk_buff *skb; | ||
131 | |||
132 | /* Dequeue packet */ | ||
133 | if (unlikely((skb = dequeue_skb(q)) == NULL)) | ||
134 | return 0; | ||
135 | |||
136 | root_lock = qdisc_lock(q); | ||
137 | 117 | ||
138 | /* And release qdisc */ | 118 | /* And release qdisc */ |
139 | spin_unlock(root_lock); | 119 | spin_unlock(root_lock); |
140 | 120 | ||
141 | dev = qdisc_dev(q); | ||
142 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
143 | |||
144 | HARD_TX_LOCK(dev, txq, smp_processor_id()); | 121 | HARD_TX_LOCK(dev, txq, smp_processor_id()); |
145 | if (!netif_tx_queue_stopped(txq) && | 122 | if (!netif_tx_queue_stopped(txq) && |
146 | !netif_tx_queue_frozen(txq)) | 123 | !netif_tx_queue_frozen(txq)) |
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q)
177 | return ret; | 154 | return ret; |
178 | } | 155 | } |
179 | 156 | ||
157 | /* | ||
158 | * NOTE: Called under qdisc_lock(q) with locally disabled BH. | ||
159 | * | ||
160 | * __QDISC_STATE_RUNNING guarantees only one CPU can process | ||
161 | * this qdisc at a time. qdisc_lock(q) serializes queue accesses for | ||
162 | * this queue. | ||
163 | * | ||
164 | * netif_tx_lock serializes accesses to device driver. | ||
165 | * | ||
166 | * qdisc_lock(q) and netif_tx_lock are mutually exclusive, | ||
167 | * if one is grabbed, another must be free. | ||
168 | * | ||
169 | * Note, that this procedure can be called by a watchdog timer | ||
170 | * | ||
171 | * Returns to the caller: | ||
172 | * 0 - queue is empty or throttled. | ||
173 | * >0 - queue is not empty. | ||
174 | * | ||
175 | */ | ||
176 | static inline int qdisc_restart(struct Qdisc *q) | ||
177 | { | ||
178 | struct netdev_queue *txq; | ||
179 | struct net_device *dev; | ||
180 | spinlock_t *root_lock; | ||
181 | struct sk_buff *skb; | ||
182 | |||
183 | /* Dequeue packet */ | ||
184 | skb = dequeue_skb(q); | ||
185 | if (unlikely(!skb)) | ||
186 | return 0; | ||
187 | |||
188 | root_lock = qdisc_lock(q); | ||
189 | dev = qdisc_dev(q); | ||
190 | txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); | ||
191 | |||
192 | return sch_direct_xmit(skb, q, dev, txq, root_lock); | ||
193 | } | ||
194 | |||
180 | void __qdisc_run(struct Qdisc *q) | 195 | void __qdisc_run(struct Qdisc *q) |
181 | { | 196 | { |
182 | unsigned long start_time = jiffies; | 197 | unsigned long start_time = jiffies; |
@@ -391,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
391 | 406 | ||
392 | #define PFIFO_FAST_BANDS 3 | 407 | #define PFIFO_FAST_BANDS 3 |
393 | 408 | ||
394 | static inline struct sk_buff_head *prio2list(struct sk_buff *skb, | 409 | /* |
395 | struct Qdisc *qdisc) | 410 | * Private data for a pfifo_fast scheduler containing: |
411 | * - queues for the three bands | ||
412 | * - bitmap indicating which of the bands contain skbs | ||
413 | */ | ||
414 | struct pfifo_fast_priv { | ||
415 | u32 bitmap; | ||
416 | struct sk_buff_head q[PFIFO_FAST_BANDS]; | ||
417 | }; | ||
418 | |||
419 | /* | ||
420 | * Convert a bitmap to the first band number where an skb is queued, where: | ||
421 | * bitmap=0 means there are no skbs on any band. | ||
422 | * bitmap=1 means there is an skb on band 0. | ||
423 | * bitmap=7 means there are skbs on all 3 bands, etc. | ||
424 | */ | ||
425 | static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0}; | ||
426 | |||
427 | static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv, | ||
428 | int band) | ||
396 | { | 429 | { |
397 | struct sk_buff_head *list = qdisc_priv(qdisc); | 430 | return priv->q + band; |
398 | return list + prio2band[skb->priority & TC_PRIO_MAX]; | ||
399 | } | 431 | } |
400 | 432 | ||
401 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | 433 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) |
402 | { | 434 | { |
403 | struct sk_buff_head *list = prio2list(skb, qdisc); | 435 | if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) { |
436 | int band = prio2band[skb->priority & TC_PRIO_MAX]; | ||
437 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); | ||
438 | struct sk_buff_head *list = band2list(priv, band); | ||
404 | 439 | ||
405 | if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) { | 440 | priv->bitmap |= (1 << band); |
406 | qdisc->q.qlen++; | 441 | qdisc->q.qlen++; |
407 | return __qdisc_enqueue_tail(skb, qdisc, list); | 442 | return __qdisc_enqueue_tail(skb, qdisc, list); |
408 | } | 443 | } |
@@ -412,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
412 | 447 | ||
413 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | 448 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) |
414 | { | 449 | { |
415 | int prio; | 450 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
416 | struct sk_buff_head *list = qdisc_priv(qdisc); | 451 | int band = bitmap2band[priv->bitmap]; |
417 | 452 | ||
418 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 453 | if (likely(band >= 0)) { |
419 | if (!skb_queue_empty(list + prio)) { | 454 | struct sk_buff_head *list = band2list(priv, band); |
420 | qdisc->q.qlen--; | 455 | struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list); |
421 | return __qdisc_dequeue_head(qdisc, list + prio); | 456 | |
422 | } | 457 | qdisc->q.qlen--; |
458 | if (skb_queue_empty(list)) | ||
459 | priv->bitmap &= ~(1 << band); | ||
460 | |||
461 | return skb; | ||
423 | } | 462 | } |
424 | 463 | ||
425 | return NULL; | 464 | return NULL; |
@@ -427,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
427 | 466 | ||
428 | static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) | 467 | static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc) |
429 | { | 468 | { |
430 | int prio; | 469 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
431 | struct sk_buff_head *list = qdisc_priv(qdisc); | 470 | int band = bitmap2band[priv->bitmap]; |
432 | 471 | ||
433 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 472 | if (band >= 0) { |
434 | if (!skb_queue_empty(list + prio)) | 473 | struct sk_buff_head *list = band2list(priv, band); |
435 | return skb_peek(list + prio); | 474 | |
475 | return skb_peek(list); | ||
436 | } | 476 | } |
437 | 477 | ||
438 | return NULL; | 478 | return NULL; |
@@ -441,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
441 | static void pfifo_fast_reset(struct Qdisc* qdisc) | 481 | static void pfifo_fast_reset(struct Qdisc* qdisc) |
442 | { | 482 | { |
443 | int prio; | 483 | int prio; |
444 | struct sk_buff_head *list = qdisc_priv(qdisc); | 484 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
445 | 485 | ||
446 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 486 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
447 | __qdisc_reset_queue(qdisc, list + prio); | 487 | __qdisc_reset_queue(qdisc, band2list(priv, prio)); |
448 | 488 | ||
489 | priv->bitmap = 0; | ||
449 | qdisc->qstats.backlog = 0; | 490 | qdisc->qstats.backlog = 0; |
450 | qdisc->q.qlen = 0; | 491 | qdisc->q.qlen = 0; |
451 | } | 492 | } |
@@ -465,17 +506,17 @@ nla_put_failure:
465 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) | 506 | static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) |
466 | { | 507 | { |
467 | int prio; | 508 | int prio; |
468 | struct sk_buff_head *list = qdisc_priv(qdisc); | 509 | struct pfifo_fast_priv *priv = qdisc_priv(qdisc); |
469 | 510 | ||
470 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 511 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
471 | skb_queue_head_init(list + prio); | 512 | skb_queue_head_init(band2list(priv, prio)); |
472 | 513 | ||
473 | return 0; | 514 | return 0; |
474 | } | 515 | } |
475 | 516 | ||
476 | static struct Qdisc_ops pfifo_fast_ops __read_mostly = { | 517 | struct Qdisc_ops pfifo_fast_ops __read_mostly = { |
477 | .id = "pfifo_fast", | 518 | .id = "pfifo_fast", |
478 | .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), | 519 | .priv_size = sizeof(struct pfifo_fast_priv), |
479 | .enqueue = pfifo_fast_enqueue, | 520 | .enqueue = pfifo_fast_enqueue, |
480 | .dequeue = pfifo_fast_dequeue, | 521 | .dequeue = pfifo_fast_dequeue, |
481 | .peek = pfifo_fast_peek, | 522 | .peek = pfifo_fast_peek, |
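The bitmap replaces pfifo_fast's per-dequeue scan over the three bands with a single table lookup: for three bands, bitmap2band[] is just "index of the lowest set bit, or -1 when empty". A self-contained demo of the table (same values as above):

#include <stdio.h>

#define PFIFO_FAST_BANDS 3

/* Index = bitmap of non-empty bands (bit 0 = band 0, the highest
 * priority); value = lowest set band, -1 when all bands are empty. */
static const int bitmap2band[] = { -1, 0, 1, 0, 2, 0, 1, 0 };

int main(void)
{
        unsigned int bitmap;

        for (bitmap = 0; bitmap < 1u << PFIFO_FAST_BANDS; bitmap++)
                printf("bands 210 = %u%u%u -> dequeue from band %d\n",
                       (bitmap >> 2) & 1, (bitmap >> 1) & 1, bitmap & 1,
                       bitmap2band[bitmap]);
        return 0;
}

An ffs()-style bit scan would compute the same answer; with only eight possible bitmaps, the lookup table is the cheaper choice.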
@@ -547,8 +588,11 @@ void qdisc_reset(struct Qdisc *qdisc)
547 | if (ops->reset) | 588 | if (ops->reset) |
548 | ops->reset(qdisc); | 589 | ops->reset(qdisc); |
549 | 590 | ||
550 | kfree_skb(qdisc->gso_skb); | 591 | if (qdisc->gso_skb) { |
551 | qdisc->gso_skb = NULL; | 592 | kfree_skb(qdisc->gso_skb); |
593 | qdisc->gso_skb = NULL; | ||
594 | qdisc->q.qlen = 0; | ||
595 | } | ||
552 | } | 596 | } |
553 | EXPORT_SYMBOL(qdisc_reset); | 597 | EXPORT_SYMBOL(qdisc_reset); |
554 | 598 | ||
@@ -579,17 +623,29 @@ void qdisc_destroy(struct Qdisc *qdisc)
579 | } | 623 | } |
580 | EXPORT_SYMBOL(qdisc_destroy); | 624 | EXPORT_SYMBOL(qdisc_destroy); |
581 | 625 | ||
582 | static bool dev_all_qdisc_sleeping_noop(struct net_device *dev) | 626 | /* Attach toplevel qdisc to device queue. */ |
627 | struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue, | ||
628 | struct Qdisc *qdisc) | ||
583 | { | 629 | { |
584 | unsigned int i; | 630 | struct Qdisc *oqdisc = dev_queue->qdisc_sleeping; |
631 | spinlock_t *root_lock; | ||
585 | 632 | ||
586 | for (i = 0; i < dev->num_tx_queues; i++) { | 633 | root_lock = qdisc_lock(oqdisc); |
587 | struct netdev_queue *txq = netdev_get_tx_queue(dev, i); | 634 | spin_lock_bh(root_lock); |
588 | 635 | ||
589 | if (txq->qdisc_sleeping != &noop_qdisc) | 636 | /* Prune old scheduler */ |
590 | return false; | 637 | if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) |
591 | } | 638 | qdisc_reset(oqdisc); |
592 | return true; | 639 | |
640 | /* ... and graft new one */ | ||
641 | if (qdisc == NULL) | ||
642 | qdisc = &noop_qdisc; | ||
643 | dev_queue->qdisc_sleeping = qdisc; | ||
644 | rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc); | ||
645 | |||
646 | spin_unlock_bh(root_lock); | ||
647 | |||
648 | return oqdisc; | ||
593 | } | 649 | } |
594 | 650 | ||
595 | static void attach_one_default_qdisc(struct net_device *dev, | 651 | static void attach_one_default_qdisc(struct net_device *dev, |
@@ -605,12 +661,35 @@ static void attach_one_default_qdisc(struct net_device *dev,
605 | printk(KERN_INFO "%s: activation failed\n", dev->name); | 661 | printk(KERN_INFO "%s: activation failed\n", dev->name); |
606 | return; | 662 | return; |
607 | } | 663 | } |
664 | |||
665 | /* Can by-pass the queue discipline for default qdisc */ | ||
666 | qdisc->flags |= TCQ_F_CAN_BYPASS; | ||
608 | } else { | 667 | } else { |
609 | qdisc = &noqueue_qdisc; | 668 | qdisc = &noqueue_qdisc; |
610 | } | 669 | } |
611 | dev_queue->qdisc_sleeping = qdisc; | 670 | dev_queue->qdisc_sleeping = qdisc; |
612 | } | 671 | } |
613 | 672 | ||
673 | static void attach_default_qdiscs(struct net_device *dev) | ||
674 | { | ||
675 | struct netdev_queue *txq; | ||
676 | struct Qdisc *qdisc; | ||
677 | |||
678 | txq = netdev_get_tx_queue(dev, 0); | ||
679 | |||
680 | if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) { | ||
681 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); | ||
682 | dev->qdisc = txq->qdisc_sleeping; | ||
683 | atomic_inc(&dev->qdisc->refcnt); | ||
684 | } else { | ||
685 | qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT); | ||
686 | if (qdisc) { | ||
687 | qdisc->ops->attach(qdisc); | ||
688 | dev->qdisc = qdisc; | ||
689 | } | ||
690 | } | ||
691 | } | ||
692 | |||
614 | static void transition_one_qdisc(struct net_device *dev, | 693 | static void transition_one_qdisc(struct net_device *dev, |
615 | struct netdev_queue *dev_queue, | 694 | struct netdev_queue *dev_queue, |
616 | void *_need_watchdog) | 695 | void *_need_watchdog) |
@@ -638,8 +717,8 @@ void dev_activate(struct net_device *dev)
638 | virtual interfaces | 717 | virtual interfaces |
639 | */ | 718 | */ |
640 | 719 | ||
641 | if (dev_all_qdisc_sleeping_noop(dev)) | 720 | if (dev->qdisc == &noop_qdisc) |
642 | netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); | 721 | attach_default_qdiscs(dev); |
643 | 722 | ||
644 | if (!netif_carrier_ok(dev)) | 723 | if (!netif_carrier_ok(dev)) |
645 | /* Delay activation until next carrier-on event */ | 724 | /* Delay activation until next carrier-on event */ |
@@ -730,6 +809,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
730 | 809 | ||
731 | void dev_init_scheduler(struct net_device *dev) | 810 | void dev_init_scheduler(struct net_device *dev) |
732 | { | 811 | { |
812 | dev->qdisc = &noop_qdisc; | ||
733 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); | 813 | netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc); |
734 | dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); | 814 | dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); |
735 | 815 | ||
@@ -755,5 +835,8 @@ void dev_shutdown(struct net_device *dev)
755 | { | 835 | { |
756 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); | 836 | netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc); |
757 | shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); | 837 | shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc); |
838 | qdisc_destroy(dev->qdisc); | ||
839 | dev->qdisc = &noop_qdisc; | ||
840 | |||
758 | WARN_ON(timer_pending(&dev->watchdog_timer)); | 841 | WARN_ON(timer_pending(&dev->watchdog_timer)); |
759 | } | 842 | } |
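dev_activate() now funnels through attach_default_qdiscs(), whose decision is small enough to state as a function: a real multiqueue device with a non-zero tx queue length gets the new mq root, anything else keeps the old per-queue default (pfifo_fast, or noqueue for zero-length queues). A sketch of just the decision (illustrative, not kernel-verbatim):

#include <stdio.h>
#include <stdbool.h>

static const char *default_root(bool multiqueue, unsigned int tx_queue_len)
{
        if (!multiqueue || tx_queue_len == 0)
                return "per-queue default (pfifo_fast / noqueue)";
        return "mq";
}

int main(void)
{
        printf("eth0 (8 queues, txqueuelen 1000): %s\n",
               default_root(true, 1000));
        printf("lo (1 queue, txqueuelen 0): %s\n",
               default_root(false, 0));
        return 0;
}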
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index dad0144423da..375d64cb1a3d 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1203,8 +1203,6 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1203 | { | 1203 | { |
1204 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1204 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1205 | 1205 | ||
1206 | if (cl == NULL) | ||
1207 | return -ENOENT; | ||
1208 | if (cl->level > 0) | 1206 | if (cl->level > 0) |
1209 | return -EINVAL; | 1207 | return -EINVAL; |
1210 | if (new == NULL) { | 1208 | if (new == NULL) { |
@@ -1228,7 +1226,7 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
1228 | { | 1226 | { |
1229 | struct hfsc_class *cl = (struct hfsc_class *)arg; | 1227 | struct hfsc_class *cl = (struct hfsc_class *)arg; |
1230 | 1228 | ||
1231 | if (cl != NULL && cl->level == 0) | 1229 | if (cl->level == 0) |
1232 | return cl->qdisc; | 1230 | return cl->qdisc; |
1233 | 1231 | ||
1234 | return NULL; | 1232 | return NULL; |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ec4d46399d59..85acab9dc6fd 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1117,30 +1117,29 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1117 | { | 1117 | { |
1118 | struct htb_class *cl = (struct htb_class *)arg; | 1118 | struct htb_class *cl = (struct htb_class *)arg; |
1119 | 1119 | ||
1120 | if (cl && !cl->level) { | 1120 | if (cl->level) |
1121 | if (new == NULL && | 1121 | return -EINVAL; |
1122 | (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, | 1122 | if (new == NULL && |
1123 | &pfifo_qdisc_ops, | 1123 | (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, |
1124 | cl->common.classid)) | 1124 | &pfifo_qdisc_ops, |
1125 | == NULL) | 1125 | cl->common.classid)) == NULL) |
1126 | return -ENOBUFS; | 1126 | return -ENOBUFS; |
1127 | sch_tree_lock(sch); | 1127 | |
1128 | *old = cl->un.leaf.q; | 1128 | sch_tree_lock(sch); |
1129 | cl->un.leaf.q = new; | 1129 | *old = cl->un.leaf.q; |
1130 | if (*old != NULL) { | 1130 | cl->un.leaf.q = new; |
1131 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); | 1131 | if (*old != NULL) { |
1132 | qdisc_reset(*old); | 1132 | qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); |
1133 | } | 1133 | qdisc_reset(*old); |
1134 | sch_tree_unlock(sch); | ||
1135 | return 0; | ||
1136 | } | 1134 | } |
1137 | return -ENOENT; | 1135 | sch_tree_unlock(sch); |
1136 | return 0; | ||
1138 | } | 1137 | } |
1139 | 1138 | ||
1140 | static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) | 1139 | static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg) |
1141 | { | 1140 | { |
1142 | struct htb_class *cl = (struct htb_class *)arg; | 1141 | struct htb_class *cl = (struct htb_class *)arg; |
1143 | return (cl && !cl->level) ? cl->un.leaf.q : NULL; | 1142 | return !cl->level ? cl->un.leaf.q : NULL; |
1144 | } | 1143 | } |
1145 | 1144 | ||
1146 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) | 1145 | static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg) |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 4a2b77374358..a9e646bdb605 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -22,12 +22,6 @@ struct ingress_qdisc_data {
22 | 22 | ||
23 | /* ------------------------- Class/flow operations ------------------------- */ | 23 | /* ------------------------- Class/flow operations ------------------------- */ |
24 | 24 | ||
25 | static int ingress_graft(struct Qdisc *sch, unsigned long arg, | ||
26 | struct Qdisc *new, struct Qdisc **old) | ||
27 | { | ||
28 | return -EOPNOTSUPP; | ||
29 | } | ||
30 | |||
31 | static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) | 25 | static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg) |
32 | { | 26 | { |
33 | return NULL; | 27 | return NULL; |
@@ -48,12 +42,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl)
48 | { | 42 | { |
49 | } | 43 | } |
50 | 44 | ||
51 | static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent, | ||
52 | struct nlattr **tca, unsigned long *arg) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 45 | static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
58 | { | 46 | { |
59 | return; | 47 | return; |
@@ -123,11 +111,9 @@ nla_put_failure:
123 | } | 111 | } |
124 | 112 | ||
125 | static const struct Qdisc_class_ops ingress_class_ops = { | 113 | static const struct Qdisc_class_ops ingress_class_ops = { |
126 | .graft = ingress_graft, | ||
127 | .leaf = ingress_leaf, | 114 | .leaf = ingress_leaf, |
128 | .get = ingress_get, | 115 | .get = ingress_get, |
129 | .put = ingress_put, | 116 | .put = ingress_put, |
130 | .change = ingress_change, | ||
131 | .walk = ingress_walk, | 117 | .walk = ingress_walk, |
132 | .tcf_chain = ingress_find_tcf, | 118 | .tcf_chain = ingress_find_tcf, |
133 | .bind_tcf = ingress_bind_filter, | 119 | .bind_tcf = ingress_bind_filter, |
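With qdisc_graft() now returning -EOPNOTSUPP for a missing cops->graft and tc_ctl_tclass() doing the same for absent change/delete hooks (see the sch_api.c hunks above), ingress no longer needs these do-nothing stubs; multiq, prio, and red below shed theirs for the same reason, and red's NULL-returning find_tcf stub goes away because cls_api.c now checks for a missing tcf_chain hook directly.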
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
new file mode 100644
index 000000000000..dd5ee022f1f7
--- /dev/null
+++ b/net/sched/sch_mq.c
@@ -0,0 +1,235 @@
1 | /* | ||
2 | * net/sched/sch_mq.c Classful multiqueue dummy scheduler | ||
3 | * | ||
4 | * Copyright (c) 2009 Patrick McHardy <kaber@trash.net> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * version 2 as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/skbuff.h> | ||
16 | #include <net/netlink.h> | ||
17 | #include <net/pkt_sched.h> | ||
18 | |||
19 | struct mq_sched { | ||
20 | struct Qdisc **qdiscs; | ||
21 | }; | ||
22 | |||
23 | static void mq_destroy(struct Qdisc *sch) | ||
24 | { | ||
25 | struct net_device *dev = qdisc_dev(sch); | ||
26 | struct mq_sched *priv = qdisc_priv(sch); | ||
27 | unsigned int ntx; | ||
28 | |||
29 | if (!priv->qdiscs) | ||
30 | return; | ||
31 | for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) | ||
32 | qdisc_destroy(priv->qdiscs[ntx]); | ||
33 | kfree(priv->qdiscs); | ||
34 | } | ||
35 | |||
36 | static int mq_init(struct Qdisc *sch, struct nlattr *opt) | ||
37 | { | ||
38 | struct net_device *dev = qdisc_dev(sch); | ||
39 | struct mq_sched *priv = qdisc_priv(sch); | ||
40 | struct netdev_queue *dev_queue; | ||
41 | struct Qdisc *qdisc; | ||
42 | unsigned int ntx; | ||
43 | |||
44 | if (sch->parent != TC_H_ROOT) | ||
45 | return -EOPNOTSUPP; | ||
46 | |||
47 | if (!netif_is_multiqueue(dev)) | ||
48 | return -EOPNOTSUPP; | ||
49 | |||
50 | /* pre-allocate qdiscs, attachment can't fail */ | ||
51 | priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), | ||
52 | GFP_KERNEL); | ||
53 | if (priv->qdiscs == NULL) | ||
54 | return -ENOMEM; | ||
55 | |||
56 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
57 | dev_queue = netdev_get_tx_queue(dev, ntx); | ||
58 | qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops, | ||
59 | TC_H_MAKE(TC_H_MAJ(sch->handle), | ||
60 | TC_H_MIN(ntx + 1))); | ||
61 | if (qdisc == NULL) | ||
62 | goto err; | ||
63 | qdisc->flags |= TCQ_F_CAN_BYPASS; | ||
64 | priv->qdiscs[ntx] = qdisc; | ||
65 | } | ||
66 | |||
67 | sch->flags |= TCQ_F_MQROOT; | ||
68 | return 0; | ||
69 | |||
70 | err: | ||
71 | mq_destroy(sch); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | |||
75 | static void mq_attach(struct Qdisc *sch) | ||
76 | { | ||
77 | struct net_device *dev = qdisc_dev(sch); | ||
78 | struct mq_sched *priv = qdisc_priv(sch); | ||
79 | struct Qdisc *qdisc; | ||
80 | unsigned int ntx; | ||
81 | |||
82 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
83 | qdisc = priv->qdiscs[ntx]; | ||
84 | qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc); | ||
85 | if (qdisc) | ||
86 | qdisc_destroy(qdisc); | ||
87 | } | ||
88 | kfree(priv->qdiscs); | ||
89 | priv->qdiscs = NULL; | ||
90 | } | ||
91 | |||
92 | static int mq_dump(struct Qdisc *sch, struct sk_buff *skb) | ||
93 | { | ||
94 | struct net_device *dev = qdisc_dev(sch); | ||
95 | struct Qdisc *qdisc; | ||
96 | unsigned int ntx; | ||
97 | |||
98 | sch->q.qlen = 0; | ||
99 | memset(&sch->bstats, 0, sizeof(sch->bstats)); | ||
100 | memset(&sch->qstats, 0, sizeof(sch->qstats)); | ||
101 | |||
102 | for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { | ||
103 | qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping; | ||
104 | spin_lock_bh(qdisc_lock(qdisc)); | ||
105 | sch->q.qlen += qdisc->q.qlen; | ||
106 | sch->bstats.bytes += qdisc->bstats.bytes; | ||
107 | sch->bstats.packets += qdisc->bstats.packets; | ||
108 | sch->qstats.qlen += qdisc->qstats.qlen; | ||
109 | sch->qstats.backlog += qdisc->qstats.backlog; | ||
110 | sch->qstats.drops += qdisc->qstats.drops; | ||
111 | sch->qstats.requeues += qdisc->qstats.requeues; | ||
112 | sch->qstats.overlimits += qdisc->qstats.overlimits; | ||
113 | spin_unlock_bh(qdisc_lock(qdisc)); | ||
114 | } | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl) | ||
119 | { | ||
120 | struct net_device *dev = qdisc_dev(sch); | ||
121 | unsigned long ntx = cl - 1; | ||
122 | |||
123 | if (ntx >= dev->num_tx_queues) | ||
124 | return NULL; | ||
125 | return netdev_get_tx_queue(dev, ntx); | ||
126 | } | ||
127 | |||
128 | static unsigned int mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm) | ||
129 | { | ||
130 | unsigned int ntx = TC_H_MIN(tcm->tcm_parent); | ||
131 | |||
132 | if (!mq_queue_get(sch, ntx)) | ||
133 | return 0; | ||
134 | return ntx - 1; | ||
135 | } | ||
136 | |||
137 | static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new, | ||
138 | struct Qdisc **old) | ||
139 | { | ||
140 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
141 | struct net_device *dev = qdisc_dev(sch); | ||
142 | |||
143 | if (dev->flags & IFF_UP) | ||
144 | dev_deactivate(dev); | ||
145 | |||
146 | *old = dev_graft_qdisc(dev_queue, new); | ||
147 | |||
148 | if (dev->flags & IFF_UP) | ||
149 | dev_activate(dev); | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl) | ||
154 | { | ||
155 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
156 | |||
157 | return dev_queue->qdisc_sleeping; | ||
158 | } | ||
159 | |||
160 | static unsigned long mq_get(struct Qdisc *sch, u32 classid) | ||
161 | { | ||
162 | unsigned int ntx = TC_H_MIN(classid); | ||
163 | |||
164 | if (!mq_queue_get(sch, ntx)) | ||
165 | return 0; | ||
166 | return ntx; | ||
167 | } | ||
168 | |||
169 | static void mq_put(struct Qdisc *sch, unsigned long cl) | ||
170 | { | ||
171 | return; | ||
172 | } | ||
173 | |||
174 | static int mq_dump_class(struct Qdisc *sch, unsigned long cl, | ||
175 | struct sk_buff *skb, struct tcmsg *tcm) | ||
176 | { | ||
177 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
178 | |||
179 | tcm->tcm_parent = TC_H_ROOT; | ||
180 | tcm->tcm_handle |= TC_H_MIN(cl); | ||
181 | tcm->tcm_info = dev_queue->qdisc_sleeping->handle; | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl, | ||
186 | struct gnet_dump *d) | ||
187 | { | ||
188 | struct netdev_queue *dev_queue = mq_queue_get(sch, cl); | ||
189 | |||
190 | sch = dev_queue->qdisc_sleeping; | ||
191 | if (gnet_stats_copy_basic(d, &sch->bstats) < 0 || | ||
192 | gnet_stats_copy_queue(d, &sch->qstats) < 0) | ||
193 | return -1; | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | ||
198 | { | ||
199 | struct net_device *dev = qdisc_dev(sch); | ||
200 | unsigned int ntx; | ||
201 | |||
202 | if (arg->stop) | ||
203 | return; | ||
204 | |||
205 | arg->count = arg->skip; | ||
206 | for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { | ||
207 | if (arg->fn(sch, ntx + 1, arg) < 0) { | ||
208 | arg->stop = 1; | ||
209 | break; | ||
210 | } | ||
211 | arg->count++; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | static const struct Qdisc_class_ops mq_class_ops = { | ||
216 | .select_queue = mq_select_queue, | ||
217 | .graft = mq_graft, | ||
218 | .leaf = mq_leaf, | ||
219 | .get = mq_get, | ||
220 | .put = mq_put, | ||
221 | .walk = mq_walk, | ||
222 | .dump = mq_dump_class, | ||
223 | .dump_stats = mq_dump_class_stats, | ||
224 | }; | ||
225 | |||
226 | struct Qdisc_ops mq_qdisc_ops __read_mostly = { | ||
227 | .cl_ops = &mq_class_ops, | ||
228 | .id = "mq", | ||
229 | .priv_size = sizeof(struct mq_sched), | ||
230 | .init = mq_init, | ||
231 | .destroy = mq_destroy, | ||
232 | .attach = mq_attach, | ||
233 | .dump = mq_dump, | ||
234 | .owner = THIS_MODULE, | ||
235 | }; | ||
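mq exposes each hardware tx queue as a class whose minor number is the queue index plus one, keeping minor 0 reserved; mq_select_queue() inverts the mapping so a child qdisc created with parent 1:N lands on queue N-1. A small demo of the arithmetic, using local copies of the TC_H_* handle macros from the kernel's pkt_sched ABI:

#include <stdio.h>

#define TC_H_MAJ(h)            ((h) & 0xFFFF0000U)
#define TC_H_MIN(h)            ((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)    (TC_H_MAJ(maj) | TC_H_MIN(min))

int main(void)
{
        unsigned int handle = 0x00010000;       /* mq root "1:" */
        unsigned int ntx;

        for (ntx = 0; ntx < 4; ntx++) {
                unsigned int classid = TC_H_MAKE(handle, ntx + 1);

                printf("tx queue %u <-> class %x:%x\n", ntx,
                       TC_H_MAJ(classid) >> 16, TC_H_MIN(classid));
        }
        /* mq_select_queue(): parent 1:3 -> tx queue 2 */
        printf("parent 1:3 -> tx queue %u\n", TC_H_MIN(0x00010003) - 1);
        return 0;
}

In tc terms this means a per-queue child can be attached with something like 'tc qdisc add dev eth0 parent 1:2 sfq' once mq is the root.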
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 912731203047..069f81c97277 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -298,9 +298,6 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
298 | struct multiq_sched_data *q = qdisc_priv(sch); | 298 | struct multiq_sched_data *q = qdisc_priv(sch); |
299 | unsigned long band = arg - 1; | 299 | unsigned long band = arg - 1; |
300 | 300 | ||
301 | if (band >= q->bands) | ||
302 | return -EINVAL; | ||
303 | |||
304 | if (new == NULL) | 301 | if (new == NULL) |
305 | new = &noop_qdisc; | 302 | new = &noop_qdisc; |
306 | 303 | ||
@@ -320,9 +317,6 @@ multiq_leaf(struct Qdisc *sch, unsigned long arg)
320 | struct multiq_sched_data *q = qdisc_priv(sch); | 317 | struct multiq_sched_data *q = qdisc_priv(sch); |
321 | unsigned long band = arg - 1; | 318 | unsigned long band = arg - 1; |
322 | 319 | ||
323 | if (band >= q->bands) | ||
324 | return NULL; | ||
325 | |||
326 | return q->queues[band]; | 320 | return q->queues[band]; |
327 | } | 321 | } |
328 | 322 | ||
@@ -348,36 +342,13 @@ static void multiq_put(struct Qdisc *q, unsigned long cl)
348 | return; | 342 | return; |
349 | } | 343 | } |
350 | 344 | ||
351 | static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent, | ||
352 | struct nlattr **tca, unsigned long *arg) | ||
353 | { | ||
354 | unsigned long cl = *arg; | ||
355 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
356 | |||
357 | if (cl - 1 > q->bands) | ||
358 | return -ENOENT; | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | static int multiq_delete(struct Qdisc *sch, unsigned long cl) | ||
363 | { | ||
364 | struct multiq_sched_data *q = qdisc_priv(sch); | ||
365 | if (cl - 1 > q->bands) | ||
366 | return -ENOENT; | ||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | |||
371 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, | 345 | static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, |
372 | struct sk_buff *skb, struct tcmsg *tcm) | 346 | struct sk_buff *skb, struct tcmsg *tcm) |
373 | { | 347 | { |
374 | struct multiq_sched_data *q = qdisc_priv(sch); | 348 | struct multiq_sched_data *q = qdisc_priv(sch); |
375 | 349 | ||
376 | if (cl - 1 > q->bands) | ||
377 | return -ENOENT; | ||
378 | tcm->tcm_handle |= TC_H_MIN(cl); | 350 | tcm->tcm_handle |= TC_H_MIN(cl); |
379 | if (q->queues[cl-1]) | 351 | tcm->tcm_info = q->queues[cl-1]->handle; |
380 | tcm->tcm_info = q->queues[cl-1]->handle; | ||
381 | return 0; | 352 | return 0; |
382 | } | 353 | } |
383 | 354 | ||
@@ -430,8 +401,6 @@ static const struct Qdisc_class_ops multiq_class_ops = {
430 | .leaf = multiq_leaf, | 401 | .leaf = multiq_leaf, |
431 | .get = multiq_get, | 402 | .get = multiq_get, |
432 | .put = multiq_put, | 403 | .put = multiq_put, |
433 | .change = multiq_change, | ||
434 | .delete = multiq_delete, | ||
435 | .walk = multiq_walk, | 404 | .walk = multiq_walk, |
436 | .tcf_chain = multiq_find_tcf, | 405 | .tcf_chain = multiq_find_tcf, |
437 | .bind_tcf = multiq_bind, | 406 | .bind_tcf = multiq_bind, |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 94cecef70145..0f73c412d04b 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -262,9 +262,6 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
262 | struct prio_sched_data *q = qdisc_priv(sch); | 262 | struct prio_sched_data *q = qdisc_priv(sch); |
263 | unsigned long band = arg - 1; | 263 | unsigned long band = arg - 1; |
264 | 264 | ||
265 | if (band >= q->bands) | ||
266 | return -EINVAL; | ||
267 | |||
268 | if (new == NULL) | 265 | if (new == NULL) |
269 | new = &noop_qdisc; | 266 | new = &noop_qdisc; |
270 | 267 | ||
@@ -284,9 +281,6 @@ prio_leaf(struct Qdisc *sch, unsigned long arg)
284 | struct prio_sched_data *q = qdisc_priv(sch); | 281 | struct prio_sched_data *q = qdisc_priv(sch); |
285 | unsigned long band = arg - 1; | 282 | unsigned long band = arg - 1; |
286 | 283 | ||
287 | if (band >= q->bands) | ||
288 | return NULL; | ||
289 | |||
290 | return q->queues[band]; | 284 | return q->queues[band]; |
291 | } | 285 | } |
292 | 286 | ||
@@ -311,35 +305,13 @@ static void prio_put(struct Qdisc *q, unsigned long cl)
311 | return; | 305 | return; |
312 | } | 306 | } |
313 | 307 | ||
314 | static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg) | ||
315 | { | ||
316 | unsigned long cl = *arg; | ||
317 | struct prio_sched_data *q = qdisc_priv(sch); | ||
318 | |||
319 | if (cl - 1 > q->bands) | ||
320 | return -ENOENT; | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int prio_delete(struct Qdisc *sch, unsigned long cl) | ||
325 | { | ||
326 | struct prio_sched_data *q = qdisc_priv(sch); | ||
327 | if (cl - 1 > q->bands) | ||
328 | return -ENOENT; | ||
329 | return 0; | ||
330 | } | ||
331 | |||
332 | |||
333 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, | 308 | static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, |
334 | struct tcmsg *tcm) | 309 | struct tcmsg *tcm) |
335 | { | 310 | { |
336 | struct prio_sched_data *q = qdisc_priv(sch); | 311 | struct prio_sched_data *q = qdisc_priv(sch); |
337 | 312 | ||
338 | if (cl - 1 > q->bands) | ||
339 | return -ENOENT; | ||
340 | tcm->tcm_handle |= TC_H_MIN(cl); | 313 | tcm->tcm_handle |= TC_H_MIN(cl); |
341 | if (q->queues[cl-1]) | 314 | tcm->tcm_info = q->queues[cl-1]->handle; |
342 | tcm->tcm_info = q->queues[cl-1]->handle; | ||
343 | return 0; | 315 | return 0; |
344 | } | 316 | } |
345 | 317 | ||
@@ -392,8 +364,6 @@ static const struct Qdisc_class_ops prio_class_ops = { | |||
392 | .leaf = prio_leaf, | 364 | .leaf = prio_leaf, |
393 | .get = prio_get, | 365 | .get = prio_get, |
394 | .put = prio_put, | 366 | .put = prio_put, |
395 | .change = prio_change, | ||
396 | .delete = prio_delete, | ||
397 | .walk = prio_walk, | 367 | .walk = prio_walk, |
398 | .tcf_chain = prio_find_tcf, | 368 | .tcf_chain = prio_find_tcf, |
399 | .bind_tcf = prio_bind, | 369 | .bind_tcf = prio_bind, |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 2bdf241f6315..072cdf442f8e 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
@@ -268,8 +268,6 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl, | |||
268 | { | 268 | { |
269 | struct red_sched_data *q = qdisc_priv(sch); | 269 | struct red_sched_data *q = qdisc_priv(sch); |
270 | 270 | ||
271 | if (cl != 1) | ||
272 | return -ENOENT; | ||
273 | tcm->tcm_handle |= TC_H_MIN(1); | 271 | tcm->tcm_handle |= TC_H_MIN(1); |
274 | tcm->tcm_info = q->qdisc->handle; | 272 | tcm->tcm_info = q->qdisc->handle; |
275 | return 0; | 273 | return 0; |
@@ -308,17 +306,6 @@ static void red_put(struct Qdisc *sch, unsigned long arg) | |||
308 | return; | 306 | return; |
309 | } | 307 | } |
310 | 308 | ||
311 | static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
312 | struct nlattr **tca, unsigned long *arg) | ||
313 | { | ||
314 | return -ENOSYS; | ||
315 | } | ||
316 | |||
317 | static int red_delete(struct Qdisc *sch, unsigned long cl) | ||
318 | { | ||
319 | return -ENOSYS; | ||
320 | } | ||
321 | |||
322 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 309 | static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
323 | { | 310 | { |
324 | if (!walker->stop) { | 311 | if (!walker->stop) { |
@@ -331,20 +318,12 @@ static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
331 | } | 318 | } |
332 | } | 319 | } |
333 | 320 | ||
334 | static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl) | ||
335 | { | ||
336 | return NULL; | ||
337 | } | ||
338 | |||
339 | static const struct Qdisc_class_ops red_class_ops = { | 321 | static const struct Qdisc_class_ops red_class_ops = { |
340 | .graft = red_graft, | 322 | .graft = red_graft, |
341 | .leaf = red_leaf, | 323 | .leaf = red_leaf, |
342 | .get = red_get, | 324 | .get = red_get, |
343 | .put = red_put, | 325 | .put = red_put, |
344 | .change = red_change_class, | ||
345 | .delete = red_delete, | ||
346 | .walk = red_walk, | 326 | .walk = red_walk, |
347 | .tcf_chain = red_find_tcf, | ||
348 | .dump = red_dump_class, | 327 | .dump = red_dump_class, |
349 | }; | 328 | }; |
350 | 329 | ||
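sch_red above (and sch_sfq and sch_tbf below) take the complementary approach for operations that can never succeed: rather than keeping -ENOSYS/-EOPNOTSUPP stubs, the callbacks are removed and left NULL. That only works if the core treats a NULL callback as "not supported". A sketch of the caller-side convention this relies on (the helper name and exact shape are assumptions, not code from this diff):

	/*
	 * If a classless qdisc leaves ->change NULL, fail the request
	 * cleanly instead of calling through a NULL pointer.
	 */
	static int tclass_try_change(struct Qdisc *q, u32 clid, u32 parentid,
				     struct nlattr **tca, unsigned long *cl)
	{
		const struct Qdisc_class_ops *cops = q->ops->cl_ops;

		if (cops->change == NULL)
			return -EOPNOTSUPP;
		return cops->change(q, clid, parentid, tca, cl);
	}

The same NULL test presumably covers ->delete and ->tcf_chain, which is why the -ENOSYS stubs and the always-NULL red_find_tcf/tbf_find_tcf can go as well.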
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 8706920a6d45..cb21380c0605 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -496,12 +496,6 @@ nla_put_failure: | |||
496 | return -1; | 496 | return -1; |
497 | } | 497 | } |
498 | 498 | ||
499 | static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
500 | struct nlattr **tca, unsigned long *arg) | ||
501 | { | ||
502 | return -EOPNOTSUPP; | ||
503 | } | ||
504 | |||
505 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) | 499 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) |
506 | { | 500 | { |
507 | return 0; | 501 | return 0; |
@@ -560,7 +554,6 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) | |||
560 | 554 | ||
561 | static const struct Qdisc_class_ops sfq_class_ops = { | 555 | static const struct Qdisc_class_ops sfq_class_ops = { |
562 | .get = sfq_get, | 556 | .get = sfq_get, |
563 | .change = sfq_change_class, | ||
564 | .tcf_chain = sfq_find_tcf, | 557 | .tcf_chain = sfq_find_tcf, |
565 | .dump = sfq_dump_class, | 558 | .dump = sfq_dump_class, |
566 | .dump_stats = sfq_dump_class_stats, | 559 | .dump_stats = sfq_dump_class_stats, |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index e22dfe85e43e..8fb8107ab188 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
@@ -368,9 +368,6 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl, | |||
368 | { | 368 | { |
369 | struct tbf_sched_data *q = qdisc_priv(sch); | 369 | struct tbf_sched_data *q = qdisc_priv(sch); |
370 | 370 | ||
371 | if (cl != 1) /* only one class */ | ||
372 | return -ENOENT; | ||
373 | |||
374 | tcm->tcm_handle |= TC_H_MIN(1); | 371 | tcm->tcm_handle |= TC_H_MIN(1); |
375 | tcm->tcm_info = q->qdisc->handle; | 372 | tcm->tcm_info = q->qdisc->handle; |
376 | 373 | ||
@@ -410,17 +407,6 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg) | |||
410 | { | 407 | { |
411 | } | 408 | } |
412 | 409 | ||
413 | static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid, | ||
414 | struct nlattr **tca, unsigned long *arg) | ||
415 | { | ||
416 | return -ENOSYS; | ||
417 | } | ||
418 | |||
419 | static int tbf_delete(struct Qdisc *sch, unsigned long arg) | ||
420 | { | ||
421 | return -ENOSYS; | ||
422 | } | ||
423 | |||
424 | static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) | 410 | static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
425 | { | 411 | { |
426 | if (!walker->stop) { | 412 | if (!walker->stop) { |
@@ -433,21 +419,13 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) | |||
433 | } | 419 | } |
434 | } | 420 | } |
435 | 421 | ||
436 | static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl) | ||
437 | { | ||
438 | return NULL; | ||
439 | } | ||
440 | |||
441 | static const struct Qdisc_class_ops tbf_class_ops = | 422 | static const struct Qdisc_class_ops tbf_class_ops = |
442 | { | 423 | { |
443 | .graft = tbf_graft, | 424 | .graft = tbf_graft, |
444 | .leaf = tbf_leaf, | 425 | .leaf = tbf_leaf, |
445 | .get = tbf_get, | 426 | .get = tbf_get, |
446 | .put = tbf_put, | 427 | .put = tbf_put, |
447 | .change = tbf_change_class, | ||
448 | .delete = tbf_delete, | ||
449 | .walk = tbf_walk, | 428 | .walk = tbf_walk, |
450 | .tcf_chain = tbf_find_tcf, | ||
451 | .dump = tbf_dump_class, | 429 | .dump = tbf_dump_class, |
452 | }; | 430 | }; |
453 | 431 | ||
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c index 9c002b6e0533..5a002c247231 100644 --- a/net/sched/sch_teql.c +++ b/net/sched/sch_teql.c | |||
@@ -268,7 +268,7 @@ static inline int teql_resolve(struct sk_buff *skb, | |||
268 | return __teql_resolve(skb, skb_res, dev); | 268 | return __teql_resolve(skb, skb_res, dev); |
269 | } | 269 | } |
270 | 270 | ||
271 | static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) | 271 | static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev) |
272 | { | 272 | { |
273 | struct teql_master *master = netdev_priv(dev); | 273 | struct teql_master *master = netdev_priv(dev); |
274 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); | 274 | struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); |
@@ -307,14 +307,14 @@ restart: | |||
307 | 307 | ||
308 | if (!netif_tx_queue_stopped(slave_txq) && | 308 | if (!netif_tx_queue_stopped(slave_txq) && |
309 | !netif_tx_queue_frozen(slave_txq) && | 309 | !netif_tx_queue_frozen(slave_txq) && |
310 | slave_ops->ndo_start_xmit(skb, slave) == 0) { | 310 | slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) { |
311 | txq_trans_update(slave_txq); | 311 | txq_trans_update(slave_txq); |
312 | __netif_tx_unlock(slave_txq); | 312 | __netif_tx_unlock(slave_txq); |
313 | master->slaves = NEXT_SLAVE(q); | 313 | master->slaves = NEXT_SLAVE(q); |
314 | netif_wake_queue(dev); | 314 | netif_wake_queue(dev); |
315 | txq->tx_packets++; | 315 | txq->tx_packets++; |
316 | txq->tx_bytes += length; | 316 | txq->tx_bytes += length; |
317 | return 0; | 317 | return NETDEV_TX_OK; |
318 | } | 318 | } |
319 | __netif_tx_unlock(slave_txq); | 319 | __netif_tx_unlock(slave_txq); |
320 | } | 320 | } |
@@ -323,7 +323,7 @@ restart: | |||
323 | break; | 323 | break; |
324 | case 1: | 324 | case 1: |
325 | master->slaves = NEXT_SLAVE(q); | 325 | master->slaves = NEXT_SLAVE(q); |
326 | return 0; | 326 | return NETDEV_TX_OK; |
327 | default: | 327 | default: |
328 | nores = 1; | 328 | nores = 1; |
329 | break; | 329 | break; |
@@ -345,7 +345,7 @@ restart: | |||
345 | drop: | 345 | drop: |
346 | txq->tx_dropped++; | 346 | txq->tx_dropped++; |
347 | dev_kfree_skb(skb); | 347 | dev_kfree_skb(skb); |
348 | return 0; | 348 | return NETDEV_TX_OK; |
349 | } | 349 | } |
350 | 350 | ||
351 | static int teql_master_open(struct net_device *dev) | 351 | static int teql_master_open(struct net_device *dev) |
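The teql hunks convert the master transmit path to the typed netdev_tx_t return convention: NETDEV_TX_OK whenever the skb has been consumed (including the drop path, where it is freed), NETDEV_TX_BUSY only when the skb is left untouched for the core to requeue. A minimal, self-contained sketch of that contract (example_xmit is hypothetical, not part of this diff):

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (netif_queue_stopped(dev))
			return NETDEV_TX_BUSY;	/* skb untouched; core will requeue */

		dev_kfree_skb(skb);		/* consumed here (dropped) */
		return NETDEV_TX_OK;		/* consumed, even though not sent */
	}

Comparing the slave's ndo_start_xmit result against NETDEV_TX_OK instead of the literal 0 matches the typed API rather than relying on NETDEV_TX_OK happening to be zero.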