Diffstat (limited to 'net/sched/sch_cbq.c')
-rw-r--r--	net/sched/sch_cbq.c	155
1 file changed, 78 insertions(+), 77 deletions(-)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 2a3c97f7dc63..f1d2f8ec8b4c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -73,11 +73,10 @@ struct cbq_sched_data;
 
 struct cbq_class
 {
-	struct cbq_class	*next;		/* hash table link */
+	struct Qdisc_class_common common;
 	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
 
 /* Parameters */
-	u32			classid;
 	unsigned char		priority;	/* class priority */
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
@@ -144,7 +143,7 @@ struct cbq_class
 
 struct cbq_sched_data
 {
-	struct cbq_class	*classes[16];		/* Hash table of all classes */
+	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
 	int			nclasses[TC_CBQ_MAXPRIO+1];
 	unsigned		quanta[TC_CBQ_MAXPRIO+1];
 
@@ -177,23 +176,15 @@ struct cbq_sched_data
 
 #define L2T(cl,len)	qdisc_l2t((cl)->R_tab,len)
 
-
-static __inline__ unsigned cbq_hash(u32 h)
-{
-	h ^= h>>8;
-	h ^= h>>4;
-	return h&0xF;
-}
-
 static __inline__ struct cbq_class *
 cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
 {
-	struct cbq_class *cl;
+	struct Qdisc_class_common *clc;
 
-	for (cl = q->classes[cbq_hash(classid)]; cl; cl = cl->next)
-		if (cl->classid == classid)
-			return cl;
-	return NULL;
+	clc = qdisc_class_find(&q->clhash, classid);
+	if (clc == NULL)
+		return NULL;
+	return container_of(clc, struct cbq_class, common);
 }
 
 #ifdef CONFIG_NET_CLS_ACT
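
The fixed 16-bucket table and the open-coded cbq_hash() are gone; lookup now goes through the generic class-hash helpers. For reference, a sketch of those helpers, assuming the include/net/sch_generic.h definitions of this kernel generation (not part of this diff):

	/* Same bit-mixing as the removed cbq_hash(), but masked with the
	 * dynamic table size instead of a hard-coded 0xF. */
	static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
	{
		id ^= id >> 8;
		id ^= id >> 4;
		return id & mask;
	}

	static inline struct Qdisc_class_common *
	qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
	{
		struct Qdisc_class_common *cl;
		struct hlist_node *n;
		unsigned int h;

		h = qdisc_class_hash(id, hash->hashmask);
		hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
			if (cl->classid == id)
				return cl;
		}
		return NULL;
	}

The helper returns the embedded struct Qdisc_class_common; container_of() maps it back to the enclosing cbq_class. That is why cbq_class gains a `common` member in the first hunk and loses its private `next` link and `classid` field.
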
@@ -379,7 +370,6 @@ static int
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	int len = skb->len;
 	int uninitialized_var(ret);
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
@@ -396,10 +386,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 		cl->q->__parent = sch;
 #endif
-		if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+		ret = qdisc_enqueue(skb, cl->q);
+		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
-			sch->bstats.bytes+=len;
+			sch->bstats.bytes += qdisc_pkt_len(skb);
 			cbq_mark_toplevel(q, cl);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
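
The removed `int len = skb->len` caching becomes unnecessary: the packet length is recorded in the skb control block when the packet enters the root qdisc, and qdisc_pkt_len() reads it back after the child enqueue. A minimal sketch of the accessors, assuming this generation's struct qdisc_skb_cb layout (illustrative only):

	struct qdisc_skb_cb {
		unsigned int	pkt_len;	/* recorded at root enqueue,
						 * possibly adjusted by size tables */
	};

	static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
	{
		return (struct qdisc_skb_cb *)skb->cb;
	}

	static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
	{
		return qdisc_skb_cb(skb)->pkt_len;
	}

Since the byte counter is only updated on NET_XMIT_SUCCESS, the skb is still alive (queued in cl->q) at that point, so reading its cb is safe.
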
@@ -659,14 +650,13 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(sch->dev);
+	__netif_schedule(sch);
 	return HRTIMER_NORESTART;
 }
 
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-	int len = skb->len;
 	struct Qdisc *sch = child->__parent;
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = q->rx_class;
@@ -680,10 +670,10 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		q->rx_class = cl;
 		cl->q->__parent = sch;
 
-		if (cl->q->enqueue(skb, cl->q) == 0) {
+		if (qdisc_enqueue(skb, cl->q) == 0) {
 			sch->q.qlen++;
 			sch->bstats.packets++;
-			sch->bstats.bytes+=len;
+			sch->bstats.bytes += qdisc_pkt_len(skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
@@ -889,7 +879,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 			if (skb == NULL)
 				goto skip_class;
 
-			cl->deficit -= skb->len;
+			cl->deficit -= qdisc_pkt_len(skb);
 			q->tx_class = cl;
 			q->tx_borrowed = borrow;
 			if (borrow != cl) {
@@ -897,11 +887,11 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 				borrow->xstats.borrows++;
 				cl->xstats.borrows++;
 #else
-				borrow->xstats.borrows += skb->len;
-				cl->xstats.borrows += skb->len;
+				borrow->xstats.borrows += qdisc_pkt_len(skb);
+				cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
 			}
-			q->tx_len = skb->len;
+			q->tx_len = qdisc_pkt_len(skb);
 
 			if (cl->deficit <= 0) {
 				q->active[prio] = cl;
@@ -1071,13 +1061,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 {
 	struct cbq_class *cl;
-	unsigned h;
+	struct hlist_node *n;
+	unsigned int h;
 
 	if (q->quanta[prio] == 0)
 		return;
 
-	for (h=0; h<16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
 			   arithmetic overflows!
 			 */
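
The extra `struct hlist_node *n` locals appearing in this and the following hunks are demanded by the hlist API of this era: hlist_for_each_entry(pos, node, head, member) takes an explicit node cursor (the argument was dropped only in v3.9). The per-bucket walk used throughout the file follows this pattern:

	struct cbq_class *cl;
	struct hlist_node *n;
	unsigned int h;

	for (h = 0; h < q->clhash.hashsize; h++)
		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
			/* cl visits every class, bucket by bucket */
		}
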
@@ -1085,9 +1076,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 			cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 				q->quanta[prio];
 		}
-		if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
-			printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->classid, cl->quantum);
-			cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+		if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
+			printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+			cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 		}
 	}
 }
@@ -1114,10 +1105,12 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 		if (split->defaults[i])
 			continue;
 
-		for (h=0; h<16; h++) {
+		for (h = 0; h < q->clhash.hashsize; h++) {
+			struct hlist_node *n;
 			struct cbq_class *c;
 
-			for (c = q->classes[h]; c; c = c->next) {
+			hlist_for_each_entry(c, n, &q->clhash.hash[h],
+					     common.hnode) {
 				if (c->split == split && c->level < level &&
 				    c->defmap&(1<<i)) {
 					split->defaults[i] = c;
@@ -1135,12 +1128,12 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 	if (splitid == 0) {
 		if ((split = cl->split) == NULL)
 			return;
-		splitid = split->classid;
+		splitid = split->common.classid;
 	}
 
-	if (split == NULL || split->classid != splitid) {
+	if (split == NULL || split->common.classid != splitid) {
 		for (split = cl->tparent; split; split = split->tparent)
-			if (split->classid == splitid)
+			if (split->common.classid == splitid)
 				break;
 	}
 
@@ -1163,13 +1156,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 	struct cbq_class *cl, **clp;
 	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
 
-	for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
-		if (cl == this) {
-			*clp = cl->next;
-			cl->next = NULL;
-			break;
-		}
-	}
+	qdisc_class_hash_remove(&q->clhash, &this->common);
 
 	if (this->tparent) {
 		clp=&this->sibling;
@@ -1195,12 +1182,10 @@ static void cbq_unlink_class(struct cbq_class *this)
 static void cbq_link_class(struct cbq_class *this)
 {
 	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-	unsigned h = cbq_hash(this->classid);
 	struct cbq_class *parent = this->tparent;
 
 	this->sibling = this;
-	this->next = q->classes[h];
-	q->classes[h] = this;
+	qdisc_class_hash_insert(&q->clhash, &this->common);
 
 	if (parent == NULL)
 		return;
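
Linking and unlinking likewise shrink to single calls. The helpers are small; roughly, assuming the net/sched/sch_api.c implementations of this generation (sketch, not part of this diff):

	void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
				     struct Qdisc_class_common *cl)
	{
		unsigned int h;

		h = qdisc_class_hash(cl->classid, clhash->hashmask);
		hlist_add_head(&cl->hnode, &clhash->hash[h]);
		clhash->hashelems++;	/* element count drives table growth */
	}

	void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
				     struct Qdisc_class_common *cl)
	{
		hlist_del(&cl->hnode);
		clhash->hashelems--;
	}

Callers are expected to hold the qdisc tree lock, just as the open-coded versions they replace relied on it.
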
@@ -1242,6 +1227,7 @@ cbq_reset(struct Qdisc* sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
+	struct hlist_node *n;
 	int prio;
 	unsigned h;
 
@@ -1258,8 +1244,8 @@ cbq_reset(struct Qdisc* sch)
 	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
 		q->active[prio] = NULL;
 
-	for (h = 0; h < 16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
@@ -1406,11 +1392,16 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
 		return -EINVAL;
 
+	err = qdisc_class_hash_init(&q->clhash);
+	if (err < 0)
+		goto put_rtab;
+
 	q->link.refcnt = 1;
 	q->link.sibling = &q->link;
-	q->link.classid = sch->handle;
+	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					    &pfifo_qdisc_ops,
 					    sch->handle)))
 		q->link.q = &noop_qdisc;
 
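
Unlike the old fixed array, the class hash must be allocated, so cbq_init() gains a failure path: qdisc_class_hash_init() can return -ENOMEM, and the new put_rtab label (added at the end of the function in a later hunk) releases the rate table acquired just above. Roughly, assuming this generation's sch_api.c (sketch; initial size and allocator are internal details):

	int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
	{
		unsigned int size = 4;	/* small initial table, grown on demand */

		clhash->hash = qdisc_class_hash_alloc(size);	/* internal helper */
		if (clhash->hash == NULL)
			return -ENOMEM;
		clhash->hashsize  = size;
		clhash->hashmask  = size - 1;
		clhash->hashelems = 0;
		return 0;
	}
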
@@ -1419,7 +1410,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.cpriority = TC_CBQ_MAXPRIO-1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
-	q->link.allot = psched_mtu(sch->dev);
+	q->link.allot = psched_mtu(qdisc_dev(sch));
 	q->link.quantum = q->link.allot;
 	q->link.weight = q->link.R_tab->rate.rate;
 
@@ -1441,6 +1432,10 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 
 	cbq_addprio(q, &q->link);
 	return 0;
+
+put_rtab:
+	qdisc_put_rtab(q->link.R_tab);
+	return err;
 }
 
 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
@@ -1521,7 +1516,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 	struct tc_cbq_fopt opt;
 
 	if (cl->split || cl->defmap) {
-		opt.split = cl->split ? cl->split->classid : 0;
+		opt.split = cl->split ? cl->split->common.classid : 0;
 		opt.defmap = cl->defmap;
 		opt.defchange = ~0;
 		NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
@@ -1602,10 +1597,10 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct nlattr *nest;
 
 	if (cl->tparent)
-		tcm->tcm_parent = cl->tparent->classid;
+		tcm->tcm_parent = cl->tparent->common.classid;
 	else
 		tcm->tcm_parent = TC_H_ROOT;
-	tcm->tcm_handle = cl->classid;
+	tcm->tcm_handle = cl->common.classid;
 	tcm->tcm_info = cl->q->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
@@ -1650,8 +1645,10 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	if (cl) {
 		if (new == NULL) {
-			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
-						     cl->classid)) == NULL)
+			new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+						&pfifo_qdisc_ops,
+						cl->common.classid);
+			if (new == NULL)
 				return -ENOBUFS;
 		} else {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1716,6 +1713,7 @@ static void
 cbq_destroy(struct Qdisc* sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
+	struct hlist_node *n, *next;
 	struct cbq_class *cl;
 	unsigned h;
 
@@ -1727,18 +1725,16 @@ cbq_destroy(struct Qdisc* sch)
 	 * classes from root to leafs which means that filters can still
 	 * be bound to classes which have been destroyed already. --TGR '04
 	 */
-	for (h = 0; h < 16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next)
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
-	for (h = 0; h < 16; h++) {
-		struct cbq_class *next;
-
-		for (cl = q->classes[h]; cl; cl = next) {
-			next = cl->next;
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+					  common.hnode)
 			cbq_destroy_class(sch, cl);
-		}
 	}
+	qdisc_class_hash_destroy(&q->clhash);
 }
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
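
Note the asymmetry between the two loops above: the filter pass uses the plain iterator, but the destroy pass must use hlist_for_each_entry_safe(), because cbq_destroy_class() frees the class and with it the hnode the iterator would otherwise follow. Conceptually, the _safe variant expands to something like this (approximation of the macro of this era):

	/* 'n' is the cursor; 'next' stashes the successor before the body
	 * runs, so freeing 'cl' cannot break the walk. */
	for (n = q->clhash.hash[h].first; n && ({ next = n->next; 1; }); n = next) {
		cl = hlist_entry(n, struct cbq_class, common.hnode);
		cbq_destroy_class(sch, cl);	/* may free cl */
	}
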
@@ -1747,12 +1743,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
+		spinlock_t *root_lock = qdisc_root_lock(sch);
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
-		spin_lock_bh(&sch->dev->queue_lock);
+		spin_lock_bh(root_lock);
 		if (q->rx_class == cl)
 			q->rx_class = NULL;
-		spin_unlock_bh(&sch->dev->queue_lock);
+		spin_unlock_bh(root_lock);
 #endif
 
 		cbq_destroy_class(sch, cl);
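
The locking change in this hunk comes from the multiqueue rework: the per-device dev->queue_lock no longer exists, and qdisc-tree state is protected by the root qdisc's queue lock instead. qdisc_root_lock() is approximately the following, assuming this generation's include/net/sch_generic.h (sketch):

	static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
	{
		struct Qdisc *root = qdisc_root(qdisc);	/* root of this qdisc's tree */

		return qdisc_lock(root);	/* lock of the tx queue it is attached to */
	}

The gen_replace_estimator()/gen_new_estimator() call sites in the hunks below switch to the same lock for the same reason.
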
@@ -1781,7 +1778,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (cl) {
 		/* Check parent */
 		if (parentid) {
-			if (cl->tparent && cl->tparent->classid != parentid)
+			if (cl->tparent &&
+			    cl->tparent->common.classid != parentid)
 				return -EINVAL;
 			if (!cl->tparent && parentid != TC_H_ROOT)
 				return -EINVAL;
@@ -1830,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      qdisc_root_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1881,9 +1879,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
+	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					&pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
-	cl->classid = classid;
+	cl->common.classid = classid;
 	cl->tparent = parent;
 	cl->qdisc = sch;
 	cl->allot = parent->allot;
@@ -1916,9 +1915,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
 	sch_tree_unlock(sch);
 
+	qdisc_class_hash_grow(sch, &q->clhash);
+
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  qdisc_root_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
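
qdisc_class_hash_grow() is the pay-off of the conversion: instead of a fixed 16 buckets, the table is doubled (with rehashing under sch_tree_lock()) once it gets too full. The growth policy is roughly the following, assuming this generation's sch_api.c (sketch):

	/* Grow when the table is more than ~3/4 full; an allocation
	 * failure simply leaves the old, smaller table in place. */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
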
@@ -2008,15 +2009,15 @@ static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
+	struct cbq_class *cl;
+	struct hlist_node *n;
 	unsigned h;
 
 	if (arg->stop)
 		return;
 
-	for (h = 0; h < 16; h++) {
-		struct cbq_class *cl;
-
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;