Diffstat (limited to 'net/sched')
 net/sched/act_gact.c    |   2
 net/sched/act_ipt.c     |   2
 net/sched/act_mirred.c  |   4
 net/sched/act_nat.c     |   2
 net/sched/act_pedit.c   |   2
 net/sched/act_police.c  |   8
 net/sched/act_simple.c  |   2
 net/sched/cls_api.c     |  20
 net/sched/cls_flow.c    |  52
 net/sched/cls_route.c   |  12
 net/sched/cls_u32.c     |  18
 net/sched/sch_api.c     | 613
 net/sched/sch_atm.c     |  12
 net/sched/sch_cbq.c     | 155
 net/sched/sch_dsmark.c  |  10
 net/sched/sch_fifo.c    |  49
 net/sched/sch_generic.c | 467
 net/sched/sch_gred.c    |  14
 net/sched/sch_hfsc.c    | 110
 net/sched/sch_htb.c     | 195
 net/sched/sch_ingress.c |   2
 net/sched/sch_netem.c   |  65
 net/sched/sch_prio.c    | 143
 net/sched/sch_red.c     |  37
 net/sched/sch_sfq.c     |  20
 net/sched/sch_tbf.c     |  42
 net/sched/sch_teql.c    |  50
 27 files changed, 1213 insertions(+), 895 deletions(-)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 422872c4f14b..ac04289da5d7 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -139,7 +139,7 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 #else
 	action = gact->tcf_action;
 #endif
-	gact->tcf_bstats.bytes += skb->len;
+	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	gact->tcf_bstats.packets++;
 	if (action == TC_ACT_SHOT)
 		gact->tcf_qstats.drops++;
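The change repeated across these action modules is that byte counters stop reading skb->len directly and go through qdisc_pkt_len(), so accounting picks up the length computed by the new size tables at enqueue time rather than the raw skb length. A minimal sketch of the accessor pair, assuming the qdisc_skb_cb layout this series adds to include/net/sch_generic.h:

	/* Sketch, not a verbatim kernel excerpt; field names assume the
	 * qdisc_skb_cb introduced by this series. */
	struct qdisc_skb_cb {
		unsigned int	pkt_len;	/* size-table-adjusted length */
	};

	static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
	{
		return (struct qdisc_skb_cb *)skb->cb;
	}

	static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
	{
		return qdisc_skb_cb(skb)->pkt_len;
	}

With no size table attached, pkt_len is simply initialized to skb->len, so the counters behave exactly as before.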
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index da696fd3e341..d1263b3c96c3 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -205,7 +205,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += skb->len;
+	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	ipt->tcf_bstats.packets++;
 
 	/* yes, we have to worry about both in and out dev
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 1aff005d95cd..70341c020b6d 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -164,7 +164,7 @@ bad_mirred:
 	if (skb2 != NULL)
 		kfree_skb(skb2);
 	m->tcf_qstats.overlimits++;
-	m->tcf_bstats.bytes += skb->len;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	m->tcf_bstats.packets++;
 	spin_unlock(&m->tcf_lock);
 	/* should we be asking for packet to be dropped?
@@ -184,7 +184,7 @@ bad_mirred:
 		goto bad_mirred;
 	}
 
-	m->tcf_bstats.bytes += skb2->len;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
 	m->tcf_bstats.packets++;
 	if (!(at & AT_EGRESS))
 		if (m->tcfm_ok_push)
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 0a3c8339767a..7b39ed485bca 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -124,7 +124,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;
 
 	spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 3cc4cb9e500e..d5f4e3404864 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -182,7 +182,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 0898120bbcc0..32c3f9d9fb7a 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -272,7 +272,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += skb->len;
+	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	police->tcf_bstats.packets++;
 
 	if (police->tcfp_ewma_rate &&
@@ -282,7 +282,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 		return police->tcf_action;
 	}
 
-	if (skb->len <= police->tcfp_mtu) {
+	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
 			spin_unlock(&police->tcf_lock);
 			return police->tcfp_result;
@@ -295,12 +295,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 			ptoks = toks + police->tcfp_ptoks;
 			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
 				ptoks = (long)L2T_P(police, police->tcfp_mtu);
-			ptoks -= L2T_P(police, skb->len);
+			ptoks -= L2T_P(police, qdisc_pkt_len(skb));
 		}
 		toks += police->tcfp_toks;
 		if (toks > (long)police->tcfp_burst)
 			toks = police->tcfp_burst;
-		toks -= L2T(police, skb->len);
+		toks -= L2T(police, qdisc_pkt_len(skb));
 		if ((toks|ptoks) >= 0) {
 			police->tcfp_t_c = now;
 			police->tcfp_toks = toks;
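The policer above is a dual token bucket: toks tracks credit at the committed rate, ptoks at the peak rate, and L2T()/L2T_P() convert a byte count into transmission time via the rate tables. A packet conforms only if both buckets stay non-negative after being debited; the (toks|ptoks) >= 0 test checks both sign bits at once. A self-contained user-space model of that decision, with illustrative names:

	#include <stdbool.h>

	/* Toy dual-bucket conformance test; l2t() stands in for the
	 * kernel's rate-table lookup. */
	static long l2t(long bytes_per_tick, unsigned int len)
	{
		return len / bytes_per_tick;
	}

	struct bucket_state {
		long toks, ptoks;	/* remaining credit, in ticks */
		long burst, pburst;	/* caps for each bucket */
	};

	static bool conforms(struct bucket_state *b, long rate, long prate,
			     unsigned int pkt_len, long elapsed_ticks)
	{
		long toks = b->toks + elapsed_ticks;
		long ptoks = 0;

		if (toks > b->burst)
			toks = b->burst;
		if (prate) {
			ptoks = b->ptoks + elapsed_ticks;
			if (ptoks > b->pburst)
				ptoks = b->pburst;
			ptoks -= l2t(prate, pkt_len);
		}
		toks -= l2t(rate, pkt_len);

		if ((toks | ptoks) >= 0) {	/* both non-negative */
			b->toks = toks;
			b->ptoks = ptoks;
			return true;
		}
		return false;	/* over limit: caller applies the action */
	}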
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 1d421d059caf..e7851ce92cfe 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -41,7 +41,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += skb->len;
+	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	d->tcf_bstats.packets++;
 
 	/* print policy string followed by _ then packet count
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9360fc81e8c7..d2b6f54a6261 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -120,6 +120,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tca[TCA_MAX + 1];
+	spinlock_t *root_lock;
 	struct tcmsg *t;
 	u32 protocol;
 	u32 prio;
@@ -166,7 +167,8 @@ replay:
 
 	/* Find qdisc */
 	if (!parent) {
-		q = dev->qdisc_sleeping;
+		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
+		q = dev_queue->qdisc_sleeping;
 		parent = q->handle;
 	} else {
 		q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -203,6 +205,8 @@ replay:
 		}
 	}
 
+	root_lock = qdisc_root_lock(q);
+
 	if (tp == NULL) {
 		/* Proto-tcf does not exist, create new one */
 
@@ -262,10 +266,10 @@ replay:
 			goto errout;
 		}
 
-		qdisc_lock_tree(dev);
+		spin_lock_bh(root_lock);
 		tp->next = *back;
 		*back = tp;
-		qdisc_unlock_tree(dev);
+		spin_unlock_bh(root_lock);
 
 	} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind))
 		goto errout;
@@ -274,9 +278,9 @@ replay:
 
 	if (fh == 0) {
 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
-			qdisc_lock_tree(dev);
+			spin_lock_bh(root_lock);
 			*back = tp->next;
-			qdisc_unlock_tree(dev);
+			spin_unlock_bh(root_lock);
 
 			tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
 			tcf_destroy(tp);
@@ -334,7 +338,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad1 = 0;
-	tcm->tcm_ifindex = tp->q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
@@ -390,6 +394,7 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
+	struct netdev_queue *dev_queue;
 	int t;
 	int s_t;
 	struct net_device *dev;
@@ -408,8 +413,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (!tcm->tcm_parent)
-		q = dev->qdisc_sleeping;
+		q = dev_queue->qdisc_sleeping;
 	else
 		q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
 	if (!q)
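Throughout cls_api.c the per-device qdisc_lock_tree()/qdisc_unlock_tree() pair is replaced by fetching the root qdisc's lock once via qdisc_root_lock() and bracketing each filter-chain splice with spin_lock_bh()/spin_unlock_bh(). (The second spin_lock_bh() in the RTM_DELTFILTER hunk was a typo for spin_unlock_bh() and is corrected above.) The intended pattern, sketched under the assumption that qdisc_root_lock() returns the lock guarding q's whole tree:

	/* Illustrative pattern only, not a verbatim kernel excerpt. */
	static void filter_chain_unlink(struct tcf_proto **back,
					struct tcf_proto *tp, struct Qdisc *q)
	{
		spinlock_t *root_lock = qdisc_root_lock(q);

		spin_lock_bh(root_lock);	/* exclude the xmit path */
		*back = tp->next;		/* splice tp out of the chain */
		spin_unlock_bh(root_lock);	/* always unlock, never re-lock */
	}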
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 971b867e0484..8f63a1a94014 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -36,6 +36,8 @@ struct flow_filter {
 	struct list_head	list;
 	struct tcf_exts		exts;
 	struct tcf_ematch_tree	ematches;
+	struct timer_list	perturb_timer;
+	u32			perturb_period;
 	u32			handle;
 
 	u32			nkeys;
@@ -47,11 +49,9 @@ struct flow_filter {
 	u32			addend;
 	u32			divisor;
 	u32			baseclass;
+	u32			hashrnd;
 };
 
-static u32 flow_hashrnd __read_mostly;
-static int flow_hashrnd_initted __read_mostly;
-
 static const struct tcf_ext_map flow_ext_map = {
 	.action	= TCA_FLOW_ACT,
 	.police	= TCA_FLOW_POLICE,
@@ -348,7 +348,7 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	}
 
 	if (f->mode == FLOW_MODE_HASH)
-		classid = jhash2(keys, f->nkeys, flow_hashrnd);
+		classid = jhash2(keys, f->nkeys, f->hashrnd);
 	else {
 		classid = keys[0];
 		classid = (classid & f->mask) ^ f->xor;
@@ -369,6 +369,15 @@ static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
 	return -1;
 }
 
+static void flow_perturbation(unsigned long arg)
+{
+	struct flow_filter *f = (struct flow_filter *)arg;
+
+	get_random_bytes(&f->hashrnd, 4);
+	if (f->perturb_period)
+		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
+}
+
 static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
 	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
@@ -381,6 +390,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
 	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
 	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
 	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
+	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
 };
 
 static int flow_change(struct tcf_proto *tp, unsigned long base,
@@ -394,6 +404,7 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 	struct tcf_exts e;
 	struct tcf_ematch_tree t;
 	unsigned int nkeys = 0;
+	unsigned int perturb_period = 0;
 	u32 baseclass = 0;
 	u32 keymask = 0;
 	u32 mode;
@@ -442,6 +453,14 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 		mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
 			goto err2;
+
+		if (mode == FLOW_MODE_HASH)
+			perturb_period = f->perturb_period;
+		if (tb[TCA_FLOW_PERTURB]) {
+			if (mode != FLOW_MODE_HASH)
+				goto err2;
+			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
+		}
 	} else {
 		err = -EINVAL;
 		if (!handle)
@@ -455,6 +474,12 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
 			goto err2;
 
+		if (tb[TCA_FLOW_PERTURB]) {
+			if (mode != FLOW_MODE_HASH)
+				goto err2;
+			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
+		}
+
 		if (TC_H_MAJ(baseclass) == 0)
 			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
 		if (TC_H_MIN(baseclass) == 0)
@@ -467,6 +492,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 
 		f->handle = handle;
 		f->mask = ~0U;
+
+		get_random_bytes(&f->hashrnd, 4);
+		f->perturb_timer.function = flow_perturbation;
+		f->perturb_timer.data = (unsigned long)f;
+		init_timer_deferrable(&f->perturb_timer);
 	}
 
 	tcf_exts_change(tp, &f->exts, &e);
@@ -495,6 +525,11 @@ static int flow_change(struct tcf_proto *tp, unsigned long base,
 	if (baseclass)
 		f->baseclass = baseclass;
 
+	f->perturb_period = perturb_period;
+	del_timer(&f->perturb_timer);
+	if (perturb_period)
+		mod_timer(&f->perturb_timer, jiffies + perturb_period);
+
 	if (*arg == 0)
 		list_add_tail(&f->list, &head->filters);
 
@@ -512,6 +547,7 @@ err1:
 
 static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
 {
+	del_timer_sync(&f->perturb_timer);
 	tcf_exts_destroy(tp, &f->exts);
 	tcf_em_tree_destroy(tp, &f->ematches);
 	kfree(f);
@@ -532,11 +568,6 @@ static int flow_init(struct tcf_proto *tp)
 {
 	struct flow_head *head;
 
-	if (!flow_hashrnd_initted) {
-		get_random_bytes(&flow_hashrnd, 4);
-		flow_hashrnd_initted = 1;
-	}
-
 	head = kzalloc(sizeof(*head), GFP_KERNEL);
 	if (head == NULL)
 		return -ENOBUFS;
@@ -605,6 +636,9 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
 	if (f->baseclass)
 		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);
 
+	if (f->perturb_period)
+		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);
+
 	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
 		goto nla_put_failure;
 #ifdef CONFIG_NET_EMATCH
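cls_flow previously seeded one global flow_hashrnd at first use and never changed it; the patch moves the seed into each filter and optionally re-randomizes it every TCA_FLOW_PERTURB seconds from a deferrable timer, the same idea as SFQ's perturbation. A toy user-space model of the effect on classification (illustrative names, plain C):

	#include <stdint.h>
	#include <stdlib.h>

	/* Toy model: re-seeding changes how flows map to classes. */
	struct toy_flow {
		uint32_t hashrnd;
		unsigned long perturb_period;	/* 0 disables re-keying */
	};

	static uint32_t classify(const struct toy_flow *f,
				 const uint32_t *keys, int nkeys)
	{
		uint32_t h = f->hashrnd;
		int i;

		for (i = 0; i < nkeys; i++)	/* stand-in for jhash2() */
			h = h * 31 + keys[i];
		return h;
	}

	static void perturb(struct toy_flow *f)
	{
		f->hashrnd = (uint32_t)rand();	/* kernel: get_random_bytes() */
		/* kernel: mod_timer(&f->perturb_timer,
		 *		     jiffies + f->perturb_period); */
	}

Because the timer is deferrable, an idle machine is not woken up just to re-seed a hash.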
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 784dcb870b98..481260a4f10f 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -73,11 +73,13 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 }
 
 static inline
-void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
+void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
 {
-	qdisc_lock_tree(dev);
+	spinlock_t *root_lock = qdisc_root_lock(q);
+
+	spin_lock_bh(root_lock);
 	memset(head->fastmap, 0, sizeof(head->fastmap));
-	qdisc_unlock_tree(dev);
+	spin_unlock_bh(root_lock);
 }
 
 static inline void
@@ -302,7 +304,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 	*fp = f->next;
 	tcf_tree_unlock(tp);
 
-	route4_reset_fastmap(tp->q->dev, head, f->id);
+	route4_reset_fastmap(tp->q, head, f->id);
 	route4_delete_filter(tp, f);
 
 	/* Strip tree */
@@ -500,7 +502,7 @@ reinsert:
 	}
 	tcf_tree_unlock(tp);
 
-	route4_reset_fastmap(tp->q->dev, head, f->id);
+	route4_reset_fastmap(tp->q, head, f->id);
 	*arg = (unsigned long)f;
 	return 0;
 
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4d755444c449..527db2559dd2 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -75,7 +75,6 @@ struct tc_u_hnode
 
 struct tc_u_common
 {
-	struct tc_u_common	*next;
 	struct tc_u_hnode	*hlist;
 	struct Qdisc		*q;
 	int			refcnt;
@@ -87,8 +86,6 @@ static const struct tcf_ext_map u32_ext_map = {
 	.police = TCA_U32_POLICE
 };
 
-static struct tc_u_common *u32_list;
-
 static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
 {
 	unsigned h = ntohl(key & sel->hmask)>>fshift;
@@ -287,9 +284,7 @@ static int u32_init(struct tcf_proto *tp)
 	struct tc_u_hnode *root_ht;
 	struct tc_u_common *tp_c;
 
-	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
-		if (tp_c->q == tp->q)
-			break;
+	tp_c = tp->q->u32_node;
 
 	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
 	if (root_ht == NULL)
@@ -307,8 +302,7 @@ static int u32_init(struct tcf_proto *tp)
 			return -ENOBUFS;
 		}
 		tp_c->q = tp->q;
-		tp_c->next = u32_list;
-		u32_list = tp_c;
+		tp->q->u32_node = tp_c;
 	}
 
 	tp_c->refcnt++;
@@ -402,14 +396,8 @@ static void u32_destroy(struct tcf_proto *tp)
 
 	if (--tp_c->refcnt == 0) {
 		struct tc_u_hnode *ht;
-		struct tc_u_common **tp_cp;
 
-		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
-			if (*tp_cp == tp_c) {
-				*tp_cp = tp_c->next;
-				break;
-			}
-		}
+		tp->q->u32_node = NULL;
 
 		for (ht = tp_c->hlist; ht; ht = ht->next) {
 			ht->refcnt--;
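With the global u32_list gone, finding the tc_u_common shared by all u32 filters on a qdisc becomes a single pointer dereference instead of a list scan, and cls_u32 keeps no module-global state. Sketched side by side, assuming the u32_node field this series adds to struct Qdisc:

	/* Before: O(n) scan of a module-global list. */
	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	/* After: O(1) lookup through the qdisc itself. */
	tp_c = tp->q->u32_node;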
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 10f01ad04380..5219d5f9d754 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -99,7 +99,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
    ---requeue
 
    requeues once dequeued packet. It is used for non-standard or
-   just buggy devices, which can defer output even if dev->tbusy=0.
+   just buggy devices, which can defer output even if netif_queue_stopped()=0.
 
    ---reset
 
@@ -185,11 +185,20 @@ EXPORT_SYMBOL(unregister_qdisc);
 
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
-	struct Qdisc *q;
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+		struct Qdisc *q, *txq_root = txq->qdisc;
+
+		if (!(txq_root->flags & TCQ_F_BUILTIN) &&
+		    txq_root->handle == handle)
+			return txq_root;
 
-	list_for_each_entry(q, &dev->qdisc_list, list) {
-		if (q->handle == handle)
-			return q;
+		list_for_each_entry(q, &txq_root->list, list) {
+			if (q->handle == handle)
+				return q;
+		}
 	}
 	return NULL;
 }
@@ -277,15 +286,137 @@ void qdisc_put_rtab(struct qdisc_rate_table *tab)
 }
 EXPORT_SYMBOL(qdisc_put_rtab);
 
+static LIST_HEAD(qdisc_stab_list);
+static DEFINE_SPINLOCK(qdisc_stab_lock);
+
+static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
+	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
+	[TCA_STAB_DATA] = { .type = NLA_BINARY },
+};
+
+static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
+{
+	struct nlattr *tb[TCA_STAB_MAX + 1];
+	struct qdisc_size_table *stab;
+	struct tc_sizespec *s;
+	unsigned int tsize = 0;
+	u16 *tab = NULL;
+	int err;
+
+	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy);
+	if (err < 0)
+		return ERR_PTR(err);
+	if (!tb[TCA_STAB_BASE])
+		return ERR_PTR(-EINVAL);
+
+	s = nla_data(tb[TCA_STAB_BASE]);
+
+	if (s->tsize > 0) {
+		if (!tb[TCA_STAB_DATA])
+			return ERR_PTR(-EINVAL);
+		tab = nla_data(tb[TCA_STAB_DATA]);
+		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
+	}
+
+	if (!s || tsize != s->tsize || (!tab && tsize > 0))
+		return ERR_PTR(-EINVAL);
+
+	spin_lock(&qdisc_stab_lock);
+
+	list_for_each_entry(stab, &qdisc_stab_list, list) {
+		if (memcmp(&stab->szopts, s, sizeof(*s)))
+			continue;
+		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
+			continue;
+		stab->refcnt++;
+		spin_unlock(&qdisc_stab_lock);
+		return stab;
+	}
+
+	spin_unlock(&qdisc_stab_lock);
+
+	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
+	if (!stab)
+		return ERR_PTR(-ENOMEM);
+
+	stab->refcnt = 1;
+	stab->szopts = *s;
+	if (tsize > 0)
+		memcpy(stab->data, tab, tsize * sizeof(u16));
+
+	spin_lock(&qdisc_stab_lock);
+	list_add_tail(&stab->list, &qdisc_stab_list);
+	spin_unlock(&qdisc_stab_lock);
+
+	return stab;
+}
+
+void qdisc_put_stab(struct qdisc_size_table *tab)
+{
+	if (!tab)
+		return;
+
+	spin_lock(&qdisc_stab_lock);
+
+	if (--tab->refcnt == 0) {
+		list_del(&tab->list);
+		kfree(tab);
+	}
+
+	spin_unlock(&qdisc_stab_lock);
+}
+EXPORT_SYMBOL(qdisc_put_stab);
+
+static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+	struct nlattr *nest;
+
+	nest = nla_nest_start(skb, TCA_STAB);
+	NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts);
+	nla_nest_end(skb, nest);
+
+	return skb->len;
+
+nla_put_failure:
+	return -1;
+}
+
+void qdisc_calculate_pkt_len(struct sk_buff *skb, struct qdisc_size_table *stab)
+{
+	int pkt_len, slot;
+
+	pkt_len = skb->len + stab->szopts.overhead;
+	if (unlikely(!stab->szopts.tsize))
+		goto out;
+
+	slot = pkt_len + stab->szopts.cell_align;
+	if (unlikely(slot < 0))
+		slot = 0;
+
+	slot >>= stab->szopts.cell_log;
+	if (likely(slot < stab->szopts.tsize))
+		pkt_len = stab->data[slot];
+	else
+		pkt_len = stab->data[stab->szopts.tsize - 1] *
+				(slot / stab->szopts.tsize) +
+				stab->data[slot % stab->szopts.tsize];
+
+	pkt_len <<= stab->szopts.size_log;
+out:
+	if (unlikely(pkt_len < 1))
+		pkt_len = 1;
+	qdisc_skb_cb(skb)->pkt_len = pkt_len;
+}
+EXPORT_SYMBOL(qdisc_calculate_pkt_len);
+
 static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct net_device *dev = wd->qdisc->dev;
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
-	netif_schedule(dev);
+	__netif_schedule(wd->qdisc);
 
 	return HRTIMER_NORESTART;
 }
@@ -316,6 +447,110 @@ void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
 
+struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
+{
+	unsigned int size = n * sizeof(struct hlist_head), i;
+	struct hlist_head *h;
+
+	if (size <= PAGE_SIZE)
+		h = kmalloc(size, GFP_KERNEL);
+	else
+		h = (struct hlist_head *)
+			__get_free_pages(GFP_KERNEL, get_order(size));
+
+	if (h != NULL) {
+		for (i = 0; i < n; i++)
+			INIT_HLIST_HEAD(&h[i]);
+	}
+	return h;
+}
+
+static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
+{
+	unsigned int size = n * sizeof(struct hlist_head);
+
+	if (size <= PAGE_SIZE)
+		kfree(h);
+	else
+		free_pages((unsigned long)h, get_order(size));
+}
+
+void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
+{
+	struct Qdisc_class_common *cl;
+	struct hlist_node *n, *next;
+	struct hlist_head *nhash, *ohash;
+	unsigned int nsize, nmask, osize;
+	unsigned int i, h;
+
+	/* Rehash when load factor exceeds 0.75 */
+	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
+		return;
+	nsize = clhash->hashsize * 2;
+	nmask = nsize - 1;
+	nhash = qdisc_class_hash_alloc(nsize);
+	if (nhash == NULL)
+		return;
+
+	ohash = clhash->hash;
+	osize = clhash->hashsize;
+
+	sch_tree_lock(sch);
+	for (i = 0; i < osize; i++) {
+		hlist_for_each_entry_safe(cl, n, next, &ohash[i], hnode) {
+			h = qdisc_class_hash(cl->classid, nmask);
+			hlist_add_head(&cl->hnode, &nhash[h]);
+		}
+	}
+	clhash->hash = nhash;
+	clhash->hashsize = nsize;
+	clhash->hashmask = nmask;
+	sch_tree_unlock(sch);
+
+	qdisc_class_hash_free(ohash, osize);
+}
+EXPORT_SYMBOL(qdisc_class_hash_grow);
+
+int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
+{
+	unsigned int size = 4;
+
+	clhash->hash = qdisc_class_hash_alloc(size);
+	if (clhash->hash == NULL)
+		return -ENOMEM;
+	clhash->hashsize = size;
+	clhash->hashmask = size - 1;
+	clhash->hashelems = 0;
+	return 0;
+}
+EXPORT_SYMBOL(qdisc_class_hash_init);
+
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
+{
+	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
+}
+EXPORT_SYMBOL(qdisc_class_hash_destroy);
+
+void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
+			     struct Qdisc_class_common *cl)
+{
+	unsigned int h;
+
+	INIT_HLIST_NODE(&cl->hnode);
+	h = qdisc_class_hash(cl->classid, clhash->hashmask);
+	hlist_add_head(&cl->hnode, &clhash->hash[h]);
+	clhash->hashelems++;
+}
+EXPORT_SYMBOL(qdisc_class_hash_insert);
+
+void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
+			     struct Qdisc_class_common *cl)
+{
+	hlist_del(&cl->hnode);
+	clhash->hashelems--;
+}
+EXPORT_SYMBOL(qdisc_class_hash_remove);
+
 /* Allocate an unique handle from space managed by kernel */
 
 static u32 qdisc_alloc_handle(struct net_device *dev)
@@ -332,32 +567,39 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
 	return i>0 ? autohandle : 0;
 }
 
-/* Attach toplevel qdisc to device dev */
+/* Attach toplevel qdisc to device queue. */
 
-static struct Qdisc *
-dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
+static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+				     struct Qdisc *qdisc)
 {
+	spinlock_t *root_lock;
 	struct Qdisc *oqdisc;
+	int ingress;
+
+	ingress = 0;
+	if (qdisc && qdisc->flags&TCQ_F_INGRESS)
+		ingress = 1;
+
+	if (ingress) {
+		oqdisc = dev_queue->qdisc;
+	} else {
+		oqdisc = dev_queue->qdisc_sleeping;
+	}
 
-	if (dev->flags & IFF_UP)
-		dev_deactivate(dev);
+	root_lock = qdisc_root_lock(oqdisc);
+	spin_lock_bh(root_lock);
 
-	qdisc_lock_tree(dev);
-	if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
-		oqdisc = dev->qdisc_ingress;
+	if (ingress) {
 		/* Prune old scheduler */
 		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
 			/* delete */
 			qdisc_reset(oqdisc);
-			dev->qdisc_ingress = NULL;
+			dev_queue->qdisc = NULL;
 		} else { /* new */
-			dev->qdisc_ingress = qdisc;
+			dev_queue->qdisc = qdisc;
 		}
 
 	} else {
-
-		oqdisc = dev->qdisc_sleeping;
-
 		/* Prune old scheduler */
 		if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
 			qdisc_reset(oqdisc);
@@ -365,14 +607,11 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 		/* ... and graft new one */
 		if (qdisc == NULL)
 			qdisc = &noop_qdisc;
-		dev->qdisc_sleeping = qdisc;
-		dev->qdisc = &noop_qdisc;
+		dev_queue->qdisc_sleeping = qdisc;
+		dev_queue->qdisc = &noop_qdisc;
 	}
 
-	qdisc_unlock_tree(dev);
-
-	if (dev->flags & IFF_UP)
-		dev_activate(dev);
+	spin_unlock_bh(root_lock);
 
 	return oqdisc;
 }
@@ -389,7 +628,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 	if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 		return;
 
-	sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+	sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 	if (sch == NULL) {
 		WARN_ON(parentid != TC_H_ROOT);
 		return;
@@ -405,26 +644,66 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 }
 EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
 
-/* Graft qdisc "new" to class "classid" of qdisc "parent" or
-   to device "dev".
+static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid,
+			       struct Qdisc *old, struct Qdisc *new)
+{
+	if (new || old)
+		qdisc_notify(skb, n, clid, old, new);
 
-   Old qdisc is not destroyed but returned in *old.
+	if (old) {
+		spin_lock_bh(&old->q.lock);
+		qdisc_destroy(old);
+		spin_unlock_bh(&old->q.lock);
+	}
+}
+
+/* Graft qdisc "new" to class "classid" of qdisc "parent" or
+ * to device "dev".
+ *
+ * When appropriate send a netlink notification using 'skb'
+ * and "n".
+ *
+ * On success, destroy old qdisc.
  */
 
 static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
-		       u32 classid,
-		       struct Qdisc *new, struct Qdisc **old)
+		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
+		       struct Qdisc *new, struct Qdisc *old)
 {
+	struct Qdisc *q = old;
 	int err = 0;
-	struct Qdisc *q = *old;
-
 
 	if (parent == NULL) {
-		if (q && q->flags&TCQ_F_INGRESS) {
-			*old = dev_graft_qdisc(dev, q);
-		} else {
-			*old = dev_graft_qdisc(dev, new);
+		unsigned int i, num_q, ingress;
+
+		ingress = 0;
+		num_q = dev->num_tx_queues;
+		if (q && q->flags & TCQ_F_INGRESS) {
+			num_q = 1;
+			ingress = 1;
+		}
+
+		if (dev->flags & IFF_UP)
+			dev_deactivate(dev);
+
+		for (i = 0; i < num_q; i++) {
+			struct netdev_queue *dev_queue = &dev->rx_queue;
+
+			if (!ingress)
+				dev_queue = netdev_get_tx_queue(dev, i);
+
+			if (ingress) {
+				old = dev_graft_qdisc(dev_queue, q);
+			} else {
+				old = dev_graft_qdisc(dev_queue, new);
+				if (new && i > 0)
+					atomic_inc(&new->refcnt);
+			}
+			notify_and_destroy(skb, n, classid, old, new);
 		}
+
+		if (dev->flags & IFF_UP)
+			dev_activate(dev);
 	} else {
 		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
 
@@ -433,10 +712,12 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (cops) {
 			unsigned long cl = cops->get(parent, classid);
 			if (cl) {
-				err = cops->graft(parent, cl, new, old);
+				err = cops->graft(parent, cl, new, &old);
 				cops->put(parent, cl);
 			}
 		}
+		if (!err)
+			notify_and_destroy(skb, n, classid, old, new);
 	}
 	return err;
 }
@@ -448,13 +729,14 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
  */
 
 static struct Qdisc *
-qdisc_create(struct net_device *dev, u32 parent, u32 handle,
-	     struct nlattr **tca, int *errp)
+qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
+	     u32 parent, u32 handle, struct nlattr **tca, int *errp)
 {
 	int err;
 	struct nlattr *kind = tca[TCA_KIND];
 	struct Qdisc *sch;
 	struct Qdisc_ops *ops;
+	struct qdisc_size_table *stab;
 
 	ops = qdisc_lookup_ops(kind);
 #ifdef CONFIG_KMOD
@@ -489,7 +771,7 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
 	if (ops == NULL)
 		goto err_out;
 
-	sch = qdisc_alloc(dev, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch)) {
 		err = PTR_ERR(sch);
 		goto err_out2;
@@ -499,10 +781,8 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
 
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
-		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev->queue_lock;
 		if (handle == 0) {
 			handle = qdisc_alloc_handle(dev);
 			err = -ENOMEM;
@@ -514,9 +794,17 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
 	sch->handle = handle;
 
 	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
+		if (tca[TCA_STAB]) {
+			stab = qdisc_get_stab(tca[TCA_STAB]);
+			if (IS_ERR(stab)) {
+				err = PTR_ERR(stab);
+				goto err_out3;
+			}
+			sch->stab = stab;
+		}
 		if (tca[TCA_RATE]) {
 			err = gen_new_estimator(&sch->bstats, &sch->rate_est,
-						sch->stats_lock,
+						qdisc_root_lock(sch),
 						tca[TCA_RATE]);
 			if (err) {
 				/*
@@ -529,13 +817,13 @@ qdisc_create(struct net_device *dev, u32 parent, u32 handle,
 				goto err_out3;
 			}
 		}
-		qdisc_lock_tree(dev);
-		list_add_tail(&sch->list, &dev->qdisc_list);
-		qdisc_unlock_tree(dev);
+		if (parent)
+			list_add_tail(&sch->list, &dev_queue->qdisc->list);
 
 		return sch;
 	}
 err_out3:
+	qdisc_put_stab(sch->stab);
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
 err_out2:
@@ -547,18 +835,29 @@ err_out:
 
 static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
 {
-	if (tca[TCA_OPTIONS]) {
-		int err;
+	struct qdisc_size_table *stab = NULL;
+	int err = 0;
 
+	if (tca[TCA_OPTIONS]) {
 		if (sch->ops->change == NULL)
 			return -EINVAL;
 		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
 		if (err)
 			return err;
 	}
+
+	if (tca[TCA_STAB]) {
+		stab = qdisc_get_stab(tca[TCA_STAB]);
+		if (IS_ERR(stab))
+			return PTR_ERR(stab);
+	}
+
+	qdisc_put_stab(sch->stab);
+	sch->stab = stab;
+
 	if (tca[TCA_RATE])
 		gen_replace_estimator(&sch->bstats, &sch->rate_est,
-				      sch->stats_lock, tca[TCA_RATE]);
+				      qdisc_root_lock(sch), tca[TCA_RATE]);
 	return 0;
 }
 
@@ -634,10 +933,12 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			return -ENOENT;
 			q = qdisc_leaf(p, clid);
 		} else { /* ingress */
-			q = dev->qdisc_ingress;
+			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		q = dev->qdisc_sleeping;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
+		q = dev_queue->qdisc_sleeping;
 	}
 	if (!q)
 		return -ENOENT;
@@ -657,14 +958,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			return -EINVAL;
 		if (q->handle == 0)
 			return -ENOENT;
-		if ((err = qdisc_graft(dev, p, clid, NULL, &q)) != 0)
+		if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
 			return err;
-		if (q) {
-			qdisc_notify(skb, n, clid, q, NULL);
-			qdisc_lock_tree(dev);
-			qdisc_destroy(q);
-			qdisc_unlock_tree(dev);
-		}
 	} else {
 		qdisc_notify(skb, n, clid, NULL, q);
 	}
@@ -708,10 +1003,12 @@ replay:
 			return -ENOENT;
 			q = qdisc_leaf(p, clid);
 		} else { /*ingress */
-			q = dev->rx_queue.qdisc;
+			q = dev->rx_queue.qdisc;
 		}
 	} else {
-		q = dev->qdisc_sleeping;
+		struct netdev_queue *dev_queue;
+		dev_queue = netdev_get_tx_queue(dev, 0);
+		q = dev_queue->qdisc_sleeping;
 	}
 
 	/* It may be default qdisc, ignore it */
@@ -788,10 +1085,12 @@ create_n_graft:
 	if (!(n->nlmsg_flags&NLM_F_CREATE))
 		return -ENOENT;
 	if (clid == TC_H_INGRESS)
-		q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_parent,
+		q = qdisc_create(dev, &dev->rx_queue,
+				 tcm->tcm_parent, tcm->tcm_parent,
 				 tca, &err);
 	else
-		q = qdisc_create(dev, tcm->tcm_parent, tcm->tcm_handle,
+		q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
+				 tcm->tcm_parent, tcm->tcm_handle,
 				 tca, &err);
 	if (q == NULL) {
 		if (err == -EAGAIN)
@@ -801,22 +1100,18 @@ create_n_graft:
 
 graft:
 	if (1) {
-		struct Qdisc *old_q = NULL;
-		err = qdisc_graft(dev, p, clid, q, &old_q);
+		spinlock_t *root_lock;
+
+		err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
 		if (err) {
 			if (q) {
-				qdisc_lock_tree(dev);
+				root_lock = qdisc_root_lock(q);
+				spin_lock_bh(root_lock);
 				qdisc_destroy(q);
-				qdisc_unlock_tree(dev);
+				spin_unlock_bh(root_lock);
 			}
 			return err;
 		}
-		qdisc_notify(skb, n, clid, old_q, q);
-		if (old_q) {
-			qdisc_lock_tree(dev);
-			qdisc_destroy(old_q);
-			qdisc_unlock_tree(dev);
-		}
 	}
 	return 0;
 }
@@ -834,7 +1129,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = atomic_read(&q->refcnt);
@@ -843,8 +1138,11 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 		goto nla_put_failure;
 	q->qstats.qlen = q->q.qlen;
 
+	if (q->stab && qdisc_dump_stab(skb, q->stab) < 0)
+		goto nla_put_failure;
+
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, q->stats_lock, &d) < 0)
+					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -894,13 +1192,57 @@ err_out:
 	return -EINVAL;
 }
 
+static bool tc_qdisc_dump_ignore(struct Qdisc *q)
+{
+	return (q->flags & TCQ_F_BUILTIN) ? true : false;
+}
+
+static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
+			      struct netlink_callback *cb,
+			      int *q_idx_p, int s_q_idx)
+{
+	int ret = 0, q_idx = *q_idx_p;
+	struct Qdisc *q;
+
+	if (!root)
+		return 0;
+
+	q = root;
+	if (q_idx < s_q_idx) {
+		q_idx++;
+	} else {
+		if (!tc_qdisc_dump_ignore(q) &&
+		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+			goto done;
+		q_idx++;
+	}
+	list_for_each_entry(q, &root->list, list) {
+		if (q_idx < s_q_idx) {
+			q_idx++;
+			continue;
+		}
+		if (!tc_qdisc_dump_ignore(q) &&
+		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
+				  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
+			goto done;
+		q_idx++;
+	}
+
+out:
+	*q_idx_p = q_idx;
+	return ret;
+done:
+	ret = -1;
+	goto out;
+}
+
 static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct net *net = sock_net(skb->sk);
 	int idx, q_idx;
 	int s_idx, s_q_idx;
 	struct net_device *dev;
-	struct Qdisc *q;
 
 	if (net != &init_net)
 		return 0;
@@ -910,21 +1252,22 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 	read_lock(&dev_base_lock);
 	idx = 0;
 	for_each_netdev(&init_net, dev) {
+		struct netdev_queue *dev_queue;
+
 		if (idx < s_idx)
 			goto cont;
 		if (idx > s_idx)
 			s_q_idx = 0;
 		q_idx = 0;
-		list_for_each_entry(q, &dev->qdisc_list, list) {
-			if (q_idx < s_q_idx) {
-				q_idx++;
-				continue;
-			}
-			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
-				goto done;
-			q_idx++;
-		}
+
+		dev_queue = netdev_get_tx_queue(dev, 0);
+		if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+			goto done;
+
+		dev_queue = &dev->rx_queue;
+		if (tc_dump_qdisc_root(dev_queue->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+			goto done;
+
 cont:
 		idx++;
 	}
@@ -949,6 +1292,7 @@ done:
 static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 {
 	struct net *net = sock_net(skb->sk);
+	struct netdev_queue *dev_queue;
 	struct tcmsg *tcm = NLMSG_DATA(n);
 	struct nlattr *tca[TCA_MAX + 1];
 	struct net_device *dev;
@@ -986,6 +1330,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 
 	/* Step 1. Determine qdisc handle X:0 */
 
+	dev_queue = netdev_get_tx_queue(dev, 0);
 	if (pid != TC_H_ROOT) {
 		u32 qid1 = TC_H_MAJ(pid);
 
@@ -996,7 +1341,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 		} else if (qid1) {
 			qid = qid1;
 		} else if (qid == 0)
-			qid = dev->qdisc_sleeping->handle;
+			qid = dev_queue->qdisc_sleeping->handle;
 
 		/* Now qid is genuine qdisc handle consistent
 		   both with parent and child.
@@ -1007,7 +1352,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 		pid = TC_H_MAKE(qid, pid);
 	} else {
 		if (qid == 0)
-			qid = dev->qdisc_sleeping->handle;
+			qid = dev_queue->qdisc_sleeping->handle;
 	}
 
 	/* OK. Locate qdisc */
@@ -1080,7 +1425,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
@@ -1089,7 +1434,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 		goto nla_put_failure;
 
 	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
-					 TCA_XSTATS, q->stats_lock, &d) < 0)
+					 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
 		goto nla_put_failure;
 
 	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
@@ -1140,15 +1485,62 @@ static int qdisc_class_dump(struct Qdisc *q, unsigned long cl, struct qdisc_walk
 			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTCLASS);
 }
 
+static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
+				struct tcmsg *tcm, struct netlink_callback *cb,
+				int *t_p, int s_t)
+{
+	struct qdisc_dump_args arg;
+
+	if (tc_qdisc_dump_ignore(q) ||
+	    *t_p < s_t || !q->ops->cl_ops ||
+	    (tcm->tcm_parent &&
+	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
+		(*t_p)++;
+		return 0;
+	}
+	if (*t_p > s_t)
+		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
+	arg.w.fn = qdisc_class_dump;
+	arg.skb = skb;
+	arg.cb = cb;
+	arg.w.stop = 0;
+	arg.w.skip = cb->args[1];
+	arg.w.count = 0;
+	q->ops->cl_ops->walk(q, &arg.w);
+	cb->args[1] = arg.w.count;
+	if (arg.w.stop)
+		return -1;
+	(*t_p)++;
+	return 0;
+}
+
+static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
+			       struct tcmsg *tcm, struct netlink_callback *cb,
+			       int *t_p, int s_t)
+{
+	struct Qdisc *q;
+
+	if (!root)
+		return 0;
+
+	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
+		return -1;
+
+	list_for_each_entry(q, &root->list, list) {
+		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
 static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
 	struct net *net = sock_net(skb->sk);
-	int t;
-	int s_t;
+	struct netdev_queue *dev_queue;
 	struct net_device *dev;
-	struct Qdisc *q;
-	struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
-	struct qdisc_dump_args arg;
+	int t, s_t;
 
 	if (net != &init_net)
 		return 0;
@@ -1161,28 +1553,15 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	list_for_each_entry(q, &dev->qdisc_list, list) {
-		if (t < s_t || !q->ops->cl_ops ||
-		    (tcm->tcm_parent &&
-		     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
-			t++;
-			continue;
-		}
-		if (t > s_t)
-			memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
-		arg.w.fn = qdisc_class_dump;
-		arg.skb = skb;
-		arg.cb = cb;
-		arg.w.stop = 0;
-		arg.w.skip = cb->args[1];
-		arg.w.count = 0;
-		q->ops->cl_ops->walk(q, &arg.w);
-		cb->args[1] = arg.w.count;
-		if (arg.w.stop)
-			break;
-		t++;
-	}
+	dev_queue = netdev_get_tx_queue(dev, 0);
+	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+		goto done;
 
+	dev_queue = &dev->rx_queue;
+	if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+		goto done;
+
+done:
 	cb->args[0] = t;
 
 	dev_put(dev);
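The new TCA_STAB size tables let qdisc_calculate_pkt_len() translate a packet's wire length into a scheduling length: add the per-packet overhead, bucket the result by cell_log, look the bucket up in data[], and scale by size_log. A hedged user-space re-implementation for experimenting with table parameters (the kernel function above is the reference; names here are illustrative):

	#include <stdio.h>

	struct toy_sizespec {
		int		cell_log;	/* bucket = 2^cell_log bytes */
		int		size_log;	/* result scale factor */
		int		cell_align;
		int		overhead;	/* framing overhead per packet */
		unsigned int	tsize;
	};

	static int calc_pkt_len(int skb_len, const struct toy_sizespec *s,
				const unsigned short *data)
	{
		int pkt_len = skb_len + s->overhead;
		int slot;

		if (!s->tsize)
			goto out;
		slot = pkt_len + s->cell_align;
		if (slot < 0)
			slot = 0;
		slot >>= s->cell_log;
		if (slot < (int)s->tsize)
			pkt_len = data[slot];
		else		/* extrapolate past the table end */
			pkt_len = data[s->tsize - 1] * (slot / s->tsize) +
				  data[slot % s->tsize];
		pkt_len <<= s->size_log;
	out:
		return pkt_len < 1 ? 1 : pkt_len;
	}

	int main(void)
	{
		/* Example: bill every packet as whole 64-byte cells. */
		unsigned short data[256];
		struct toy_sizespec s = { .cell_log = 6, .size_log = 0,
					  .cell_align = -1, .overhead = 5,
					  .tsize = 256 };
		unsigned int i;

		for (i = 0; i < s.tsize; i++)
			data[i] = (i + 1) * 64;		/* cost of i+1 cells */

		/* 100 B + 5 B overhead -> slot 1 -> billed as 128 B */
		printf("%d\n", calc_pkt_len(100, &s, data));
		return 0;
	}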
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index db0e23ae85f8..04faa835be17 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -296,7 +296,8 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		goto err_out;
 	}
 	flow->filter_list = NULL;
-	flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
+	flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+				    &pfifo_qdisc_ops, classid);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
 	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
@@ -428,7 +429,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #endif
 	}
 
-	ret = flow->q->enqueue(skb, flow->q);
+	ret = qdisc_enqueue(skb, flow->q);
 	if (ret != 0) {
 drop: __maybe_unused
 		sch->qstats.drops++;
@@ -436,9 +437,9 @@ drop: __maybe_unused
 		flow->qstats.drops++;
 		return ret;
 	}
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	flow->bstats.bytes += skb->len;
+	flow->bstats.bytes += qdisc_pkt_len(skb);
 	flow->bstats.packets++;
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
@@ -555,7 +556,8 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 
 	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
 	p->flows = &p->link;
-	p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle);
+	p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+				      &pfifo_qdisc_ops, sch->handle);
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
 	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
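sch_atm here (and sch_cbq below) stop calling a child's ->enqueue() method directly and go through qdisc_enqueue() instead, which is what gives an attached size table the chance to stamp qdisc_skb_cb(skb)->pkt_len before the child sees the packet. A sketch of the wrapper under that assumption (the real helper lives in include/net/sch_generic.h; at the root of the tree the kernel initializes pkt_len to skb->len before calling it):

	static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		if (sch->stab)			/* size table attached? */
			qdisc_calculate_pkt_len(skb, sch->stab);
		return sch->enqueue(skb, sch);
	}

This is also why the bstats updates across this patch can rely on qdisc_pkt_len() being valid by the time an action or class sees the packet.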
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 2a3c97f7dc63..f1d2f8ec8b4c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -73,11 +73,10 @@ struct cbq_sched_data;
 
 struct cbq_class
 {
-	struct cbq_class	*next;		/* hash table link */
+	struct Qdisc_class_common common;
 	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */
 
 /* Parameters */
-	u32			classid;
 	unsigned char		priority;	/* class priority */
 	unsigned char		priority2;	/* priority to be used after overlimit */
 	unsigned char		ewma_log;	/* time constant for idle time calculation */
@@ -144,7 +143,7 @@ struct cbq_class
 
 struct cbq_sched_data
 {
-	struct cbq_class	*classes[16];		/* Hash table of all classes */
+	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
 	int			nclasses[TC_CBQ_MAXPRIO+1];
 	unsigned		quanta[TC_CBQ_MAXPRIO+1];
 
@@ -177,23 +176,15 @@ struct cbq_sched_data
177 176
178#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len) 177#define L2T(cl,len) qdisc_l2t((cl)->R_tab,len)
179 178
180
181static __inline__ unsigned cbq_hash(u32 h)
182{
183 h ^= h>>8;
184 h ^= h>>4;
185 return h&0xF;
186}
187
188static __inline__ struct cbq_class * 179static __inline__ struct cbq_class *
189cbq_class_lookup(struct cbq_sched_data *q, u32 classid) 180cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
190{ 181{
191 struct cbq_class *cl; 182 struct Qdisc_class_common *clc;
192 183
193 for (cl = q->classes[cbq_hash(classid)]; cl; cl = cl->next) 184 clc = qdisc_class_find(&q->clhash, classid);
194 if (cl->classid == classid) 185 if (clc == NULL)
195 return cl; 186 return NULL;
196 return NULL; 187 return container_of(clc, struct cbq_class, common);
197} 188}
198 189
199#ifdef CONFIG_NET_CLS_ACT 190#ifdef CONFIG_NET_CLS_ACT
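The conversion embeds a struct Qdisc_class_common in each class and recovers the outer structure with container_of(). A sketch of the assumed common header, whose layout follows the Qdisc_class_hash helpers this series introduces:

/* Assumed shape of the shared class header used by qdisc_class_find(). */
struct Qdisc_class_common {
        u32                     classid;        /* key the hash is searched by */
        struct hlist_node       hnode;          /* link in the Qdisc_class_hash */
};

/* Lookup pattern: find the common node, then map back to the class. */
struct Qdisc_class_common *clc = qdisc_class_find(&q->clhash, classid);
struct cbq_class *cl = clc ? container_of(clc, struct cbq_class, common) : NULL;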
@@ -379,7 +370,6 @@ static int
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	int len = skb->len;
 	int uninitialized_var(ret);
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
 
@@ -396,10 +386,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 #ifdef CONFIG_NET_CLS_ACT
 		cl->q->__parent = sch;
 #endif
-	if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
+	ret = qdisc_enqueue(skb, cl->q);
+	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -659,14 +650,13 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(sch->dev);
+	__netif_schedule(sch);
 	return HRTIMER_NORESTART;
 }
 
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-	int len = skb->len;
 	struct Qdisc *sch = child->__parent;
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = q->rx_class;
@@ -680,10 +670,10 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	q->rx_class = cl;
 	cl->q->__parent = sch;
 
-	if (cl->q->enqueue(skb, cl->q) == 0) {
+	if (qdisc_enqueue(skb, cl->q) == 0) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
@@ -889,7 +879,7 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 			if (skb == NULL)
 				goto skip_class;
 
-			cl->deficit -= skb->len;
+			cl->deficit -= qdisc_pkt_len(skb);
 			q->tx_class = cl;
 			q->tx_borrowed = borrow;
 			if (borrow != cl) {
@@ -897,11 +887,11 @@ cbq_dequeue_prio(struct Qdisc *sch, int prio)
 				borrow->xstats.borrows++;
 				cl->xstats.borrows++;
 #else
-				borrow->xstats.borrows += skb->len;
-				cl->xstats.borrows += skb->len;
+				borrow->xstats.borrows += qdisc_pkt_len(skb);
+				cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
 			}
-			q->tx_len = skb->len;
+			q->tx_len = qdisc_pkt_len(skb);
 
 			if (cl->deficit <= 0) {
 				q->active[prio] = cl;
@@ -1071,13 +1061,14 @@ static void cbq_adjust_levels(struct cbq_class *this)
 static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 {
 	struct cbq_class *cl;
-	unsigned h;
+	struct hlist_node *n;
+	unsigned int h;
 
 	if (q->quanta[prio] == 0)
 		return;
 
-	for (h=0; h<16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			/* BUGGGG... Beware! This expression suffer of
 			   arithmetic overflows!
 			 */
@@ -1085,9 +1076,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 			cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 				q->quanta[prio];
 			}
-			if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
-				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->classid, cl->quantum);
-				cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
+				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
+				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
 	}
@@ -1114,10 +1105,12 @@ static void cbq_sync_defmap(struct cbq_class *cl)
 		if (split->defaults[i])
 			continue;
 
-		for (h=0; h<16; h++) {
+		for (h = 0; h < q->clhash.hashsize; h++) {
+			struct hlist_node *n;
 			struct cbq_class *c;
 
-			for (c = q->classes[h]; c; c = c->next) {
+			hlist_for_each_entry(c, n, &q->clhash.hash[h],
+					     common.hnode) {
 				if (c->split == split && c->level < level &&
 				    c->defmap&(1<<i)) {
 					split->defaults[i] = c;
@@ -1135,12 +1128,12 @@ static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 ma
 	if (splitid == 0) {
 		if ((split = cl->split) == NULL)
 			return;
-		splitid = split->classid;
+		splitid = split->common.classid;
 	}
 
-	if (split == NULL || split->classid != splitid) {
+	if (split == NULL || split->common.classid != splitid) {
 		for (split = cl->tparent; split; split = split->tparent)
-			if (split->classid == splitid)
+			if (split->common.classid == splitid)
 				break;
 	}
 
@@ -1163,13 +1156,7 @@ static void cbq_unlink_class(struct cbq_class *this)
 	struct cbq_class *cl, **clp;
 	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
 
-	for (clp = &q->classes[cbq_hash(this->classid)]; (cl = *clp) != NULL; clp = &cl->next) {
-		if (cl == this) {
-			*clp = cl->next;
-			cl->next = NULL;
-			break;
-		}
-	}
+	qdisc_class_hash_remove(&q->clhash, &this->common);
 
 	if (this->tparent) {
 		clp=&this->sibling;
@@ -1195,12 +1182,10 @@ static void cbq_unlink_class(struct cbq_class *this)
 static void cbq_link_class(struct cbq_class *this)
 {
 	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
-	unsigned h = cbq_hash(this->classid);
 	struct cbq_class *parent = this->tparent;
 
 	this->sibling = this;
-	this->next = q->classes[h];
-	q->classes[h] = this;
+	qdisc_class_hash_insert(&q->clhash, &this->common);
 
 	if (parent == NULL)
 		return;
@@ -1242,6 +1227,7 @@ cbq_reset(struct Qdisc* sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl;
+	struct hlist_node *n;
 	int prio;
 	unsigned h;
 
@@ -1258,8 +1244,8 @@ cbq_reset(struct Qdisc* sch)
 	for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
 		q->active[prio] = NULL;
 
-	for (h = 0; h < 16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			qdisc_reset(cl->q);
 
 			cl->next_alive = NULL;
@@ -1406,11 +1392,16 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
 		return -EINVAL;
 
+	err = qdisc_class_hash_init(&q->clhash);
+	if (err < 0)
+		goto put_rtab;
+
 	q->link.refcnt = 1;
 	q->link.sibling = &q->link;
-	q->link.classid = sch->handle;
+	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					    &pfifo_qdisc_ops,
 					    sch->handle)))
 		q->link.q = &noop_qdisc;
 
@@ -1419,7 +1410,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.cpriority = TC_CBQ_MAXPRIO-1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
-	q->link.allot = psched_mtu(sch->dev);
+	q->link.allot = psched_mtu(qdisc_dev(sch));
 	q->link.quantum = q->link.allot;
 	q->link.weight = q->link.R_tab->rate.rate;
 
@@ -1441,6 +1432,10 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 
 	cbq_addprio(q, &q->link);
 	return 0;
+
+put_rtab:
+	qdisc_put_rtab(q->link.R_tab);
+	return err;
 }
 
 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
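cbq_init() now allocates the class hash before anything else can fail and unwinds the rate table through the new put_rtab label. A sketch of the lifecycle these helpers are assumed to imply for any classful qdisc (the qdisc name "foo" is hypothetical):

/* Hypothetical classful qdisc "foo" driving the resizable class hash. */
static int foo_setup(struct Qdisc *sch, struct foo_sched_data *q)
{
        return qdisc_class_hash_init(&q->clhash);       /* allocate buckets once */
}

static void foo_add_class(struct Qdisc *sch, struct foo_sched_data *q,
                          struct foo_class *cl)
{
        qdisc_class_hash_insert(&q->clhash, &cl->common);
        qdisc_class_hash_grow(sch, &q->clhash);         /* rehash when load grows */
}

static void foo_teardown(struct foo_sched_data *q)
{
        qdisc_class_hash_destroy(&q->clhash);           /* free the bucket array */
}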
@@ -1521,7 +1516,7 @@ static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
 	struct tc_cbq_fopt opt;
 
 	if (cl->split || cl->defmap) {
-		opt.split = cl->split ? cl->split->classid : 0;
+		opt.split = cl->split ? cl->split->common.classid : 0;
 		opt.defmap = cl->defmap;
 		opt.defchange = ~0;
 		NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt);
@@ -1602,10 +1597,10 @@ cbq_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct nlattr *nest;
 
 	if (cl->tparent)
-		tcm->tcm_parent = cl->tparent->classid;
+		tcm->tcm_parent = cl->tparent->common.classid;
 	else
 		tcm->tcm_parent = TC_H_ROOT;
-	tcm->tcm_handle = cl->classid;
+	tcm->tcm_handle = cl->common.classid;
 	tcm->tcm_info = cl->q->handle;
 
 	nest = nla_nest_start(skb, TCA_OPTIONS);
@@ -1650,8 +1645,10 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	if (cl) {
 		if (new == NULL) {
-			if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
-						     cl->classid)) == NULL)
+			new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+						&pfifo_qdisc_ops,
+						cl->common.classid);
+			if (new == NULL)
 				return -ENOBUFS;
 		} else {
 #ifdef CONFIG_NET_CLS_ACT
@@ -1716,6 +1713,7 @@ static void
 cbq_destroy(struct Qdisc* sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
+	struct hlist_node *n, *next;
 	struct cbq_class *cl;
 	unsigned h;
 
@@ -1727,18 +1725,16 @@ cbq_destroy(struct Qdisc* sch)
 	 * classes from root to leafs which means that filters can still
 	 * be bound to classes which have been destroyed already. --TGR '04
 	 */
-	for (h = 0; h < 16; h++) {
-		for (cl = q->classes[h]; cl; cl = cl->next)
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
 			tcf_destroy_chain(&cl->filter_list);
 	}
-	for (h = 0; h < 16; h++) {
-		struct cbq_class *next;
-
-		for (cl = q->classes[h]; cl; cl = next) {
-			next = cl->next;
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[h],
+					  common.hnode)
 			cbq_destroy_class(sch, cl);
-		}
 	}
+	qdisc_class_hash_destroy(&q->clhash);
 }
 
 static void cbq_put(struct Qdisc *sch, unsigned long arg)
@@ -1747,12 +1743,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 
 	if (--cl->refcnt == 0) {
 #ifdef CONFIG_NET_CLS_ACT
+		spinlock_t *root_lock = qdisc_root_lock(sch);
 		struct cbq_sched_data *q = qdisc_priv(sch);
 
-		spin_lock_bh(&sch->dev->queue_lock);
+		spin_lock_bh(root_lock);
 		if (q->rx_class == cl)
 			q->rx_class = NULL;
-		spin_unlock_bh(&sch->dev->queue_lock);
+		spin_unlock_bh(root_lock);
 #endif
 
 		cbq_destroy_class(sch, cl);
@@ -1781,7 +1778,8 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	if (cl) {
 		/* Check parent */
 		if (parentid) {
-			if (cl->tparent && cl->tparent->classid != parentid)
+			if (cl->tparent &&
+			    cl->tparent->common.classid != parentid)
 				return -EINVAL;
 			if (!cl->tparent && parentid != TC_H_ROOT)
 				return -EINVAL;
@@ -1830,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      qdisc_root_lock(sch),
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1881,9 +1879,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
+	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					&pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
-	cl->classid = classid;
+	cl->common.classid = classid;
 	cl->tparent = parent;
 	cl->qdisc = sch;
 	cl->allot = parent->allot;
@@ -1916,9 +1915,11 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 		cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
 	sch_tree_unlock(sch);
 
+	qdisc_class_hash_grow(sch, &q->clhash);
+
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  qdisc_root_lock(sch), tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
@@ -2008,15 +2009,15 @@ static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
 static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
+	struct cbq_class *cl;
+	struct hlist_node *n;
 	unsigned h;
 
 	if (arg->stop)
 		return;
 
-	for (h = 0; h < 16; h++) {
-		struct cbq_class *cl;
-
-		for (cl = q->classes[h]; cl; cl = cl->next) {
+	for (h = 0; h < q->clhash.hashsize; h++) {
+		hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode) {
 			if (arg->count < arg->skip) {
 				arg->count++;
 				continue;
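The loop above is the traversal idiom every converted qdisc now shares. Isolated as a sketch, assuming the two-cursor hlist_for_each_entry() of this kernel generation (entry pointer plus a struct hlist_node iterator):

/* Visit every class in the resizable hash (sketch). */
unsigned int h;
struct hlist_node *n;
struct cbq_class *cl;

for (h = 0; h < q->clhash.hashsize; h++)
        hlist_for_each_entry(cl, n, &q->clhash.hash[h], common.hnode)
                pr_debug("class %08x\n", cl->common.classid);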
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index c4c1317cd47d..a935676987e2 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -60,7 +60,8 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 		 sch, p, new, old);
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+					&pfifo_qdisc_ops,
 					sch->handle);
 		if (new == NULL)
 			new = &noop_qdisc;
@@ -251,13 +252,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 
-	err = p->q->enqueue(skb, p->q);
+	err = qdisc_enqueue(skb, p->q);
 	if (err != NET_XMIT_SUCCESS) {
 		sch->qstats.drops++;
 		return err;
 	}
 
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	sch->q.qlen++;
 
@@ -390,7 +391,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	p->default_index = default_index;
 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-	p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, sch->handle);
+	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+				 &pfifo_qdisc_ops, sch->handle);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
 
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 95ed48221652..23d258bfe8ac 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -27,7 +27,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (likely(sch->qstats.backlog + skb->len <= q->limit))
+	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
@@ -48,10 +48,10 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (opt == NULL) {
-		u32 limit = sch->dev->tx_queue_len ? : 1;
+		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
 		if (sch->ops == &bfifo_qdisc_ops)
-			limit *= sch->dev->mtu;
+			limit *= qdisc_dev(sch)->mtu;
 
 		q->limit = limit;
 	} else {
@@ -107,3 +107,46 @@ struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };
 EXPORT_SYMBOL(bfifo_qdisc_ops);
+
+/* Pass size change message down to embedded FIFO */
+int fifo_set_limit(struct Qdisc *q, unsigned int limit)
+{
+	struct nlattr *nla;
+	int ret = -ENOMEM;
+
+	/* Hack to avoid sending change message to non-FIFO */
+	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
+		return 0;
+
+	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
+	if (nla) {
+		nla->nla_type = RTM_NEWQDISC;
+		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
+		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
+
+		ret = q->ops->change(q, nla);
+		kfree(nla);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(fifo_set_limit);
+
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+			       unsigned int limit)
+{
+	struct Qdisc *q;
+	int err = -ENOMEM;
+
+	q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+			      ops, TC_H_MAKE(sch->handle, 1));
+	if (q) {
+		err = fifo_set_limit(q, limit);
+		if (err < 0) {
+			qdisc_destroy(q);
+			q = NULL;
+		}
+	}
+
+	return q ? : ERR_PTR(err);
+}
+EXPORT_SYMBOL(fifo_create_dflt);
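fifo_set_limit() and fifo_create_dflt() exist so shaping qdiscs can own a private FIFO child and resize it when their own limit changes. A hedged sketch of a caller; the function and variable names here are hypothetical, though the series converts qdiscs such as tbf along these lines:

/* Hypothetical parent qdisc resizing its embedded bfifo child. */
static int shaper_resize_child(struct Qdisc *sch, struct Qdisc **child,
                               unsigned int limit)
{
        if (*child == NULL) {
                struct Qdisc *q = fifo_create_dflt(sch, &bfifo_qdisc_ops, limit);

                if (IS_ERR(q))
                        return PTR_ERR(q);
                *child = q;
                return 0;
        }
        return fifo_set_limit(*child, limit);   /* synthesized change message */
}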
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 13afa7214392..0ddf69286f92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,58 +29,36 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * qdisc_root_lock(qdisc) spinlock.
 *
 * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock dev->queue_lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->ingress_lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
 
-void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->queue_lock)
-	__acquires(dev->ingress_lock)
-{
-	spin_lock_bh(&dev->queue_lock);
-	spin_lock(&dev->ingress_lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
-
-void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->ingress_lock)
-	__releases(dev->queue_lock)
-{
-	spin_unlock(&dev->ingress_lock);
-	spin_unlock_bh(&dev->queue_lock);
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
 static inline int qdisc_qlen(struct Qdisc *q)
 {
 	return q->q.qlen;
 }
 
-static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
-				  struct Qdisc *q)
+static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	if (unlikely(skb->next))
-		dev->gso_skb = skb;
+		q->gso_skb = skb;
 	else
 		q->ops->requeue(skb, q);
 
-	netif_schedule(dev);
+	__netif_schedule(q);
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					      struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
-	if ((skb = dev->gso_skb))
-		dev->gso_skb = NULL;
+	if ((skb = q->gso_skb))
+		q->gso_skb = NULL;
 	else
 		skb = q->dequeue(q);
 
@@ -88,12 +66,12 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
+					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
 	int ret;
 
-	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -103,7 +81,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		kfree_skb(skb);
 		if (net_ratelimit())
 			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev->name);
+			       "fix it urgently!\n", dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -111,22 +89,22 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * some time.
 		 */
 		__get_cpu_var(netdev_rx_stat).cpu_collision++;
-		ret = dev_requeue_skb(skb, dev, q);
+		ret = dev_requeue_skb(skb, q);
 	}
 
 	return ret;
 }
 
 /*
- * NOTE: Called under dev->queue_lock with locally disabled BH.
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
 *
- * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. dev->queue_lock serializes queue accesses for
- * this device AND dev->qdisc pointer itself.
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
- * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
 * if one is grabbed, another must be free.
 *
 * Note, that this procedure can be called by a watchdog timer
@@ -136,27 +114,32 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 * >0 - queue is not empty.
 *
 */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct Qdisc *q)
 {
-	struct Qdisc *q = dev->qdisc;
-	struct sk_buff *skb;
+	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	spinlock_t *root_lock;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(q)) == NULL))
 		return 0;
 
+	root_lock = qdisc_root_lock(q);
 
-	/* And release queue */
-	spin_unlock(&dev->queue_lock);
+	/* And release qdisc */
+	spin_unlock(root_lock);
 
-	HARD_TX_LOCK(dev, smp_processor_id());
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
-		ret = dev_hard_start_xmit(skb, dev);
-	HARD_TX_UNLOCK(dev);
+		ret = dev_hard_start_xmit(skb, dev, txq);
+	HARD_TX_UNLOCK(dev, txq);
 
-	spin_lock(&dev->queue_lock);
-	q = dev->qdisc;
+	spin_lock(root_lock);
 
 	switch (ret) {
 	case NETDEV_TX_OK:
@@ -166,7 +149,7 @@ static inline int qdisc_restart(struct Qdisc *q)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -175,33 +158,33 @@ static inline int qdisc_restart(struct Qdisc *q)
 		printk(KERN_WARNING "BUG %s code %d qlen %d\n",
 		       dev->name, ret, q->q.qlen);
 
-		ret = dev_requeue_skb(skb, dev, q);
+		ret = dev_requeue_skb(skb, q);
 		break;
 	}
 
+	if (ret && netif_tx_queue_stopped(txq))
+		ret = 0;
+
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct Qdisc *q)
 {
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
-		if (netif_queue_stopped(dev))
-			break;
-
+	while (qdisc_restart(q)) {
 		/*
 		 * Postpone processing if
 		 * 1. another process needs the CPU;
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule(dev);
+			__netif_schedule(q);
 			break;
 		}
 	}
 
-	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
+	clear_bit(__QDISC_STATE_RUNNING, &q->state);
 }
 
 static void dev_watchdog(unsigned long arg)
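The rework makes the lock hand-off in qdisc_restart() explicit: the root lock guarding the queue is dropped before the driver's per-queue tx lock is taken, then reacquired. A comment-form sketch of the assumed ordering:

/* Locking order sketch (assumes qdisc_root_lock() names the lock the
 * caller of qdisc_restart() already holds):
 *
 *      spin_lock(root_lock);           held on entry, from the qdisc_run path
 *      skb = dequeue_skb(q);
 *      spin_unlock(root_lock);         the two locks are never nested
 *      HARD_TX_LOCK(dev, txq, cpu);
 *      dev_hard_start_xmit(skb, dev, txq);
 *      HARD_TX_UNLOCK(dev, txq);
 *      spin_lock(root_lock);           retaken to requeue or update qlen
 */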
@@ -209,19 +192,35 @@ static void dev_watchdog(unsigned long arg)
 	struct net_device *dev = (struct net_device *)arg;
 
 	netif_tx_lock(dev);
-	if (dev->qdisc != &noop_qdisc) {
+	if (!qdisc_tx_is_noop(dev)) {
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			if (netif_queue_stopped(dev) &&
-			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
+			int some_queue_stopped = 0;
+			unsigned int i;
+
+			for (i = 0; i < dev->num_tx_queues; i++) {
+				struct netdev_queue *txq;
+
+				txq = netdev_get_tx_queue(dev, i);
+				if (netif_tx_queue_stopped(txq)) {
+					some_queue_stopped = 1;
+					break;
+				}
+			}
 
-				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
+			if (some_queue_stopped &&
+			    time_after(jiffies, (dev->trans_start +
+						 dev->watchdog_timeo))) {
+				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
+				       "transmit timed out\n",
 				       dev->name);
 				dev->tx_timeout(dev);
 				WARN_ON_ONCE(1);
 			}
-			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
+			if (!mod_timer(&dev->watchdog_timer,
+				       round_jiffies(jiffies +
+						     dev->watchdog_timeo)))
 				dev_hold(dev);
 		}
 	}
@@ -317,12 +316,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };
 
+static struct netdev_queue noop_netdev_queue = {
+	.qdisc		=	&noop_qdisc,
+};
+
 struct Qdisc noop_qdisc = {
 	.enqueue	=	noop_enqueue,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noop_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
+	.dev_queue	=	&noop_netdev_queue,
 };
 EXPORT_SYMBOL(noop_qdisc);
 
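noop_qdisc now carries its own q.lock and a static dev_queue binding. With per-queue qdiscs the serializing lock is assumed to live on the qdisc's own queue rather than on the device, which is what makes the accessors below possible even for the builtin instances (a hedged sketch; exact helper names come from this series):

/* Assumed accessors implied by the new fields. */
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;          /* per-qdisc, not per-device */
}

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;   /* reached via the queue binding */
}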
@@ -335,112 +340,65 @@ static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };
 
+static struct Qdisc noqueue_qdisc;
+static struct netdev_queue noqueue_netdev_queue = {
+	.qdisc		=	&noqueue_qdisc,
+};
+
 static struct Qdisc noqueue_qdisc = {
 	.enqueue	=	NULL,
 	.dequeue	=	noop_dequeue,
 	.flags		=	TCQ_F_BUILTIN,
 	.ops		=	&noqueue_qdisc_ops,
 	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
+	.q.lock		=	__SPIN_LOCK_UNLOCKED(noqueue_qdisc.q.lock),
+	.dev_queue	=	&noqueue_netdev_queue,
 };
 
 
-static const u8 prio2band[TC_PRIO_MAX+1] =
-	{ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 };
-
-/* 3-band FIFO queue: old style, but should be a bit faster than
-   generic prio+fifo combination.
- */
-
-#define PFIFO_FAST_BANDS 3
-
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-					     struct Qdisc *qdisc)
+static int fifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-	return list + prio2band[skb->priority & TC_PRIO_MAX];
-}
-
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
-{
-	struct sk_buff_head *list = prio2list(skb, qdisc);
+	struct sk_buff_head *list = &qdisc->q;
 
-	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
-		qdisc->q.qlen++;
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len)
 		return __qdisc_enqueue_tail(skb, qdisc, list);
-	}
 
 	return qdisc_drop(skb, qdisc);
 }
 
-static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
+static struct sk_buff *fifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct sk_buff_head *list = &qdisc->q;
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio)) {
-			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
-		}
-	}
+	if (!skb_queue_empty(list))
+		return __qdisc_dequeue_head(qdisc, list);
 
 	return NULL;
 }
 
-static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
+static int fifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	qdisc->q.qlen++;
-	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
+	return __qdisc_requeue(skb, qdisc, &qdisc->q);
 }
 
-static void pfifo_fast_reset(struct Qdisc* qdisc)
+static void fifo_fast_reset(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
-
+	__qdisc_reset_queue(qdisc, &qdisc->q);
 	qdisc->qstats.backlog = 0;
-	qdisc->q.qlen = 0;
-}
-
-static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
-{
-	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
-
-	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
-	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
-	return skb->len;
-
-nla_put_failure:
-	return -1;
 }
 
-static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
-{
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		skb_queue_head_init(list + prio);
-
-	return 0;
-}
-
-static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
-	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
-	.enqueue	=	pfifo_fast_enqueue,
-	.dequeue	=	pfifo_fast_dequeue,
-	.requeue	=	pfifo_fast_requeue,
-	.init		=	pfifo_fast_init,
-	.reset		=	pfifo_fast_reset,
-	.dump		=	pfifo_fast_dump,
+static struct Qdisc_ops fifo_fast_ops __read_mostly = {
+	.id		=	"fifo_fast",
+	.priv_size	=	0,
+	.enqueue	=	fifo_fast_enqueue,
+	.dequeue	=	fifo_fast_dequeue,
+	.requeue	=	fifo_fast_requeue,
+	.reset		=	fifo_fast_reset,
 	.owner		=	THIS_MODULE,
 };
 
-struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+			  struct Qdisc_ops *ops)
 {
 	void *p;
 	struct Qdisc *sch;
@@ -462,8 +420,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
-	sch->dev = dev;
-	dev_hold(dev);
+	sch->dev_queue = dev_queue;
+	dev_hold(qdisc_dev(sch));
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -471,15 +429,16 @@ errout:
 	return ERR_PTR(err);
 }
 
-struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
+struct Qdisc * qdisc_create_dflt(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 struct Qdisc_ops *ops,
 				 unsigned int parentid)
 {
 	struct Qdisc *sch;
 
-	sch = qdisc_alloc(dev, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
-	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -491,7 +450,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under dev->queue_lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -508,86 +467,164 @@ EXPORT_SYMBOL(qdisc_reset);
 static void __qdisc_destroy(struct rcu_head *head)
 {
 	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
+	const struct Qdisc_ops *ops = qdisc->ops;
+
+#ifdef CONFIG_NET_SCHED
+	qdisc_put_stab(qdisc->stab);
+#endif
+	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
+	if (ops->reset)
+		ops->reset(qdisc);
+	if (ops->destroy)
+		ops->destroy(qdisc);
+
+	module_put(ops->owner);
+	dev_put(qdisc_dev(qdisc));
+
+	kfree_skb(qdisc->gso_skb);
+
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under dev->queue_lock and BH! */
+/* Under qdisc_root_lock(qdisc) and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
-	const struct Qdisc_ops *ops = qdisc->ops;
-
 	if (qdisc->flags & TCQ_F_BUILTIN ||
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	list_del(&qdisc->list);
-	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-	if (ops->reset)
-		ops->reset(qdisc);
-	if (ops->destroy)
-		ops->destroy(qdisc);
+	if (qdisc->parent)
+		list_del(&qdisc->list);
 
-	module_put(ops->owner);
-	dev_put(qdisc->dev);
 	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
+static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+		if (txq->qdisc_sleeping != &noop_qdisc)
+			return false;
+	}
+	return true;
+}
+
+static void attach_one_default_qdisc(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_unused)
+{
+	struct Qdisc *qdisc;
+
+	if (dev->tx_queue_len) {
+		qdisc = qdisc_create_dflt(dev, dev_queue,
+					  &fifo_fast_ops, TC_H_ROOT);
+		if (!qdisc) {
+			printk(KERN_INFO "%s: activation failed\n", dev->name);
+			return;
+		}
+	} else {
+		qdisc = &noqueue_qdisc;
+	}
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
+static void transition_one_qdisc(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_need_watchdog)
+{
+	struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+	int *need_watchdog_p = _need_watchdog;
+
+	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
+	if (new_qdisc != &noqueue_qdisc)
+		*need_watchdog_p = 1;
+}
+
 void dev_activate(struct net_device *dev)
 {
+	int need_watchdog;
+
 	/* No queueing discipline is attached to device;
-	   create default one i.e. pfifo_fast for devices,
-	   which need queueing and noqueue_qdisc for
-	   virtual interfaces
+	 * create default one i.e. fifo_fast for devices,
+	 * which need queueing and noqueue_qdisc for
+	 * virtual interfaces.
 	 */
 
-	if (dev->qdisc_sleeping == &noop_qdisc) {
-		struct Qdisc *qdisc;
-		if (dev->tx_queue_len) {
-			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
-						  TC_H_ROOT);
-			if (qdisc == NULL) {
-				printk(KERN_INFO "%s: activation failed\n", dev->name);
-				return;
-			}
-			list_add_tail(&qdisc->list, &dev->qdisc_list);
-		} else {
-			qdisc = &noqueue_qdisc;
-		}
-		dev->qdisc_sleeping = qdisc;
-	}
+	if (dev_all_qdisc_sleeping_noop(dev))
+		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->queue_lock);
-	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
-	if (dev->qdisc != &noqueue_qdisc) {
+	need_watchdog = 0;
+	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
+
+	if (need_watchdog) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->queue_lock);
 }
 
-void dev_deactivate(struct net_device *dev)
+static void dev_deactivate_queue(struct net_device *dev,
+				 struct netdev_queue *dev_queue,
+				 void *_qdisc_default)
 {
+	struct Qdisc *qdisc_default = _qdisc_default;
+	struct sk_buff *skb = NULL;
 	struct Qdisc *qdisc;
-	struct sk_buff *skb;
-	int running;
 
-	spin_lock_bh(&dev->queue_lock);
-	qdisc = dev->qdisc;
-	dev->qdisc = &noop_qdisc;
+	qdisc = dev_queue->qdisc;
+	if (qdisc) {
+		spin_lock_bh(qdisc_lock(qdisc));
 
-	qdisc_reset(qdisc);
+		dev_queue->qdisc = qdisc_default;
+		qdisc_reset(qdisc);
 
-	skb = dev->gso_skb;
-	dev->gso_skb = NULL;
-	spin_unlock_bh(&dev->queue_lock);
+		spin_unlock_bh(qdisc_lock(qdisc));
+	}
 
 	kfree_skb(skb);
+}
+
+static bool some_qdisc_is_running(struct net_device *dev, int lock)
+{
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		struct netdev_queue *dev_queue;
+		spinlock_t *root_lock;
+		struct Qdisc *q;
+		int val;
+
+		dev_queue = netdev_get_tx_queue(dev, i);
+		q = dev_queue->qdisc;
+		root_lock = qdisc_root_lock(q);
+
+		if (lock)
+			spin_lock_bh(root_lock);
+
+		val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+
+		if (lock)
+			spin_unlock_bh(root_lock);
+
+		if (val)
+			return true;
+	}
+	return false;
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+	bool running;
+
+	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
 
 	dev_watchdog_down(dev);
 
@@ -596,16 +633,14 @@ void dev_deactivate(struct net_device *dev)
 
 	/* Wait for outstanding qdisc_run calls. */
 	do {
-		while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
+		while (some_qdisc_is_running(dev, 0))
 			yield();
 
 		/*
 		 * Double-check inside queue lock to ensure that all effects
 		 * of the queue run are visible when we return.
 		 */
-		spin_lock_bh(&dev->queue_lock);
-		running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-		spin_unlock_bh(&dev->queue_lock);
+		running = some_qdisc_is_running(dev, 1);
 
 		/*
 		 * The running flag should never be set at this point because
@@ -618,32 +653,46 @@ void dev_deactivate(struct net_device *dev)
 	} while (WARN_ON_ONCE(running));
 }
 
+static void dev_init_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc)
+{
+	struct Qdisc *qdisc = _qdisc;
+
+	dev_queue->qdisc = qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+}
+
 void dev_init_scheduler(struct net_device *dev)
 {
-	qdisc_lock_tree(dev);
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	INIT_LIST_HEAD(&dev->qdisc_list);
-	qdisc_unlock_tree(dev);
+	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
+	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
 
 	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
 }
 
-void dev_shutdown(struct net_device *dev)
+static void shutdown_scheduler_queue(struct net_device *dev,
+				     struct netdev_queue *dev_queue,
+				     void *_qdisc_default)
 {
-	struct Qdisc *qdisc;
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+	struct Qdisc *qdisc_default = _qdisc_default;
 
-	qdisc_lock_tree(dev);
-	qdisc = dev->qdisc_sleeping;
-	dev->qdisc = &noop_qdisc;
-	dev->qdisc_sleeping = &noop_qdisc;
-	qdisc_destroy(qdisc);
-#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
-	if ((qdisc = dev->qdisc_ingress) != NULL) {
-		dev->qdisc_ingress = NULL;
+	if (qdisc) {
+		spinlock_t *root_lock = qdisc_root_lock(qdisc);
+
+		dev_queue->qdisc = qdisc_default;
+		dev_queue->qdisc_sleeping = qdisc_default;
+
+		spin_lock(root_lock);
 		qdisc_destroy(qdisc);
+		spin_unlock(root_lock);
 	}
-#endif
+}
+
+void dev_shutdown(struct net_device *dev)
+{
+	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
+	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
 	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
-	qdisc_unlock_tree(dev);
 }
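dev_activate(), dev_deactivate(), dev_init_scheduler() and dev_shutdown() all fan out with netdev_for_each_tx_queue(). A sketch of the assumed helper, whose shape follows the multiqueue netdevice API this series builds on:

/* Assumed helper: apply f(dev, txq, arg) to every transmit queue. */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
                                            void (*f)(struct net_device *,
                                                      struct netdev_queue *,
                                                      void *),
                                            void *arg)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++)
                f(dev, netdev_get_tx_queue(dev, i), arg);
}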
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index c89fba56db56..c1ad6b8de105 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -164,7 +164,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		 * if no default DP has been configured. This
 		 * allows for DP flows to be left untouched.
 		 */
-		if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+		if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
 			return qdisc_enqueue_tail(skb, sch);
 		else
 			goto drop;
@@ -188,7 +188,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	q->packetsin++;
-	q->bytesin += skb->len;
+	q->bytesin += qdisc_pkt_len(skb);
 
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
@@ -226,8 +226,8 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 			break;
 	}
 
-	if (q->backlog + skb->len <= q->limit) {
-		q->backlog += skb->len;
+	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+		q->backlog += qdisc_pkt_len(skb);
 		return qdisc_enqueue_tail(skb, sch);
 	}
 
@@ -254,7 +254,7 @@ static int gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
 	} else {
 		if (red_is_idling(&q->parms))
 			red_end_of_idle_period(&q->parms);
-		q->backlog += skb->len;
+		q->backlog += qdisc_pkt_len(skb);
 	}
 
 	return qdisc_requeue(skb, sch);
@@ -277,7 +277,7 @@ static struct sk_buff *gred_dequeue(struct Qdisc* sch)
 					   "VQ 0x%x after dequeue, screwing up "
 					   "backlog.\n", tc_index_to_dp(skb));
 		} else {
-			q->backlog -= skb->len;
+			q->backlog -= qdisc_pkt_len(skb);
 
 			if (!q->backlog && !gred_wred_mode(t))
 				red_start_of_idle_period(&q->parms);
@@ -299,7 +299,7 @@ static unsigned int gred_drop(struct Qdisc* sch)
 
 	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 		struct gred_sched_data *q;
 		u16 dp = tc_index_to_dp(skb);
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index e817aa00441d..0ae7d19dcba8 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -113,7 +113,7 @@ enum hfsc_class_flags
 
 struct hfsc_class
 {
-	u32		classid;	/* class id */
+	struct Qdisc_class_common cl_common;
 	unsigned int	refcnt;		/* usage count */
 
 	struct gnet_stats_basic bstats;
@@ -134,7 +134,6 @@ struct hfsc_class
 	struct rb_node vt_node;		/* parent's vt_tree member */
 	struct rb_root cf_tree;		/* active children sorted by cl_f */
 	struct rb_node cf_node;		/* parent's cf_heap member */
-	struct list_head hlist;		/* hash list member */
 	struct list_head dlist;		/* drop list member */
 
 	u64	cl_total;		/* total work in bytes */
@@ -177,13 +176,11 @@ struct hfsc_class
 	unsigned long	cl_nactive;	/* number of active children */
 };
 
-#define HFSC_HSIZE	16
-
 struct hfsc_sched
 {
 	u16	defcls;				/* default class id */
 	struct hfsc_class root;			/* root class */
-	struct list_head clhash[HFSC_HSIZE];	/* class hash */
+	struct Qdisc_class_hash clhash;		/* class hash */
 	struct rb_root eligible;		/* eligible tree */
 	struct list_head droplist;		/* active leaf class list (for
						   dropping) */
@@ -898,7 +895,7 @@ qdisc_peek_len(struct Qdisc *sch)
 		printk("qdisc_peek_len: non work-conserving qdisc ?\n");
 		return 0;
 	}
-	len = skb->len;
+	len = qdisc_pkt_len(skb);
 	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
@@ -933,26 +930,16 @@ hfsc_adjust_levels(struct hfsc_class *cl)
 	} while ((cl = cl->cl_parent) != NULL);
 }
 
-static inline unsigned int
-hfsc_hash(u32 h)
-{
-	h ^= h >> 8;
-	h ^= h >> 4;
-
-	return h & (HFSC_HSIZE - 1);
-}
-
 static inline struct hfsc_class *
 hfsc_find_class(u32 classid, struct Qdisc *sch)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
-	struct hfsc_class *cl;
+	struct Qdisc_class_common *clc;
 
-	list_for_each_entry(cl, &q->clhash[hfsc_hash(classid)], hlist) {
-		if (cl->classid == classid)
-			return cl;
-	}
-	return NULL;
+	clc = qdisc_class_find(&q->clhash, classid);
+	if (clc == NULL)
+		return NULL;
+	return container_of(clc, struct hfsc_class, cl_common);
 }
 
 static void
@@ -1032,7 +1019,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1032 1019
1033 if (cl != NULL) { 1020 if (cl != NULL) {
1034 if (parentid) { 1021 if (parentid) {
1035 if (cl->cl_parent && cl->cl_parent->classid != parentid) 1022 if (cl->cl_parent &&
1023 cl->cl_parent->cl_common.classid != parentid)
1036 return -EINVAL; 1024 return -EINVAL;
1037 if (cl->cl_parent == NULL && parentid != TC_H_ROOT) 1025 if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
1038 return -EINVAL; 1026 return -EINVAL;
@@ -1057,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1057 1045
1058 if (tca[TCA_RATE]) 1046 if (tca[TCA_RATE])
1059 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1060 &sch->dev->queue_lock, 1048 qdisc_root_lock(sch),
1061 tca[TCA_RATE]); 1049 tca[TCA_RATE]);
1062 return 0; 1050 return 0;
1063 } 1051 }
@@ -1091,11 +1079,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1091 if (usc != NULL) 1079 if (usc != NULL)
1092 hfsc_change_usc(cl, usc, 0); 1080 hfsc_change_usc(cl, usc, 0);
1093 1081
1082 cl->cl_common.classid = classid;
1094 cl->refcnt = 1; 1083 cl->refcnt = 1;
1095 cl->classid = classid;
1096 cl->sched = q; 1084 cl->sched = q;
1097 cl->cl_parent = parent; 1085 cl->cl_parent = parent;
1098 cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 1086 cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1087 &pfifo_qdisc_ops, classid);
1099 if (cl->qdisc == NULL) 1088 if (cl->qdisc == NULL)
1100 cl->qdisc = &noop_qdisc; 1089 cl->qdisc = &noop_qdisc;
1101 INIT_LIST_HEAD(&cl->children); 1090 INIT_LIST_HEAD(&cl->children);
@@ -1103,7 +1092,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1103 cl->cf_tree = RB_ROOT; 1092 cl->cf_tree = RB_ROOT;
1104 1093
1105 sch_tree_lock(sch); 1094 sch_tree_lock(sch);
1106 list_add_tail(&cl->hlist, &q->clhash[hfsc_hash(classid)]); 1095 qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
1107 list_add_tail(&cl->siblings, &parent->children); 1096 list_add_tail(&cl->siblings, &parent->children);
1108 if (parent->level == 0) 1097 if (parent->level == 0)
1109 hfsc_purge_queue(sch, parent); 1098 hfsc_purge_queue(sch, parent);
@@ -1111,9 +1100,11 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1111 cl->cl_pcvtoff = parent->cl_cvtoff; 1100 cl->cl_pcvtoff = parent->cl_cvtoff;
1112 sch_tree_unlock(sch); 1101 sch_tree_unlock(sch);
1113 1102
1103 qdisc_class_hash_grow(sch, &q->clhash);
1104
1114 if (tca[TCA_RATE]) 1105 if (tca[TCA_RATE])
1115 gen_new_estimator(&cl->bstats, &cl->rate_est, 1106 gen_new_estimator(&cl->bstats, &cl->rate_est,
1116 &sch->dev->queue_lock, tca[TCA_RATE]); 1107 qdisc_root_lock(sch), tca[TCA_RATE]);
1117 *arg = (unsigned long)cl; 1108 *arg = (unsigned long)cl;
1118 return 0; 1109 return 0;
1119} 1110}
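
qdisc_class_hash_grow() runs after the insert, outside sch_tree_lock(); presumably it rehashes into a larger bucket array once the class count outgrows the table. A toy grow-on-insert model with invented structures (not the kernel helper):

#include <stdio.h>
#include <stdlib.h>

struct node { unsigned int key; struct node *next; };
struct table { struct node **buckets; unsigned int size, elems; };

static void insert(struct table *t, struct node *n)
{
	unsigned int b = n->key & (t->size - 1);	/* size is a power of two */
	n->next = t->buckets[b];
	t->buckets[b] = n;
	t->elems++;
}

/* grow once the element count exceeds the bucket count (load factor 1) */
static void maybe_grow(struct table *t)
{
	unsigned int i, nsize = t->size * 2;
	struct node **nb;

	if (t->elems <= t->size)
		return;
	nb = calloc(nsize, sizeof(*nb));
	if (nb == NULL)		/* growing is best-effort; keep the old table */
		return;
	for (i = 0; i < t->size; i++) {
		struct node *n = t->buckets[i], *next;
		for (; n != NULL; n = next) {	/* rehash every entry */
			unsigned int b;
			next = n->next;
			b = n->key & (nsize - 1);
			n->next = nb[b];
			nb[b] = n;
		}
	}
	free(t->buckets);
	t->buckets = nb;
	t->size = nsize;
}

int main(void)
{
	struct table t = { calloc(4, sizeof(struct node *)), 4, 0 };
	unsigned int k;

	for (k = 0; k < 16; k++) {
		struct node *n = malloc(sizeof(*n));
		n->key = k;
		insert(&t, n);
		maybe_grow(&t);		/* as after qdisc_class_hash_insert() */
	}
	printf("%u buckets for %u classes\n", t.size, t.elems);
	return 0;
}
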
@@ -1145,7 +1136,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
1145 hfsc_adjust_levels(cl->cl_parent); 1136 hfsc_adjust_levels(cl->cl_parent);
1146 1137
1147 hfsc_purge_queue(sch, cl); 1138 hfsc_purge_queue(sch, cl);
1148 list_del(&cl->hlist); 1139 qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
1149 1140
1150 if (--cl->refcnt == 0) 1141 if (--cl->refcnt == 0)
1151 hfsc_destroy_class(sch, cl); 1142 hfsc_destroy_class(sch, cl);
@@ -1211,8 +1202,9 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1211 if (cl->level > 0) 1202 if (cl->level > 0)
1212 return -EINVAL; 1203 return -EINVAL;
1213 if (new == NULL) { 1204 if (new == NULL) {
1214 new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1205 new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1215 cl->classid); 1206 &pfifo_qdisc_ops,
1207 cl->cl_common.classid);
1216 if (new == NULL) 1208 if (new == NULL)
1217 new = &noop_qdisc; 1209 new = &noop_qdisc;
1218 } 1210 }
@@ -1345,8 +1337,9 @@ hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
1345 struct hfsc_class *cl = (struct hfsc_class *)arg; 1337 struct hfsc_class *cl = (struct hfsc_class *)arg;
1346 struct nlattr *nest; 1338 struct nlattr *nest;
1347 1339
1348 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; 1340 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
1349 tcm->tcm_handle = cl->classid; 1341 TC_H_ROOT;
1342 tcm->tcm_handle = cl->cl_common.classid;
1350 if (cl->level == 0) 1343 if (cl->level == 0)
1351 tcm->tcm_info = cl->qdisc->handle; 1344 tcm->tcm_info = cl->qdisc->handle;
1352 1345
@@ -1390,14 +1383,16 @@ static void
1390hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1383hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1391{ 1384{
1392 struct hfsc_sched *q = qdisc_priv(sch); 1385 struct hfsc_sched *q = qdisc_priv(sch);
1386 struct hlist_node *n;
1393 struct hfsc_class *cl; 1387 struct hfsc_class *cl;
1394 unsigned int i; 1388 unsigned int i;
1395 1389
1396 if (arg->stop) 1390 if (arg->stop)
1397 return; 1391 return;
1398 1392
1399 for (i = 0; i < HFSC_HSIZE; i++) { 1393 for (i = 0; i < q->clhash.hashsize; i++) {
1400 list_for_each_entry(cl, &q->clhash[i], hlist) { 1394 hlist_for_each_entry(cl, n, &q->clhash.hash[i],
1395 cl_common.hnode) {
1401 if (arg->count < arg->skip) { 1396 if (arg->count < arg->skip) {
1402 arg->count++; 1397 arg->count++;
1403 continue; 1398 continue;
@@ -1433,23 +1428,25 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1433{ 1428{
1434 struct hfsc_sched *q = qdisc_priv(sch); 1429 struct hfsc_sched *q = qdisc_priv(sch);
1435 struct tc_hfsc_qopt *qopt; 1430 struct tc_hfsc_qopt *qopt;
1436 unsigned int i; 1431 int err;
1437 1432
1438 if (opt == NULL || nla_len(opt) < sizeof(*qopt)) 1433 if (opt == NULL || nla_len(opt) < sizeof(*qopt))
1439 return -EINVAL; 1434 return -EINVAL;
1440 qopt = nla_data(opt); 1435 qopt = nla_data(opt);
1441 1436
1442 q->defcls = qopt->defcls; 1437 q->defcls = qopt->defcls;
1443 for (i = 0; i < HFSC_HSIZE; i++) 1438 err = qdisc_class_hash_init(&q->clhash);
1444 INIT_LIST_HEAD(&q->clhash[i]); 1439 if (err < 0)
1440 return err;
1445 q->eligible = RB_ROOT; 1441 q->eligible = RB_ROOT;
1446 INIT_LIST_HEAD(&q->droplist); 1442 INIT_LIST_HEAD(&q->droplist);
1447 skb_queue_head_init(&q->requeue); 1443 skb_queue_head_init(&q->requeue);
1448 1444
1445 q->root.cl_common.classid = sch->handle;
1449 q->root.refcnt = 1; 1446 q->root.refcnt = 1;
1450 q->root.classid = sch->handle;
1451 q->root.sched = q; 1447 q->root.sched = q;
1452 q->root.qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1448 q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1449 &pfifo_qdisc_ops,
1453 sch->handle); 1450 sch->handle);
1454 if (q->root.qdisc == NULL) 1451 if (q->root.qdisc == NULL)
1455 q->root.qdisc = &noop_qdisc; 1452 q->root.qdisc = &noop_qdisc;
@@ -1457,7 +1454,8 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1457 q->root.vt_tree = RB_ROOT; 1454 q->root.vt_tree = RB_ROOT;
1458 q->root.cf_tree = RB_ROOT; 1455 q->root.cf_tree = RB_ROOT;
1459 1456
1460 list_add(&q->root.hlist, &q->clhash[hfsc_hash(q->root.classid)]); 1457 qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
1458 qdisc_class_hash_grow(sch, &q->clhash);
1461 1459
1462 qdisc_watchdog_init(&q->watchdog, sch); 1460 qdisc_watchdog_init(&q->watchdog, sch);
1463 1461
@@ -1520,10 +1518,11 @@ hfsc_reset_qdisc(struct Qdisc *sch)
1520{ 1518{
1521 struct hfsc_sched *q = qdisc_priv(sch); 1519 struct hfsc_sched *q = qdisc_priv(sch);
1522 struct hfsc_class *cl; 1520 struct hfsc_class *cl;
1521 struct hlist_node *n;
1523 unsigned int i; 1522 unsigned int i;
1524 1523
1525 for (i = 0; i < HFSC_HSIZE; i++) { 1524 for (i = 0; i < q->clhash.hashsize; i++) {
1526 list_for_each_entry(cl, &q->clhash[i], hlist) 1525 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
1527 hfsc_reset_class(cl); 1526 hfsc_reset_class(cl);
1528 } 1527 }
1529 __skb_queue_purge(&q->requeue); 1528 __skb_queue_purge(&q->requeue);
@@ -1537,17 +1536,20 @@ static void
1537hfsc_destroy_qdisc(struct Qdisc *sch) 1536hfsc_destroy_qdisc(struct Qdisc *sch)
1538{ 1537{
1539 struct hfsc_sched *q = qdisc_priv(sch); 1538 struct hfsc_sched *q = qdisc_priv(sch);
1540 struct hfsc_class *cl, *next; 1539 struct hlist_node *n, *next;
1540 struct hfsc_class *cl;
1541 unsigned int i; 1541 unsigned int i;
1542 1542
1543 for (i = 0; i < HFSC_HSIZE; i++) { 1543 for (i = 0; i < q->clhash.hashsize; i++) {
1544 list_for_each_entry(cl, &q->clhash[i], hlist) 1544 hlist_for_each_entry(cl, n, &q->clhash.hash[i], cl_common.hnode)
1545 tcf_destroy_chain(&cl->filter_list); 1545 tcf_destroy_chain(&cl->filter_list);
1546 } 1546 }
1547 for (i = 0; i < HFSC_HSIZE; i++) { 1547 for (i = 0; i < q->clhash.hashsize; i++) {
1548 list_for_each_entry_safe(cl, next, &q->clhash[i], hlist) 1548 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1549 cl_common.hnode)
1549 hfsc_destroy_class(sch, cl); 1550 hfsc_destroy_class(sch, cl);
1550 } 1551 }
1552 qdisc_class_hash_destroy(&q->clhash);
1551 __skb_queue_purge(&q->requeue); 1553 __skb_queue_purge(&q->requeue);
1552 qdisc_watchdog_cancel(&q->watchdog); 1554 qdisc_watchdog_cancel(&q->watchdog);
1553} 1555}
@@ -1572,7 +1574,6 @@ static int
1572hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 1574hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1573{ 1575{
1574 struct hfsc_class *cl; 1576 struct hfsc_class *cl;
1575 unsigned int len;
1576 int err; 1577 int err;
1577 1578
1578 cl = hfsc_classify(skb, sch, &err); 1579 cl = hfsc_classify(skb, sch, &err);
@@ -1583,8 +1584,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1583 return err; 1584 return err;
1584 } 1585 }
1585 1586
1586 len = skb->len; 1587 err = qdisc_enqueue(skb, cl->qdisc);
1587 err = cl->qdisc->enqueue(skb, cl->qdisc);
1588 if (unlikely(err != NET_XMIT_SUCCESS)) { 1588 if (unlikely(err != NET_XMIT_SUCCESS)) {
1589 cl->qstats.drops++; 1589 cl->qstats.drops++;
1590 sch->qstats.drops++; 1590 sch->qstats.drops++;
@@ -1592,12 +1592,12 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
1592 } 1592 }
1593 1593
1594 if (cl->qdisc->q.qlen == 1) 1594 if (cl->qdisc->q.qlen == 1)
1595 set_active(cl, len); 1595 set_active(cl, qdisc_pkt_len(skb));
1596 1596
1597 cl->bstats.packets++; 1597 cl->bstats.packets++;
1598 cl->bstats.bytes += len; 1598 cl->bstats.bytes += qdisc_pkt_len(skb);
1599 sch->bstats.packets++; 1599 sch->bstats.packets++;
1600 sch->bstats.bytes += len; 1600 sch->bstats.bytes += qdisc_pkt_len(skb);
1601 sch->q.qlen++; 1601 sch->q.qlen++;
1602 1602
1603 return NET_XMIT_SUCCESS; 1603 return NET_XMIT_SUCCESS;
@@ -1647,9 +1647,9 @@ hfsc_dequeue(struct Qdisc *sch)
1647 return NULL; 1647 return NULL;
1648 } 1648 }
1649 1649
1650 update_vf(cl, skb->len, cur_time); 1650 update_vf(cl, qdisc_pkt_len(skb), cur_time);
1651 if (realtime) 1651 if (realtime)
1652 cl->cl_cumul += skb->len; 1652 cl->cl_cumul += qdisc_pkt_len(skb);
1653 1653
1654 if (cl->qdisc->q.qlen != 0) { 1654 if (cl->qdisc->q.qlen != 0) {
1655 if (cl->cl_flags & HFSC_RSC) { 1655 if (cl->cl_flags & HFSC_RSC) {
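
The enqueue/dequeue hunks above replace every skb->len with qdisc_pkt_len(skb), which reads a length stashed in the skb control block. The point, presumably, is that the charge is captured once when the packet enters the root qdisc and stays stable across qdisc levels even if the skb changes underneath. A userspace sketch of that idea (model types only; the netem_skb_cb hunk further below shows the real cb layering):

#include <stdio.h>

struct skb_model {		/* stand-in for struct sk_buff */
	unsigned int len;
	char cb[48];		/* stand-in for skb->cb scratch space */
};

struct qdisc_cb_model {		/* stand-in for struct qdisc_skb_cb */
	unsigned int pkt_len;
};

static unsigned int pkt_len(struct skb_model *skb)
{				/* stand-in for qdisc_pkt_len() */
	return ((struct qdisc_cb_model *)skb->cb)->pkt_len;
}

int main(void)
{
	struct skb_model skb = { .len = 1500, .cb = { 0 } };

	/* stash the length once at root enqueue, which is what
	 * qdisc_enqueue_root() is assumed to do */
	((struct qdisc_cb_model *)skb.cb)->pkt_len = skb.len;

	skb.len = 60;	/* suppose the payload is modified in flight */

	/* every qdisc level still charges the original length */
	printf("charged %u bytes; skb->len is now %u\n", pkt_len(&skb), skb.len);
	return 0;
}
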
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 3fb58f428f72..30c999c61b01 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -24,8 +24,6 @@
24 * Jiri Fojtasek 24 * Jiri Fojtasek
25 * fixed requeue routine 25 * fixed requeue routine
26 * and many others. thanks. 26 * and many others. thanks.
27 *
28 * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
29 */ 27 */
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
@@ -53,7 +51,6 @@
53 one less than their parent. 51 one less than their parent.
54*/ 52*/
55 53
56#define HTB_HSIZE 16 /* classid hash size */
57static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */ 54static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
58#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ 55#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
59 56
@@ -74,8 +71,8 @@ enum htb_cmode {
74 71
75/* interior & leaf nodes; props specific to leaves are marked L: */ 72/* interior & leaf nodes; props specific to leaves are marked L: */
76struct htb_class { 73struct htb_class {
74 struct Qdisc_class_common common;
77 /* general class parameters */ 75 /* general class parameters */
78 u32 classid;
79 struct gnet_stats_basic bstats; 76 struct gnet_stats_basic bstats;
80 struct gnet_stats_queue qstats; 77 struct gnet_stats_queue qstats;
81 struct gnet_stats_rate_est rate_est; 78 struct gnet_stats_rate_est rate_est;
@@ -84,10 +81,8 @@ struct htb_class {
84 81
85 /* topology */ 82 /* topology */
86 int level; /* our level (see above) */ 83 int level; /* our level (see above) */
84 unsigned int children;
87 struct htb_class *parent; /* parent class */ 85 struct htb_class *parent; /* parent class */
88 struct hlist_node hlist; /* classid hash list item */
89 struct list_head sibling; /* sibling list item */
90 struct list_head children; /* children list */
91 86
92 union { 87 union {
93 struct htb_class_leaf { 88 struct htb_class_leaf {
@@ -142,8 +137,7 @@ static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
142} 137}
143 138
144struct htb_sched { 139struct htb_sched {
145 struct list_head root; /* root classes list */ 140 struct Qdisc_class_hash clhash;
146 struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */
147 struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */ 141 struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
148 142
149 /* self list - roots of self generating tree */ 143 /* self list - roots of self generating tree */
@@ -165,7 +159,6 @@ struct htb_sched {
165 159
166 /* filters for qdisc itself */ 160 /* filters for qdisc itself */
167 struct tcf_proto *filter_list; 161 struct tcf_proto *filter_list;
168 int filter_cnt;
169 162
170 int rate2quantum; /* quant = rate / rate2quantum */ 163 int rate2quantum; /* quant = rate / rate2quantum */
171 psched_time_t now; /* cached dequeue time */ 164 psched_time_t now; /* cached dequeue time */
@@ -178,32 +171,16 @@ struct htb_sched {
178 long direct_pkts; 171 long direct_pkts;
179}; 172};
180 173
181/* compute hash of size HTB_HSIZE for given handle */
182static inline int htb_hash(u32 h)
183{
184#if HTB_HSIZE != 16
185#error "Declare new hash for your HTB_HSIZE"
186#endif
187 h ^= h >> 8; /* stolen from cbq_hash */
188 h ^= h >> 4;
189 return h & 0xf;
190}
191
192/* find class in global hash table using given handle */ 174/* find class in global hash table using given handle */
193static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch) 175static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
194{ 176{
195 struct htb_sched *q = qdisc_priv(sch); 177 struct htb_sched *q = qdisc_priv(sch);
196 struct hlist_node *p; 178 struct Qdisc_class_common *clc;
197 struct htb_class *cl;
198 179
199 if (TC_H_MAJ(handle) != sch->handle) 180 clc = qdisc_class_find(&q->clhash, handle);
181 if (clc == NULL)
200 return NULL; 182 return NULL;
201 183 return container_of(clc, struct htb_class, common);
202 hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
203 if (cl->classid == handle)
204 return cl;
205 }
206 return NULL;
207} 184}
208 185
209/** 186/**
@@ -284,7 +261,7 @@ static void htb_add_to_id_tree(struct rb_root *root,
284 parent = *p; 261 parent = *p;
285 c = rb_entry(parent, struct htb_class, node[prio]); 262 c = rb_entry(parent, struct htb_class, node[prio]);
286 263
287 if (cl->classid > c->classid) 264 if (cl->common.classid > c->common.classid)
288 p = &parent->rb_right; 265 p = &parent->rb_right;
289 else 266 else
290 p = &parent->rb_left; 267 p = &parent->rb_left;
@@ -448,7 +425,7 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
448 /* we are removing child which is pointed to from 425 /* we are removing child which is pointed to from
449 parent feed - forget the pointer but remember 426 parent feed - forget the pointer but remember
450 classid */ 427 classid */
451 p->un.inner.last_ptr_id[prio] = cl->classid; 428 p->un.inner.last_ptr_id[prio] = cl->common.classid;
452 p->un.inner.ptr[prio] = NULL; 429 p->un.inner.ptr[prio] = NULL;
453 } 430 }
454 431
@@ -595,21 +572,20 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
595 kfree_skb(skb); 572 kfree_skb(skb);
596 return ret; 573 return ret;
597#endif 574#endif
598 } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != 575 } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
599 NET_XMIT_SUCCESS) {
600 sch->qstats.drops++; 576 sch->qstats.drops++;
601 cl->qstats.drops++; 577 cl->qstats.drops++;
602 return NET_XMIT_DROP; 578 return NET_XMIT_DROP;
603 } else { 579 } else {
604 cl->bstats.packets += 580 cl->bstats.packets +=
605 skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; 581 skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
606 cl->bstats.bytes += skb->len; 582 cl->bstats.bytes += qdisc_pkt_len(skb);
607 htb_activate(q, cl); 583 htb_activate(q, cl);
608 } 584 }
609 585
610 sch->q.qlen++; 586 sch->q.qlen++;
611 sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1; 587 sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
612 sch->bstats.bytes += skb->len; 588 sch->bstats.bytes += qdisc_pkt_len(skb);
613 return NET_XMIT_SUCCESS; 589 return NET_XMIT_SUCCESS;
614} 590}
615 591
@@ -666,7 +642,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
666static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, 642static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
667 int level, struct sk_buff *skb) 643 int level, struct sk_buff *skb)
668{ 644{
669 int bytes = skb->len; 645 int bytes = qdisc_pkt_len(skb);
670 long toks, diff; 646 long toks, diff;
671 enum htb_cmode old_mode; 647 enum htb_cmode old_mode;
672 648
@@ -753,10 +729,10 @@ static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
753 while (n) { 729 while (n) {
754 struct htb_class *cl = 730 struct htb_class *cl =
755 rb_entry(n, struct htb_class, node[prio]); 731 rb_entry(n, struct htb_class, node[prio]);
756 if (id == cl->classid) 732 if (id == cl->common.classid)
757 return n; 733 return n;
758 734
759 if (id > cl->classid) { 735 if (id > cl->common.classid) {
760 n = n->rb_right; 736 n = n->rb_right;
761 } else { 737 } else {
762 r = n; 738 r = n;
@@ -866,7 +842,7 @@ next:
866 if (!cl->warned) { 842 if (!cl->warned) {
867 printk(KERN_WARNING 843 printk(KERN_WARNING
868 "htb: class %X isn't work conserving ?!\n", 844 "htb: class %X isn't work conserving ?!\n",
869 cl->classid); 845 cl->common.classid);
870 cl->warned = 1; 846 cl->warned = 1;
871 } 847 }
872 q->nwc_hit++; 848 q->nwc_hit++;
@@ -879,7 +855,8 @@ next:
879 } while (cl != start); 855 } while (cl != start);
880 856
881 if (likely(skb != NULL)) { 857 if (likely(skb != NULL)) {
882 if ((cl->un.leaf.deficit[level] -= skb->len) < 0) { 858 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
859 if (cl->un.leaf.deficit[level] < 0) {
883 cl->un.leaf.deficit[level] += cl->un.leaf.quantum; 860 cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
884 htb_next_rb_node((level ? cl->parent->un.inner.ptr : q-> 861 htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
885 ptr[0]) + prio); 862 ptr[0]) + prio);
@@ -977,13 +954,12 @@ static unsigned int htb_drop(struct Qdisc *sch)
977static void htb_reset(struct Qdisc *sch) 954static void htb_reset(struct Qdisc *sch)
978{ 955{
979 struct htb_sched *q = qdisc_priv(sch); 956 struct htb_sched *q = qdisc_priv(sch);
980 int i; 957 struct htb_class *cl;
981 958 struct hlist_node *n;
982 for (i = 0; i < HTB_HSIZE; i++) { 959 unsigned int i;
983 struct hlist_node *p;
984 struct htb_class *cl;
985 960
986 hlist_for_each_entry(cl, p, q->hash + i, hlist) { 961 for (i = 0; i < q->clhash.hashsize; i++) {
962 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
987 if (cl->level) 963 if (cl->level)
988 memset(&cl->un.inner, 0, sizeof(cl->un.inner)); 964 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
989 else { 965 else {
@@ -1041,16 +1017,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1041 return -EINVAL; 1017 return -EINVAL;
1042 } 1018 }
1043 1019
1044 INIT_LIST_HEAD(&q->root); 1020 err = qdisc_class_hash_init(&q->clhash);
1045 for (i = 0; i < HTB_HSIZE; i++) 1021 if (err < 0)
1046 INIT_HLIST_HEAD(q->hash + i); 1022 return err;
1047 for (i = 0; i < TC_HTB_NUMPRIO; i++) 1023 for (i = 0; i < TC_HTB_NUMPRIO; i++)
1048 INIT_LIST_HEAD(q->drops + i); 1024 INIT_LIST_HEAD(q->drops + i);
1049 1025
1050 qdisc_watchdog_init(&q->watchdog, sch); 1026 qdisc_watchdog_init(&q->watchdog, sch);
1051 skb_queue_head_init(&q->direct_queue); 1027 skb_queue_head_init(&q->direct_queue);
1052 1028
1053 q->direct_qlen = sch->dev->tx_queue_len; 1029 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1054 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ 1030 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1055 q->direct_qlen = 2; 1031 q->direct_qlen = 2;
1056 1032
@@ -1063,11 +1039,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1063 1039
1064static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1065{ 1041{
1042 spinlock_t *root_lock = qdisc_root_lock(sch);
1066 struct htb_sched *q = qdisc_priv(sch); 1043 struct htb_sched *q = qdisc_priv(sch);
1067 struct nlattr *nest; 1044 struct nlattr *nest;
1068 struct tc_htb_glob gopt; 1045 struct tc_htb_glob gopt;
1069 1046
1070 spin_lock_bh(&sch->dev->queue_lock); 1047 spin_lock_bh(root_lock);
1071 1048
1072 gopt.direct_pkts = q->direct_pkts; 1049 gopt.direct_pkts = q->direct_pkts;
1073 gopt.version = HTB_VER; 1050 gopt.version = HTB_VER;
@@ -1081,11 +1058,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1081 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1058 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1082 nla_nest_end(skb, nest); 1059 nla_nest_end(skb, nest);
1083 1060
1084 spin_unlock_bh(&sch->dev->queue_lock); 1061 spin_unlock_bh(root_lock);
1085 return skb->len; 1062 return skb->len;
1086 1063
1087nla_put_failure: 1064nla_put_failure:
1088 spin_unlock_bh(&sch->dev->queue_lock); 1065 spin_unlock_bh(root_lock);
1089 nla_nest_cancel(skb, nest); 1066 nla_nest_cancel(skb, nest);
1090 return -1; 1067 return -1;
1091} 1068}
@@ -1094,12 +1071,13 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1094 struct sk_buff *skb, struct tcmsg *tcm) 1071 struct sk_buff *skb, struct tcmsg *tcm)
1095{ 1072{
1096 struct htb_class *cl = (struct htb_class *)arg; 1073 struct htb_class *cl = (struct htb_class *)arg;
1074 spinlock_t *root_lock = qdisc_root_lock(sch);
1097 struct nlattr *nest; 1075 struct nlattr *nest;
1098 struct tc_htb_opt opt; 1076 struct tc_htb_opt opt;
1099 1077
1100 spin_lock_bh(&sch->dev->queue_lock); 1078 spin_lock_bh(root_lock);
1101 tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT; 1079 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1102 tcm->tcm_handle = cl->classid; 1080 tcm->tcm_handle = cl->common.classid;
1103 if (!cl->level && cl->un.leaf.q) 1081 if (!cl->level && cl->un.leaf.q)
1104 tcm->tcm_info = cl->un.leaf.q->handle; 1082 tcm->tcm_info = cl->un.leaf.q->handle;
1105 1083
@@ -1119,11 +1097,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1119 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1097 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1120 1098
1121 nla_nest_end(skb, nest); 1099 nla_nest_end(skb, nest);
1122 spin_unlock_bh(&sch->dev->queue_lock); 1100 spin_unlock_bh(root_lock);
1123 return skb->len; 1101 return skb->len;
1124 1102
1125nla_put_failure: 1103nla_put_failure:
1126 spin_unlock_bh(&sch->dev->queue_lock); 1104 spin_unlock_bh(root_lock);
1127 nla_nest_cancel(skb, nest); 1105 nla_nest_cancel(skb, nest);
1128 return -1; 1106 return -1;
1129} 1107}
@@ -1153,8 +1131,9 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1153 1131
1154 if (cl && !cl->level) { 1132 if (cl && !cl->level) {
1155 if (new == NULL && 1133 if (new == NULL &&
1156 (new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1134 (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1157 cl->classid)) 1135 &pfifo_qdisc_ops,
1136 cl->common.classid))
1158 == NULL) 1137 == NULL)
1159 return -ENOBUFS; 1138 return -ENOBUFS;
1160 sch_tree_lock(sch); 1139 sch_tree_lock(sch);
@@ -1195,12 +1174,9 @@ static inline int htb_parent_last_child(struct htb_class *cl)
1195 if (!cl->parent) 1174 if (!cl->parent)
1196 /* the root class */ 1175 /* the root class */
1197 return 0; 1176 return 0;
1198 1177 if (cl->parent->children > 1)
1199 if (!(cl->parent->children.next == &cl->sibling &&
1200 cl->parent->children.prev == &cl->sibling))
1201 /* not the last child */ 1178 /* not the last child */
1202 return 0; 1179 return 0;
1203
1204 return 1; 1180 return 1;
1205} 1181}
1206 1182
@@ -1228,8 +1204,6 @@ static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1228 1204
1229static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl) 1205static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1230{ 1206{
1231 struct htb_sched *q = qdisc_priv(sch);
1232
1233 if (!cl->level) { 1207 if (!cl->level) {
1234 BUG_TRAP(cl->un.leaf.q); 1208 BUG_TRAP(cl->un.leaf.q);
1235 qdisc_destroy(cl->un.leaf.q); 1209 qdisc_destroy(cl->un.leaf.q);
@@ -1239,21 +1213,6 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1239 qdisc_put_rtab(cl->ceil); 1213 qdisc_put_rtab(cl->ceil);
1240 1214
1241 tcf_destroy_chain(&cl->filter_list); 1215 tcf_destroy_chain(&cl->filter_list);
1242
1243 while (!list_empty(&cl->children))
1244 htb_destroy_class(sch, list_entry(cl->children.next,
1245 struct htb_class, sibling));
1246
1247 /* note: this delete may happen twice (see htb_delete) */
1248 hlist_del_init(&cl->hlist);
1249 list_del(&cl->sibling);
1250
1251 if (cl->prio_activity)
1252 htb_deactivate(q, cl);
1253
1254 if (cl->cmode != HTB_CAN_SEND)
1255 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1256
1257 kfree(cl); 1216 kfree(cl);
1258} 1217}
1259 1218
@@ -1261,6 +1220,9 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1261static void htb_destroy(struct Qdisc *sch) 1220static void htb_destroy(struct Qdisc *sch)
1262{ 1221{
1263 struct htb_sched *q = qdisc_priv(sch); 1222 struct htb_sched *q = qdisc_priv(sch);
1223 struct hlist_node *n, *next;
1224 struct htb_class *cl;
1225 unsigned int i;
1264 1226
1265 qdisc_watchdog_cancel(&q->watchdog); 1227 qdisc_watchdog_cancel(&q->watchdog);
1266 /* This line used to be after htb_destroy_class call below 1228 /* This line used to be after htb_destroy_class call below
@@ -1269,10 +1231,16 @@ static void htb_destroy(struct Qdisc *sch)
1269 unbind_filter on it (without Oops). */ 1231 unbind_filter on it (without Oops). */
1270 tcf_destroy_chain(&q->filter_list); 1232 tcf_destroy_chain(&q->filter_list);
1271 1233
1272 while (!list_empty(&q->root)) 1234 for (i = 0; i < q->clhash.hashsize; i++) {
1273 htb_destroy_class(sch, list_entry(q->root.next, 1235 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1274 struct htb_class, sibling)); 1236 tcf_destroy_chain(&cl->filter_list);
1275 1237 }
1238 for (i = 0; i < q->clhash.hashsize; i++) {
1239 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1240 common.hnode)
1241 htb_destroy_class(sch, cl);
1242 }
1243 qdisc_class_hash_destroy(&q->clhash);
1276 __skb_queue_purge(&q->direct_queue); 1244 __skb_queue_purge(&q->direct_queue);
1277} 1245}
1278 1246
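
htb_destroy now makes two passes: first tcf_destroy_chain() on every class, then a _safe walk that frees them, because destroying a class releases the node the iterator stands on. The same discipline in a plain C sketch:

#include <stdio.h>
#include <stdlib.h>

struct cls { int id; struct cls *next; };

int main(void)
{
	struct cls *head = NULL, *c, *next;
	int i;

	for (i = 0; i < 3; i++) {
		c = malloc(sizeof(*c));
		c->id = i;
		c->next = head;
		head = c;
	}

	/* pass 1: per-class cleanup that must not free any node yet,
	 * mirroring the tcf_destroy_chain() loop in htb_destroy() */
	for (c = head; c != NULL; c = c->next)
		printf("dropping filters of class %d\n", c->id);

	/* pass 2: free the nodes, fetching ->next before each free() --
	 * the guarantee hlist_for_each_entry_safe() provides */
	for (c = head; c != NULL; c = next) {
		next = c->next;
		free(c);
	}
	return 0;
}
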
@@ -1287,12 +1255,13 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
1287 // TODO: why don't we allow deleting a subtree? references? does 1255 // TODO: why don't we allow deleting a subtree? references? does
1288 // the tc subsys guarantee us that in htb_destroy it holds no class 1256 // the tc subsys guarantee us that in htb_destroy it holds no class
1289 // refs so that we can remove children safely there? 1257 // refs so that we can remove children safely there?
1290 if (!list_empty(&cl->children) || cl->filter_cnt) 1258 if (cl->children || cl->filter_cnt)
1291 return -EBUSY; 1259 return -EBUSY;
1292 1260
1293 if (!cl->level && htb_parent_last_child(cl)) { 1261 if (!cl->level && htb_parent_last_child(cl)) {
1294 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 1262 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1295 cl->parent->classid); 1263 &pfifo_qdisc_ops,
1264 cl->parent->common.classid);
1296 last_child = 1; 1265 last_child = 1;
1297 } 1266 }
1298 1267
@@ -1305,11 +1274,15 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
1305 } 1274 }
1306 1275
1307 /* delete from hash and active; remainder in destroy_class */ 1276 /* delete from hash and active; remainder in destroy_class */
1308 hlist_del_init(&cl->hlist); 1277 qdisc_class_hash_remove(&q->clhash, &cl->common);
1278 cl->parent->children--;
1309 1279
1310 if (cl->prio_activity) 1280 if (cl->prio_activity)
1311 htb_deactivate(q, cl); 1281 htb_deactivate(q, cl);
1312 1282
1283 if (cl->cmode != HTB_CAN_SEND)
1284 htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
1285
1313 if (last_child) 1286 if (last_child)
1314 htb_parent_to_leaf(q, cl, new_q); 1287 htb_parent_to_leaf(q, cl, new_q);
1315 1288
@@ -1394,12 +1367,10 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1394 goto failure; 1367 goto failure;
1395 1368
1396 gen_new_estimator(&cl->bstats, &cl->rate_est, 1369 gen_new_estimator(&cl->bstats, &cl->rate_est,
1397 &sch->dev->queue_lock, 1370 qdisc_root_lock(sch),
1398 tca[TCA_RATE] ? : &est.nla); 1371 tca[TCA_RATE] ? : &est.nla);
1399 cl->refcnt = 1; 1372 cl->refcnt = 1;
1400 INIT_LIST_HEAD(&cl->sibling); 1373 cl->children = 0;
1401 INIT_HLIST_NODE(&cl->hlist);
1402 INIT_LIST_HEAD(&cl->children);
1403 INIT_LIST_HEAD(&cl->un.leaf.drop_list); 1374 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1404 RB_CLEAR_NODE(&cl->pq_node); 1375 RB_CLEAR_NODE(&cl->pq_node);
1405 1376
@@ -1409,7 +1380,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1409 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL) 1380 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
1410 so it can't be used inside sch_tree_lock 1381 so it can't be used inside sch_tree_lock
1411 -- thanks to Karlis Peisenieks */ 1382 -- thanks to Karlis Peisenieks */
1412 new_q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid); 1383 new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
1384 &pfifo_qdisc_ops, classid);
1413 sch_tree_lock(sch); 1385 sch_tree_lock(sch);
1414 if (parent && !parent->level) { 1386 if (parent && !parent->level) {
1415 unsigned int qlen = parent->un.leaf.q->q.qlen; 1387 unsigned int qlen = parent->un.leaf.q->q.qlen;
@@ -1433,7 +1405,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1433 /* leaf (we) needs elementary qdisc */ 1405 /* leaf (we) needs elementary qdisc */
1434 cl->un.leaf.q = new_q ? new_q : &noop_qdisc; 1406 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1435 1407
1436 cl->classid = classid; 1408 cl->common.classid = classid;
1437 cl->parent = parent; 1409 cl->parent = parent;
1438 1410
1439 /* set class to be in HTB_CAN_SEND state */ 1411 /* set class to be in HTB_CAN_SEND state */
@@ -1444,13 +1416,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1444 cl->cmode = HTB_CAN_SEND; 1416 cl->cmode = HTB_CAN_SEND;
1445 1417
1446 /* attach to the hash list and parent's family */ 1418 /* attach to the hash list and parent's family */
1447 hlist_add_head(&cl->hlist, q->hash + htb_hash(classid)); 1419 qdisc_class_hash_insert(&q->clhash, &cl->common);
1448 list_add_tail(&cl->sibling, 1420 if (parent)
1449 parent ? &parent->children : &q->root); 1421 parent->children++;
1450 } else { 1422 } else {
1451 if (tca[TCA_RATE]) 1423 if (tca[TCA_RATE])
1452 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1424 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1453 &sch->dev->queue_lock, 1425 qdisc_root_lock(sch),
1454 tca[TCA_RATE]); 1426 tca[TCA_RATE]);
1455 sch_tree_lock(sch); 1427 sch_tree_lock(sch);
1456 } 1428 }
@@ -1462,13 +1434,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1462 if (!hopt->quantum && cl->un.leaf.quantum < 1000) { 1434 if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
1463 printk(KERN_WARNING 1435 printk(KERN_WARNING
1464 "HTB: quantum of class %X is small. Consider r2q change.\n", 1436 "HTB: quantum of class %X is small. Consider r2q change.\n",
1465 cl->classid); 1437 cl->common.classid);
1466 cl->un.leaf.quantum = 1000; 1438 cl->un.leaf.quantum = 1000;
1467 } 1439 }
1468 if (!hopt->quantum && cl->un.leaf.quantum > 200000) { 1440 if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
1469 printk(KERN_WARNING 1441 printk(KERN_WARNING
1470 "HTB: quantum of class %X is big. Consider r2q change.\n", 1442 "HTB: quantum of class %X is big. Consider r2q change.\n",
1471 cl->classid); 1443 cl->common.classid);
1472 cl->un.leaf.quantum = 200000; 1444 cl->un.leaf.quantum = 200000;
1473 } 1445 }
1474 if (hopt->quantum) 1446 if (hopt->quantum)
@@ -1491,6 +1463,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1491 cl->ceil = ctab; 1463 cl->ceil = ctab;
1492 sch_tree_unlock(sch); 1464 sch_tree_unlock(sch);
1493 1465
1466 qdisc_class_hash_grow(sch, &q->clhash);
1467
1494 *arg = (unsigned long)cl; 1468 *arg = (unsigned long)cl;
1495 return 0; 1469 return 0;
1496 1470
@@ -1514,7 +1488,6 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1514static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent, 1488static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1515 u32 classid) 1489 u32 classid)
1516{ 1490{
1517 struct htb_sched *q = qdisc_priv(sch);
1518 struct htb_class *cl = htb_find(classid, sch); 1491 struct htb_class *cl = htb_find(classid, sch);
1519 1492
1520 /*if (cl && !cl->level) return 0; 1493 /*if (cl && !cl->level) return 0;
@@ -1528,35 +1501,29 @@ static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1528 */ 1501 */
1529 if (cl) 1502 if (cl)
1530 cl->filter_cnt++; 1503 cl->filter_cnt++;
1531 else
1532 q->filter_cnt++;
1533 return (unsigned long)cl; 1504 return (unsigned long)cl;
1534} 1505}
1535 1506
1536static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg) 1507static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1537{ 1508{
1538 struct htb_sched *q = qdisc_priv(sch);
1539 struct htb_class *cl = (struct htb_class *)arg; 1509 struct htb_class *cl = (struct htb_class *)arg;
1540 1510
1541 if (cl) 1511 if (cl)
1542 cl->filter_cnt--; 1512 cl->filter_cnt--;
1543 else
1544 q->filter_cnt--;
1545} 1513}
1546 1514
1547static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg) 1515static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1548{ 1516{
1549 struct htb_sched *q = qdisc_priv(sch); 1517 struct htb_sched *q = qdisc_priv(sch);
1550 int i; 1518 struct htb_class *cl;
1519 struct hlist_node *n;
1520 unsigned int i;
1551 1521
1552 if (arg->stop) 1522 if (arg->stop)
1553 return; 1523 return;
1554 1524
1555 for (i = 0; i < HTB_HSIZE; i++) { 1525 for (i = 0; i < q->clhash.hashsize; i++) {
1556 struct hlist_node *p; 1526 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
1557 struct htb_class *cl;
1558
1559 hlist_for_each_entry(cl, p, q->hash + i, hlist) {
1560 if (arg->count < arg->skip) { 1527 if (arg->count < arg->skip) {
1561 arg->count++; 1528 arg->count++;
1562 continue; 1529 continue;
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 956c80ad5965..4a2b77374358 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -77,7 +77,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
77 result = tc_classify(skb, p->filter_list, &res); 77 result = tc_classify(skb, p->filter_list, &res);
78 78
79 sch->bstats.packets++; 79 sch->bstats.packets++;
80 sch->bstats.bytes += skb->len; 80 sch->bstats.bytes += qdisc_pkt_len(skb);
81 switch (result) { 81 switch (result) {
82 case TC_ACT_SHOT: 82 case TC_ACT_SHOT:
83 result = TC_ACT_SHOT; 83 result = TC_ACT_SHOT;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index c9c649b26eaa..a59085700678 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -82,6 +82,13 @@ struct netem_skb_cb {
82 psched_time_t time_to_send; 82 psched_time_t time_to_send;
83}; 83};
84 84
85static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
86{
87 BUILD_BUG_ON(sizeof(skb->cb) <
88 sizeof(struct qdisc_skb_cb) + sizeof(struct netem_skb_cb));
89 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
90}
91
85/* init_crandom - initialize correlated random number generator 92/* init_crandom - initialize correlated random number generator
86 * Use entropy source for initial seed. 93 * Use entropy source for initial seed.
87 */ 94 */
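
netem no longer claims skb->cb outright; its private cb nests behind struct qdisc_skb_cb, and the BUILD_BUG_ON proves at compile time that both layers fit in the cb array. A userspace model of the layering, with a negative-array-size assert standing in for BUILD_BUG_ON (the time field is narrowed to unsigned int for the demo):

#include <stdio.h>

#define CB_SIZE 48	/* sizeof(skb->cb) assumed, as in the BUILD_BUG_ON */

struct qdisc_cb_model { unsigned int pkt_len; char data[]; };
struct netem_cb_model { unsigned int time_to_send; };	/* narrowed model */

/* stand-in for BUILD_BUG_ON(): the array size goes negative, and the
 * translation unit fails to compile, unless both layers fit in the cb */
typedef char cb_layers_fit[(sizeof(struct qdisc_cb_model) +
			    sizeof(struct netem_cb_model) <= CB_SIZE) ? 1 : -1];

static struct netem_cb_model *netem_cb(char *cb)
{
	/* private data starts after the generic qdisc part, as in the patch */
	return (struct netem_cb_model *)((struct qdisc_cb_model *)cb)->data;
}

int main(void)
{
	char cb[CB_SIZE] = { 0 };

	netem_cb(cb)->time_to_send = 12345;
	printf("time_to_send = %u\n", netem_cb(cb)->time_to_send);
	return 0;
}
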
@@ -180,11 +187,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
180 * skb will be queued. 187 * skb will be queued.
181 */ 188 */
182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { 189 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
183 struct Qdisc *rootq = sch->dev->qdisc; 190 struct Qdisc *rootq = qdisc_root(sch);
184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 191 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
185 q->duplicate = 0; 192 q->duplicate = 0;
186 193
187 rootq->enqueue(skb2, rootq); 194 qdisc_enqueue_root(skb2, rootq);
188 q->duplicate = dupsave; 195 q->duplicate = dupsave;
189 } 196 }
190 197
@@ -205,7 +212,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
205 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 212 skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
206 } 213 }
207 214
208 cb = (struct netem_skb_cb *)skb->cb; 215 cb = netem_skb_cb(skb);
209 if (q->gap == 0 /* not doing reordering */ 216 if (q->gap == 0 /* not doing reordering */
210 || q->counter < q->gap /* inside last reordering gap */ 217 || q->counter < q->gap /* inside last reordering gap */
211 || q->reorder < get_crandom(&q->reorder_cor)) { 218 || q->reorder < get_crandom(&q->reorder_cor)) {
@@ -218,7 +225,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
218 now = psched_get_time(); 225 now = psched_get_time();
219 cb->time_to_send = now + delay; 226 cb->time_to_send = now + delay;
220 ++q->counter; 227 ++q->counter;
221 ret = q->qdisc->enqueue(skb, q->qdisc); 228 ret = qdisc_enqueue(skb, q->qdisc);
222 } else { 229 } else {
223 /* 230 /*
224 * Do re-ordering by putting one out of N packets at the front 231 * Do re-ordering by putting one out of N packets at the front
@@ -231,7 +238,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
231 238
232 if (likely(ret == NET_XMIT_SUCCESS)) { 239 if (likely(ret == NET_XMIT_SUCCESS)) {
233 sch->q.qlen++; 240 sch->q.qlen++;
234 sch->bstats.bytes += skb->len; 241 sch->bstats.bytes += qdisc_pkt_len(skb);
235 sch->bstats.packets++; 242 sch->bstats.packets++;
236 } else 243 } else
237 sch->qstats.drops++; 244 sch->qstats.drops++;
@@ -277,8 +284,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
277 284
278 skb = q->qdisc->dequeue(q->qdisc); 285 skb = q->qdisc->dequeue(q->qdisc);
279 if (skb) { 286 if (skb) {
280 const struct netem_skb_cb *cb 287 const struct netem_skb_cb *cb = netem_skb_cb(skb);
281 = (const struct netem_skb_cb *)skb->cb;
282 psched_time_t now = psched_get_time(); 288 psched_time_t now = psched_get_time();
283 289
284 /* if more time remaining? */ 290 /* if more time remaining? */
@@ -310,28 +316,6 @@ static void netem_reset(struct Qdisc *sch)
310 qdisc_watchdog_cancel(&q->watchdog); 316 qdisc_watchdog_cancel(&q->watchdog);
311} 317}
312 318
313/* Pass size change message down to embedded FIFO */
314static int set_fifo_limit(struct Qdisc *q, int limit)
315{
316 struct nlattr *nla;
317 int ret = -ENOMEM;
318
319 /* Hack to avoid sending change message to non-FIFO */
320 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
321 return 0;
322
323 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
324 if (nla) {
325 nla->nla_type = RTM_NEWQDISC;
326 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
327 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
328
329 ret = q->ops->change(q, nla);
330 kfree(nla);
331 }
332 return ret;
333}
334
335/* 319/*
336 * Distribution data is a variable size payload containing 320 * Distribution data is a variable size payload containing
337 * signed 16 bit values. 321 * signed 16 bit values.
@@ -341,6 +325,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
341 struct netem_sched_data *q = qdisc_priv(sch); 325 struct netem_sched_data *q = qdisc_priv(sch);
342 unsigned long n = nla_len(attr)/sizeof(__s16); 326 unsigned long n = nla_len(attr)/sizeof(__s16);
343 const __s16 *data = nla_data(attr); 327 const __s16 *data = nla_data(attr);
328 spinlock_t *root_lock;
344 struct disttable *d; 329 struct disttable *d;
345 int i; 330 int i;
346 331
@@ -355,9 +340,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
355 for (i = 0; i < n; i++) 340 for (i = 0; i < n; i++)
356 d->table[i] = data[i]; 341 d->table[i] = data[i];
357 342
358 spin_lock_bh(&sch->dev->queue_lock); 343 root_lock = qdisc_root_lock(sch);
344
345 spin_lock_bh(root_lock);
359 d = xchg(&q->delay_dist, d); 346 d = xchg(&q->delay_dist, d);
360 spin_unlock_bh(&sch->dev->queue_lock); 347 spin_unlock_bh(root_lock);
361 348
362 kfree(d); 349 kfree(d);
363 return 0; 350 return 0;
@@ -416,7 +403,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
416 if (ret < 0) 403 if (ret < 0)
417 return ret; 404 return ret;
418 405
419 ret = set_fifo_limit(q->qdisc, qopt->limit); 406 ret = fifo_set_limit(q->qdisc, qopt->limit);
420 if (ret) { 407 if (ret) {
421 pr_debug("netem: can't set fifo limit\n"); 408 pr_debug("netem: can't set fifo limit\n");
422 return ret; 409 return ret;
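
The removed set_fifo_limit() above reappears here as fifo_set_limit(), presumably consolidated into sch_fifo.c so netem, red and tbf share one copy. A kernel-context sketch of the shared helper, assuming it keeps the removed body verbatim (not a standalone program):

/* sketch: shared helper in net/sched/sch_fifo.c, assumed identical to
 * the set_fifo_limit() body deleted above */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* hack to avoid sending a change message to non-FIFO qdiscs */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla);
		kfree(nla);
	}
	return ret;
}
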
@@ -476,7 +463,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
476{ 463{
477 struct fifo_sched_data *q = qdisc_priv(sch); 464 struct fifo_sched_data *q = qdisc_priv(sch);
478 struct sk_buff_head *list = &sch->q; 465 struct sk_buff_head *list = &sch->q;
479 psched_time_t tnext = ((struct netem_skb_cb *)nskb->cb)->time_to_send; 466 psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
480 struct sk_buff *skb; 467 struct sk_buff *skb;
481 468
482 if (likely(skb_queue_len(list) < q->limit)) { 469 if (likely(skb_queue_len(list) < q->limit)) {
@@ -487,8 +474,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
487 } 474 }
488 475
489 skb_queue_reverse_walk(list, skb) { 476 skb_queue_reverse_walk(list, skb) {
490 const struct netem_skb_cb *cb 477 const struct netem_skb_cb *cb = netem_skb_cb(skb);
491 = (const struct netem_skb_cb *)skb->cb;
492 478
493 if (tnext >= cb->time_to_send) 479 if (tnext >= cb->time_to_send)
494 break; 480 break;
@@ -496,8 +482,8 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
496 482
497 __skb_queue_after(list, skb, nskb); 483 __skb_queue_after(list, skb, nskb);
498 484
499 sch->qstats.backlog += nskb->len; 485 sch->qstats.backlog += qdisc_pkt_len(nskb);
500 sch->bstats.bytes += nskb->len; 486 sch->bstats.bytes += qdisc_pkt_len(nskb);
501 sch->bstats.packets++; 487 sch->bstats.packets++;
502 488
503 return NET_XMIT_SUCCESS; 489 return NET_XMIT_SUCCESS;
@@ -517,7 +503,7 @@ static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
517 503
518 q->limit = ctl->limit; 504 q->limit = ctl->limit;
519 } else 505 } else
520 q->limit = max_t(u32, sch->dev->tx_queue_len, 1); 506 q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
521 507
522 q->oldest = PSCHED_PASTPERFECT; 508 q->oldest = PSCHED_PASTPERFECT;
523 return 0; 509 return 0;
@@ -558,7 +544,8 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
558 544
559 qdisc_watchdog_init(&q->watchdog, sch); 545 qdisc_watchdog_init(&q->watchdog, sch);
560 546
561 q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops, 547 q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
548 &tfifo_qdisc_ops,
562 TC_H_MAKE(sch->handle, 1)); 549 TC_H_MAKE(sch->handle, 1));
563 if (!q->qdisc) { 550 if (!q->qdisc) {
564 pr_debug("netem: qdisc create failed\n"); 551 pr_debug("netem: qdisc create failed\n");
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 5532f1031ab5..f849243eb095 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -24,11 +24,9 @@
24struct prio_sched_data 24struct prio_sched_data
25{ 25{
26 int bands; 26 int bands;
27 int curband; /* for round-robin */
28 struct tcf_proto *filter_list; 27 struct tcf_proto *filter_list;
29 u8 prio2band[TC_PRIO_MAX+1]; 28 u8 prio2band[TC_PRIO_MAX+1];
30 struct Qdisc *queues[TCQ_PRIO_BANDS]; 29 struct Qdisc *queues[TCQ_PRIO_BANDS];
31 int mq;
32}; 30};
33 31
34 32
@@ -55,17 +53,14 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
55 if (!q->filter_list || err < 0) { 53 if (!q->filter_list || err < 0) {
56 if (TC_H_MAJ(band)) 54 if (TC_H_MAJ(band))
57 band = 0; 55 band = 0;
58 band = q->prio2band[band&TC_PRIO_MAX]; 56 return q->queues[q->prio2band[band&TC_PRIO_MAX]];
59 goto out;
60 } 57 }
61 band = res.classid; 58 band = res.classid;
62 } 59 }
63 band = TC_H_MIN(band) - 1; 60 band = TC_H_MIN(band) - 1;
64 if (band >= q->bands) 61 if (band >= q->bands)
65 band = q->prio2band[0]; 62 return q->queues[q->prio2band[0]];
66out: 63
67 if (q->mq)
68 skb_set_queue_mapping(skb, band);
69 return q->queues[band]; 64 return q->queues[band];
70} 65}
71 66
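
With the rr/multiqueue paths gone, prio_classify() reduces to mapping skb->priority through prio2band and falling back to prio2band[0] when the result is out of range. A tiny demo of that mapping, using the familiar pfifo_fast-style default priomap:

#include <stdio.h>

#define TC_PRIO_MAX 15

int main(void)
{
	/* the familiar default priomap from tc-prio(8) */
	const unsigned char prio2band[TC_PRIO_MAX + 1] =
		{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
	const int bands = 3;
	unsigned int prio;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		int band = prio2band[prio & TC_PRIO_MAX];

		/* out-of-range bands fall back to prio2band[0],
		 * as prio_classify() does for filter results */
		if (band >= bands)
			band = prio2band[0];
		printf("skb->priority %2u -> band %d\n", prio, band);
	}
	return 0;
}
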
@@ -86,8 +81,9 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
86 } 81 }
87#endif 82#endif
88 83
89 if ((ret = qdisc->enqueue(skb, qdisc)) == NET_XMIT_SUCCESS) { 84 ret = qdisc_enqueue(skb, qdisc);
90 sch->bstats.bytes += skb->len; 85 if (ret == NET_XMIT_SUCCESS) {
86 sch->bstats.bytes += qdisc_pkt_len(skb);
91 sch->bstats.packets++; 87 sch->bstats.packets++;
92 sch->q.qlen++; 88 sch->q.qlen++;
93 return NET_XMIT_SUCCESS; 89 return NET_XMIT_SUCCESS;
@@ -123,67 +119,23 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
123} 119}
124 120
125 121
126static struct sk_buff * 122static struct sk_buff *prio_dequeue(struct Qdisc* sch)
127prio_dequeue(struct Qdisc* sch)
128{ 123{
129 struct sk_buff *skb;
130 struct prio_sched_data *q = qdisc_priv(sch); 124 struct prio_sched_data *q = qdisc_priv(sch);
131 int prio; 125 int prio;
132 struct Qdisc *qdisc;
133 126
134 for (prio = 0; prio < q->bands; prio++) { 127 for (prio = 0; prio < q->bands; prio++) {
135 /* Check if the target subqueue is available before 128 struct Qdisc *qdisc = q->queues[prio];
136 * pulling an skb. This way we avoid excessive requeues 129 struct sk_buff *skb = qdisc->dequeue(qdisc);
137 * for slower queues. 130 if (skb) {
138 */ 131 sch->q.qlen--;
139 if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) { 132 return skb;
140 qdisc = q->queues[prio];
141 skb = qdisc->dequeue(qdisc);
142 if (skb) {
143 sch->q.qlen--;
144 return skb;
145 }
146 } 133 }
147 } 134 }
148 return NULL; 135 return NULL;
149 136
150} 137}
151 138
152static struct sk_buff *rr_dequeue(struct Qdisc* sch)
153{
154 struct sk_buff *skb;
155 struct prio_sched_data *q = qdisc_priv(sch);
156 struct Qdisc *qdisc;
157 int bandcount;
158
159 /* Only take one pass through the queues. If nothing is available,
160 * return nothing.
161 */
162 for (bandcount = 0; bandcount < q->bands; bandcount++) {
163 /* Check if the target subqueue is available before
164 * pulling an skb. This way we avoid excessive requeues
165 * for slower queues. If the queue is stopped, try the
166 * next queue.
167 */
168 if (!__netif_subqueue_stopped(sch->dev,
169 (q->mq ? q->curband : 0))) {
170 qdisc = q->queues[q->curband];
171 skb = qdisc->dequeue(qdisc);
172 if (skb) {
173 sch->q.qlen--;
174 q->curband++;
175 if (q->curband >= q->bands)
176 q->curband = 0;
177 return skb;
178 }
179 }
180 q->curband++;
181 if (q->curband >= q->bands)
182 q->curband = 0;
183 }
184 return NULL;
185}
186
187static unsigned int prio_drop(struct Qdisc* sch) 139static unsigned int prio_drop(struct Qdisc* sch)
188{ 140{
189 struct prio_sched_data *q = qdisc_priv(sch); 141 struct prio_sched_data *q = qdisc_priv(sch);
@@ -228,45 +180,22 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
228{ 180{
229 struct prio_sched_data *q = qdisc_priv(sch); 181 struct prio_sched_data *q = qdisc_priv(sch);
230 struct tc_prio_qopt *qopt; 182 struct tc_prio_qopt *qopt;
231 struct nlattr *tb[TCA_PRIO_MAX + 1];
232 int err;
233 int i; 183 int i;
234 184
235 err = nla_parse_nested_compat(tb, TCA_PRIO_MAX, opt, NULL, qopt, 185 if (nla_len(opt) < sizeof(*qopt))
236 sizeof(*qopt)); 186 return -EINVAL;
237 if (err < 0) 187 qopt = nla_data(opt);
238 return err;
239
240 q->bands = qopt->bands;
241 /* If we're multiqueue, make sure the number of incoming bands
242 * matches the number of queues on the device we're associating with.
243 * If the number of bands requested is zero, then set q->bands to
244 * dev->egress_subqueue_count. Also, the root qdisc must be the
245 * only one that is enabled for multiqueue, since it's the only one
246 * that interacts with the underlying device.
247 */
248 q->mq = nla_get_flag(tb[TCA_PRIO_MQ]);
249 if (q->mq) {
250 if (sch->parent != TC_H_ROOT)
251 return -EINVAL;
252 if (netif_is_multiqueue(sch->dev)) {
253 if (q->bands == 0)
254 q->bands = sch->dev->egress_subqueue_count;
255 else if (q->bands != sch->dev->egress_subqueue_count)
256 return -EINVAL;
257 } else
258 return -EOPNOTSUPP;
259 }
260 188
261 if (q->bands > TCQ_PRIO_BANDS || q->bands < 2) 189 if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
262 return -EINVAL; 190 return -EINVAL;
263 191
264 for (i=0; i<=TC_PRIO_MAX; i++) { 192 for (i=0; i<=TC_PRIO_MAX; i++) {
265 if (qopt->priomap[i] >= q->bands) 193 if (qopt->priomap[i] >= qopt->bands)
266 return -EINVAL; 194 return -EINVAL;
267 } 195 }
268 196
269 sch_tree_lock(sch); 197 sch_tree_lock(sch);
198 q->bands = qopt->bands;
270 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); 199 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
271 200
272 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) { 201 for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
@@ -281,7 +210,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
281 for (i=0; i<q->bands; i++) { 210 for (i=0; i<q->bands; i++) {
282 if (q->queues[i] == &noop_qdisc) { 211 if (q->queues[i] == &noop_qdisc) {
283 struct Qdisc *child; 212 struct Qdisc *child;
284 child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, 213 child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
214 &pfifo_qdisc_ops,
285 TC_H_MAKE(sch->handle, i + 1)); 215 TC_H_MAKE(sch->handle, i + 1));
286 if (child) { 216 if (child) {
287 sch_tree_lock(sch); 217 sch_tree_lock(sch);
@@ -331,10 +261,6 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
331 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt); 261 nest = nla_nest_compat_start(skb, TCA_OPTIONS, sizeof(opt), &opt);
332 if (nest == NULL) 262 if (nest == NULL)
333 goto nla_put_failure; 263 goto nla_put_failure;
334 if (q->mq) {
335 if (nla_put_flag(skb, TCA_PRIO_MQ) < 0)
336 goto nla_put_failure;
337 }
338 nla_nest_compat_end(skb, nest); 264 nla_nest_compat_end(skb, nest);
339 265
340 return skb->len; 266 return skb->len;
@@ -507,44 +433,17 @@ static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
507 .owner = THIS_MODULE, 433 .owner = THIS_MODULE,
508}; 434};
509 435
510static struct Qdisc_ops rr_qdisc_ops __read_mostly = {
511 .next = NULL,
512 .cl_ops = &prio_class_ops,
513 .id = "rr",
514 .priv_size = sizeof(struct prio_sched_data),
515 .enqueue = prio_enqueue,
516 .dequeue = rr_dequeue,
517 .requeue = prio_requeue,
518 .drop = prio_drop,
519 .init = prio_init,
520 .reset = prio_reset,
521 .destroy = prio_destroy,
522 .change = prio_tune,
523 .dump = prio_dump,
524 .owner = THIS_MODULE,
525};
526
527static int __init prio_module_init(void) 436static int __init prio_module_init(void)
528{ 437{
529 int err; 438 return register_qdisc(&prio_qdisc_ops);
530
531 err = register_qdisc(&prio_qdisc_ops);
532 if (err < 0)
533 return err;
534 err = register_qdisc(&rr_qdisc_ops);
535 if (err < 0)
536 unregister_qdisc(&prio_qdisc_ops);
537 return err;
538} 439}
539 440
540static void __exit prio_module_exit(void) 441static void __exit prio_module_exit(void)
541{ 442{
542 unregister_qdisc(&prio_qdisc_ops); 443 unregister_qdisc(&prio_qdisc_ops);
543 unregister_qdisc(&rr_qdisc_ops);
544} 444}
545 445
546module_init(prio_module_init) 446module_init(prio_module_init)
547module_exit(prio_module_exit) 447module_exit(prio_module_exit)
548 448
549MODULE_LICENSE("GPL"); 449MODULE_LICENSE("GPL");
550MODULE_ALIAS("sch_rr");
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 5c569853b9c0..3f2d1d7f3bbd 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -92,9 +92,9 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
92 break; 92 break;
93 } 93 }
94 94
95 ret = child->enqueue(skb, child); 95 ret = qdisc_enqueue(skb, child);
96 if (likely(ret == NET_XMIT_SUCCESS)) { 96 if (likely(ret == NET_XMIT_SUCCESS)) {
97 sch->bstats.bytes += skb->len; 97 sch->bstats.bytes += qdisc_pkt_len(skb);
98 sch->bstats.packets++; 98 sch->bstats.packets++;
99 sch->q.qlen++; 99 sch->q.qlen++;
100 } else { 100 } else {
@@ -174,33 +174,6 @@ static void red_destroy(struct Qdisc *sch)
174 qdisc_destroy(q->qdisc); 174 qdisc_destroy(q->qdisc);
175} 175}
176 176
177static struct Qdisc *red_create_dflt(struct Qdisc *sch, u32 limit)
178{
179 struct Qdisc *q;
180 struct nlattr *nla;
181 int ret;
182
183 q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
184 TC_H_MAKE(sch->handle, 1));
185 if (q) {
186 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
187 GFP_KERNEL);
188 if (nla) {
189 nla->nla_type = RTM_NEWQDISC;
190 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
191 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
192
193 ret = q->ops->change(q, nla);
194 kfree(nla);
195
196 if (ret == 0)
197 return q;
198 }
199 qdisc_destroy(q);
200 }
201 return NULL;
202}
203
204static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { 177static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
205 [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, 178 [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) },
206 [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, 179 [TCA_RED_STAB] = { .len = RED_STAB_SIZE },
@@ -228,9 +201,9 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
228 ctl = nla_data(tb[TCA_RED_PARMS]); 201 ctl = nla_data(tb[TCA_RED_PARMS]);
229 202
230 if (ctl->limit > 0) { 203 if (ctl->limit > 0) {
231 child = red_create_dflt(sch, ctl->limit); 204 child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
232 if (child == NULL) 205 if (IS_ERR(child))
233 return -ENOMEM; 206 return PTR_ERR(child);
234 } 207 }
235 208
236 sch_tree_lock(sch); 209 sch_tree_lock(sch);
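
red_change() can now distinguish failure causes because fifo_create_dflt() returns an ERR_PTR-encoded pointer instead of NULL. A userspace model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom (own helpers, same trick of smuggling a negative errno through the pointer value):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(): a small negative errno
 * travels through the pointer itself, so no separate status is needed */
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-4095L;
}
static long ptr_err(const void *p) { return (long)p; }

static void *create_child(int simulate_failure)
{
	if (simulate_failure)
		return err_ptr(-ENOMEM);	/* an error, not a valid object */
	return malloc(16);
}

int main(void)
{
	void *child = create_child(1);

	if (is_err(child)) {		/* as red_change() now checks */
		printf("create failed: %ld\n", ptr_err(child));
		return 1;
	}
	free(child);
	return 0;
}
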
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a97afbfb952..8589da666568 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -245,7 +245,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
245 if (d > 1) { 245 if (d > 1) {
246 sfq_index x = q->dep[d + SFQ_DEPTH].next; 246 sfq_index x = q->dep[d + SFQ_DEPTH].next;
247 skb = q->qs[x].prev; 247 skb = q->qs[x].prev;
248 len = skb->len; 248 len = qdisc_pkt_len(skb);
249 __skb_unlink(skb, &q->qs[x]); 249 __skb_unlink(skb, &q->qs[x]);
250 kfree_skb(skb); 250 kfree_skb(skb);
251 sfq_dec(q, x); 251 sfq_dec(q, x);
@@ -261,7 +261,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
261 q->next[q->tail] = q->next[d]; 261 q->next[q->tail] = q->next[d];
262 q->allot[q->next[d]] += q->quantum; 262 q->allot[q->next[d]] += q->quantum;
263 skb = q->qs[d].prev; 263 skb = q->qs[d].prev;
264 len = skb->len; 264 len = qdisc_pkt_len(skb);
265 __skb_unlink(skb, &q->qs[d]); 265 __skb_unlink(skb, &q->qs[d]);
266 kfree_skb(skb); 266 kfree_skb(skb);
267 sfq_dec(q, d); 267 sfq_dec(q, d);
@@ -305,7 +305,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
305 if (q->qs[x].qlen >= q->limit) 305 if (q->qs[x].qlen >= q->limit)
306 return qdisc_drop(skb, sch); 306 return qdisc_drop(skb, sch);
307 307
308 sch->qstats.backlog += skb->len; 308 sch->qstats.backlog += qdisc_pkt_len(skb);
309 __skb_queue_tail(&q->qs[x], skb); 309 __skb_queue_tail(&q->qs[x], skb);
310 sfq_inc(q, x); 310 sfq_inc(q, x);
311 if (q->qs[x].qlen == 1) { /* The flow is new */ 311 if (q->qs[x].qlen == 1) { /* The flow is new */
@@ -320,7 +320,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
320 } 320 }
321 } 321 }
322 if (++sch->q.qlen <= q->limit) { 322 if (++sch->q.qlen <= q->limit) {
323 sch->bstats.bytes += skb->len; 323 sch->bstats.bytes += qdisc_pkt_len(skb);
324 sch->bstats.packets++; 324 sch->bstats.packets++;
325 return 0; 325 return 0;
326 } 326 }
@@ -352,7 +352,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
352 q->hash[x] = hash; 352 q->hash[x] = hash;
353 } 353 }
354 354
355 sch->qstats.backlog += skb->len; 355 sch->qstats.backlog += qdisc_pkt_len(skb);
356 __skb_queue_head(&q->qs[x], skb); 356 __skb_queue_head(&q->qs[x], skb);
357 /* If selected queue has length q->limit+1, this means that 357 /* If selected queue has length q->limit+1, this means that
358 * all other queues are empty and we do a simple tail drop. 358 * all other queues are empty and we do a simple tail drop.
@@ -363,7 +363,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
363 skb = q->qs[x].prev; 363 skb = q->qs[x].prev;
364 __skb_unlink(skb, &q->qs[x]); 364 __skb_unlink(skb, &q->qs[x]);
365 sch->qstats.drops++; 365 sch->qstats.drops++;
366 sch->qstats.backlog -= skb->len; 366 sch->qstats.backlog -= qdisc_pkt_len(skb);
367 kfree_skb(skb); 367 kfree_skb(skb);
368 return NET_XMIT_CN; 368 return NET_XMIT_CN;
369 } 369 }
@@ -411,7 +411,7 @@ sfq_dequeue(struct Qdisc *sch)
411 skb = __skb_dequeue(&q->qs[a]); 411 skb = __skb_dequeue(&q->qs[a]);
412 sfq_dec(q, a); 412 sfq_dec(q, a);
413 sch->q.qlen--; 413 sch->q.qlen--;
414 sch->qstats.backlog -= skb->len; 414 sch->qstats.backlog -= qdisc_pkt_len(skb);
415 415
416 /* Is the slot empty? */ 416 /* Is the slot empty? */
417 if (q->qs[a].qlen == 0) { 417 if (q->qs[a].qlen == 0) {
@@ -423,7 +423,7 @@ sfq_dequeue(struct Qdisc *sch)
423 } 423 }
424 q->next[q->tail] = a; 424 q->next[q->tail] = a;
425 q->allot[a] += q->quantum; 425 q->allot[a] += q->quantum;
426 } else if ((q->allot[a] -= skb->len) <= 0) { 426 } else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
427 q->tail = a; 427 q->tail = a;
428 a = q->next[a]; 428 a = q->next[a];
429 q->allot[a] += q->quantum; 429 q->allot[a] += q->quantum;
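The sfq_dequeue() hunk above is the one spot where qdisc_pkt_len() changes scheduling rather than just statistics: the byte count debited from a flow's allotment now reflects the GSO wire length. The accounting is plain deficit round-robin; a self-contained sketch of the idea (hypothetical helper, slightly simplified -- SFQ credits the *next* flow's allotment, this folds it into one counter):

    /* Debit one dequeued packet from a flow's credit; returns nonzero
     * when the credit is exhausted and the caller should rotate to the
     * next flow, topping an allotment back up by 'quantum'. */
    static int drr_charge(int *allot, unsigned int pkt_len, int quantum)
    {
            *allot -= pkt_len;
            if (*allot <= 0) {
                    *allot += quantum;
                    return 1;
            }
            return 0;
    }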
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	sch_tree_lock(sch);
-	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
-		q->quantum = psched_mtu(sch->dev);
+		q->quantum = psched_mtu(qdisc_dev(sch));
 		q->perturb_period = 0;
 		q->perturbation = net_random();
 	} else {
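All of the skb->len accounting in this file becomes qdisc_pkt_len(skb), and psched_mtu() now takes the device via qdisc_dev(sch). The length accessor reads a value cached in the skb control block before ->enqueue(), so schedulers charge GSO packets their on-wire length rather than the super-packet's skb->len. For reference, a trimmed sketch of the helpers as they appear in include/net/sch_generic.h around this series (not part of this diff):

    struct qdisc_skb_cb {
            unsigned int pkt_len;   /* filled in before ->enqueue() */
    };

    static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
    {
            return (struct qdisc_skb_cb *)skb->cb;
    }

    static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
    {
            return qdisc_skb_cb(skb)->pkt_len;
    }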
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0b7d78f59d8c..b296672f7632 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,7 +123,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (skb->len > q->max_size) {
+	if (qdisc_pkt_len(skb) > q->max_size) {
 		sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
@@ -133,13 +133,14 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		return NET_XMIT_DROP;
 	}
 
-	if ((ret = q->qdisc->enqueue(skb, q->qdisc)) != 0) {
+	ret = qdisc_enqueue(skb, q->qdisc);
+	if (ret != 0) {
 		sch->qstats.drops++;
 		return ret;
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	return 0;
 }
@@ -180,7 +181,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc* sch)
 		psched_time_t now;
 		long toks;
 		long ptoks = 0;
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 
 		now = psched_get_time();
 		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
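In tbf_dequeue(), 'len' feeds the token-bucket test: a packet may leave only once the tokens accumulated since q->t_c (bounded by the configured buffer) cover its transmission time at the configured rate, so switching to qdisc_pkt_len() makes TBF shape GSO traffic at its true wire rate. A hypothetical helper (not from the patch) showing the cost being debited:

    /* Transmission time, in usec, of a 'len'-byte packet at 'rate_Bps'
     * bytes per second -- the quantity TBF debits from the bucket.
     * E.g. 1500 bytes at 125000 B/s (1 Mbit/s) costs 12000 usec of
     * tokens. */
    static unsigned long pkt_cost_usec(unsigned int len, unsigned long rate_Bps)
    {
            return (unsigned long)len * 1000000UL / rate_Bps;
    }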
@@ -242,34 +243,6 @@ static void tbf_reset(struct Qdisc* sch)
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
-static struct Qdisc *tbf_create_dflt_qdisc(struct Qdisc *sch, u32 limit)
-{
-	struct Qdisc *q;
-	struct nlattr *nla;
-	int ret;
-
-	q = qdisc_create_dflt(sch->dev, &bfifo_qdisc_ops,
-			      TC_H_MAKE(sch->handle, 1));
-	if (q) {
-		nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)),
-			      GFP_KERNEL);
-		if (nla) {
-			nla->nla_type = RTM_NEWQDISC;
-			nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
-			((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
-
-			ret = q->ops->change(q, nla);
-			kfree(nla);
-
-			if (ret == 0)
-				return q;
-		}
-		qdisc_destroy(q);
-	}
-
-	return NULL;
-}
-
 static const struct nla_policy tbf_policy[TCA_TBF_MAX + 1] = {
 	[TCA_TBF_PARMS]	= { .len = sizeof(struct tc_tbf_qopt) },
 	[TCA_TBF_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
@@ -322,8 +295,11 @@ static int tbf_change(struct Qdisc* sch, struct nlattr *opt)
 		goto done;
 
 	if (qopt->limit > 0) {
-		if ((child = tbf_create_dflt_qdisc(sch, qopt->limit)) == NULL)
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+		if (IS_ERR(child)) {
+			err = PTR_ERR(child);
 			goto done;
+		}
 	}
 
 	sch_tree_lock(sch);
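The open-coded netlink construction removed above is replaced by fifo_create_dflt(), shared from sch_fifo.c; note the error convention changes from NULL to ERR_PTR(), which the new call site unpacks with IS_ERR()/PTR_ERR(). Roughly, the helper's shape (a sketch only -- the exact qdisc_create_dflt() signature shifts within this series):

    struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
                                   unsigned int limit)
    {
            struct Qdisc *q;
            int err = -ENOMEM;

            /* create the [bp]fifo child under handle <sch>:1 ... */
            q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, ops,
                                  TC_H_MAKE(sch->handle, 1));
            if (q) {
                    /* ... and apply 'limit' through its change() op */
                    err = fifo_set_limit(q, limit);
                    if (err < 0) {
                            qdisc_destroy(q);
                            q = NULL;
                    }
            }
            return q ? : ERR_PTR(err);
    }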
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 0444fd0f0d22..537223642b6e 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,12 +78,12 @@ struct teql_sched_data
 static int
 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
@@ -107,17 +107,19 @@ static struct sk_buff *
 teql_dequeue(struct Qdisc* sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
+	struct netdev_queue *dat_queue;
 	struct sk_buff *skb;
 
 	skb = __skb_dequeue(&dat->q);
+	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
 	if (skb == NULL) {
-		struct net_device *m = dat->m->dev->qdisc->dev;
+		struct net_device *m = qdisc_dev(dat_queue->qdisc);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
 	}
-	sch->q.qlen = dat->q.qlen + dat->m->dev->qdisc->q.qlen;
+	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
 }
 
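The pattern repeated throughout this file: a device no longer carries a single dev->qdisc; qdiscs hang off per-device TX queues, and single-queue code paths fetch queue 0 explicitly. The accessors involved, roughly as defined in include/linux/netdevice.h and include/net/sch_generic.h at this point (reference sketch, not part of this diff):

    static inline struct netdev_queue *netdev_get_tx_queue(
            const struct net_device *dev, unsigned int index)
    {
            return &dev->_tx[index];
    }

    static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
    {
            return qdisc->dev_queue->dev;
    }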
@@ -153,10 +155,16 @@ teql_destroy(struct Qdisc* sch)
 			if (q == master->slaves) {
 				master->slaves = NEXT_SLAVE(q);
 				if (q == master->slaves) {
+					struct netdev_queue *txq;
+					spinlock_t *root_lock;
+
+					txq = netdev_get_tx_queue(master->dev, 0);
 					master->slaves = NULL;
-					spin_lock_bh(&master->dev->queue_lock);
-					qdisc_reset(master->dev->qdisc);
-					spin_unlock_bh(&master->dev->queue_lock);
+
+					root_lock = qdisc_root_lock(txq->qdisc);
+					spin_lock_bh(root_lock);
+					qdisc_reset(txq->qdisc);
+					spin_unlock_bh(root_lock);
 				}
 			}
 			skb_queue_purge(&dat->q);
@@ -170,7 +178,7 @@ teql_destroy(struct Qdisc* sch)
 
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_master *m = (struct teql_master*)sch->ops;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
@@ -216,7 +224,8 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 static int
 __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
 {
-	struct teql_sched_data *q = qdisc_priv(dev->qdisc);
+	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
+	struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
 	struct neighbour *mn = skb->dst->neighbour;
 	struct neighbour *n = q->ncache;
 
@@ -252,7 +261,8 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 static inline int teql_resolve(struct sk_buff *skb,
 			       struct sk_buff *skb_res, struct net_device *dev)
 {
-	if (dev->qdisc == &noop_qdisc)
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+	if (txq->qdisc == &noop_qdisc)
 		return -ENODEV;
 
 	if (dev->header_ops == NULL ||
@@ -268,7 +278,6 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
-	int len = skb->len;
 	int subq = skb_get_queue_mapping(skb);
 	struct sk_buff *skb_res = NULL;
 
@@ -282,12 +291,13 @@ restart:
 		goto drop;
 
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
+		struct netdev_queue *slave_txq;
 
-		if (slave->qdisc_sleeping != q)
+		slave_txq = netdev_get_tx_queue(slave, 0);
+		if (slave_txq->qdisc_sleeping != q)
 			continue;
-		if (netif_queue_stopped(slave) ||
-		    __netif_subqueue_stopped(slave, subq) ||
+		if (__netif_subqueue_stopped(slave, subq) ||
 		    !netif_running(slave)) {
 			busy = 1;
 			continue;
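The dropped netif_queue_stopped() test had become redundant: after the TX-queue conversion both it and __netif_subqueue_stopped() reduce to the same per-queue state bit (the former is simply the queue-0 case). Approximately, per include/linux/netdevice.h of the era (reference sketch):

    static inline int netif_tx_queue_stopped(const struct netdev_queue *txq)
    {
            return test_bit(__QUEUE_STATE_XOFF, &txq->state);
    }

    static inline int __netif_subqueue_stopped(const struct net_device *dev,
                                               u16 queue_index)
    {
            return netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue_index));
    }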
@@ -296,14 +306,14 @@ restart:
 		switch (teql_resolve(skb, skb_res, slave)) {
 		case 0:
 			if (netif_tx_trylock(slave)) {
-				if (!netif_queue_stopped(slave) &&
-				    !__netif_subqueue_stopped(slave, subq) &&
+				if (!__netif_subqueue_stopped(slave, subq) &&
 				    slave->hard_start_xmit(skb, slave) == 0) {
 					netif_tx_unlock(slave);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
-					master->stats.tx_bytes += len;
+					master->stats.tx_bytes +=
+						qdisc_pkt_len(skb);
 					return 0;
 				}
 				netif_tx_unlock(slave);
@@ -352,7 +362,7 @@ static int teql_master_open(struct net_device *dev)
 
 	q = m->slaves;
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
 
 		if (slave == NULL)
 			return -EUNATCH;
@@ -403,7 +413,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
 	q = m->slaves;
 	if (q) {
 		do {
-			if (new_mtu > q->dev->mtu)
+			if (new_mtu > qdisc_dev(q)->mtu)
 				return -EINVAL;
 		} while ((q=NEXT_SLAVE(q)) != m->slaves);
 	}
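Both loops above walk the master's circular list of slave qdiscs via NEXT_SLAVE(), wrapping back to the head; teql_master_mtu() in effect caps the master's MTU at the minimum slave MTU. A self-contained illustration of the same ring-walk pattern (hypothetical types, not kernel code):

    struct slave { struct slave *next; int mtu; };  /* circular list */

    /* Smallest MTU on the ring: visit each slave exactly once,
     * stopping when the walk returns to the head. */
    static int ring_min_mtu(const struct slave *head)
    {
            const struct slave *s = head;
            int min_mtu;

            if (!head)
                    return 0;
            min_mtu = head->mtu;
            do {
                    if (s->mtu < min_mtu)
                            min_mtu = s->mtu;
                    s = s->next;
            } while (s != head);
            return min_mtu;
    }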