Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/cls_api.c      2
-rw-r--r--  net/sched/cls_route.c    4
-rw-r--r--  net/sched/sch_api.c     10
-rw-r--r--  net/sched/sch_atm.c      4
-rw-r--r--  net/sched/sch_cbq.c     22
-rw-r--r--  net/sched/sch_dsmark.c   4
-rw-r--r--  net/sched/sch_fifo.c     6
-rw-r--r--  net/sched/sch_generic.c 12
-rw-r--r--  net/sched/sch_gred.c     2
-rw-r--r--  net/sched/sch_hfsc.c    10
-rw-r--r--  net/sched/sch_htb.c     24
-rw-r--r--  net/sched/sch_netem.c   10
-rw-r--r--  net/sched/sch_prio.c    15
-rw-r--r--  net/sched/sch_sfq.c      4
-rw-r--r--  net/sched/sch_teql.c    12
15 files changed, 70 insertions(+), 71 deletions(-)
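
Every hunk below performs the same mechanical substitution: direct sch->dev (and tp->q->dev, q->dev) dereferences become calls to a qdisc_dev() accessor. The accessor itself is not part of this diff; the following is a minimal sketch of what the converted call sites assume it looks like, with the device now reached through the qdisc's netdev_queue back-pointer (the real definition lives in include/net/sch_generic.h and may differ):

/* Sketch only -- not part of this patch. The device is recovered via
 * the qdisc's dev_queue back-pointer instead of a cached qdisc->dev. */
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}
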
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9360fc81e8c7..e2389f161e46 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -334,7 +334,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = tp->q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
 	tcm->tcm_parent = tp->classid;
 	tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
 	NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 784dcb870b98..5a16ca28aa3d 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -302,7 +302,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
 			*fp = f->next;
 			tcf_tree_unlock(tp);
 
-			route4_reset_fastmap(tp->q->dev, head, f->id);
+			route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
 			route4_delete_filter(tp, f);
 
 			/* Strip tree */
@@ -500,7 +500,7 @@ reinsert:
 	}
 	tcf_tree_unlock(tp);
 
-	route4_reset_fastmap(tp->q->dev, head, f->id);
+	route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
 	*arg = (unsigned long)f;
 	return 0;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b86c98bd06a3..1f893082a4f6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -281,7 +281,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
 	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
 						 timer);
-	struct net_device *dev = wd->qdisc->dev;
+	struct net_device *dev = qdisc_dev(wd->qdisc);
 
 	wd->qdisc->flags &= ~TCQ_F_THROTTLED;
 	smp_wmb();
@@ -493,7 +493,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
 			return;
 
-		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
 		if (sch == NULL) {
 			WARN_ON(parentid != TC_H_ROOT);
 			return;
@@ -593,7 +593,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 	if (ops == NULL)
 		goto err_out;
 
-	sch = qdisc_alloc(dev, dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch)) {
 		err = PTR_ERR(sch);
 		goto err_out2;
@@ -940,7 +940,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
 	tcm->tcm_family = AF_UNSPEC;
 	tcm->tcm__pad1 = 0;
 	tcm->tcm__pad2 = 0;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = clid;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = atomic_read(&q->refcnt);
@@ -1186,7 +1186,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
 	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
 	tcm = NLMSG_DATA(nlh);
 	tcm->tcm_family = AF_UNSPEC;
-	tcm->tcm_ifindex = q->dev->ifindex;
+	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
 	tcm->tcm_parent = q->handle;
 	tcm->tcm_handle = q->handle;
 	tcm->tcm_info = 0;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 3dddab531d5a..0de757e3be4a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -296,7 +296,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		goto err_out;
 	}
 	flow->filter_list = NULL;
-	flow->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				    &pfifo_qdisc_ops, classid);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
@@ -556,7 +556,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 
 	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
 	p->flows = &p->link;
-	p->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				      &pfifo_qdisc_ops, sch->handle);
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d360dcd0818b..9f2ace585fd6 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -650,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
 	}
 
 	sch->flags &= ~TCQ_F_THROTTLED;
-	netif_schedule(sch->dev);
+	netif_schedule(qdisc_dev(sch));
 	return HRTIMER_NORESTART;
 }
 
@@ -1077,9 +1077,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
 					q->quanta[prio];
 			}
-			if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
+			if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
 				printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
-				cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
 	}
@@ -1401,7 +1401,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.sibling = &q->link;
 	q->link.common.classid = sch->handle;
 	q->link.qdisc = sch;
-	if (!(q->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					    &pfifo_qdisc_ops,
 					    sch->handle)))
 		q->link.q = &noop_qdisc;
@@ -1411,7 +1411,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->link.cpriority = TC_CBQ_MAXPRIO-1;
 	q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
 	q->link.overlimit = cbq_ovl_classic;
-	q->link.allot = psched_mtu(sch->dev);
+	q->link.allot = psched_mtu(qdisc_dev(sch));
 	q->link.quantum = q->link.allot;
 	q->link.weight = q->link.R_tab->rate.rate;
 
@@ -1646,7 +1646,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	if (cl) {
 		if (new == NULL) {
-			new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+			new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 						&pfifo_qdisc_ops,
 						cl->common.classid);
 			if (new == NULL)
@@ -1746,10 +1746,10 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 #ifdef CONFIG_NET_CLS_ACT
 	struct cbq_sched_data *q = qdisc_priv(sch);
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 #endif
 
 	cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1879,7 +1879,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 	cl->R_tab = rtab;
 	rtab = NULL;
 	cl->refcnt = 1;
-	if (!(cl->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops, classid)))
 		cl->q = &noop_qdisc;
 	cl->common.classid = classid;
@@ -1919,7 +1919,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index c955ba24e5cf..3aafbd17393a 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -60,7 +60,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
 		sch, p, new, old);
 
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops,
 					sch->handle);
 		if (new == NULL)
@@ -391,7 +391,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
 	p->default_index = default_index;
 	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-	p->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				 &pfifo_qdisc_ops, sch->handle);
 	if (p->q == NULL)
 		p->q = &noop_qdisc;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 779eae85faf0..1d97fa42c902 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -48,10 +48,10 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
 	if (opt == NULL) {
-		u32 limit = sch->dev->tx_queue_len ? : 1;
+		u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
 		if (sch->ops == &bfifo_qdisc_ops)
-			limit *= sch->dev->mtu;
+			limit *= qdisc_dev(sch)->mtu;
 
 		q->limit = limit;
 	} else {
@@ -137,7 +137,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 	struct Qdisc *q;
 	int err = -ENOMEM;
 
-	q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 			      ops, TC_H_MAKE(sch->handle, 1));
 	if (q) {
 		err = fifo_set_limit(q, limit);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d97086480893..b626a4f32b6b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -364,7 +364,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
 	struct sk_buff_head *list = prio2list(skb, qdisc);
 
-	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
@@ -440,8 +440,7 @@ static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.owner		=	THIS_MODULE,
 };
 
-struct Qdisc *qdisc_alloc(struct net_device *dev,
-			  struct netdev_queue *dev_queue,
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
 {
 	void *p;
@@ -465,8 +464,7 @@ struct Qdisc *qdisc_alloc(struct net_device *dev,
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
-	sch->dev = dev;
-	dev_hold(dev);
+	dev_hold(qdisc_dev(sch));
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -481,7 +479,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
 {
 	struct Qdisc *sch;
 
-	sch = qdisc_alloc(dev, dev_queue, ops);
+	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
 	sch->stats_lock = &dev->queue_lock;
@@ -534,7 +532,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
 		ops->destroy(qdisc);
 
 	module_put(ops->owner);
-	dev_put(qdisc->dev);
+	dev_put(qdisc_dev(qdisc));
 	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);
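
The sch_generic.c hunks above are the substantive part of the patch: qdisc_alloc() drops its explicit struct net_device argument and the sch->dev field disappears, so the device reference taken at allocation and released in qdisc_destroy() both resolve through qdisc_dev(). Note the ordering in the qdisc_alloc() hunk: sch->dev_queue is assigned before dev_hold() runs, because the hold itself now finds the device through that pointer. A toy, self-contained model of the pairing (plain C, all names hypothetical beyond qdisc_dev, dev_hold and dev_put):

/* Toy model, not kernel code: demonstrates why the dev_queue
 * back-pointer must be set before the device reference is taken. */
#include <assert.h>
#include <stdio.h>

struct net_device { int refcnt; };
struct netdev_queue { struct net_device *dev; };
struct Qdisc { struct netdev_queue *dev_queue; };

static struct net_device *qdisc_dev(const struct Qdisc *q)
{
	return q->dev_queue->dev;
}

static void dev_hold(struct net_device *d) { d->refcnt++; }
static void dev_put(struct net_device *d) { d->refcnt--; }

int main(void)
{
	struct net_device dev = { .refcnt = 1 };
	struct netdev_queue txq = { .dev = &dev };
	struct Qdisc sch;

	sch.dev_queue = &txq;        /* must come first ...               */
	dev_hold(qdisc_dev(&sch));   /* ... so the hold can find the dev  */
	assert(dev.refcnt == 2);

	dev_put(qdisc_dev(&sch));    /* mirrors the put in qdisc_destroy() */
	assert(dev.refcnt == 1);
	printf("hold/put balanced via qdisc_dev()\n");
	return 0;
}
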
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index c89fba56db56..39fa28511f07 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -164,7 +164,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 				 * if no default DP has been configured. This
 				 * allows for DP flows to be left untouched.
 				 */
-				if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+				if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
 					return qdisc_enqueue_tail(skb, sch);
 				else
 					goto drop;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5a22fec4eadd..333525422f45 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1083,7 +1083,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	cl->refcnt = 1;
 	cl->sched = q;
 	cl->cl_parent = parent;
-	cl->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				      &pfifo_qdisc_ops, classid);
 	if (cl->qdisc == NULL)
 		cl->qdisc = &noop_qdisc;
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock, tca[TCA_RATE]);
+				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
@@ -1202,7 +1202,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 	if (cl->level > 0)
 		return -EINVAL;
 	if (new == NULL) {
-		new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					&pfifo_qdisc_ops,
 					cl->cl_common.classid);
 		if (new == NULL)
@@ -1445,7 +1445,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
 	q->root.cl_common.classid = sch->handle;
 	q->root.refcnt = 1;
 	q->root.sched = q;
-	q->root.qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops,
 					  sch->handle);
 	if (q->root.qdisc == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 956a67f66b9c..31f7d1536e6d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1026,7 +1026,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
 	qdisc_watchdog_init(&q->watchdog, sch);
 	skb_queue_head_init(&q->direct_queue);
 
-	q->direct_qlen = sch->dev->tx_queue_len;
+	q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
 
@@ -1043,7 +1043,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 
 	gopt.direct_pkts = q->direct_pkts;
 	gopt.version = HTB_VER;
@@ -1057,11 +1057,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
 	nla_nest_end(skb, nest);
 
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1073,7 +1073,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
 	if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1095,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
 	nla_nest_end(skb, nest);
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1129,7 +1129,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
 	if (cl && !cl->level) {
 		if (new == NULL &&
-		    (new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					     &pfifo_qdisc_ops,
 					     cl->common.classid))
 		    == NULL)
@@ -1257,7 +1257,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 		return -EBUSY;
 
 	if (!cl->level && htb_parent_last_child(cl)) {
-		new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops,
 					  cl->parent->common.classid);
 		last_child = 1;
@@ -1365,7 +1365,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &sch->dev->queue_lock,
+				  &qdisc_dev(sch)->queue_lock,
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1378,7 +1378,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
 		   so that can't be used inside of sch_tree_lock
 		   -- thanks to Karlis Peisenieks */
-		new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+		new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 					  &pfifo_qdisc_ops, classid);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
@@ -1420,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &sch->dev->queue_lock,
+					      &qdisc_dev(sch)->queue_lock,
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aa7a04e32ae9..790582960444 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	 * skb will be queued.
 	 */
 	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-		struct Qdisc *rootq = sch->dev->qdisc;
+		struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
 		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 		q->duplicate = 0;
 
@@ -333,9 +333,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	spin_lock_bh(&sch->dev->queue_lock);
+	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 	d = xchg(&q->delay_dist, d);
-	spin_unlock_bh(&sch->dev->queue_lock);
+	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 
 	kfree(d);
 	return 0;
@@ -495,7 +495,7 @@ static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
 
 		q->limit = ctl->limit;
 	} else
-		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+		q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
 
 	q->oldest = PSCHED_PASTPERFECT;
 	return 0;
@@ -536,7 +536,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 
 	qdisc_watchdog_init(&q->watchdog, sch);
 
-	q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+	q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 				     &tfifo_qdisc_ops,
 				     TC_H_MAKE(sch->handle, 1));
 	if (!q->qdisc) {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index ca58a039208e..39157f7bc046 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -136,7 +136,8 @@ prio_dequeue(struct Qdisc* sch)
 		 * pulling an skb. This way we avoid excessive requeues
 		 * for slower queues.
 		 */
-		if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+		if (!__netif_subqueue_stopped(qdisc_dev(sch),
+					      (q->mq ? prio : 0))) {
 			qdisc = q->queues[prio];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -165,8 +166,8 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch)
 		 * for slower queues. If the queue is stopped, try the
 		 * next queue.
 		 */
-		if (!__netif_subqueue_stopped(sch->dev,
-					      (q->mq ? q->curband : 0))) {
+		if (!__netif_subqueue_stopped(qdisc_dev(sch),
+					      (q->mq ? q->curband : 0))) {
 			qdisc = q->queues[q->curband];
 			skb = qdisc->dequeue(qdisc);
 			if (skb) {
@@ -249,10 +250,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	if (q->mq) {
 		if (sch->parent != TC_H_ROOT)
 			return -EINVAL;
-		if (netif_is_multiqueue(sch->dev)) {
+		if (netif_is_multiqueue(qdisc_dev(sch))) {
 			if (q->bands == 0)
-				q->bands = sch->dev->egress_subqueue_count;
-			else if (q->bands != sch->dev->egress_subqueue_count)
+				q->bands = qdisc_dev(sch)->egress_subqueue_count;
+			else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
 				return -EINVAL;
 		} else
 			return -EOPNOTSUPP;
@@ -281,7 +282,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
 	for (i=0; i<q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
 			struct Qdisc *child;
-			child = qdisc_create_dflt(sch->dev, sch->dev_queue,
+			child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 						  &pfifo_qdisc_ops,
 						  TC_H_MAKE(sch->handle, i + 1));
 			if (child) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a97afbfb952..8458f630fac4 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
 		return -EINVAL;
 
 	sch_tree_lock(sch);
-	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
 	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
 	if (opt == NULL) {
-		q->quantum = psched_mtu(sch->dev);
+		q->quantum = psched_mtu(qdisc_dev(sch));
 		q->perturb_period = 0;
 		q->perturbation = net_random();
 	} else {
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 0444fd0f0d22..b3fc82623fc6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,7 +78,7 @@ struct teql_sched_data
 static int
 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (q->q.qlen < dev->tx_queue_len) {
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch)
 
 	skb = __skb_dequeue(&dat->q);
 	if (skb == NULL) {
-		struct net_device *m = dat->m->dev->qdisc->dev;
+		struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
 		if (m) {
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
@@ -170,7 +170,7 @@ teql_destroy(struct Qdisc* sch)
 
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
-	struct net_device *dev = sch->dev;
+	struct net_device *dev = qdisc_dev(sch);
 	struct teql_master *m = (struct teql_master*)sch->ops;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
@@ -282,7 +282,7 @@ restart:
 		goto drop;
 
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
 
 		if (slave->qdisc_sleeping != q)
 			continue;
@@ -352,7 +352,7 @@ static int teql_master_open(struct net_device *dev)
 
 	q = m->slaves;
 	do {
-		struct net_device *slave = q->dev;
+		struct net_device *slave = qdisc_dev(q);
 
 		if (slave == NULL)
 			return -EUNATCH;
@@ -403,7 +403,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
 	q = m->slaves;
 	if (q) {
 		do {
-			if (new_mtu > q->dev->mtu)
+			if (new_mtu > qdisc_dev(q)->mtu)
 				return -EINVAL;
 		} while ((q=NEXT_SLAVE(q)) != m->slaves);
 	}