-rw-r--r--  include/net/sch_generic.h | 16
-rw-r--r--  net/mac80211/wme.c        | 28
-rw-r--r--  net/sched/cls_api.c       |  2
-rw-r--r--  net/sched/cls_route.c     |  4
-rw-r--r--  net/sched/sch_api.c       | 10
-rw-r--r--  net/sched/sch_atm.c       |  4
-rw-r--r--  net/sched/sch_cbq.c       | 22
-rw-r--r--  net/sched/sch_dsmark.c    |  4
-rw-r--r--  net/sched/sch_fifo.c      |  6
-rw-r--r--  net/sched/sch_generic.c   | 12
-rw-r--r--  net/sched/sch_gred.c      |  2
-rw-r--r--  net/sched/sch_hfsc.c      | 10
-rw-r--r--  net/sched/sch_htb.c       | 24
-rw-r--r--  net/sched/sch_netem.c     | 10
-rw-r--r--  net/sched/sch_prio.c      | 15
-rw-r--r--  net/sched/sch_sfq.c       |  4
-rw-r--r--  net/sched/sch_teql.c      | 12
17 files changed, 93 insertions(+), 92 deletions(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 0ab53c575f87..66ec36d8ac97 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -38,7 +38,6 @@ struct Qdisc
         atomic_t refcnt;
         struct sk_buff_head q;
         struct netdev_queue *dev_queue;
-        struct net_device *dev;
         struct list_head list;
 
         struct gnet_stats_basic bstats;
@@ -156,14 +155,18 @@ struct tcf_proto
         struct tcf_proto_ops *ops;
 };
 
+static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
+{
+        return qdisc->dev_queue->dev;
+}
 
 extern void qdisc_lock_tree(struct net_device *dev);
 extern void qdisc_unlock_tree(struct net_device *dev);
 
-#define sch_tree_lock(q)        qdisc_lock_tree((q)->dev)
-#define sch_tree_unlock(q)      qdisc_unlock_tree((q)->dev)
-#define tcf_tree_lock(tp)       qdisc_lock_tree((tp)->q->dev)
-#define tcf_tree_unlock(tp)     qdisc_unlock_tree((tp)->q->dev)
+#define sch_tree_lock(q)        qdisc_lock_tree(qdisc_dev(q))
+#define sch_tree_unlock(q)      qdisc_unlock_tree(qdisc_dev(q))
+#define tcf_tree_lock(tp)       qdisc_lock_tree(qdisc_dev((tp)->q))
+#define tcf_tree_unlock(tp)     qdisc_unlock_tree(qdisc_dev((tp)->q))
 
 extern struct Qdisc noop_qdisc;
 extern struct Qdisc_ops noop_qdisc_ops;
@@ -217,8 +220,7 @@ extern void dev_deactivate(struct net_device *dev);
 extern void qdisc_reset(struct Qdisc *qdisc);
 extern void qdisc_destroy(struct Qdisc *qdisc);
 extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct net_device *dev,
-                                 struct netdev_queue *dev_queue,
+extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                  struct Qdisc_ops *ops);
 extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
                                        struct netdev_queue *dev_queue,
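Everything below this header change is a mechanical conversion from direct qd->dev / sch->dev / tp->q->dev dereferences to the new accessor. As a hedged illustration of the pattern (my_sched_data and my_init are hypothetical names, not code touched by this commit):

    /* Sketch only: the conversion this patch applies throughout net/sched.
     * A qdisc callback that used to reach for sch->dev now resolves the
     * device through the queue the qdisc is attached to.
     */
    struct my_sched_data {
            u32 limit;
    };

    static int my_init(struct Qdisc *sch, struct nlattr *opt)
    {
            struct my_sched_data *q = qdisc_priv(sch);

            /* before this patch: q->limit = sch->dev->tx_queue_len; */
            q->limit = qdisc_dev(sch)->tx_queue_len;
            return 0;
    }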
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 770f1c09b793..2fbc171130bf 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -103,7 +103,7 @@ static inline int wme_downgrade_ac(struct sk_buff *skb)
  * negative return value indicates to drop the frame */
 static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
 {
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 
         if (!ieee80211_is_data(hdr->frame_control)) {
@@ -140,7 +140,7 @@ static int classify80211(struct sk_buff *skb, struct Qdisc *qd)
 
 static int wme_qdiscop_enqueue(struct sk_buff *skb, struct Qdisc* qd)
 {
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         struct ieee80211_sched_data *q = qdisc_priv(qd);
         struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -249,7 +249,7 @@ static int wme_qdiscop_requeue(struct sk_buff *skb, struct Qdisc* qd)
 static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct net_device *dev = qd->dev;
+        struct net_device *dev = qdisc_dev(qd);
         struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         struct sk_buff *skb;
@@ -286,7 +286,7 @@ static struct sk_buff *wme_qdiscop_dequeue(struct Qdisc* qd)
 static void wme_qdiscop_reset(struct Qdisc* qd)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         int queue;
 
@@ -303,7 +303,7 @@ static void wme_qdiscop_reset(struct Qdisc* qd)
 static void wme_qdiscop_destroy(struct Qdisc* qd)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         int queue;
 
@@ -328,7 +328,7 @@ static int wme_qdiscop_tune(struct Qdisc *qd, struct nlattr *opt)
 static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct net_device *dev = qd->dev;
+        struct net_device *dev = qdisc_dev(qd);
         struct ieee80211_local *local;
         struct ieee80211_hw *hw;
         int err = 0, i;
@@ -359,7 +359,7 @@ static int wme_qdiscop_init(struct Qdisc *qd, struct nlattr *opt)
         /* create child queues */
         for (i = 0; i < QD_NUM(hw); i++) {
                 skb_queue_head_init(&q->requeued[i]);
-                q->queues[i] = qdisc_create_dflt(qd->dev, qd->dev_queue,
+                q->queues[i] = qdisc_create_dflt(qdisc_dev(qd), qd->dev_queue,
                                                  &pfifo_qdisc_ops,
                                                  qd->handle);
                 if (!q->queues[i]) {
@@ -386,7 +386,7 @@ static int wme_classop_graft(struct Qdisc *qd, unsigned long arg,
                             struct Qdisc *new, struct Qdisc **old)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         unsigned long queue = arg - 1;
 
@@ -410,7 +410,7 @@ static struct Qdisc *
 wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         unsigned long queue = arg - 1;
 
@@ -423,7 +423,7 @@ wme_classop_leaf(struct Qdisc *qd, unsigned long arg)
 
 static unsigned long wme_classop_get(struct Qdisc *qd, u32 classid)
 {
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         unsigned long queue = TC_H_MIN(classid);
 
@@ -450,7 +450,7 @@ static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
                               struct nlattr **tca, unsigned long *arg)
 {
         unsigned long cl = *arg;
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
 
         if (cl - 1 > QD_NUM(hw))
@@ -467,7 +467,7 @@ static int wme_classop_change(struct Qdisc *qd, u32 handle, u32 parent,
  * when we add WMM-SA support - TSPECs may be deleted here */
 static int wme_classop_delete(struct Qdisc *qd, unsigned long cl)
 {
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
 
         if (cl - 1 > QD_NUM(hw))
@@ -480,7 +480,7 @@ static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
                                   struct sk_buff *skb, struct tcmsg *tcm)
 {
         struct ieee80211_sched_data *q = qdisc_priv(qd);
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
 
         if (cl - 1 > QD_NUM(hw))
@@ -494,7 +494,7 @@ static int wme_classop_dump_class(struct Qdisc *qd, unsigned long cl,
 
 static void wme_classop_walk(struct Qdisc *qd, struct qdisc_walker *arg)
 {
-        struct ieee80211_local *local = wdev_priv(qd->dev->ieee80211_ptr);
+        struct ieee80211_local *local = wdev_priv(qdisc_dev(qd)->ieee80211_ptr);
         struct ieee80211_hw *hw = &local->hw;
         int queue;
 
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 9360fc81e8c7..e2389f161e46 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -334,7 +334,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
         tcm->tcm_family = AF_UNSPEC;
         tcm->tcm__pad1 = 0;
         tcm->tcm__pad1 = 0;
-        tcm->tcm_ifindex = tp->q->dev->ifindex;
+        tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
         tcm->tcm_parent = tp->classid;
         tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
         NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind);
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 784dcb870b98..5a16ca28aa3d 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -302,7 +302,7 @@ static int route4_delete(struct tcf_proto *tp, unsigned long arg)
                         *fp = f->next;
                         tcf_tree_unlock(tp);
 
-                        route4_reset_fastmap(tp->q->dev, head, f->id);
+                        route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
                         route4_delete_filter(tp, f);
 
                         /* Strip tree */
@@ -500,7 +500,7 @@ reinsert:
         }
         tcf_tree_unlock(tp);
 
-        route4_reset_fastmap(tp->q->dev, head, f->id);
+        route4_reset_fastmap(qdisc_dev(tp->q), head, f->id);
         *arg = (unsigned long)f;
         return 0;
 
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b86c98bd06a3..1f893082a4f6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -281,7 +281,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
 {
         struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                  timer);
-        struct net_device *dev = wd->qdisc->dev;
+        struct net_device *dev = qdisc_dev(wd->qdisc);
 
         wd->qdisc->flags &= ~TCQ_F_THROTTLED;
         smp_wmb();
@@ -493,7 +493,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
                 if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
                         return;
 
-                sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+                sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
                 if (sch == NULL) {
                         WARN_ON(parentid != TC_H_ROOT);
                         return;
@@ -593,7 +593,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
         if (ops == NULL)
                 goto err_out;
 
-        sch = qdisc_alloc(dev, dev_queue, ops);
+        sch = qdisc_alloc(dev_queue, ops);
         if (IS_ERR(sch)) {
                 err = PTR_ERR(sch);
                 goto err_out2;
@@ -940,7 +940,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
         tcm->tcm_family = AF_UNSPEC;
         tcm->tcm__pad1 = 0;
         tcm->tcm__pad2 = 0;
-        tcm->tcm_ifindex = q->dev->ifindex;
+        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
         tcm->tcm_parent = clid;
         tcm->tcm_handle = q->handle;
         tcm->tcm_info = atomic_read(&q->refcnt);
@@ -1186,7 +1186,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
         nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
         tcm = NLMSG_DATA(nlh);
         tcm->tcm_family = AF_UNSPEC;
-        tcm->tcm_ifindex = q->dev->ifindex;
+        tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
         tcm->tcm_parent = q->handle;
         tcm->tcm_handle = q->handle;
         tcm->tcm_info = 0;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 3dddab531d5a..0de757e3be4a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -296,7 +296,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                 goto err_out;
         }
         flow->filter_list = NULL;
-        flow->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                     &pfifo_qdisc_ops, classid);
         if (!flow->q)
                 flow->q = &noop_qdisc;
@@ -556,7 +556,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 
         pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
         p->flows = &p->link;
-        p->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                       &pfifo_qdisc_ops, sch->handle);
         if (!p->link.q)
                 p->link.q = &noop_qdisc;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d360dcd0818b..9f2ace585fd6 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -650,7 +650,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
         }
 
         sch->flags &= ~TCQ_F_THROTTLED;
-        netif_schedule(sch->dev);
+        netif_schedule(qdisc_dev(sch));
         return HRTIMER_NORESTART;
 }
 
@@ -1077,9 +1077,9 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
                         cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
                                 q->quanta[prio];
                 }
-                if (cl->quantum <= 0 || cl->quantum>32*cl->qdisc->dev->mtu) {
+                if (cl->quantum <= 0 || cl->quantum>32*qdisc_dev(cl->qdisc)->mtu) {
                         printk(KERN_WARNING "CBQ: class %08x has bad quantum==%ld, repaired.\n", cl->common.classid, cl->quantum);
-                        cl->quantum = cl->qdisc->dev->mtu/2 + 1;
+                        cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
                 }
         }
 }
@@ -1401,7 +1401,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
         q->link.sibling = &q->link;
         q->link.common.classid = sch->handle;
         q->link.qdisc = sch;
-        if (!(q->link.q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        if (!(q->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                             &pfifo_qdisc_ops,
                                             sch->handle)))
                 q->link.q = &noop_qdisc;
@@ -1411,7 +1411,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
         q->link.cpriority = TC_CBQ_MAXPRIO-1;
         q->link.ovl_strategy = TC_CBQ_OVL_CLASSIC;
         q->link.overlimit = cbq_ovl_classic;
-        q->link.allot = psched_mtu(sch->dev);
+        q->link.allot = psched_mtu(qdisc_dev(sch));
         q->link.quantum = q->link.allot;
         q->link.weight = q->link.R_tab->rate.rate;
 
@@ -1646,7 +1646,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
         if (cl) {
                 if (new == NULL) {
-                        new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                        new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                 &pfifo_qdisc_ops,
                                                 cl->common.classid);
                         if (new == NULL)
@@ -1746,10 +1746,10 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 #ifdef CONFIG_NET_CLS_ACT
                 struct cbq_sched_data *q = qdisc_priv(sch);
 
-                spin_lock_bh(&sch->dev->queue_lock);
+                spin_lock_bh(&qdisc_dev(sch)->queue_lock);
                 if (q->rx_class == cl)
                         q->rx_class = NULL;
-                spin_unlock_bh(&sch->dev->queue_lock);
+                spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 #endif
 
                 cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
                 if (tca[TCA_RATE])
                         gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                              &sch->dev->queue_lock,
+                                              &qdisc_dev(sch)->queue_lock,
                                               tca[TCA_RATE]);
                 return 0;
         }
@@ -1879,7 +1879,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
         cl->R_tab = rtab;
         rtab = NULL;
         cl->refcnt = 1;
-        if (!(cl->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        if (!(cl->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                         &pfifo_qdisc_ops, classid)))
                 cl->q = &noop_qdisc;
         cl->common.classid = classid;
@@ -1919,7 +1919,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
         if (tca[TCA_RATE])
                 gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                  &sch->dev->queue_lock, tca[TCA_RATE]);
+                                  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
 
         *arg = (unsigned long)cl;
         return 0;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index c955ba24e5cf..3aafbd17393a 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -60,7 +60,7 @@ static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
                  sch, p, new, old);
 
         if (new == NULL) {
-                new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                         &pfifo_qdisc_ops,
                                         sch->handle);
                 if (new == NULL)
@@ -391,7 +391,7 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
         p->default_index = default_index;
         p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);
 
-        p->q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                  &pfifo_qdisc_ops, sch->handle);
         if (p->q == NULL)
                 p->q = &noop_qdisc;
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 779eae85faf0..1d97fa42c902 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -48,10 +48,10 @@ static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
         struct fifo_sched_data *q = qdisc_priv(sch);
 
         if (opt == NULL) {
-                u32 limit = sch->dev->tx_queue_len ? : 1;
+                u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
 
                 if (sch->ops == &bfifo_qdisc_ops)
-                        limit *= sch->dev->mtu;
+                        limit *= qdisc_dev(sch)->mtu;
 
                 q->limit = limit;
         } else {
@@ -137,7 +137,7 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
         struct Qdisc *q;
         int err = -ENOMEM;
 
-        q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                               ops, TC_H_MAKE(sch->handle, 1));
         if (q) {
                 err = fifo_set_limit(q, limit);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index d97086480893..b626a4f32b6b 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -364,7 +364,7 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
         struct sk_buff_head *list = prio2list(skb, qdisc);
 
-        if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
+        if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
                 qdisc->q.qlen++;
                 return __qdisc_enqueue_tail(skb, qdisc, list);
         }
@@ -440,8 +440,7 @@ static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
         .owner = THIS_MODULE,
 };
 
-struct Qdisc *qdisc_alloc(struct net_device *dev,
-                          struct netdev_queue *dev_queue,
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                           struct Qdisc_ops *ops)
 {
         void *p;
@@ -465,8 +464,7 @@ struct Qdisc *qdisc_alloc(struct net_device *dev,
         sch->enqueue = ops->enqueue;
         sch->dequeue = ops->dequeue;
         sch->dev_queue = dev_queue;
-        sch->dev = dev;
-        dev_hold(dev);
+        dev_hold(qdisc_dev(sch));
         atomic_set(&sch->refcnt, 1);
 
         return sch;
@@ -481,7 +479,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
 {
         struct Qdisc *sch;
 
-        sch = qdisc_alloc(dev, dev_queue, ops);
+        sch = qdisc_alloc(dev_queue, ops);
         if (IS_ERR(sch))
                 goto errout;
         sch->stats_lock = &dev->queue_lock;
@@ -534,7 +532,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
                 ops->destroy(qdisc);
 
         module_put(ops->owner);
-        dev_put(qdisc->dev);
+        dev_put(qdisc_dev(qdisc));
         call_rcu(&qdisc->q_rcu, __qdisc_destroy);
 }
 EXPORT_SYMBOL(qdisc_destroy);
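With struct net_device *dev gone from struct Qdisc, the device argument to qdisc_alloc() is redundant: the queue already knows its device, and qdisc_dev() resolves it. A minimal sketch of that invariant, assuming a hypothetical example_alloc() wrapper that is not part of the patch:

    /* Sketch: after qdisc_alloc(dev_queue, ops), the accessor added in
     * sch_generic.h recovers the owning device from the queue, so callers
     * such as qdisc_create_dflt() no longer need to pass it separately.
     */
    static struct Qdisc *example_alloc(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops)
    {
            struct Qdisc *sch = qdisc_alloc(dev_queue, ops);

            if (!IS_ERR(sch))
                    WARN_ON(qdisc_dev(sch) != dev_queue->dev); /* equal by construction */
            return sch;
    }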
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index c89fba56db56..39fa28511f07 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -164,7 +164,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                  * if no default DP has been configured. This
                  * allows for DP flows to be left untouched.
                  */
-                if (skb_queue_len(&sch->q) < sch->dev->tx_queue_len)
+                if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
                         return qdisc_enqueue_tail(skb, sch);
                 else
                         goto drop;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5a22fec4eadd..333525422f45 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
                 if (tca[TCA_RATE])
                         gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                              &sch->dev->queue_lock,
+                                              &qdisc_dev(sch)->queue_lock,
                                               tca[TCA_RATE]);
                 return 0;
         }
@@ -1083,7 +1083,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
         cl->refcnt = 1;
         cl->sched = q;
         cl->cl_parent = parent;
-        cl->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        cl->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                       &pfifo_qdisc_ops, classid);
         if (cl->qdisc == NULL)
                 cl->qdisc = &noop_qdisc;
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
         if (tca[TCA_RATE])
                 gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                  &sch->dev->queue_lock, tca[TCA_RATE]);
+                                  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
         *arg = (unsigned long)cl;
         return 0;
 }
@@ -1202,7 +1202,7 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
         if (cl->level > 0)
                 return -EINVAL;
         if (new == NULL) {
-                new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                         &pfifo_qdisc_ops,
                                         cl->cl_common.classid);
                 if (new == NULL)
@@ -1445,7 +1445,7 @@ hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
         q->root.cl_common.classid = sch->handle;
         q->root.refcnt = 1;
         q->root.sched = q;
-        q->root.qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        q->root.qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                           &pfifo_qdisc_ops,
                                           sch->handle);
         if (q->root.qdisc == NULL)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 956a67f66b9c..31f7d1536e6d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1026,7 +1026,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
         qdisc_watchdog_init(&q->watchdog, sch);
         skb_queue_head_init(&q->direct_queue);
 
-        q->direct_qlen = sch->dev->tx_queue_len;
+        q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
         if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
                 q->direct_qlen = 2;
 
@@ -1043,7 +1043,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
         struct nlattr *nest;
         struct tc_htb_glob gopt;
 
-        spin_lock_bh(&sch->dev->queue_lock);
+        spin_lock_bh(&qdisc_dev(sch)->queue_lock);
 
         gopt.direct_pkts = q->direct_pkts;
         gopt.version = HTB_VER;
@@ -1057,11 +1057,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
         NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
         nla_nest_end(skb, nest);
 
-        spin_unlock_bh(&sch->dev->queue_lock);
+        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
         return skb->len;
 
 nla_put_failure:
-        spin_unlock_bh(&sch->dev->queue_lock);
+        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
         nla_nest_cancel(skb, nest);
         return -1;
 }
@@ -1073,7 +1073,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
         struct nlattr *nest;
         struct tc_htb_opt opt;
 
-        spin_lock_bh(&sch->dev->queue_lock);
+        spin_lock_bh(&qdisc_dev(sch)->queue_lock);
         tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
         tcm->tcm_handle = cl->common.classid;
         if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1095,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
         NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
         nla_nest_end(skb, nest);
-        spin_unlock_bh(&sch->dev->queue_lock);
+        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
         return skb->len;
 
 nla_put_failure:
-        spin_unlock_bh(&sch->dev->queue_lock);
+        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
         nla_nest_cancel(skb, nest);
         return -1;
 }
@@ -1129,7 +1129,7 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
 
         if (cl && !cl->level) {
                 if (new == NULL &&
-                    (new = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                    (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                              &pfifo_qdisc_ops,
                                              cl->common.classid))
                     == NULL)
@@ -1257,7 +1257,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
                 return -EBUSY;
 
         if (!cl->level && htb_parent_last_child(cl)) {
-                new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                           &pfifo_qdisc_ops,
                                           cl->parent->common.classid);
                 last_child = 1;
@@ -1365,7 +1365,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                         goto failure;
 
                 gen_new_estimator(&cl->bstats, &cl->rate_est,
-                                  &sch->dev->queue_lock,
+                                  &qdisc_dev(sch)->queue_lock,
                                   tca[TCA_RATE] ? : &est.nla);
                 cl->refcnt = 1;
                 cl->children = 0;
@@ -1378,7 +1378,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
                    so that can't be used inside of sch_tree_lock
                    -- thanks to Karlis Peisenieks */
-                new_q = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                new_q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                           &pfifo_qdisc_ops, classid);
                 sch_tree_lock(sch);
                 if (parent && !parent->level) {
@@ -1420,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
         } else {
                 if (tca[TCA_RATE])
                         gen_replace_estimator(&cl->bstats, &cl->rate_est,
-                                              &sch->dev->queue_lock,
+                                              &qdisc_dev(sch)->queue_lock,
                                               tca[TCA_RATE]);
                 sch_tree_lock(sch);
         }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index aa7a04e32ae9..790582960444 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
          * skb will be queued.
          */
         if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-                struct Qdisc *rootq = sch->dev->qdisc;
+                struct Qdisc *rootq = qdisc_dev(sch)->qdisc;
                 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
                 q->duplicate = 0;
 
@@ -333,9 +333,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
         for (i = 0; i < n; i++)
                 d->table[i] = data[i];
 
-        spin_lock_bh(&sch->dev->queue_lock);
+        spin_lock_bh(&qdisc_dev(sch)->queue_lock);
         d = xchg(&q->delay_dist, d);
-        spin_unlock_bh(&sch->dev->queue_lock);
+        spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
 
         kfree(d);
         return 0;
@@ -495,7 +495,7 @@ static int tfifo_init(struct Qdisc *sch, struct nlattr *opt)
 
                 q->limit = ctl->limit;
         } else
-                q->limit = max_t(u32, sch->dev->tx_queue_len, 1);
+                q->limit = max_t(u32, qdisc_dev(sch)->tx_queue_len, 1);
 
         q->oldest = PSCHED_PASTPERFECT;
         return 0;
@@ -536,7 +536,7 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
 
         qdisc_watchdog_init(&q->watchdog, sch);
 
-        q->qdisc = qdisc_create_dflt(sch->dev, sch->dev_queue,
+        q->qdisc = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                      &tfifo_qdisc_ops,
                                      TC_H_MAKE(sch->handle, 1));
         if (!q->qdisc) {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index ca58a039208e..39157f7bc046 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -136,7 +136,8 @@ prio_dequeue(struct Qdisc* sch)
                  * pulling an skb. This way we avoid excessive requeues
                  * for slower queues.
                  */
-                if (!__netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+                if (!__netif_subqueue_stopped(qdisc_dev(sch),
+                                              (q->mq ? prio : 0))) {
                         qdisc = q->queues[prio];
                         skb = qdisc->dequeue(qdisc);
                         if (skb) {
@@ -165,8 +166,8 @@ static struct sk_buff *rr_dequeue(struct Qdisc* sch)
                  * for slower queues. If the queue is stopped, try the
                  * next queue.
                  */
-                if (!__netif_subqueue_stopped(sch->dev,
-                                              (q->mq ? q->curband : 0))) {
+                if (!__netif_subqueue_stopped(qdisc_dev(sch),
+                                              (q->mq ? q->curband : 0))) {
                         qdisc = q->queues[q->curband];
                         skb = qdisc->dequeue(qdisc);
                         if (skb) {
@@ -249,10 +250,10 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
         if (q->mq) {
                 if (sch->parent != TC_H_ROOT)
                         return -EINVAL;
-                if (netif_is_multiqueue(sch->dev)) {
+                if (netif_is_multiqueue(qdisc_dev(sch))) {
                         if (q->bands == 0)
-                                q->bands = sch->dev->egress_subqueue_count;
-                        else if (q->bands != sch->dev->egress_subqueue_count)
+                                q->bands = qdisc_dev(sch)->egress_subqueue_count;
+                        else if (q->bands != qdisc_dev(sch)->egress_subqueue_count)
                                 return -EINVAL;
                 } else
                         return -EOPNOTSUPP;
@@ -281,7 +282,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt)
         for (i=0; i<q->bands; i++) {
                 if (q->queues[i] == &noop_qdisc) {
                         struct Qdisc *child;
-                        child = qdisc_create_dflt(sch->dev, sch->dev_queue,
+                        child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
                                                   &pfifo_qdisc_ops,
                                                   TC_H_MAKE(sch->handle, i + 1));
                         if (child) {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 6a97afbfb952..8458f630fac4 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -461,7 +461,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
                 return -EINVAL;
 
         sch_tree_lock(sch);
-        q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+        q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
         q->perturb_period = ctl->perturb_period * HZ;
         if (ctl->limit)
                 q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
@@ -502,7 +502,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
         q->max_depth = 0;
         q->tail = SFQ_DEPTH;
         if (opt == NULL) {
-                q->quantum = psched_mtu(sch->dev);
+                q->quantum = psched_mtu(qdisc_dev(sch));
                 q->perturb_period = 0;
                 q->perturbation = net_random();
         } else {
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 0444fd0f0d22..b3fc82623fc6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -78,7 +78,7 @@ struct teql_sched_data
 static int
 teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 {
-        struct net_device *dev = sch->dev;
+        struct net_device *dev = qdisc_dev(sch);
         struct teql_sched_data *q = qdisc_priv(sch);
 
         if (q->q.qlen < dev->tx_queue_len) {
@@ -111,7 +111,7 @@ teql_dequeue(struct Qdisc* sch)
 
         skb = __skb_dequeue(&dat->q);
         if (skb == NULL) {
-                struct net_device *m = dat->m->dev->qdisc->dev;
+                struct net_device *m = qdisc_dev(dat->m->dev->qdisc);
                 if (m) {
                         dat->m->slaves = sch;
                         netif_wake_queue(m);
@@ -170,7 +170,7 @@ teql_destroy(struct Qdisc* sch)
 
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
-        struct net_device *dev = sch->dev;
+        struct net_device *dev = qdisc_dev(sch);
         struct teql_master *m = (struct teql_master*)sch->ops;
         struct teql_sched_data *q = qdisc_priv(sch);
 
@@ -282,7 +282,7 @@ restart:
                 goto drop;
 
         do {
-                struct net_device *slave = q->dev;
+                struct net_device *slave = qdisc_dev(q);
 
                 if (slave->qdisc_sleeping != q)
                         continue;
@@ -352,7 +352,7 @@ static int teql_master_open(struct net_device *dev)
 
         q = m->slaves;
         do {
-                struct net_device *slave = q->dev;
+                struct net_device *slave = qdisc_dev(q);
 
                 if (slave == NULL)
                         return -EUNATCH;
@@ -403,7 +403,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
         q = m->slaves;
         if (q) {
                 do {
-                        if (new_mtu > q->dev->mtu)
+                        if (new_mtu > qdisc_dev(q)->mtu)
                                 return -EINVAL;
                 } while ((q=NEXT_SLAVE(q)) != m->slaves);
         }