Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/sch_api.c	2
-rw-r--r--	net/sched/sch_cbq.c	8
-rw-r--r--	net/sched/sch_generic.c	40
-rw-r--r--	net/sched/sch_hfsc.c	4
-rw-r--r--	net/sched/sch_htb.c	16
-rw-r--r--	net/sched/sch_netem.c	4
-rw-r--r--	net/sched/sch_teql.c	4
7 files changed, 39 insertions(+), 39 deletions(-)
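
This commit moves the qdisc root lock from struct net_device into a per-queue
structure. Every call site below changes the same way: dev->queue_lock becomes
the lock embedded in a struct netdev_queue, reached either through the qdisc
(sch->dev_queue->lock) or directly through the device's TX queue
(dev->tx_queue.lock). A minimal sketch of the layout the hunks assume -- a
trimmed-down view showing only the fields this diff touches, not the full
kernel definitions:

	struct netdev_queue {
		spinlock_t		lock;		/* replaces dev->queue_lock */
	};

	struct net_device {
		/* ... */
		struct netdev_queue	tx_queue;	/* a single TX queue at this stage */
	};

	struct Qdisc {
		/* ... */
		struct netdev_queue	*dev_queue;	/* back-pointer set by qdisc_alloc() */
		spinlock_t		*stats_lock;	/* now points at dev_queue->lock */
	};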
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1f893082a4f6..2a1834f8c7d8 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -606,7 +606,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
 	} else {
-		sch->stats_lock = &dev->queue_lock;
+		sch->stats_lock = &dev_queue->lock;
 		if (handle == 0) {
 			handle = qdisc_alloc_handle(dev);
 			err = -ENOMEM;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 9f2ace585fd6..99ce3da2b0a4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1746,10 +1746,10 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
 #ifdef CONFIG_NET_CLS_ACT
 	struct cbq_sched_data *q = qdisc_priv(sch);
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 	if (q->rx_class == cl)
 		q->rx_class = NULL;
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 #endif
 
 	cbq_destroy_class(sch, cl);
@@ -1828,7 +1828,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1919,7 +1919,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
+				  &sch->dev_queue->lock, tca[TCA_RATE]);
 
 	*arg = (unsigned long)cl;
 	return 0;
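
The estimator conversions in this file (and the matching ones in sch_hfsc.c
and sch_htb.c below) only change which spinlock is handed to the generic rate
estimator as its stats lock; gen_new_estimator() and gen_replace_estimator()
themselves are untouched. The call shape, condensed from the hunk above:

	/* The estimator samples cl->bstats under the lock passed as the
	 * third argument; after this patch that is the per-queue lock. */
	gen_replace_estimator(&cl->bstats, &cl->rate_est,
			      &sch->dev_queue->lock,	/* was &qdisc_dev(sch)->queue_lock */
			      tca[TCA_RATE]);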
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b626a4f32b6b..ee8f9f78a095 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,31 +29,31 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * dev->queue_lock spinlock.
+ * queue->lock spinlock.
  *
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
- *   spinlock dev->queue_lock.
+ *   spinlock queue->lock.
  * - ingress filtering is serialized via top level device
  *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->queue_lock)
+	__acquires(dev->tx_queue.lock)
 	__acquires(dev->ingress_lock)
 {
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	spin_lock(&dev->ingress_lock);
 }
 EXPORT_SYMBOL(qdisc_lock_tree);
 
 void qdisc_unlock_tree(struct net_device *dev)
 	__releases(dev->ingress_lock)
-	__releases(dev->queue_lock)
+	__releases(dev->tx_queue.lock)
 {
 	spin_unlock(&dev->ingress_lock);
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 EXPORT_SYMBOL(qdisc_unlock_tree);
 
@@ -118,15 +118,15 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under dev->queue_lock with locally disabled BH.
+ * NOTE: Called under queue->lock with locally disabled BH.
  *
  * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
- * device at a time. dev->queue_lock serializes queue accesses for
+ * device at a time. queue->lock serializes queue accesses for
  * this device AND dev->qdisc pointer itself.
  *
  * netif_tx_lock serializes accesses to device driver.
  *
- * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * queue->lock and netif_tx_lock are mutually exclusive,
  * if one is grabbed, another must be free.
  *
  * Note, that this procedure can be called by a watchdog timer
@@ -148,14 +148,14 @@ static inline int qdisc_restart(struct net_device *dev)
 
 
 	/* And release queue */
-	spin_unlock(&dev->queue_lock);
+	spin_unlock(&q->dev_queue->lock);
 
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
 	HARD_TX_UNLOCK(dev);
 
-	spin_lock(&dev->queue_lock);
+	spin_lock(&q->dev_queue->lock);
 	q = dev->qdisc;
 
 	switch (ret) {
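
The qdisc_restart() hunk above keeps the established lock-juggling pattern:
the queue lock is dropped before entering the driver and re-taken afterwards,
which is why q must be re-read from dev->qdisc once the lock is held again.
The mutual-exclusion rule stated in the comment earlier (queue->lock and
netif_tx_lock are never held together) is what makes the drop safe. Condensed
from the hunk:

	spin_unlock(&q->dev_queue->lock);	/* release queue lock ... */
	HARD_TX_LOCK(dev, smp_processor_id());	/* ... before taking the TX lock */
	if (!netif_subqueue_stopped(dev, skb))
		ret = dev_hard_start_xmit(skb, dev);
	HARD_TX_UNLOCK(dev);
	spin_lock(&q->dev_queue->lock);		/* re-acquire ... */
	q = dev->qdisc;				/* ... and re-read; it may have changed */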
@@ -482,7 +482,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev,
 	sch = qdisc_alloc(dev_queue, ops);
 	if (IS_ERR(sch))
 		goto errout;
-	sch->stats_lock = &dev->queue_lock;
+	sch->stats_lock = &dev_queue->lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
@@ -494,7 +494,7 @@ errout:
 }
 EXPORT_SYMBOL(qdisc_create_dflt);
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_reset(struct Qdisc *qdisc)
 {
@@ -514,7 +514,7 @@ static void __qdisc_destroy(struct rcu_head *head)
 	kfree((char *) qdisc - qdisc->padded);
 }
 
-/* Under dev->queue_lock and BH! */
+/* Under queue->lock and BH! */
 
 void qdisc_destroy(struct Qdisc *qdisc)
 {
@@ -566,13 +566,13 @@ void dev_activate(struct net_device *dev)
 		/* Delay activation until next carrier-on event */
 		return;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
 	if (dev->qdisc != &noqueue_qdisc) {
 		dev->trans_start = jiffies;
 		dev_watchdog_up(dev);
 	}
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 }
 
 void dev_deactivate(struct net_device *dev)
@@ -581,7 +581,7 @@ void dev_deactivate(struct net_device *dev)
 	struct sk_buff *skb;
 	int running;
 
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	qdisc = dev->qdisc;
 	dev->qdisc = &noop_qdisc;
 
@@ -589,7 +589,7 @@ void dev_deactivate(struct net_device *dev)
 
 	skb = dev->gso_skb;
 	dev->gso_skb = NULL;
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 
 	kfree_skb(skb);
 
@@ -607,9 +607,9 @@ void dev_deactivate(struct net_device *dev)
 	 * Double-check inside queue lock to ensure that all effects
 	 * of the queue run are visible when we return.
 	 */
-	spin_lock_bh(&dev->queue_lock);
+	spin_lock_bh(&dev->tx_queue.lock);
 	running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
-	spin_unlock_bh(&dev->queue_lock);
+	spin_unlock_bh(&dev->tx_queue.lock);
 
 	/*
 	 * The running flag should never be set at this point because
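
dev_activate() and dev_deactivate() reference dev->tx_queue.lock directly
rather than going through a qdisc, since they operate on the device's single
TX queue. The final lock/unlock pair in dev_deactivate() takes the queue lock
purely as a barrier, per the comment in the hunk: every effect of a
concurrent queue run must be visible before the running flag is sampled:

	spin_lock_bh(&dev->tx_queue.lock);	/* order against a finishing queue run */
	running = test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
	spin_unlock_bh(&dev->tx_queue.lock);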
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 333525422f45..997d520ca580 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		return 0;
 	}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 
 	if (tca[TCA_RATE])
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock, tca[TCA_RATE]);
+				  &sch->dev_queue->lock, tca[TCA_RATE]);
 	*arg = (unsigned long)cl;
 	return 0;
 }
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 31f7d1536e6d..c8ca54cc26b0 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1043,7 +1043,7 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct nlattr *nest;
 	struct tc_htb_glob gopt;
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 
 	gopt.direct_pkts = q->direct_pkts;
 	gopt.version = HTB_VER;
@@ -1057,11 +1057,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
 	NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
 	nla_nest_end(skb, nest);
 
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1073,7 +1073,7 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	struct nlattr *nest;
 	struct tc_htb_opt opt;
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
 	tcm->tcm_handle = cl->common.classid;
 	if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1095,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
 	NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
 
 	nla_nest_end(skb, nest);
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	return skb->len;
 
 nla_put_failure:
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 	nla_nest_cancel(skb, nest);
 	return -1;
 }
@@ -1365,7 +1365,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 			goto failure;
 
 		gen_new_estimator(&cl->bstats, &cl->rate_est,
-				  &qdisc_dev(sch)->queue_lock,
+				  &sch->dev_queue->lock,
 				  tca[TCA_RATE] ? : &est.nla);
 		cl->refcnt = 1;
 		cl->children = 0;
@@ -1420,7 +1420,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	} else {
 		if (tca[TCA_RATE])
 			gen_replace_estimator(&cl->bstats, &cl->rate_est,
-					      &qdisc_dev(sch)->queue_lock,
+					      &sch->dev_queue->lock,
 					      tca[TCA_RATE]);
 		sch_tree_lock(sch);
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 790582960444..71b73c528f9b 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -333,9 +333,9 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	for (i = 0; i < n; i++)
 		d->table[i] = data[i];
 
-	spin_lock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_lock_bh(&sch->dev_queue->lock);
 	d = xchg(&q->delay_dist, d);
-	spin_unlock_bh(&qdisc_dev(sch)->queue_lock);
+	spin_unlock_bh(&sch->dev_queue->lock);
 
 	kfree(d);
 	return 0;
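
netem's get_dist_table() shows the publish-then-free idiom under the new
lock: the fully built table is swapped in with xchg() while the queue lock is
held, so the dequeue path never observes a half-initialized table, and the
stale table is freed only after the lock is dropped. Condensed from the hunk:

	spin_lock_bh(&sch->dev_queue->lock);
	d = xchg(&q->delay_dist, d);	/* d now holds the old table */
	spin_unlock_bh(&sch->dev_queue->lock);

	kfree(d);			/* safe: no reader can still see it */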
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index b3fc82623fc6..4f3054e8e1ab 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -154,9 +154,9 @@ teql_destroy(struct Qdisc* sch)
 			master->slaves = NEXT_SLAVE(q);
 			if (q == master->slaves) {
 				master->slaves = NULL;
-				spin_lock_bh(&master->dev->queue_lock);
+				spin_lock_bh(&master->dev->tx_queue.lock);
 				qdisc_reset(master->dev->qdisc);
-				spin_unlock_bh(&master->dev->queue_lock);
+				spin_unlock_bh(&master->dev->tx_queue.lock);
 			}
 		}
 	skb_queue_purge(&dat->q);