aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2008-07-16 04:42:40 -0400
committerDavid S. Miller <davem@davemloft.net>2008-07-17 22:21:19 -0400
commit7698b4fcabcd790efc4f226bada1e7b5870653af (patch)
tree031ce7a911fc5bff995421a5615d9ab25416a479
parente2627c8c2241bce45e368e150654d076b58a4595 (diff)
pkt_sched: Add and use qdisc_root() and qdisc_root_lock().
When code wants to lock the qdisc tree state, the logic operation it's doing is locking the top-level qdisc that sits at the root of the netdev_queue. Add qdisc_root_lock() to represent this and convert the easiest cases. In order for this to work out in all cases, we have to hook up the noop_qdisc to a dummy netdev_queue. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/sch_generic.h12
-rw-r--r--net/sched/sch_api.c8
-rw-r--r--net/sched/sch_cbq.c9
-rw-r--r--net/sched/sch_generic.c21
-rw-r--r--net/sched/sch_hfsc.c4
-rw-r--r--net/sched/sch_htb.c18
-rw-r--r--net/sched/sch_netem.c9
7 files changed, 55 insertions, 26 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index bc2a09da21b1..92417825d387 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -161,6 +161,18 @@ struct tcf_proto
161 struct tcf_proto_ops *ops; 161 struct tcf_proto_ops *ops;
162}; 162};
163 163
164static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
165{
166 return qdisc->dev_queue->qdisc;
167}
168
169static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
170{
171 struct Qdisc *root = qdisc_root(qdisc);
172
173 return &root->dev_queue->lock;
174}
175
164static inline struct net_device *qdisc_dev(struct Qdisc *qdisc) 176static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
165{ 177{
166 return qdisc->dev_queue->dev; 178 return qdisc->dev_queue->dev;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 830ccc544a15..19c244a00839 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -633,7 +633,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
633 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 633 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
634 if (tca[TCA_RATE]) { 634 if (tca[TCA_RATE]) {
635 err = gen_new_estimator(&sch->bstats, &sch->rate_est, 635 err = gen_new_estimator(&sch->bstats, &sch->rate_est,
636 &sch->dev_queue->lock, 636 qdisc_root_lock(sch),
637 tca[TCA_RATE]); 637 tca[TCA_RATE]);
638 if (err) { 638 if (err) {
639 /* 639 /*
@@ -675,7 +675,7 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
675 } 675 }
676 if (tca[TCA_RATE]) 676 if (tca[TCA_RATE])
677 gen_replace_estimator(&sch->bstats, &sch->rate_est, 677 gen_replace_estimator(&sch->bstats, &sch->rate_est,
678 &sch->dev_queue->lock, tca[TCA_RATE]); 678 qdisc_root_lock(sch), tca[TCA_RATE]);
679 return 0; 679 return 0;
680} 680}
681 681
@@ -967,7 +967,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
967 q->qstats.qlen = q->q.qlen; 967 q->qstats.qlen = q->q.qlen;
968 968
969 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 969 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
970 TCA_XSTATS, &q->dev_queue->lock, &d) < 0) 970 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
971 goto nla_put_failure; 971 goto nla_put_failure;
972 972
973 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0) 973 if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
@@ -1216,7 +1216,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1216 goto nla_put_failure; 1216 goto nla_put_failure;
1217 1217
1218 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 1218 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
1219 TCA_XSTATS, &q->dev_queue->lock, &d) < 0) 1219 TCA_XSTATS, qdisc_root_lock(q), &d) < 0)
1220 goto nla_put_failure; 1220 goto nla_put_failure;
1221 1221
1222 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0) 1222 if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4efc836cbf38..37ae653db683 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1744,12 +1744,13 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
1744 1744
1745 if (--cl->refcnt == 0) { 1745 if (--cl->refcnt == 0) {
1746#ifdef CONFIG_NET_CLS_ACT 1746#ifdef CONFIG_NET_CLS_ACT
1747 spinlock_t *root_lock = qdisc_root_lock(sch);
1747 struct cbq_sched_data *q = qdisc_priv(sch); 1748 struct cbq_sched_data *q = qdisc_priv(sch);
1748 1749
1749 spin_lock_bh(&sch->dev_queue->lock); 1750 spin_lock_bh(root_lock);
1750 if (q->rx_class == cl) 1751 if (q->rx_class == cl)
1751 q->rx_class = NULL; 1752 q->rx_class = NULL;
1752 spin_unlock_bh(&sch->dev_queue->lock); 1753 spin_unlock_bh(root_lock);
1753#endif 1754#endif
1754 1755
1755 cbq_destroy_class(sch, cl); 1756 cbq_destroy_class(sch, cl);
@@ -1828,7 +1829,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1828 1829
1829 if (tca[TCA_RATE]) 1830 if (tca[TCA_RATE])
1830 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1831 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1831 &sch->dev_queue->lock, 1832 qdisc_root_lock(sch),
1832 tca[TCA_RATE]); 1833 tca[TCA_RATE]);
1833 return 0; 1834 return 0;
1834 } 1835 }
@@ -1919,7 +1920,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
1919 1920
1920 if (tca[TCA_RATE]) 1921 if (tca[TCA_RATE])
1921 gen_new_estimator(&cl->bstats, &cl->rate_est, 1922 gen_new_estimator(&cl->bstats, &cl->rate_est,
1922 &sch->dev_queue->lock, tca[TCA_RATE]); 1923 qdisc_root_lock(sch), tca[TCA_RATE]);
1923 1924
1924 *arg = (unsigned long)cl; 1925 *arg = (unsigned long)cl;
1925 return 0; 1926 return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ac208c2b2d10..739a8711ab30 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -151,14 +151,17 @@ static inline int qdisc_restart(struct netdev_queue *txq,
151{ 151{
152 int ret = NETDEV_TX_BUSY; 152 int ret = NETDEV_TX_BUSY;
153 struct net_device *dev; 153 struct net_device *dev;
154 spinlock_t *root_lock;
154 struct sk_buff *skb; 155 struct sk_buff *skb;
155 156
156 /* Dequeue packet */ 157 /* Dequeue packet */
157 if (unlikely((skb = dequeue_skb(q)) == NULL)) 158 if (unlikely((skb = dequeue_skb(q)) == NULL))
158 return 0; 159 return 0;
159 160
160 /* And release queue */ 161 root_lock = qdisc_root_lock(q);
161 spin_unlock(&txq->lock); 162
163 /* And release qdisc */
164 spin_unlock(root_lock);
162 165
163 dev = txq->dev; 166 dev = txq->dev;
164 167
@@ -167,7 +170,7 @@ static inline int qdisc_restart(struct netdev_queue *txq,
167 ret = dev_hard_start_xmit(skb, dev, txq); 170 ret = dev_hard_start_xmit(skb, dev, txq);
168 HARD_TX_UNLOCK(dev, txq); 171 HARD_TX_UNLOCK(dev, txq);
169 172
170 spin_lock(&txq->lock); 173 spin_lock(root_lock);
171 174
172 switch (ret) { 175 switch (ret) {
173 case NETDEV_TX_OK: 176 case NETDEV_TX_OK:
@@ -345,12 +348,18 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
345 .owner = THIS_MODULE, 348 .owner = THIS_MODULE,
346}; 349};
347 350
351static struct netdev_queue noop_netdev_queue = {
352 .lock = __SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
353 .qdisc = &noop_qdisc,
354};
355
348struct Qdisc noop_qdisc = { 356struct Qdisc noop_qdisc = {
349 .enqueue = noop_enqueue, 357 .enqueue = noop_enqueue,
350 .dequeue = noop_dequeue, 358 .dequeue = noop_dequeue,
351 .flags = TCQ_F_BUILTIN, 359 .flags = TCQ_F_BUILTIN,
352 .ops = &noop_qdisc_ops, 360 .ops = &noop_qdisc_ops,
353 .list = LIST_HEAD_INIT(noop_qdisc.list), 361 .list = LIST_HEAD_INIT(noop_qdisc.list),
362 .dev_queue = &noop_netdev_queue,
354}; 363};
355EXPORT_SYMBOL(noop_qdisc); 364EXPORT_SYMBOL(noop_qdisc);
356 365
@@ -666,19 +675,21 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
666 675
667 for (i = 0; i < dev->num_tx_queues; i++) { 676 for (i = 0; i < dev->num_tx_queues; i++) {
668 struct netdev_queue *dev_queue; 677 struct netdev_queue *dev_queue;
678 spinlock_t *root_lock;
669 struct Qdisc *q; 679 struct Qdisc *q;
670 int val; 680 int val;
671 681
672 dev_queue = netdev_get_tx_queue(dev, i); 682 dev_queue = netdev_get_tx_queue(dev, i);
673 q = dev_queue->qdisc; 683 q = dev_queue->qdisc;
684 root_lock = qdisc_root_lock(q);
674 685
675 if (lock) 686 if (lock)
676 spin_lock_bh(&dev_queue->lock); 687 spin_lock_bh(root_lock);
677 688
678 val = test_bit(__QDISC_STATE_RUNNING, &q->state); 689 val = test_bit(__QDISC_STATE_RUNNING, &q->state);
679 690
680 if (lock) 691 if (lock)
681 spin_unlock_bh(&dev_queue->lock); 692 spin_unlock_bh(root_lock);
682 693
683 if (val) 694 if (val)
684 return true; 695 return true;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 997d520ca580..5090708ba384 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1045 1045
1046 if (tca[TCA_RATE]) 1046 if (tca[TCA_RATE])
1047 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1047 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1048 &sch->dev_queue->lock, 1048 qdisc_root_lock(sch),
1049 tca[TCA_RATE]); 1049 tca[TCA_RATE]);
1050 return 0; 1050 return 0;
1051 } 1051 }
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
1104 1104
1105 if (tca[TCA_RATE]) 1105 if (tca[TCA_RATE])
1106 gen_new_estimator(&cl->bstats, &cl->rate_est, 1106 gen_new_estimator(&cl->bstats, &cl->rate_est,
1107 &sch->dev_queue->lock, tca[TCA_RATE]); 1107 qdisc_root_lock(sch), tca[TCA_RATE]);
1108 *arg = (unsigned long)cl; 1108 *arg = (unsigned long)cl;
1109 return 0; 1109 return 0;
1110} 1110}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c8ca54cc26b0..ee48457eaa4a 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1039,11 +1039,12 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1039 1039
1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1040static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1041{ 1041{
1042 spinlock_t *root_lock = qdisc_root_lock(sch);
1042 struct htb_sched *q = qdisc_priv(sch); 1043 struct htb_sched *q = qdisc_priv(sch);
1043 struct nlattr *nest; 1044 struct nlattr *nest;
1044 struct tc_htb_glob gopt; 1045 struct tc_htb_glob gopt;
1045 1046
1046 spin_lock_bh(&sch->dev_queue->lock); 1047 spin_lock_bh(root_lock);
1047 1048
1048 gopt.direct_pkts = q->direct_pkts; 1049 gopt.direct_pkts = q->direct_pkts;
1049 gopt.version = HTB_VER; 1050 gopt.version = HTB_VER;
@@ -1057,11 +1058,11 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1057 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1058 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
1058 nla_nest_end(skb, nest); 1059 nla_nest_end(skb, nest);
1059 1060
1060 spin_unlock_bh(&sch->dev_queue->lock); 1061 spin_unlock_bh(root_lock);
1061 return skb->len; 1062 return skb->len;
1062 1063
1063nla_put_failure: 1064nla_put_failure:
1064 spin_unlock_bh(&sch->dev_queue->lock); 1065 spin_unlock_bh(root_lock);
1065 nla_nest_cancel(skb, nest); 1066 nla_nest_cancel(skb, nest);
1066 return -1; 1067 return -1;
1067} 1068}
@@ -1070,10 +1071,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1070 struct sk_buff *skb, struct tcmsg *tcm) 1071 struct sk_buff *skb, struct tcmsg *tcm)
1071{ 1072{
1072 struct htb_class *cl = (struct htb_class *)arg; 1073 struct htb_class *cl = (struct htb_class *)arg;
1074 spinlock_t *root_lock = qdisc_root_lock(sch);
1073 struct nlattr *nest; 1075 struct nlattr *nest;
1074 struct tc_htb_opt opt; 1076 struct tc_htb_opt opt;
1075 1077
1076 spin_lock_bh(&sch->dev_queue->lock); 1078 spin_lock_bh(root_lock);
1077 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; 1079 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1078 tcm->tcm_handle = cl->common.classid; 1080 tcm->tcm_handle = cl->common.classid;
1079 if (!cl->level && cl->un.leaf.q) 1081 if (!cl->level && cl->un.leaf.q)
@@ -1095,11 +1097,11 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1095 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1097 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
1096 1098
1097 nla_nest_end(skb, nest); 1099 nla_nest_end(skb, nest);
1098 spin_unlock_bh(&sch->dev_queue->lock); 1100 spin_unlock_bh(root_lock);
1099 return skb->len; 1101 return skb->len;
1100 1102
1101nla_put_failure: 1103nla_put_failure:
1102 spin_unlock_bh(&sch->dev_queue->lock); 1104 spin_unlock_bh(root_lock);
1103 nla_nest_cancel(skb, nest); 1105 nla_nest_cancel(skb, nest);
1104 return -1; 1106 return -1;
1105} 1107}
@@ -1365,7 +1367,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1365 goto failure; 1367 goto failure;
1366 1368
1367 gen_new_estimator(&cl->bstats, &cl->rate_est, 1369 gen_new_estimator(&cl->bstats, &cl->rate_est,
1368 &sch->dev_queue->lock, 1370 qdisc_root_lock(sch),
1369 tca[TCA_RATE] ? : &est.nla); 1371 tca[TCA_RATE] ? : &est.nla);
1370 cl->refcnt = 1; 1372 cl->refcnt = 1;
1371 cl->children = 0; 1373 cl->children = 0;
@@ -1420,7 +1422,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1420 } else { 1422 } else {
1421 if (tca[TCA_RATE]) 1423 if (tca[TCA_RATE])
1422 gen_replace_estimator(&cl->bstats, &cl->rate_est, 1424 gen_replace_estimator(&cl->bstats, &cl->rate_est,
1423 &sch->dev_queue->lock, 1425 qdisc_root_lock(sch),
1424 tca[TCA_RATE]); 1426 tca[TCA_RATE]);
1425 sch_tree_lock(sch); 1427 sch_tree_lock(sch);
1426 } 1428 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index bc585f2089ff..c5ea40c9eb21 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -180,7 +180,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
180 * skb will be queued. 180 * skb will be queued.
181 */ 181 */
182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) { 182 if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
183 struct Qdisc *rootq = sch->dev_queue->qdisc; 183 struct Qdisc *rootq = qdisc_root(sch);
184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ 184 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
185 q->duplicate = 0; 185 q->duplicate = 0;
186 186
@@ -319,6 +319,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
319 struct netem_sched_data *q = qdisc_priv(sch); 319 struct netem_sched_data *q = qdisc_priv(sch);
320 unsigned long n = nla_len(attr)/sizeof(__s16); 320 unsigned long n = nla_len(attr)/sizeof(__s16);
321 const __s16 *data = nla_data(attr); 321 const __s16 *data = nla_data(attr);
322 spinlock_t *root_lock;
322 struct disttable *d; 323 struct disttable *d;
323 int i; 324 int i;
324 325
@@ -333,9 +334,11 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
333 for (i = 0; i < n; i++) 334 for (i = 0; i < n; i++)
334 d->table[i] = data[i]; 335 d->table[i] = data[i];
335 336
336 spin_lock_bh(&sch->dev_queue->lock); 337 root_lock = qdisc_root_lock(sch);
338
339 spin_lock_bh(root_lock);
337 d = xchg(&q->delay_dist, d); 340 d = xchg(&q->delay_dist, d);
338 spin_unlock_bh(&sch->dev_queue->lock); 341 spin_unlock_bh(root_lock);
339 342
340 kfree(d); 343 kfree(d);
341 return 0; 344 return 0;