about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Patrick McHardy <kaber@trash.net>            2007-04-16 20:02:10 -0400
committer David S. Miller <davem@sunset.davemloft.net> 2007-04-26 01:29:07 -0400
commit0463d4ae25771aaf3379bb6b2392f6edf23c2828 (patch)
tree5c820b718abfe086a7b1d91814cb99d721439a46
parentffa4d7216e848fbfdcb8e6f0bb66abeaa1888964 (diff)
[NET_SCHED]: Eliminate qdisc_tree_lock
Since we're now holding the rtnl during the entire dump operation, we can
remove qdisc_tree_lock, whose only purpose is to protect dump callbacks
from concurrent changes to the qdisc tree.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--net/sched/cls_api.c2
-rw-r--r--net/sched/sch_api.c22
-rw-r--r--net/sched/sch_generic.c29
4 files changed, 10 insertions, 45 deletions
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b2cc9a8ed4e7..5754d53d9efc 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -13,8 +13,6 @@ struct qdisc_walker
 	int	(*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
 };
 
-extern rwlock_t qdisc_tree_lock;
-
 #define QDISC_ALIGNTO		32
 #define QDISC_ALIGN(len)	(((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
 
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ca3da5013b7a..ebf94edf0478 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
-	read_lock(&qdisc_tree_lock);
 	if (!tcm->tcm_parent)
 		q = dev->qdisc_sleeping;
 	else
@@ -457,7 +456,6 @@ errout:
 	if (cl)
 		cops->put(q, cl);
 out:
-	read_unlock(&qdisc_tree_lock);
 	dev_put(dev);
 	return skb->len;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2e863bdaa9a1..0ce6914f5981 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
    (root qdisc, all its children, children of children etc.)
  */
 
-static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	struct Qdisc *q;
 
@@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
 	return NULL;
 }
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
-{
-	struct Qdisc *q;
-
-	read_lock(&qdisc_tree_lock);
-	q = __qdisc_lookup(dev, handle);
-	read_unlock(&qdisc_tree_lock);
-	return q;
-}
-
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
 	unsigned long cl;
217 unsigned long cl; 207 unsigned long cl;
@@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 	if (n == 0)
 		return;
 	while ((parentid = sch->parent)) {
-		sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
 		cops = sch->ops->cl_ops;
 		if (cops->qlen_notify) {
 			cl = cops->get(sch, parentid);
@@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		if (idx > s_idx)
 			s_q_idx = 0;
-		read_lock(&qdisc_tree_lock);
 		q_idx = 0;
 		list_for_each_entry(q, &dev->qdisc_list, list) {
 			if (q_idx < s_q_idx) {
@@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			}
 			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
-				read_unlock(&qdisc_tree_lock);
+					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
 				goto done;
-			}
 			q_idx++;
 		}
-		read_unlock(&qdisc_tree_lock);
 	}
 
 done:
@@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	read_lock(&qdisc_tree_lock);
 	list_for_each_entry(q, &dev->qdisc_list, list) {
 		if (t < s_t || !q->ops->cl_ops ||
 		    (tcm->tcm_parent &&
@@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 		t++;
 	}
-	read_unlock(&qdisc_tree_lock);
 
 	cb->args[0] = t;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 52eb3439d7c6..1894eb72f6cf 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,34 +36,23 @@
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-	write_lock(&qdisc_tree_lock);
 	spin_lock_bh(&dev->queue_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
 	spin_unlock_bh(&dev->queue_lock);
-	write_unlock(&qdisc_tree_lock);
 }
 
 /*
@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev)
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			write_lock(&qdisc_tree_lock);
 			list_add_tail(&qdisc->list, &dev->qdisc_list);
-			write_unlock(&qdisc_tree_lock);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		write_lock(&qdisc_tree_lock);
 		dev->qdisc_sleeping = qdisc;
-		write_unlock(&qdisc_tree_lock);
 	}
 
 	if (!netif_carrier_ok(dev))