author		Patrick McHardy <kaber@trash.net>	2007-04-16 20:07:08 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:29:08 -0400
commit		fd44de7cc1d430caef91ad9aecec9ff000fe86f8 (patch)
tree		7b86a2e8623400c290c886ef83c573cdf49da3e8 /net
parent		0463d4ae25771aaf3379bb6b2392f6edf23c2828 (diff)
[NET_SCHED]: ingress: switch back to using ingress_lock
Switch ingress queueing back to use ingress_lock. qdisc_lock_tree now locks
both the ingress and egress qdiscs on the device. All changes to data that
might be used on both ingress and egress need to be protected by using
qdisc_lock_tree instead of manually taking dev->queue_lock. Additionally the
qdisc stats_lock needs to be initialized to ingress_lock for ingress qdiscs.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
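For reference, the locking scheme this patch establishes can be condensed into
a short sketch. This is not part of the patch itself: qdisc_lock_tree() and
qdisc_unlock_tree() are the real helpers as changed in net/sched/sch_generic.c
below, while update_shared_state() is a hypothetical caller shown only to
illustrate the rule from the commit message.

/*
 * Sketch of the post-patch locking (condensed from this commit).
 * Lock order: dev->queue_lock (BH-safe, egress) is taken first, then
 * dev->ingress_lock nested inside it; unlock in reverse order.
 */
void qdisc_lock_tree(struct net_device *dev)
{
	spin_lock_bh(&dev->queue_lock);
	spin_lock(&dev->ingress_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock(&dev->ingress_lock);
	spin_unlock_bh(&dev->queue_lock);
}

/*
 * Hypothetical example: a writer touching state visible to both the
 * ingress and egress paths (e.g. the route4 fastmap below) brackets
 * the update with the tree helpers instead of taking dev->queue_lock
 * alone.
 */
static void update_shared_state(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	/* ... modify data reachable from both qdisc trees ... */
	qdisc_unlock_tree(dev);
}

The ordering matters: since ing_filter() and ing_hook() take only
ingress_lock, configuration paths that race with them must hold both locks,
which is exactly what the helpers provide.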
Diffstat (limited to 'net')
 net/core/dev.c          |  4 ++--
 net/sched/cls_route.c   |  4 ++--
 net/sched/sch_api.c     | 26 +++++++++++++++-----------
 net/sched/sch_generic.c |  6 +++++-
 net/sched/sch_ingress.c |  9 ++-------
 5 files changed, 26 insertions(+), 23 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 7f31d0f88424..c8f5ea9aea81 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1747,10 +1747,10 @@ static int ing_filter(struct sk_buff *skb)
 
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
 
-	spin_lock(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 	if ((q = dev->qdisc_ingress) != NULL)
 		result = q->enqueue(skb, q);
-	spin_unlock(&dev->queue_lock);
+	spin_unlock(&dev->ingress_lock);
 
 	}
 
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index e92d716c9158..cc941d0ee3a5 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -89,9 +89,9 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
 {
-	spin_lock_bh(&dev->queue_lock);
+	qdisc_lock_tree(dev);
 	memset(head->fastmap, 0, sizeof(head->fastmap));
-	spin_unlock_bh(&dev->queue_lock);
+	qdisc_unlock_tree(dev);
 }
 
 static inline void
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0ce6914f5981..8699e7006d80 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -500,12 +500,16 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 
 	if (handle == TC_H_INGRESS) {
 		sch->flags |= TCQ_F_INGRESS;
+		sch->stats_lock = &dev->ingress_lock;
 		handle = TC_H_MAKE(TC_H_INGRESS, 0);
-	} else if (handle == 0) {
-		handle = qdisc_alloc_handle(dev);
-		err = -ENOMEM;
-		if (handle == 0)
-			goto err_out3;
+	} else {
+		sch->stats_lock = &dev->queue_lock;
+		if (handle == 0) {
+			handle = qdisc_alloc_handle(dev);
+			err = -ENOMEM;
+			if (handle == 0)
+				goto err_out3;
+		}
 	}
 
 	sch->handle = handle;
@@ -654,9 +658,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
 			return err;
 		if (q) {
 			qdisc_notify(skb, n, clid, q, NULL);
-			spin_lock_bh(&dev->queue_lock);
+			qdisc_lock_tree(dev);
 			qdisc_destroy(q);
-			spin_unlock_bh(&dev->queue_lock);
+			qdisc_unlock_tree(dev);
 		}
 	} else {
 		qdisc_notify(skb, n, clid, NULL, q);
@@ -789,17 +793,17 @@ graft:
 		err = qdisc_graft(dev, p, clid, q, &old_q);
 		if (err) {
 			if (q) {
-				spin_lock_bh(&dev->queue_lock);
+				qdisc_lock_tree(dev);
 				qdisc_destroy(q);
-				spin_unlock_bh(&dev->queue_lock);
+				qdisc_unlock_tree(dev);
 			}
 			return err;
 		}
 		qdisc_notify(skb, n, clid, old_q, q);
 		if (old_q) {
-			spin_lock_bh(&dev->queue_lock);
+			qdisc_lock_tree(dev);
 			qdisc_destroy(old_q);
-			spin_unlock_bh(&dev->queue_lock);
+			qdisc_unlock_tree(dev);
 		}
 	}
 	return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1894eb72f6cf..3385ee592541 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -42,16 +42,20 @@
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
  *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 {
 	spin_lock_bh(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+	spin_unlock(&dev->ingress_lock);
 	spin_unlock_bh(&dev->queue_lock);
 }
 
@@ -431,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->dequeue = ops->dequeue;
 	sch->dev = dev;
 	dev_hold(dev);
-	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -447,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 1fb60aba1e6c..ad22dc6af22a 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -248,16 +248,11 @@ ing_hook(unsigned int hook, struct sk_buff **pskb,
 		skb->dev ? (*pskb)->dev->name : "(no dev)",
 		skb->len);
 
-/*
-revisit later: Use a private since lock dev->queue_lock is also
-used on the egress (might slow things for an iota)
-*/
-
 	if (dev->qdisc_ingress) {
-		spin_lock(&dev->queue_lock);
+		spin_lock(&dev->ingress_lock);
 		if ((q = dev->qdisc_ingress) != NULL)
 			fwres = q->enqueue(skb, q);
-		spin_unlock(&dev->queue_lock);
+		spin_unlock(&dev->ingress_lock);
 	}
 
 	return fwres;