diff options
author | Patrick McHardy <kaber@trash.net> | 2007-04-16 20:07:08 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-04-26 01:29:08 -0400 |
commit | fd44de7cc1d430caef91ad9aecec9ff000fe86f8 (patch) | |
tree | 7b86a2e8623400c290c886ef83c573cdf49da3e8 /net/sched/sch_api.c | |
parent | 0463d4ae25771aaf3379bb6b2392f6edf23c2828 (diff) |
[NET_SCHED]: ingress: switch back to using ingress_lock
Switch ingress queueing back to use ingress_lock. qdisc_lock_tree now locks
both the ingress and egress qdiscs on the device. All changes to data that
might be used on both ingress and egress need to be protected by using
qdisc_lock_tree instead of manually taking dev->queue_lock. Additionally
the qdisc stats_lock needs to be initialized to ingress_lock for ingress
qdiscs.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_api.c')
-rw-r--r-- | net/sched/sch_api.c | 26 |
1 files changed, 15 insertions, 11 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0ce6914f5981..8699e7006d80 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -500,12 +500,16 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp) | |||
500 | 500 | ||
501 | if (handle == TC_H_INGRESS) { | 501 | if (handle == TC_H_INGRESS) { |
502 | sch->flags |= TCQ_F_INGRESS; | 502 | sch->flags |= TCQ_F_INGRESS; |
503 | sch->stats_lock = &dev->ingress_lock; | ||
503 | handle = TC_H_MAKE(TC_H_INGRESS, 0); | 504 | handle = TC_H_MAKE(TC_H_INGRESS, 0); |
504 | } else if (handle == 0) { | 505 | } else { |
505 | handle = qdisc_alloc_handle(dev); | 506 | sch->stats_lock = &dev->queue_lock; |
506 | err = -ENOMEM; | 507 | if (handle == 0) { |
507 | if (handle == 0) | 508 | handle = qdisc_alloc_handle(dev); |
508 | goto err_out3; | 509 | err = -ENOMEM; |
510 | if (handle == 0) | ||
511 | goto err_out3; | ||
512 | } | ||
509 | } | 513 | } |
510 | 514 | ||
511 | sch->handle = handle; | 515 | sch->handle = handle; |
@@ -654,9 +658,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) | |||
654 | return err; | 658 | return err; |
655 | if (q) { | 659 | if (q) { |
656 | qdisc_notify(skb, n, clid, q, NULL); | 660 | qdisc_notify(skb, n, clid, q, NULL); |
657 | spin_lock_bh(&dev->queue_lock); | 661 | qdisc_lock_tree(dev); |
658 | qdisc_destroy(q); | 662 | qdisc_destroy(q); |
659 | spin_unlock_bh(&dev->queue_lock); | 663 | qdisc_unlock_tree(dev); |
660 | } | 664 | } |
661 | } else { | 665 | } else { |
662 | qdisc_notify(skb, n, clid, NULL, q); | 666 | qdisc_notify(skb, n, clid, NULL, q); |
@@ -789,17 +793,17 @@ graft: | |||
789 | err = qdisc_graft(dev, p, clid, q, &old_q); | 793 | err = qdisc_graft(dev, p, clid, q, &old_q); |
790 | if (err) { | 794 | if (err) { |
791 | if (q) { | 795 | if (q) { |
792 | spin_lock_bh(&dev->queue_lock); | 796 | qdisc_lock_tree(dev); |
793 | qdisc_destroy(q); | 797 | qdisc_destroy(q); |
794 | spin_unlock_bh(&dev->queue_lock); | 798 | qdisc_unlock_tree(dev); |
795 | } | 799 | } |
796 | return err; | 800 | return err; |
797 | } | 801 | } |
798 | qdisc_notify(skb, n, clid, old_q, q); | 802 | qdisc_notify(skb, n, clid, old_q, q); |
799 | if (old_q) { | 803 | if (old_q) { |
800 | spin_lock_bh(&dev->queue_lock); | 804 | qdisc_lock_tree(dev); |
801 | qdisc_destroy(old_q); | 805 | qdisc_destroy(old_q); |
802 | spin_unlock_bh(&dev->queue_lock); | 806 | qdisc_unlock_tree(dev); |
803 | } | 807 | } |
804 | } | 808 | } |
805 | return 0; | 809 | return 0; |