author     Patrick McHardy <kaber@trash.net>    2007-04-16 20:07:08 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2007-04-26 01:29:08 -0400
commit     fd44de7cc1d430caef91ad9aecec9ff000fe86f8 (patch)
tree       7b86a2e8623400c290c886ef83c573cdf49da3e8 /net/sched/sch_generic.c
parent     0463d4ae25771aaf3379bb6b2392f6edf23c2828 (diff)
[NET_SCHED]: ingress: switch back to using ingress_lock
Switch ingress queueing back to use ingress_lock. qdisc_lock_tree now locks
both the ingress and egress qdiscs on the device. All changes to data that
might be used on both ingress and egress need to be protected by using
qdisc_lock_tree instead of manually taking dev->queue_lock. Additionally
the qdisc stats_lock needs to be initialized to ingress_lock for ingress
qdiscs.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
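For readers following along, here is a minimal, hypothetical sketch of the usage pattern the message describes: a configuration change touching state that both the ingress and egress paths can see goes through qdisc_lock_tree()/qdisc_unlock_tree() rather than taking dev->queue_lock by hand. The helper name qdisc_change_shared_qdisc() and the field being updated are illustrative only; the locking calls mirror the patched sch_generic.c below.

#include <linux/netdevice.h>	/* struct net_device (2.6.21-era fields assumed) */
#include <net/pkt_sched.h>	/* qdisc_lock_tree(), qdisc_unlock_tree() */
#include <net/sch_generic.h>	/* struct Qdisc */

/* Hypothetical caller: with this patch, qdisc_lock_tree() takes
 * dev->queue_lock (BH-disabling) and then nests dev->ingress_lock,
 * so one call protects data used on both ingress and egress. */
static void qdisc_change_shared_qdisc(struct net_device *dev,
				      struct Qdisc *new_qdisc)
{
	qdisc_lock_tree(dev);		/* queue_lock + ingress_lock */
	dev->qdisc = new_qdisc;		/* illustrative shared state */
	qdisc_unlock_tree(dev);		/* unlocks in reverse order */
}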
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1894eb72f6cf..3385ee592541 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -42,16 +42,20 @@
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
  *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 {
 	spin_lock_bh(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+	spin_unlock(&dev->ingress_lock);
 	spin_unlock_bh(&dev->queue_lock);
 }
 
@@ -431,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->dequeue = ops->dequeue;
 	sch->dev = dev;
 	dev_hold(dev);
-	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -447,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
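Note that the hunks above only move the default stats_lock assignment from qdisc_alloc() into qdisc_create_dflt(); the ingress-specific initialization mentioned in the commit message happens outside this file (this view is limited to sch_generic.c). A hedged sketch of how that initialization could look in the qdisc creation path, assuming TC_H_INGRESS identifies ingress qdiscs; the helper qdisc_init_stats_lock() is hypothetical and the real change is not shown here.

#include <linux/netdevice.h>	/* struct net_device */
#include <linux/pkt_sched.h>	/* TC_H_INGRESS */
#include <net/sch_generic.h>	/* struct Qdisc */

/* Sketch only, not part of this file's diff: choose the stats_lock
 * according to whether the qdisc hangs off the ingress or egress side,
 * matching the locking scheme described in the commit message. */
static void qdisc_init_stats_lock(struct Qdisc *sch, struct net_device *dev,
				  u32 handle)
{
	if (handle == TC_H_INGRESS)
		sch->stats_lock = &dev->ingress_lock;	/* ingress qdisc */
	else
		sch->stats_lock = &dev->queue_lock;	/* egress qdisc */
}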