Diffstat (limited to 'net/sched/sch_generic.c')
 net/sched/sch_generic.c | 35 ++++++++++++------------------------
 1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 52eb3439d7c6..3385ee592541 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,34 +36,27 @@
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-	write_lock(&qdisc_tree_lock);
 	spin_lock_bh(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+	spin_unlock(&dev->ingress_lock);
 	spin_unlock_bh(&dev->queue_lock);
-	write_unlock(&qdisc_tree_lock);
 }
 
 /*
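With qdisc_tree_lock gone, the two per-device spinlocks taken by qdisc_lock_tree() are the only runtime protection left, while the rtnl mutex covers tree updates and walks. A minimal sketch of how a caller might rely on the helpers above; dev_walk_qdiscs() is a hypothetical illustration, not part of this patch:

/* Hypothetical sketch (not in this patch): with the helpers above,
 * walking dev->qdisc_list is safe against concurrent enqueue/dequeue
 * (dev->queue_lock) and ingress filtering (dev->ingress_lock).
 */
static void dev_walk_qdiscs(struct net_device *dev)
{
	struct Qdisc *q;

	qdisc_lock_tree(dev);	/* queue_lock (BH off), then ingress_lock */
	list_for_each_entry(q, &dev->qdisc_list, list) {
		/* inspect q; the list cannot change under us here */
	}
	qdisc_unlock_tree(dev);	/* released in reverse order */
}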
@@ -442,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->dequeue = ops->dequeue;
 	sch->dev = dev;
 	dev_hold(dev);
-	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
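The assignment removed here is not lost: the next hunk moves it into qdisc_create_dflt(). The effect is that direct callers of qdisc_alloc() may now pick their own statistics lock. A hedged sketch of such a caller; my_qdisc_setup() and mylock are hypothetical names:

/* Hypothetical sketch (not in this patch): qdisc_alloc() no longer
 * defaults stats_lock to &dev->queue_lock, so a direct caller sets
 * it explicitly.
 */
static struct Qdisc *my_qdisc_setup(struct net_device *dev,
				    struct Qdisc_ops *ops,
				    spinlock_t *mylock)
{
	struct Qdisc *sch = qdisc_alloc(dev, ops);

	if (IS_ERR(sch))
		return sch;
	sch->stats_lock = mylock;	/* caller-chosen statistics lock */
	return sch;
}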
@@ -458,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
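For the common path nothing changes in behaviour: a qdisc obtained through qdisc_create_dflt() still ends up with stats_lock pointing at dev->queue_lock; only the place of the assignment moves. A sketch of the typical call; attach_default() is a hypothetical wrapper, while pfifo_fast_ops and TC_H_ROOT are the values dev_activate() uses elsewhere in this file:

/* Sketch: the default path keeps its old behaviour, because
 * qdisc_create_dflt() now performs the stats_lock assignment itself.
 */
static struct Qdisc *attach_default(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, TC_H_ROOT);
	if (qdisc == NULL)
		return NULL;
	/* qdisc->stats_lock == &dev->queue_lock at this point */
	return qdisc;
}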
@@ -528,15 +521,11 @@ void dev_activate(struct net_device *dev)
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
 		}
-		write_lock(&qdisc_tree_lock);
 		list_add_tail(&qdisc->list, &dev->qdisc_list);
-		write_unlock(&qdisc_tree_lock);
 	} else {
 		qdisc = &noqueue_qdisc;
 	}
-	write_lock(&qdisc_tree_lock);
 	dev->qdisc_sleeping = qdisc;
-	write_unlock(&qdisc_tree_lock);
 	}
 
 	if (!netif_carrier_ok(dev))
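Dropping the write_lock/write_unlock pairs is safe because dev_activate() runs under the rtnl mutex, which this patch makes the sole serializer for qdisc tree updates. A minimal sketch of the calling convention; bring_up() is a hypothetical wrapper, not part of this patch:

/* Hypothetical sketch (not in this patch): tree updates such as the
 * dev->qdisc_sleeping assignment above are serialized by rtnl alone.
 */
static void bring_up(struct net_device *dev)
{
	rtnl_lock();		/* the only lock tree updates need now */
	dev_activate(dev);
	rtnl_unlock();
}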