aboutsummaryrefslogtreecommitdiffstats
path: root/net/sched/sch_generic.c
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-04-16 20:02:10 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-04-26 01:29:07 -0400
commit0463d4ae25771aaf3379bb6b2392f6edf23c2828 (patch)
tree5c820b718abfe086a7b1d91814cb99d721439a46 /net/sched/sch_generic.c
parentffa4d7216e848fbfdcb8e6f0bb66abeaa1888964 (diff)
[NET_SCHED]: Eliminate qdisc_tree_lock
Since we're now holding the rtnl during the entire dump operation, we can remove qdisc_tree_lock, whose only purpose is to protect dump callbacks from concurrent changes to the qdisc tree. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--net/sched/sch_generic.c29
1 files changed, 7 insertions, 22 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 52eb3439d7c6..1894eb72f6cf 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,34 +36,23 @@

 /* Main transmission queue. */

-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);

 void qdisc_lock_tree(struct net_device *dev)
 {
-	write_lock(&qdisc_tree_lock);
 	spin_lock_bh(&dev->queue_lock);
 }

 void qdisc_unlock_tree(struct net_device *dev)
 {
 	spin_unlock_bh(&dev->queue_lock);
-	write_unlock(&qdisc_tree_lock);
 }

 /*
@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev)
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			write_lock(&qdisc_tree_lock);
 			list_add_tail(&qdisc->list, &dev->qdisc_list);
-			write_unlock(&qdisc_tree_lock);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		write_lock(&qdisc_tree_lock);
 		dev->qdisc_sleeping = qdisc;
-		write_unlock(&qdisc_tree_lock);
 	}

 	if (!netif_carrier_ok(dev))