path: root/net/sched/sch_generic.c
author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-27 12:26:46 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-04-27 12:26:46 -0400
commit		15c54033964a943de7b0763efd3bd0ede7326395 (patch)
tree		840b292612d1b5396d5bab5bde537a9013db3ceb /net/sched/sch_generic.c
parent		ad5da3cf39a5b11a198929be1f2644e17ecd767e (diff)
parent		912a41a4ab935ce8c4308428ec13fc7f8b1f18f4 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (448 commits)
  [IPV4] nl_fib_lookup: Initialise res.r before fib_res_put(&res)
  [IPV6]: Fix thinko in ipv6_rthdr_rcv() changes.
  [IPV4]: Add multipath cached to feature-removal-schedule.txt
  [WIRELESS] cfg80211: Clarify locking comment.
  [WIRELESS] cfg80211: Fix locking in wiphy_new.
  [WEXT] net_device: Don't include wext bits if not required.
  [WEXT]: Misc code cleanups.
  [WEXT]: Reduce inline abuse.
  [WEXT]: Move EXPORT_SYMBOL statements where they belong.
  [WEXT]: Cleanup early ioctl call path.
  [WEXT]: Remove options.
  [WEXT]: Remove dead debug code.
  [WEXT]: Clean up how wext is called.
  [WEXT]: Move to net/wireless
  [AFS]: Eliminate cmpxchg() usage in vlocation code.
  [RXRPC]: Fix pointers passed to bitops.
  [RXRPC]: Remove bogus atomic_* overrides.
  [AFS]: Fix u64 printing in debug logging.
  [AFS]: Add "directory write" support.
  [AFS]: Implement the CB.InitCallBackState3 operation.
  ...
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	35
1 file changed, 12 insertions(+), 23 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 52eb3439d7c6..3385ee592541 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,34 +36,27 @@
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-	write_lock(&qdisc_tree_lock);
 	spin_lock_bh(&dev->queue_lock);
+	spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+	spin_unlock(&dev->ingress_lock);
 	spin_unlock_bh(&dev->queue_lock);
-	write_unlock(&qdisc_tree_lock);
 }
 
 /*
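The rewritten comment above is the heart of the change: enqueue/dequeue stay serialized by dev->queue_lock, ingress filtering is serialized by dev->ingress_lock, and tree updates and walks happen only under the rtnl mutex, which is why the global qdisc_tree_lock rwlock can be deleted. As a rough illustration (not part of this patch; the function name is made up), a walker that needs both per-device locks would now bracket its work with qdisc_lock_tree()/qdisc_unlock_tree(), which nest dev->queue_lock (BH-disabled) outside dev->ingress_lock:

	/* Illustration only: walk dev->qdisc_list with both per-device locks
	 * held, following the new qdisc_lock_tree() ordering
	 * (dev->queue_lock with BHs off, then dev->ingress_lock).
	 */
	static void example_dump_qdiscs(struct net_device *dev)
	{
		struct Qdisc *q;

		qdisc_lock_tree(dev);
		list_for_each_entry(q, &dev->qdisc_list, list)
			printk(KERN_DEBUG "%s: qdisc %x refcnt %d\n",
			       dev->name, q->handle, atomic_read(&q->refcnt));
		qdisc_unlock_tree(dev);
	}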
@@ -442,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
 	sch->dequeue = ops->dequeue;
 	sch->dev = dev;
 	dev_hold(dev);
-	sch->stats_lock = &dev->queue_lock;
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
@@ -458,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
 	sch = qdisc_alloc(dev, ops);
 	if (IS_ERR(sch))
 		goto errout;
+	sch->stats_lock = &dev->queue_lock;
 	sch->parent = parentid;
 
 	if (!ops->init || ops->init(sch, NULL) == 0)
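The two stats_lock hunks work as a pair: qdisc_alloc() no longer assumes the statistics are protected by dev->queue_lock, and qdisc_create_dflt() now sets that default itself. A caller that allocates a qdisc directly therefore has to pick a stats lock on its own; roughly (illustration only, the helper name is made up):

	/* Illustration only: a direct qdisc_alloc() user now assigns
	 * stats_lock itself before using the qdisc.
	 */
	static struct Qdisc *example_alloc_qdisc(struct net_device *dev,
						 struct Qdisc_ops *ops)
	{
		struct Qdisc *sch = qdisc_alloc(dev, ops);

		if (IS_ERR(sch))
			return NULL;
		sch->stats_lock = &dev->queue_lock;	/* previously done in qdisc_alloc() */
		return sch;
	}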
@@ -528,15 +521,11 @@ void dev_activate(struct net_device *dev)
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			write_lock(&qdisc_tree_lock);
 			list_add_tail(&qdisc->list, &dev->qdisc_list);
-			write_unlock(&qdisc_tree_lock);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		write_lock(&qdisc_tree_lock);
 		dev->qdisc_sleeping = qdisc;
-		write_unlock(&qdisc_tree_lock);
 	}
 
 	if (!netif_carrier_ok(dev))
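With qdisc_tree_lock gone, the list_add_tail() and the dev->qdisc_sleeping assignment in dev_activate() are protected only by the rtnl mutex, which its callers (dev_open() and the qdisc configuration paths) already hold. A minimal sketch of the expected calling context, assuming the usual rtnl convention (illustration, not taken from this patch):

	/* Illustration only: dev_activate() runs with the rtnl mutex held,
	 * which is what now serializes the qdisc tree updates.
	 */
	rtnl_lock();
	dev_activate(dev);
	rtnl_unlock();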