author    David S. Miller <davem@davemloft.net>  2008-07-16 06:22:39 -0400
committer David S. Miller <davem@davemloft.net>  2008-07-17 22:21:29 -0400
commit  c7e4f3bbb4ba4e48ab3b529d5016e454cee1ccd6 (patch)
tree    76975288fd9448ee522867e3681978804431e736 /net/sched
parent  78a5b30b7324b2d66bcf7d2e3935877d3c26497c (diff)
pkt_sched: Kill qdisc_lock_tree and qdisc_unlock_tree.
No longer used.

Signed-off-by: David S. Miller <davem@davemloft.net>
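For context: the removed pair froze the entire device by taking every TX queue
lock plus the RX queue lock; under the new scheme each qdisc is serialized by
its own root lock instead. A minimal sketch of the replacement pattern,
assuming the qdisc_root_lock() helper named in the updated comment below (the
caller function itself is hypothetical):

    #include <linux/spinlock.h>
    #include <net/sch_generic.h>

    /* Hypothetical caller: modify one qdisc's scheduling state.
     * qdisc_root_lock() returns the root lock covering this qdisc;
     * the _bh lock variants preserve the bottom-half exclusion the
     * removed helpers obtained via local_bh_disable().
     */
    static void example_modify(struct Qdisc *qdisc)
    {
    	spinlock_t *root_lock = qdisc_root_lock(qdisc);

    	spin_lock_bh(root_lock);
    	/* ... update data participating in scheduling ... */
    	spin_unlock_bh(root_lock);
    }

One root lock per qdisc tree also means two devices (or two TX queues with
independent roots) no longer contend on a single device-wide set of locks.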
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_generic.c | 36
1 file changed, 3 insertions(+), 33 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8cdf0b4a6a5a..3d53e92ad9c8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -29,44 +29,14 @@
 /* Main transmission queue. */
 
 /* Modifications to data participating in scheduling must be protected with
- * queue->lock spinlock.
+ * qdisc_root_lock(qdisc) spinlock.
  *
  * The idea is the following:
- * - enqueue, dequeue are serialized via top level device
- *   spinlock queue->lock.
- * - ingress filtering is serialized via top level device
- *   spinlock dev->rx_queue.lock.
+ * - enqueue, dequeue are serialized via qdisc root lock
+ * - ingress filtering is also serialized via qdisc root lock
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-void qdisc_lock_tree(struct net_device *dev)
-	__acquires(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	local_bh_disable();
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_lock(&txq->lock);
-	}
-	spin_lock(&dev->rx_queue.lock);
-}
-EXPORT_SYMBOL(qdisc_lock_tree);
-
-void qdisc_unlock_tree(struct net_device *dev)
-	__releases(dev->rx_queue.lock)
-{
-	unsigned int i;
-
-	spin_unlock(&dev->rx_queue.lock);
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		spin_unlock(&txq->lock);
-	}
-	local_bh_enable();
-}
-EXPORT_SYMBOL(qdisc_unlock_tree);
-
 static inline int qdisc_qlen(struct Qdisc *q)
 {
 	return q->q.qlen;
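The third rule in the comment (tree updates and tree walking only under the
rtnl mutex) is unchanged by this patch. A sketch of that convention, with a
hypothetical restructuring operation as the body:

    #include <linux/rtnetlink.h>

    /* Hypothetical: restructuring the qdisc tree itself is done
     * under the rtnl mutex, not under any queue spinlock.
     */
    static void example_tree_update(void)
    {
    	rtnl_lock();
    	/* ... graft/replace qdiscs, walk the tree ... */
    	rtnl_unlock();
    }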