Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--   net/sched/sch_generic.c   28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 3b6e6a780927..52eb3439d7c6 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,7 +36,7 @@

/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications
   to data, participating in scheduling must be additionally
@@ -66,7 +66,7 @@ void qdisc_unlock_tree(struct net_device *dev)
        write_unlock(&qdisc_tree_lock);
}

/*
   dev->queue_lock serializes queue accesses for this device
   AND dev->qdisc pointer itself.

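Note: the two comment blocks above fix the locking convention: qdisc_tree_lock protects the qdisc tree structure, while dev->queue_lock additionally guards the active dev->qdisc pointer and the data used by the running scheduler. As an illustrative sketch of the pattern (the helper name example_swap_root_qdisc is hypothetical; the sequence mirrors dev_shutdown() later in this file and dev_graft_qdisc() in sch_api.c), replacing a root qdisc looks roughly like this:

static void example_swap_root_qdisc(struct net_device *dev, struct Qdisc *new_qdisc)
{
        struct Qdisc *old;

        qdisc_lock_tree(dev);           /* qdisc_tree_lock + dev->queue_lock, BHs off */
        old = dev->qdisc_sleeping;
        dev->qdisc_sleeping = new_qdisc;
        dev->qdisc = &noop_qdisc;       /* datapath always sees a valid qdisc meanwhile */
        qdisc_destroy(old);             /* actual free is deferred via RCU */
        qdisc_unlock_tree(dev);
}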
@@ -82,7 +82,7 @@ void qdisc_unlock_tree(struct net_device *dev)
   we do not check dev->tbusy flag here.

   Returns:  0  - queue is empty.
            >0  - queue is not empty, but throttled.
            <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
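Note: the return-value contract documented above is what the caller loops on. A hedged approximation of that caller, modelled on qdisc_run()/__qdisc_run() of this kernel generation and reconstructed from memory rather than taken from this diff (the run-bit guard and queue_lock handling are omitted):

static inline void example_qdisc_run(struct net_device *dev)
{
        /* Keep pulling packets while qdisc_restart() reports a non-empty,
         * non-throttled queue (< 0) and the driver has not stopped the queue. */
        while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
                /* NOTHING */;
}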
@@ -112,7 +112,7 @@ static inline int qdisc_restart(struct net_device *dev)
                        if (!netif_tx_trylock(dev)) {
                        collision:
                                /* So, someone grabbed the driver. */

                                /* It may be transient configuration error,
                                   when hard_start_xmit() recurses. We detect
                                   it by checking xmit owner and drop the
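Note: the recursion case described in the comment above ("checking xmit owner") relies on netif_tx_lock() recording the owning CPU. A hedged sketch of that check, not verbatim from this file (dev->xmit_lock_owner and smp_processor_id() are the identifiers used for this in this era):

                                if (dev->xmit_lock_owner == smp_processor_id()) {
                                        /* hard_start_xmit() re-entered on the same CPU:
                                         * drop the packet rather than deadlock on our own lock */
                                        kfree_skb(skb);
                                        return -1;
                                }
                                /* genuine cross-CPU collision: requeue and retry later */
                                goto requeue;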
@@ -128,7 +128,7 @@ static inline int qdisc_restart(struct net_device *dev)
                                goto requeue;
                        }
                }

                {
                        /* And release queue */
                        spin_unlock(&dev->queue_lock);
@@ -137,7 +137,7 @@ static inline int qdisc_restart(struct net_device *dev)
                                int ret;

                                ret = dev_hard_start_xmit(skb, dev);
                                if (ret == NETDEV_TX_OK) {
                                        if (!nolock) {
                                                netif_tx_unlock(dev);
                                        }
@@ -146,15 +146,15 @@ static inline int qdisc_restart(struct net_device *dev)
                                }
                                if (ret == NETDEV_TX_LOCKED && nolock) {
                                        spin_lock(&dev->queue_lock);
                                        goto collision;
                                }
                        }

                        /* NETDEV_TX_BUSY - we need to requeue */
                        /* Release the driver */
                        if (!nolock) {
                                netif_tx_unlock(dev);
                        }
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
                }
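Note: taken together, the hunks above cover the lock hand-off at the heart of qdisc_restart(). A condensed, comment-form summary of that flow (paraphrase, not code from the file):

/*
 * qdisc_restart() lock hand-off, condensed:
 *
 *   caller holds dev->queue_lock
 *   skb = q->dequeue(q)
 *   non-LLTX driver: netif_tx_trylock(dev); LLTX ("nolock") driver locks itself
 *   spin_unlock(&dev->queue_lock)        -- driver xmit runs without the queue lock
 *   ret = dev_hard_start_xmit(skb, dev)
 *   NETDEV_TX_OK:     unlock tx, re-take queue_lock, report progress
 *   NETDEV_TX_LOCKED: (LLTX only) re-take queue_lock, treat as a collision
 *   NETDEV_TX_BUSY:   unlock tx, re-take queue_lock, requeue the skb
 */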
@@ -300,7 +300,7 @@ struct Qdisc noop_qdisc = {
        .enqueue        =       noop_enqueue,
        .dequeue        =       noop_dequeue,
        .flags          =       TCQ_F_BUILTIN,
        .ops            =       &noop_qdisc_ops,
        .list           =       LIST_HEAD_INIT(noop_qdisc.list),
};

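Note: noop_qdisc above is the built-in placeholder scheduler installed while a device is down or being reconfigured, so the transmit path always has a valid qdisc to call. A hedged reconstruction, from memory rather than from this diff, of the two handlers it points to:

/* enqueue drops the packet, dequeue never produces one */
static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
        kfree_skb(skb);
        return NET_XMIT_CN;     /* report the drop as congestion to the caller */
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
        return NULL;
}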
@@ -454,7 +454,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
                                 unsigned int parentid)
{
        struct Qdisc *sch;

        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;
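Note: qdisc_create_dflt() (with the parentid argument this kernel has) is how the stack attaches a default scheduler to a device. A hedged usage sketch modelled on dev_activate() of this era, reconstructed from memory with error handling trimmed:

        struct Qdisc *qdisc;

        qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, TC_H_ROOT);
        if (qdisc == NULL) {
                printk(KERN_INFO "%s: activation failed\n", dev->name);
                return;
        }
        dev->qdisc_sleeping = qdisc;    /* made active as dev->qdisc when the device comes up */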
@@ -478,7 +478,7 @@ void qdisc_reset(struct Qdisc *qdisc)
                ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
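Note: __qdisc_destroy() above is the RCU side of qdisc teardown. A hedged sketch of how it gets scheduled (the q_rcu field name and the TCQ_F_BUILTIN/refcnt guard are recalled from this kernel generation, not shown in this diff):

void qdisc_destroy(struct Qdisc *qdisc)
{
        /* built-in qdiscs (noop, noqueue) and still-referenced qdiscs are never freed here */
        if ((qdisc->flags & TCQ_F_BUILTIN) ||
            !atomic_dec_and_test(&qdisc->refcnt))
                return;
        /* ... unlink from dev->qdisc_list, release class and filter references ... */
        call_rcu(&qdisc->q_rcu, __qdisc_destroy);       /* free only after readers finish */
}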
@@ -600,10 +600,10 @@ void dev_shutdown(struct net_device *dev)
        dev->qdisc_sleeping = &noop_qdisc;
        qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
                dev->qdisc_ingress = NULL;
                qdisc_destroy(qdisc);
        }
#endif
        BUG_TRAP(!timer_pending(&dev->watchdog_timer));
        qdisc_unlock_tree(dev);
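Note: dev_shutdown() above is the final teardown of a device's traffic-control state. A hedged sketch of where it sits in the wider unregister path (call sites recalled from net/core/dev.c of this era, not part of this diff):

        rtnl_lock();
        unregister_netdevice(dev);      /* closes the device (dev_deactivate() points
                                         * dev->qdisc at noop_qdisc and synchronizes with
                                         * the transmit path), then calls dev_shutdown(dev) */
        rtnl_unlock();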