Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--  net/sched/sch_generic.c | 26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 407dfdb142a4..fcc7533f0bcc 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -75,9 +75,8 @@ static inline int dev_requeue_skb(struct sk_buff *skb,
 	return 0;
 }
 
-static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
-					       struct netdev_queue *dev_queue,
-					       struct Qdisc *q)
+static inline struct sk_buff *dequeue_skb(struct netdev_queue *dev_queue,
+					  struct Qdisc *q)
 {
 	struct sk_buff *skb;
 
@@ -90,10 +89,10 @@ static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
 }
 
 static inline int handle_dev_cpu_collision(struct sk_buff *skb,
-					   struct net_device *dev,
 					   struct netdev_queue *dev_queue,
 					   struct Qdisc *q)
 {
+	struct net_device *dev = dev_queue->dev;
 	int ret;
 
 	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
@@ -139,21 +138,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
  * >0 - queue is not empty.
  *
  */
-static inline int qdisc_restart(struct net_device *dev)
+static inline int qdisc_restart(struct netdev_queue *txq)
 {
-	struct netdev_queue *txq = &dev->tx_queue;
 	struct Qdisc *q = txq->qdisc;
-	struct sk_buff *skb;
 	int ret = NETDEV_TX_BUSY;
+	struct net_device *dev;
+	struct sk_buff *skb;
 
 	/* Dequeue packet */
-	if (unlikely((skb = dev_dequeue_skb(dev, txq, q)) == NULL))
+	if (unlikely((skb = dequeue_skb(txq, q)) == NULL))
 		return 0;
 
 
 	/* And release queue */
 	spin_unlock(&txq->lock);
 
+	dev = txq->dev;
+
 	HARD_TX_LOCK(dev, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
@@ -170,7 +171,7 @@ static inline int qdisc_restart(struct net_device *dev)
 
 	case NETDEV_TX_LOCKED:
 		/* Driver try lock failed */
-		ret = handle_dev_cpu_collision(skb, dev, txq, q);
+		ret = handle_dev_cpu_collision(skb, txq, q);
 		break;
 
 	default:
@@ -186,11 +187,12 @@ static inline int qdisc_restart(struct net_device *dev)
 	return ret;
 }
 
-void __qdisc_run(struct net_device *dev)
+void __qdisc_run(struct netdev_queue *txq)
 {
+	struct net_device *dev = txq->dev;
 	unsigned long start_time = jiffies;
 
-	while (qdisc_restart(dev)) {
+	while (qdisc_restart(txq)) {
 		if (netif_queue_stopped(dev))
 			break;
 
@@ -200,7 +202,7 @@ void __qdisc_run(struct net_device *dev)
 		 * 2. we've been doing it for too long.
 		 */
 		if (need_resched() || jiffies != start_time) {
-			netif_schedule_queue(&dev->tx_queue);
+			netif_schedule_queue(txq);
 			break;
 		}
 	}