about · summary · refs · log · tree · commit · diff · stats
path: root/net/sched
diff options
context:
space:
mode:
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/sch_generic.c19
1 files changed, 2 insertions, 17 deletions
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e970e8e75720..95ae11956f35 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,34 +134,19 @@ static inline int qdisc_restart(struct net_device *dev)
 {
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
-	unsigned lockless;
 	int ret;
 
 	/* Dequeue packet */
 	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
 		return 0;
 
-	/*
-	 * When the driver has LLTX set, it does its own locking in
-	 * start_xmit. These checks are worth it because even uncongested
-	 * locks can be quite expensive. The driver can do a trylock, as
-	 * is being done here; in case of lock contention it should return
-	 * NETDEV_TX_LOCKED and the packet will be requeued.
-	 */
-	lockless = (dev->features & NETIF_F_LLTX);
-
-	if (!lockless && !netif_tx_trylock(dev)) {
-		/* Another CPU grabbed the driver tx lock */
-		return handle_dev_cpu_collision(skb, dev, q);
-	}
 
 	/* And release queue */
 	spin_unlock(&dev->queue_lock);
 
+	HARD_TX_LOCK(dev, smp_processor_id());
 	ret = dev_hard_start_xmit(skb, dev);
-
-	if (!lockless)
-		netif_tx_unlock(dev);
+	HARD_TX_UNLOCK(dev);
 
 	spin_lock(&dev->queue_lock);
 	q = dev->qdisc;