path: root/net/sched/sch_generic.c
author     David S. Miller <davem@davemloft.net>   2008-07-09 02:13:53 -0400
committer  David S. Miller <davem@davemloft.net>   2008-07-09 02:13:53 -0400
commit     c773e847ea8f6812804e40f52399c6921a00eab1 (patch)
tree       952e0e262cc0b0f2136bc2a62938ae1d186f896a /net/sched/sch_generic.c
parent     eb6aafe3f843cb0e939546c03540a3b4911b6964 (diff)
netdev: Move _xmit_lock and xmit_lock_owner into netdev_queue.
Accesses are mostly structured such that when there are multiple TX queues the code transformations will be a little bit simpler.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--   net/sched/sch_generic.c   |   9
1 file changed, 4 insertions(+), 5 deletions(-)
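The commit message describes moving _xmit_lock and xmit_lock_owner out of struct net_device and into struct netdev_queue, so transmit locking and owner tracking become per-queue state, and HARD_TX_LOCK()/HARD_TX_UNLOCK() take the queue as an extra argument (as the diff below shows). The following is a minimal, hypothetical user-space sketch of that pattern, not kernel code: a pthread mutex stands in for the per-queue xmit spinlock, a plain integer stands in for the CPU id, and the names tx_queue_sketch, hard_tx_lock() and hard_tx_unlock() are invented for illustration.

#include <pthread.h>
#include <stdio.h>

struct tx_queue_sketch {
	pthread_mutex_t xmit_lock;	/* stands in for the per-queue _xmit_lock */
	long xmit_lock_owner;		/* stands in for xmit_lock_owner, -1 = unowned */
};

static void hard_tx_lock(struct tx_queue_sketch *txq, long cpu)
{
	pthread_mutex_lock(&txq->xmit_lock);
	txq->xmit_lock_owner = cpu;	/* record which "CPU" holds this queue's lock */
}

static void hard_tx_unlock(struct tx_queue_sketch *txq)
{
	txq->xmit_lock_owner = -1;	/* clear ownership before releasing the lock */
	pthread_mutex_unlock(&txq->xmit_lock);
}

int main(void)
{
	struct tx_queue_sketch txq = {
		.xmit_lock = PTHREAD_MUTEX_INITIALIZER,
		.xmit_lock_owner = -1,
	};

	hard_tx_lock(&txq, 0);		/* pretend we are CPU 0 */
	printf("queue locked, owner = %ld\n", txq.xmit_lock_owner);
	hard_tx_unlock(&txq);
	printf("queue unlocked, owner = %ld\n", txq.xmit_lock_owner);
	return 0;
}

Because the lock and its owner now live in the queue rather than the device, each TX queue can later be locked independently, which is the point noted in the commit message.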
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fcc7533f0bc..b6a36d39466 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -92,10 +92,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 					    struct netdev_queue *dev_queue,
 					    struct Qdisc *q)
 {
-	struct net_device *dev = dev_queue->dev;
 	int ret;
 
-	if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
 		/*
 		 * Same CPU holding the lock. It may be a transient
 		 * configuration error, when hard_start_xmit() recurses. We
@@ -105,7 +104,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		kfree_skb(skb);
 		if (net_ratelimit())
 			printk(KERN_WARNING "Dead loop on netdevice %s, "
-			       "fix it urgently!\n", dev->name);
+			       "fix it urgently!\n", dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
@@ -155,10 +154,10 @@ static inline int qdisc_restart(struct netdev_queue *txq)
 
 	dev = txq->dev;
 
-	HARD_TX_LOCK(dev, smp_processor_id());
+	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_subqueue_stopped(dev, skb))
 		ret = dev_hard_start_xmit(skb, dev);
-	HARD_TX_UNLOCK(dev);
+	HARD_TX_UNLOCK(dev, txq);
 
 	spin_lock(&txq->lock);
 	q = txq->qdisc;
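In the first hunk, handle_dev_cpu_collision() now reads xmit_lock_owner from the netdev_queue rather than the net_device, so a recursive hard_start_xmit() on the same CPU is detected against the queue it is actually transmitting on. Below is a hedged, self-contained user-space sketch of just that check; the struct and function names are illustrative, and a thread-local integer stands in for smp_processor_id().

#include <stdio.h>

struct txq_sketch {
	long xmit_lock_owner;	/* id of the CPU currently holding this queue's xmit lock */
};

/*
 * Returns nonzero when the caller already owns this queue's xmit lock,
 * i.e. hard_start_xmit() has recursed on the same CPU (the "dead loop" case).
 */
static int same_cpu_collision(const struct txq_sketch *txq, long this_cpu)
{
	return txq->xmit_lock_owner == this_cpu;
}

int main(void)
{
	struct txq_sketch txq = { .xmit_lock_owner = 3 };

	if (same_cpu_collision(&txq, 3))
		printf("dead loop: drop the packet and warn\n");
	if (!same_cpu_collision(&txq, 1))
		printf("another CPU holds the lock: requeue and retry later\n");
	return 0;
}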