path: root/net/sched/sch_generic.c
author	Jamal Hadi Salim <hadi@cyberus.ca>	2007-09-25 22:27:13 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-10 19:52:15 -0400
commit	8236632fb3532188c75656421e29f5ab51b47db7 (patch)
tree	e93f1c710c420fb643ddb424f0db974ac4e064f2 /net/sched/sch_generic.c
parent	854d8363f37491c955b0edc60d37b62f3d71bb67 (diff)
[NET_SCHED]: explicitly hold dev tx lock
For N CPUs, with full-throttle traffic on all N CPUs funneling traffic to the same ethernet device, the device's queue lock is contended by all N CPUs constantly. The TX lock, by contrast, is contended by at most 2 CPUs.

In the current mode of operation, after all the work of entering the dequeue region, we may end up aborting the path if we are unable to get the tx lock, and go back to contend for the queue lock. As N goes up, this gets worse.

The changes in this patch result in a small increase in performance on a 4-CPU (2x dual-core) machine with no IRQ binding. Both e1000 and tg3 showed similar behavior.

Signed-off-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
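For readers outside the tree: the patch replaces the open-coded trylock with the HARD_TX_LOCK/HARD_TX_UNLOCK helpers already used on the dev_queue_xmit path. A rough sketch of their semantics, not the verbatim net/core/dev.c definitions (the real macros also record the owning CPU, which is what the cpu argument is for, so the xmit path can detect lock recursion):

	/*
	 * Sketch only: drivers that set NETIF_F_LLTX do their own
	 * locking in start_xmit, so the core skips the tx lock for
	 * them; everyone else takes dev's tx lock here.
	 */
	#define HARD_TX_LOCK(dev, cpu) do {			\
		if (!((dev)->features & NETIF_F_LLTX))		\
			netif_tx_lock(dev);			\
	} while (0)

	#define HARD_TX_UNLOCK(dev) do {			\
		if (!((dev)->features & NETIF_F_LLTX))		\
			netif_tx_unlock(dev);			\
	} while (0)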
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index e970e8e75720..95ae11956f35 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,34 +134,19 @@ static inline int qdisc_restart(struct net_device *dev)
 {
 	struct Qdisc *q = dev->qdisc;
 	struct sk_buff *skb;
-	unsigned lockless;
 	int ret;
 
 	/* Dequeue packet */
 	if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
 		return 0;
 
-	/*
-	 * When the driver has LLTX set, it does its own locking in
-	 * start_xmit. These checks are worth it because even uncongested
-	 * locks can be quite expensive. The driver can do a trylock, as
-	 * is being done here; in case of lock contention it should return
-	 * NETDEV_TX_LOCKED and the packet will be requeued.
-	 */
-	lockless = (dev->features & NETIF_F_LLTX);
-
-	if (!lockless && !netif_tx_trylock(dev)) {
-		/* Another CPU grabbed the driver tx lock */
-		return handle_dev_cpu_collision(skb, dev, q);
-	}
 
 	/* And release queue */
 	spin_unlock(&dev->queue_lock);
 
+	HARD_TX_LOCK(dev, smp_processor_id());
 	ret = dev_hard_start_xmit(skb, dev);
-
-	if (!lockless)
-		netif_tx_unlock(dev);
+	HARD_TX_UNLOCK(dev);
 
 	spin_lock(&dev->queue_lock);
 	q = dev->qdisc;
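The behavioral difference under contention, in sketch form (identifiers as in the hunk above; handle_dev_cpu_collision requeues the skb and reports the collision):

	/* Before: trylock; on contention, abandon the whole dequeue
	 * pass and go back to fight over the N-way contended
	 * dev->queue_lock.
	 */
	if (!lockless && !netif_tx_trylock(dev))
		return handle_dev_cpu_collision(skb, dev, q);

	/* After: queue_lock is already dropped, so just wait for the
	 * tx lock, which at most one other CPU (in dev_queue_xmit)
	 * can hold, instead of bouncing back to the queue lock.
	 */
	HARD_TX_LOCK(dev, smp_processor_id());
	ret = dev_hard_start_xmit(skb, dev);
	HARD_TX_UNLOCK(dev);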