author    Krishna Kumar <krkumar2@in.ibm.com>	2007-11-15 00:05:37 -0500
committer Roland Dreier <rolandd@cisco.com>	2008-01-25 17:17:44 -0500
commit    48fe5e594c979177b7f20affd027be56e8ea2767 (patch)
tree      7e2885c5f3ac8cd1abc4366b228088589f1ec917 /drivers/infiniband/ulp/ipoib/ipoib_main.c
parent    3d68ea32611095c7e09409ac1b2a56da22fd5eb7 (diff)
IPoIB: Remove redundant check of netif_queue_stopped() in xmit handler
qdisc_run() now tests for queue_stopped() before calling __qdisc_run(), and the same check is done in every iteration of __qdisc_run(), so another check is not required in the driver xmit handler. This means that ipoib_start_xmit() no longer needs to test netif_queue_stopped(); the test was added to fix earlier kernels, where the networking stack did not guarantee that the xmit method of an LLTX driver would not be called after the queue was stopped, but current kernels do provide this guarantee.

To validate, I put a debug in the TX_BUSY path which was never hit with 64 threads running overnight, exercising this code a few hundred million times.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
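For reference, a minimal sketch of the core-stack behavior the message relies on, approximating qdisc_run() and __qdisc_run() as they appeared in kernels of this period (include/net/pkt_sched.h and net/sched/sch_generic.c); the exact bit name and helper signatures are reconstructed from that era's tree and are illustrative rather than authoritative:

	/* The stack skips __qdisc_run() entirely when the queue is
	 * stopped, so even an LLTX driver's xmit routine is not
	 * entered once netif_stop_queue() has taken effect. */
	static inline void qdisc_run(struct net_device *dev)
	{
		if (!netif_queue_stopped(dev) &&
		    !test_and_set_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
			__qdisc_run(dev);
	}

	/* Each iteration of the transmit loop re-checks the queue
	 * state, which is why the driver-side test removed in the
	 * diff below is redundant. */
	void __qdisc_run(struct net_device *dev)
	{
		do {
			if (!qdisc_restart(dev))
				break;
		} while (!netif_queue_stopped(dev));

		clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
	}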
Diffstat (limited to 'drivers/infiniband/ulp/ipoib/ipoib_main.c')
-rw-r--r--	drivers/infiniband/ulp/ipoib/ipoib_main.c	10
1 file changed, 0 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d7330451685c..a082466f4a83 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -672,16 +672,6 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(!spin_trylock_irqsave(&priv->tx_lock, flags)))
 		return NETDEV_TX_LOCKED;
 
-	/*
-	 * Check if our queue is stopped. Since we have the LLTX bit
-	 * set, we can't rely on netif_stop_queue() preventing our
-	 * xmit function from being called with a full queue.
-	 */
-	if (unlikely(netif_queue_stopped(dev))) {
-		spin_unlock_irqrestore(&priv->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
 	if (likely(skb->dst && skb->dst->neighbour)) {
 		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
 			ipoib_path_lookup(skb, dev);