author     Yevgeny Petrilin <yevgenyp@mellanox.co.il>   2008-12-25 21:13:45 -0500
committer  David S. Miller <davem@davemloft.net>        2008-12-25 21:13:45 -0500
commit     48374ddce72e278e29080e3177e74a13c034d8b4 (patch)
tree       f25da6d9b4a35f212fd5437260fe81210a297367 /drivers/net/mlx4/en_tx.c
parent     b51968d676db1c4e541b4c84de7ce7af812c9e9f (diff)
mlx4_en: Removed TX locking when polling TX cq
There is no need to synchronize the polling with the transmit function; the only place that needs synchronization is where the cq is processed from the transmit function. Also replaced spin_lock_irq with spin_trylock: if somebody else is already processing the cq, there is no need to wait for it to finish.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: David S. Miller <davem@davemloft.net>
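To illustrate the locking pattern the patch adopts, here is a minimal userspace sketch of the same trylock idea, assuming POSIX spinlocks as a stand-in for the kernel spinlock API. The names fake_ring, fake_cq, tx_irq and process_tx_cq are hypothetical analogues of the driver's structures, not actual mlx4_en code:

/*
 * Hedged sketch (not driver code): userspace analogue of the reworked
 * mlx4_en_tx_irq() flow, using pthread spinlocks.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_ring {
	pthread_spinlock_t comp_lock;	/* stands in for ring->comp_lock */
};

struct fake_cq {
	int armed;
	struct fake_ring *ring;
};

/* Stand-in for mlx4_en_process_tx_cq(): reclaim completed TX work. */
static void process_tx_cq(struct fake_cq *cq)
{
	printf("processing TX completions\n");
}

/*
 * Mirrors the new mlx4_en_tx_irq() flow: clear the armed flag, then
 * *try* to take comp_lock.  If another context already holds it, that
 * context is processing this cq right now, so the handler can return
 * immediately instead of spinning for the lock.
 */
static void tx_irq(struct fake_cq *cq)
{
	cq->armed = 0;
	if (pthread_spin_trylock(&cq->ring->comp_lock) != 0)
		return;		/* someone else is already on it */
	process_tx_cq(cq);
	pthread_spin_unlock(&cq->ring->comp_lock);
}

int main(void)
{
	struct fake_ring ring;
	struct fake_cq cq = { .armed = 1, .ring = &ring };

	pthread_spin_init(&ring.comp_lock, PTHREAD_PROCESS_PRIVATE);
	tx_irq(&cq);
	pthread_spin_destroy(&ring.comp_lock);
	return 0;
}

The safety argument, per the commit message, is that a contended comp_lock means somebody else is already draining the same cq, so skipping the work here loses nothing.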
Diffstat (limited to 'drivers/net/mlx4/en_tx.c')
-rw-r--r--  drivers/net/mlx4/en_tx.c  24
1 file changed, 13 insertions, 11 deletions
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 8592f8fb8475..1f25821dccfd 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -404,14 +404,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	spin_lock_irq(&ring->comp_lock);
 	cq->armed = 0;
+	if (!spin_trylock(&ring->comp_lock))
+		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	if (ring->blocked)
-		mlx4_en_arm_cq(priv, cq);
-	else
-		mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock_irq(&ring->comp_lock);
+	mod_timer(&cq->timer, jiffies + 1);
+	spin_unlock(&ring->comp_lock);
 }
 
 
@@ -424,8 +422,10 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 
 	INC_PERF_COUNTER(priv->pstats.tx_poll);
 
-	netif_tx_lock(priv->dev);
-	spin_lock_irq(&ring->comp_lock);
+	if (!spin_trylock(&ring->comp_lock)) {
+		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+		return;
+	}
 	mlx4_en_process_tx_cq(cq->dev, cq);
 	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
 
@@ -435,8 +435,7 @@ void mlx4_en_poll_tx_cq(unsigned long data)
 	if (inflight && priv->port_up)
 		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
 
-	spin_unlock_irq(&ring->comp_lock);
-	netif_tx_unlock(priv->dev);
+	spin_unlock(&ring->comp_lock);
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
@@ -479,7 +478,10 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		mlx4_en_process_tx_cq(priv->dev, cq);
+		if (spin_trylock(&ring->comp_lock)) {
+			mlx4_en_process_tx_cq(priv->dev, cq);
+			spin_unlock(&ring->comp_lock);
+		}
 }
 
 static void *get_frag_ptr(struct sk_buff *skb)
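The two remaining call sites handle a failed trylock differently: mlx4_en_poll_tx_cq() re-arms its timer and retries later, while mlx4_en_xmit_poll() simply skips the opportunistic poll. A minimal userspace sketch of both fallbacks, again with hypothetical stand-in names (poll_tx_cq, xmit_poll, POLL_TIMEOUT, POLL_MODER) and a plain time_t in place of the kernel timer:

/* Hedged sketch (not driver code): the two trylock fallback styles. */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#define POLL_TIMEOUT 1	/* stand-in for MLX4_EN_TX_POLL_TIMEOUT */
#define POLL_MODER   16	/* stand-in for MLX4_EN_TX_POLL_MODER (power of two) */

static pthread_spinlock_t comp_lock;
static time_t timer_expires;	/* stand-in for cq->timer */
static unsigned int poll_cnt;	/* stand-in for ring->poll_cnt */

static void process_tx_cq(void)
{
	printf("reclaiming TX completions\n");
}

/*
 * Poll-timer path: on contention, do not wait -- just push the
 * deadline forward and try again on the next tick.  (The real driver
 * also re-arms only while packets are still in flight.)
 */
static void poll_tx_cq(void)
{
	if (pthread_spin_trylock(&comp_lock) != 0) {
		timer_expires = time(NULL) + POLL_TIMEOUT;
		return;
	}
	process_tx_cq();
	pthread_spin_unlock(&comp_lock);
}

/*
 * Transmit path: poll opportunistically every POLL_MODER packets,
 * and silently skip the poll if the lock is contended.
 */
static void xmit_poll(void)
{
	if ((++poll_cnt & (POLL_MODER - 1)) == 0)
		if (pthread_spin_trylock(&comp_lock) == 0) {
			process_tx_cq();
			pthread_spin_unlock(&comp_lock);
		}
}

int main(void)
{
	pthread_spin_init(&comp_lock, PTHREAD_PROCESS_PRIVATE);
	poll_tx_cq();
	for (int i = 0; i < POLL_MODER; i++)
		xmit_poll();
	pthread_spin_destroy(&comp_lock);
	return 0;
}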