 drivers/net/ethernet/mellanox/mlx4/en_cq.c     | 14 ++----
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c |  4 ++
 drivers/net/ethernet/mellanox/mlx4/en_tx.c     | 59 +--------
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   |  6 +--
 4 files changed, 9 insertions(+), 74 deletions(-)
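
This change converts the mlx4_en driver's TX completion handling from timer-based polling to a purely interrupt-driven model: the per-CQ polling timer, the per-ring comp_lock, and the mlx4_en_poll_tx_cq()/mlx4_en_xmit_poll() helpers go away; each TX CQ is armed when the port is started and re-armed from the interrupt handler; and MLX4_EN_TX_COAL_PKTS is raised from 5 to 16 to coalesce completion events more aggressively now that each event costs an interrupt.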
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b81272e314..908a460d8db6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx) {
-		init_timer(&cq->timer);
-		cq->timer.function = mlx4_en_poll_tx_cq;
-		cq->timer.data = (unsigned long) cq;
-	} else {
+	if (!cq->is_tx) {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_enable(&cq->napi);
 	}
@@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 
 void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
-
-	if (cq->is_tx)
-		del_timer(&cq->timer);
-	else {
+	if (!cq->is_tx) {
 		napi_disable(&cq->napi);
 		netif_napi_del(&cq->napi);
 	}
 
-	mlx4_cq_free(mdev->dev, &cq->mcq);
+	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
 }
 
 /* Set rx cq moderation parameters */
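
After these two hunks, TX CQs carry no timer state and only RX CQs register a NAPI context. For reference, the deactivate path as it reads post-patch, assembled from the hunk above:

void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
	/* Only RX CQs registered a NAPI context in mlx4_en_activate_cq() */
	if (!cq->is_tx) {
		napi_disable(&cq->napi);
		netif_napi_del(&cq->napi);
	}

	mlx4_cq_free(priv->mdev->dev, &cq->mcq);
}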
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index bfcfd8ae5f73..35ad0971939d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -667,6 +667,10 @@ int mlx4_en_start_port(struct net_device *dev)
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
+
+		/* Arm CQ for TX completions */
+		mlx4_en_arm_cq(priv, cq);
+
 		/* Set initial ownership of all Tx TXBBs to SW (1) */
 		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
 			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
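
Arming each TX CQ once at port start is what makes removing the timer safe: from that point on every completion event invokes mlx4_en_tx_irq(), which re-arms the CQ after processing (see en_tx.c below), so a completion notification is always outstanding while the port is up and completions can no longer be stranded waiting for a poll.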
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d9bab5338c2f..2d493420e1a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 
 	inline_thold = min(inline_thold, MAX_INLINE);
 
-	spin_lock_init(&ring->comp_lock);
-
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info)
@@ -377,41 +375,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
 {
 	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
 	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
 
-	if (!spin_trylock(&ring->comp_lock))
-		return;
 	mlx4_en_process_tx_cq(cq->dev, cq);
-	mod_timer(&cq->timer, jiffies + 1);
-	spin_unlock(&ring->comp_lock);
+	mlx4_en_arm_cq(priv, cq);
 }
 
 
-void mlx4_en_poll_tx_cq(unsigned long data)
-{
-	struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
-	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
-	u32 inflight;
-
-	INC_PERF_COUNTER(priv->pstats.tx_poll);
-
-	if (!spin_trylock_irq(&ring->comp_lock)) {
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-		return;
-	}
-	mlx4_en_process_tx_cq(cq->dev, cq);
-	inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
-
-	/* If there are still packets in flight and the timer has not already
-	 * been scheduled by the Tx routine then schedule it here to guarantee
-	 * completion processing of these packets */
-	if (inflight && priv->port_up)
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	spin_unlock_irq(&ring->comp_lock);
-}
-
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 						       struct mlx4_en_tx_ring *ring,
 						       u32 index,
@@ -440,25 +409,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
 	return ring->buf + index * TXBB_SIZE;
 }
 
-static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
-{
-	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
-	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
-	unsigned long flags;
-
-	/* If we don't have a pending timer, set one up to catch our recent
-	   post in case the interface becomes idle */
-	if (!timer_pending(&cq->timer))
-		mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
-
-	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
-	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
-			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irqrestore(&ring->comp_lock, flags);
-		}
-}
-
 static int is_inline(struct sk_buff *skb, void **pfrag)
 {
 	void *ptr;
@@ -590,7 +540,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_tx_ring *ring;
-	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_desc *tx_desc;
 	struct mlx4_wqe_data_seg *data;
 	struct skb_frag_struct *frag;
@@ -638,9 +587,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		ring->blocked = 1;
 		priv->port_stats.queue_stopped++;
 
-		/* Use interrupts to find out when queue opened */
-		cq = &priv->tx_cq[tx_ind];
-		mlx4_en_arm_cq(priv, cq);
 		return NETDEV_TX_BUSY;
 	}
 
@@ -788,9 +734,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
 	}
 
-	/* Poll CQ here */
-	mlx4_en_xmit_poll(priv, tx_ind);
-
 	return NETDEV_TX_OK;
 
 tx_drop:
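
With the trylock-and-timer dance gone, the TX interrupt handler reduces to process-then-rearm. The post-patch handler, assembled from the hunk above:

void mlx4_en_tx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	/* Reap completed descriptors, then request the next completion
	 * event so the hardware interrupts us again when more arrive. */
	mlx4_en_process_tx_cq(cq->dev, cq);
	mlx4_en_arm_cq(priv, cq);
}

This is also why mlx4_en_xmit() drops its mlx4_en_arm_cq() call on the NETDEV_TX_BUSY path: the CQ is permanently armed, so the queue-reopened notification no longer needs to be requested on demand.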
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 47e1c0ff1775..0feafc5344b3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -122,7 +122,7 @@ enum {
 #define MLX4_EN_RX_COAL_TARGET	44
 #define MLX4_EN_RX_COAL_TIME	0x10
 
-#define MLX4_EN_TX_COAL_PKTS	5
+#define MLX4_EN_TX_COAL_PKTS	16
 #define MLX4_EN_TX_COAL_TIME	0x80
 
 #define MLX4_EN_RX_RATE_LOW	400000
@@ -255,7 +255,6 @@ struct mlx4_en_tx_ring {
 	unsigned long bytes;
 	unsigned long packets;
 	unsigned long tx_csum;
-	spinlock_t comp_lock;
 	struct mlx4_bf bf;
 	bool bf_enabled;
 };
@@ -308,8 +307,6 @@ struct mlx4_en_cq {
 	spinlock_t lock;
 	struct net_device *dev;
 	struct napi_struct napi;
-	/* Per-core Tx cq processing support */
-	struct timer_list timer;
 	int size;
 	int buf_size;
 	unsigned vector;
@@ -530,7 +527,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
 
-void mlx4_en_poll_tx_cq(unsigned long data);
 void mlx4_en_tx_irq(struct mlx4_cq *mcq);
 u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
 netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
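
Raising MLX4_EN_TX_COAL_PKTS from 5 to 16 offsets the cost of the new scheme: assuming the usual count/time moderation semantics (a completion event fires after MLX4_EN_TX_COAL_PKTS completions or after MLX4_EN_TX_COAL_TIME, whichever comes first), an armed CQ on a busy ring now raises at most one interrupt per 16 completed packets instead of one event per 5, bounding the added interrupt load.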