 drivers/net/ethernet/mellanox/mlx4/en_netdev.c |  4 ++++
 drivers/net/ethernet/mellanox/mlx4/en_tx.c     | 15 +++++++++++----
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h   |  2 ++
 3 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 35ad0971939d..eaa8fadf19c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -667,6 +667,7 @@ int mlx4_en_start_port(struct net_device *dev)
                         mlx4_en_deactivate_cq(priv, cq);
                         goto tx_err;
                 }
+                tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

                 /* Arm CQ for TX completions */
                 mlx4_en_arm_cq(priv, cq);
@@ -812,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work)
                                                  watchdog_task);
         struct mlx4_en_dev *mdev = priv->mdev;
         struct net_device *dev = priv->dev;
+        int i;

         en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

         mutex_lock(&mdev->state_lock);
         if (priv->port_up) {
                 mlx4_en_stop_port(dev);
+                for (i = 0; i < priv->tx_ring_num; i++)
+                        netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
                 if (mlx4_en_start_port(dev))
                         en_err(priv, "Failed restarting port %d\n", priv->port);
         }
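
The mlx4_en_restart() hunk above pairs the port teardown with netdev_tx_reset_queue() so that stale byte-queue-limit (BQL) state from before the restart is not carried over to the re-armed rings. A minimal sketch of the same reset pattern, assuming a generic multiqueue netdev rather than the mlx4 structures (example_reset_all_tx_queues is an illustrative name, not part of the driver):

#include <linux/netdevice.h>

/* Clear BQL accounting on every TX queue of a device before its rings are
 * re-initialized; mirrors what the watchdog restart path does per ring.
 */
static void example_reset_all_tx_queues(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->real_num_tx_queues; i++)
                netdev_tx_reset_queue(netdev_get_tx_queue(dev, i));
}
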
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2d493420e1a6..9a38483feb92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -315,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
         int size = cq->size;
         u32 size_mask = ring->size_mask;
         struct mlx4_cqe *buf = cq->buf;
+        u32 packets = 0;
+        u32 bytes = 0;

         if (!priv->port_up)
                 return;
@@ -343,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                         priv, ring, ring_index,
                                         !!((ring->cons + txbbs_skipped) &
                                         ring->size));
+                        packets++;
+                        bytes += ring->tx_info[ring_index].nr_bytes;
                 } while (ring_index != new_index);

                 ++cons_index;
@@ -359,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
         mlx4_cq_set_ci(mcq);
         wmb();
         ring->cons += txbbs_skipped;
+        netdev_tx_completed_queue(ring->tx_queue, packets, bytes);

         /* Wakeup Tx queue if this ring stopped it */
         if (unlikely(ring->blocked)) {
                 if ((u32) (ring->prod - ring->cons) <=
                      ring->size - HEADROOM - MAX_DESC_TXBBS) {
                         ring->blocked = 0;
-                        netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring));
+                        netif_tx_wake_queue(ring->tx_queue);
                         priv->port_stats.wake_queue++;
                 }
         }
@@ -583,7 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         if (unlikely(((int)(ring->prod - ring->cons)) >
                      ring->size - HEADROOM - MAX_DESC_TXBBS)) {
                 /* every full Tx ring stops queue */
-                netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind));
+                netif_tx_stop_queue(ring->tx_queue);
                 ring->blocked = 1;
                 priv->port_stats.queue_stopped++;

@@ -649,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 priv->port_stats.tso_packets++;
                 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
                         !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
-                ring->bytes += skb->len + (i - 1) * lso_header_size;
+                tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
                 ring->packets += i;
         } else {
                 /* Normal (Non LSO) packet */
@@ -657,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                         ((ring->prod & ring->size) ?
                          cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
                 data = &tx_desc->data;
-                ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+                tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                 ring->packets++;

         }
+        ring->bytes += tx_info->nr_bytes;
+        netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
         AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);


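
Taken together, the en_tx.c changes implement the usual BQL handshake: each transmitted skb's byte count is recorded per descriptor and charged with netdev_tx_sent_queue(), and the completion handler credits the accumulated packet/byte totals back with a single netdev_tx_completed_queue() call per CQ poll. A condensed sketch of that handshake, assuming simplified ring/descriptor structures (my_tx_ring, my_tx_info) in place of the mlx4 ones:

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_tx_info {
        u32 nr_bytes;                   /* bytes charged to BQL for this descriptor */
};

struct my_tx_ring {
        struct netdev_queue *tx_queue;  /* cached via netdev_get_tx_queue() */
};

/* Transmit side: remember the byte count so the completion path can credit
 * back exactly what was charged here.
 */
static void example_tx_account(struct my_tx_ring *ring, struct my_tx_info *info,
                               struct sk_buff *skb)
{
        info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
        netdev_tx_sent_queue(ring->tx_queue, info->nr_bytes);
}

/* Completion side: called once per CQ poll with the totals accumulated while
 * walking the completed descriptors (as the patched loop does with
 * packets++ / bytes += tx_info[ring_index].nr_bytes).
 */
static void example_tx_complete(struct my_tx_ring *ring, u32 packets, u32 bytes)
{
        netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
}
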
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0feafc5344b3..5d876375a132 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -200,6 +200,7 @@ enum cq_type {
 struct mlx4_en_tx_info {
         struct sk_buff *skb;
         u32 nr_txbb;
+        u32 nr_bytes;
         u8 linear;
         u8 data_offset;
         u8 inl;
@@ -257,6 +258,7 @@ struct mlx4_en_tx_ring {
         unsigned long tx_csum;
         struct mlx4_bf bf;
         bool bf_enabled;
+        struct netdev_queue *tx_queue;
 };

 struct mlx4_en_rx_desc {
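
The new nr_bytes field is stored per descriptor rather than recomputed at completion time mainly because of the LSO branch in mlx4_en_xmit(), where the bytes that actually hit the wire exceed skb->len: every segment after the first repeats the LSO header. A hedged sketch of that arithmetic (example_wire_bytes is an illustrative helper, not driver code):

#include <linux/kernel.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Wire-byte estimate used for BQL/statistics accounting, following the two
 * branches in the patched mlx4_en_xmit(): TSO packets are split into
 * gso_size-sized segments that each carry their own header copy, while
 * short non-LSO frames are padded to the minimum Ethernet length.
 */
static u32 example_wire_bytes(const struct sk_buff *skb, int lso_header_size)
{
        if (lso_header_size) {
                u32 payload = skb->len - lso_header_size;
                u32 segs = DIV_ROUND_UP(payload, skb_shinfo(skb)->gso_size);

                return skb->len + (segs - 1) * lso_header_size;
        }

        return max_t(u32, skb->len, ETH_ZLEN);
}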