Diffstat (limited to 'drivers/net/mlx4')
 drivers/net/mlx4/en_netdev.c |  8
 drivers/net/mlx4/en_rx.c     | 96
 drivers/net/mlx4/en_tx.c     | 29
 drivers/net/mlx4/mlx4_en.h   |  5
 drivers/net/mlx4/mr.c        | 14
 5 files changed, 20 insertions(+), 132 deletions(-)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index e02bafdd3682..93f4abd990a9 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -668,7 +668,7 @@ int mlx4_en_start_port(struct net_device *dev)
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
 	priv->port_up = true;
-	netif_start_queue(dev);
+	netif_tx_start_all_queues(dev);
 	return 0;
 
 mac_err:
@@ -700,14 +700,14 @@ void mlx4_en_stop_port(struct net_device *dev)
 		en_dbg(DRV, priv, "stop port called while port already down\n");
 		return;
 	}
-	netif_stop_queue(dev);
 
 	/* Synchronize with tx routine */
 	netif_tx_lock_bh(dev);
-	priv->port_up = false;
+	netif_tx_stop_all_queues(dev);
 	netif_tx_unlock_bh(dev);
 
 	/* close port*/
+	priv->port_up = false;
 	mlx4_CLOSE_PORT(mdev->dev, priv->port);
 
 	/* Unregister Mac address for the port */
@@ -881,7 +881,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
 
 	cancel_delayed_work(&priv->stats_task);
-	cancel_delayed_work(&priv->refill_task);
 	/* flush any pending task for this netdev */
 	flush_workqueue(mdev->workqueue);
 
@@ -986,7 +985,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	spin_lock_init(&priv->stats_lock);
 	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
 	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
-	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
 	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
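
The en_netdev.c hunks move the driver from the single-queue netif_start_queue()/netif_stop_queue() helpers to their multiqueue counterparts, and stop every TX queue under the TX lock before port_up is cleared, so no transmit call can race with the teardown. A minimal sketch of that stop-side ordering follows; my_priv and my_close_port() are illustrative stand-ins, not the driver's real types:

#include <linux/netdevice.h>

/* Illustrative only: my_priv/my_close_port() stand in for the driver's
 * private struct and firmware CLOSE_PORT command. */
struct my_priv {
	bool port_up;
};

static void my_close_port(struct my_priv *priv) { /* firmware close */ }

static void my_stop_port(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* The TX lock serializes against ndo_start_xmit: once the queues
	 * are stopped and the lock dropped, no new packets can enter. */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	priv->port_up = false;	/* safe: no transmit in flight anymore */
	my_close_port(priv);
}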
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 5a14899c1e25..91bdfdfd431f 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -269,31 +269,6 @@ reduce_rings:
 	return 0;
 }
 
-static int mlx4_en_fill_rx_buf(struct net_device *dev,
-			       struct mlx4_en_rx_ring *ring)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	int num = 0;
-	int err;
-
-	while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
-		err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
-					      ring->size_mask);
-		if (err) {
-			if (netif_msg_rx_err(priv))
-				en_warn(priv, "Failed preparing rx descriptor\n");
-			priv->port_stats.rx_alloc_failed++;
-			break;
-		}
-		++num;
-		++ring->prod;
-	}
-	if ((u32) (ring->prod - ring->cons) == ring->actual_size)
-		ring->full = 1;
-
-	return num;
-}
-
 static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 				struct mlx4_en_rx_ring *ring)
 {
@@ -312,42 +287,6 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 	}
 }
 
-
-void mlx4_en_rx_refill(struct work_struct *work)
-{
-	struct delayed_work *delay = to_delayed_work(work);
-	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
-						 refill_task);
-	struct mlx4_en_dev *mdev = priv->mdev;
-	struct net_device *dev = priv->dev;
-	struct mlx4_en_rx_ring *ring;
-	int need_refill = 0;
-	int i;
-
-	mutex_lock(&mdev->state_lock);
-	if (!mdev->device_up || !priv->port_up)
-		goto out;
-
-	/* We only get here if there are no receive buffers, so we can't race
-	 * with Rx interrupts while filling buffers */
-	for (i = 0; i < priv->rx_ring_num; i++) {
-		ring = &priv->rx_ring[i];
-		if (ring->need_refill) {
-			if (mlx4_en_fill_rx_buf(dev, ring)) {
-				ring->need_refill = 0;
-				mlx4_en_update_rx_prod_db(ring);
-			} else
-				need_refill = 1;
-		}
-	}
-	if (need_refill)
-		queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
-
-out:
-	mutex_unlock(&mdev->state_lock);
-}
-
-
 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 			   struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
 {
@@ -457,9 +396,6 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 			ring_ind--;
 			goto err_allocator;
 		}
-
-		/* Fill Rx buffers */
-		ring->full = 0;
 	}
 	err = mlx4_en_fill_rx_buffers(priv);
 	if (err)
@@ -647,33 +583,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 	return skb;
 }
 
-static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
-			      struct mlx4_en_rx_ring *ring,
-			      int from, int to, int num)
-{
-	struct skb_frag_struct *skb_frags_from;
-	struct skb_frag_struct *skb_frags_to;
-	struct mlx4_en_rx_desc *rx_desc_from;
-	struct mlx4_en_rx_desc *rx_desc_to;
-	int from_index, to_index;
-	int nr, i;
-
-	for (i = 0; i < num; i++) {
-		from_index = (from + i) & ring->size_mask;
-		to_index = (to + i) & ring->size_mask;
-		skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
-		skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
-		rx_desc_from = ring->buf + (from_index << ring->log_stride);
-		rx_desc_to = ring->buf + (to_index << ring->log_stride);
-
-		for (nr = 0; nr < priv->num_frags; nr++) {
-			skb_frags_to[nr].page = skb_frags_from[nr].page;
-			skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
-			rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
-		}
-	}
-}
-
 
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
@@ -821,11 +730,6 @@ out:
 	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
 	ring->cons = cq->mcq.cons_index;
 	ring->prod += polled; /* Polled descriptors were realocated in place */
-	if (unlikely(!ring->full)) {
-		mlx4_en_copy_desc(priv, ring, ring->cons - polled,
-				  ring->prod - polled, polled);
-		mlx4_en_fill_rx_buf(dev, ring);
-	}
 	mlx4_en_update_rx_prod_db(ring);
 	return polled;
 }
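
The en_rx.c deletions all serve one point: because the completion path reallocates every polled descriptor in place (per the "Polled descriptors were realocated in place" comment above), the producer index advances in lockstep with the consumer, the ring cannot run dry behind the hardware's back, and the ring->full/need_refill bookkeeping plus the deferred refill_task worker become dead code. A self-contained toy model of that invariant, assuming illustrative names rather than the mlx4 structures:

#include <stdbool.h>
#include <stdint.h>

/* Toy RX ring: every completed slot is refilled before the consumer
 * index moves past it, so prod - cons stays constant and no deferred
 * refill pass is ever needed. All names here are illustrative. */
struct toy_rx_ring {
	uint32_t prod, cons, size_mask;
};

static bool toy_poll_one(struct toy_rx_ring *r, uint32_t slot)
{
	/* Stand-in for: check the CQE, deliver the packet, and post a
	 * fresh buffer into the same descriptor slot. */
	return false;
}

static int toy_process_rx(struct toy_rx_ring *r, int budget)
{
	int polled = 0;

	while (polled < budget &&
	       toy_poll_one(r, r->cons & r->size_mask)) {
		r->cons++;
		polled++;
	}
	r->prod += polled;	/* refilled in place: advance 1:1 */
	return polled;
}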
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 5dc7466ad035..08c43f2ae72b 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -515,16 +515,9 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 		else {
 			if (netif_msg_tx_err(priv))
 				en_warn(priv, "Non-linear headers\n");
-			dev_kfree_skb_any(skb);
 			return 0;
 		}
-		if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
-			if (netif_msg_tx_err(priv))
-				en_warn(priv, "LSO header size too big\n");
-			dev_kfree_skb_any(skb);
-			return 0;
-		}
 	} else {
 		*lso_header_size = 0;
 		if (!is_inline(skb, NULL))
@@ -616,13 +609,9 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	int lso_header_size;
 	void *fragptr;
 
-	if (unlikely(!skb->len)) {
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
 	real_size = get_real_size(skb, dev, &lso_header_size);
 	if (unlikely(!real_size))
-		return NETDEV_TX_OK;
+		goto tx_drop;
 
 	/* Allign descriptor to TXBB size */
 	desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -630,8 +619,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
 			en_warn(priv, "Oversized header or SG list\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
+		goto tx_drop;
 	}
 
 	tx_ind = skb->queue_mapping;
@@ -653,14 +641,6 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	/* Now that we know what Tx ring to use */
-	if (unlikely(!priv->port_up)) {
-		if (netif_msg_tx_err(priv))
-			en_warn(priv, "xmit: port down!\n");
-		dev_kfree_skb_any(skb);
-		return NETDEV_TX_OK;
-	}
-
 	/* Track current inflight packets for performance analysis */
 	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
 			 (u32) (ring->prod - ring->cons - 1));
@@ -785,5 +765,10 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	mlx4_en_xmit_poll(priv, tx_ind);
 
 	return 0;
+
+tx_drop:
+	dev_kfree_skb_any(skb);
+	priv->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
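
The en_tx.c hunks fold the scattered dev_kfree_skb_any()/return NETDEV_TX_OK pairs into a single tx_drop label that frees the skb, bumps the tx_dropped counter, and still reports NETDEV_TX_OK so the stack does not requeue an already-consumed packet. The shape of that pattern, sketched with a hypothetical validate() standing in for the driver's real size checks:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative ndo_start_xmit skeleton; validate() is a placeholder for
 * checks like get_real_size() and the TXBB descriptor limit, and the
 * stats counter placement here is illustrative too. */
static bool validate(const struct sk_buff *skb) { return skb->len != 0; }

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (unlikely(!validate(skb)))
		goto tx_drop;		/* every failure funnels here */

	/* ... build the descriptor and ring the doorbell ... */
	return NETDEV_TX_OK;

tx_drop:
	dev_kfree_skb_any(skb);		/* safe in any context */
	dev->stats.tx_dropped++;	/* drops are now accounted for */
	return NETDEV_TX_OK;		/* consumed: do not requeue */
}

A single exit label keeps the free/count/return triple in one place, so a new validation check cannot forget the skb free or the counter.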
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index d43a9e4c2aea..c7c5e86804ff 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -99,7 +99,6 @@
 #define RSS_FACTOR 2
 #define TXBB_SIZE 64
 #define HEADROOM (2048 / TXBB_SIZE + 1)
-#define MAX_LSO_HDR_SIZE 92
 #define STAMP_STRIDE 64
 #define STAMP_DWORDS (STAMP_STRIDE / 4)
 #define STAMP_SHIFT 31
@@ -296,8 +295,6 @@ struct mlx4_en_rx_ring {
 	u32 prod;
 	u32 cons;
 	u32 buf_size;
-	int need_refill;
-	int full;
 	void *buf;
 	void *rx_info;
 	unsigned long bytes;
@@ -495,7 +492,6 @@ struct mlx4_en_priv {
 	struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
 	struct work_struct mcast_task;
 	struct work_struct mac_task;
-	struct delayed_work refill_task;
 	struct work_struct watchdog_task;
 	struct work_struct linkstate_task;
 	struct delayed_work stats_task;
@@ -565,7 +561,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
 void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
 int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
-void mlx4_en_rx_refill(struct work_struct *work);
 void mlx4_en_rx_irq(struct mlx4_cq *mcq);
 
 int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 5887e4764d22..f96948be0a44 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -399,11 +399,14 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	if (!mtts)
 		return -ENOMEM;
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+				npages * sizeof (u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+	dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
 				npages * sizeof (u64), DMA_TO_DEVICE);
 
 	return 0;
 }
@@ -547,11 +550,14 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
 	/* Make sure MPT status is visible before writing MTT entries */
 	wmb();
 
+	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+				npages * sizeof(u64), DMA_TO_DEVICE);
+
 	for (i = 0; i < npages; ++i)
 		fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-	dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+	dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
 				npages * sizeof(u64), DMA_TO_DEVICE);
 
 	fmr->mpt->key = cpu_to_be32(key);
 	fmr->mpt->lkey = cpu_to_be32(key);
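
The mr.c change completes the streaming-DMA ownership handshake around the MTT table writes: the buffer is synced for_cpu before the CPU fills in the entries and for_device afterwards, where the old code issued only a for_cpu sync after writing, which does not guarantee the device observes the new entries on non-cache-coherent platforms. The generic bracket looks like this, with placeholder names rather than the mlx4 MTT variables:

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Illustrative CPU-write bracket for a streaming DMA_TO_DEVICE buffer;
 * fill_dma_table(), table, entries and n are placeholders. */
static void fill_dma_table(struct device *dev, dma_addr_t handle,
			   __be64 *table, const u64 *entries, int n)
{
	int i;

	/* Claim ownership for the CPU before touching the buffer. */
	dma_sync_single_for_cpu(dev, handle, n * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < n; ++i)
		table[i] = cpu_to_be64(entries[i]);

	/* Hand ownership back so the device sees the updated entries. */
	dma_sync_single_for_device(dev, handle, n * sizeof(u64), DMA_TO_DEVICE);
}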