Diffstat (limited to 'drivers/net/mlx4/en_netdev.c')
-rw-r--r--	drivers/net/mlx4/en_netdev.c	199
1 files changed, 152 insertions, 47 deletions
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 897f576b8b1..5762ebde445 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -156,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
-		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
-		err = mlx4_register_mac(mdev->dev, priv->port,
-					priv->mac, &priv->mac_index);
+		err = mlx4_replace_mac(mdev->dev, priv->port,
+				       priv->base_qpn, priv->mac, 0);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
 	} else
@@ -214,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 	u64 mcast_addr = 0;
+	u8 mc_list[16] = {0};
 	int err;
 
 	mutex_lock(&mdev->state_lock);
@@ -239,8 +239,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 		/* Enable promiscouos mode */
-		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-					     priv->base_qpn, 1);
+		if (!mdev->dev->caps.vep_uc_steering)
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 1);
+		else
+			err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+						       priv->port);
 		if (err)
 			en_err(priv, "Failed enabling "
 				"promiscous mode\n");
@@ -252,10 +256,21 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 			en_err(priv, "Failed disabling "
 				"multicast filter\n");
 
-		/* Disable port VLAN filter */
-		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
-		if (err)
-			en_err(priv, "Failed disabling VLAN filter\n");
+		/* Add the default qp number as multicast promisc */
+		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+							 priv->port);
+			if (err)
+				en_err(priv, "Failed entering multicast promisc mode\n");
+			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+		}
+
+		if (priv->vlgrp) {
+			/* Disable port VLAN filter */
+			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+			if (err)
+				en_err(priv, "Failed disabling VLAN filter\n");
+		}
 	}
 	goto out;
 }
@@ -270,11 +285,24 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
-		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-					     priv->base_qpn, 0);
+		if (!mdev->dev->caps.vep_uc_steering)
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 0);
+		else
+			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+							  priv->port);
 		if (err)
 			en_err(priv, "Failed disabling promiscous mode\n");
 
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
+
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
@@ -287,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
+
+		/* Add the default qp number as multicast promisc */
+		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+							 priv->port);
+			if (err)
+				en_err(priv, "Failed entering multicast promisc mode\n");
+			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+		}
 	} else {
 		int i;
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
 
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
 
+		/* Detach our qp from all the multicast addresses */
+		for (i = 0; i < priv->mc_addrs_cnt; i++) {
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, MLX4_PROT_ETH);
+		}
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
 				    1, MLX4_MCAST_CONFIG);
@@ -307,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		for (i = 0; i < priv->mc_addrs_cnt; i++) {
 			mcast_addr =
 				mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, 0, MLX4_PROT_ETH);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
 		}
@@ -314,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 				    0, MLX4_MCAST_ENABLE);
 		if (err)
 			en_err(priv, "Failed enabling multicast filter\n");
-
-		mlx4_en_clear_list(dev);
 	}
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -417,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	unsigned long avg_pkt_size;
 	unsigned long rx_packets;
 	unsigned long rx_bytes;
-	unsigned long rx_byte_diff;
 	unsigned long tx_packets;
 	unsigned long tx_pkt_diff;
 	unsigned long rx_pkt_diff;
@@ -441,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	rx_pkt_diff = ((unsigned long) (rx_packets -
 			priv->last_moder_packets));
 	packets = max(tx_pkt_diff, rx_pkt_diff);
-	rx_byte_diff = rx_bytes - priv->last_moder_bytes;
-	rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
 	rate = packets * HZ / period;
 	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
 				 priv->last_moder_bytes)) / packets : 0;
 
 	/* Apply auto-moderation only when packet rate exceeds a rate that
 	 * it matters */
-	if (rate > MLX4_EN_RX_RATE_THRESH) {
+	if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
 		/* If tx and rx packet rates are not balanced, assume that
 		 * traffic is mainly BW bound and apply maximum moderation.
 		 * Otherwise, moderate according to packet rate */
-		if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
-		    rx_pkt_diff / rx_byte_diff <
-		    MLX4_EN_SMALL_PKT_SIZE)
-			moder_time = priv->rx_usecs_low;
-		else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
+		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
 			moder_time = priv->rx_usecs_high;
-		else {
+		} else {
 			if (rate < priv->pkt_rate_low)
 				moder_time = priv->rx_usecs_low;
 			else if (rate > priv->pkt_rate_high)
@@ -471,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 					priv->rx_usecs_low;
 		}
 	} else {
-		/* When packet rate is low, use default moderation rather than
-		 * 0 to prevent interrupt storms if traffic suddenly increases */
-		moder_time = priv->rx_usecs;
+		moder_time = priv->rx_usecs_low;
 	}
 
 	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -565,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	int err = 0;
 	int i;
 	int j;
+	u8 mc_list[16] = {0};
+	char name[32];
 
 	if (priv->port_up) {
 		en_dbg(DRV, priv, "start port called while port already up\n");
@@ -603,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
 		++rx_index;
 	}
 
+	/* Set port mac number */
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn, 0);
+	if (err) {
+		en_err(priv, "Failed setting port mac\n");
+		goto cq_err;
+	}
+	mdev->mac_removed[priv->port] = 0;
+
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		en_err(priv, "Failed configuring rss steering\n");
-		goto cq_err;
+		goto mac_err;
 	}
 
+	if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+		sprintf(name , "%s-tx", priv->dev->name);
+		if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
+			mlx4_warn(mdev, "Failed Assigning an EQ to "
+					"%s_tx ,Falling back to legacy "
+					"EQ's\n", priv->dev->name);
+		}
+	}
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
 		cq = &priv->tx_cq[i];
+		cq->vector = priv->tx_vector;
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -659,24 +726,22 @@ int mlx4_en_start_port(struct net_device *dev)
 		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
-	/* Set port mac number */
-	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->mac_index);
-	if (err) {
-		en_err(priv, "Failed setting port mac\n");
-		goto tx_err;
-	}
-	mdev->mac_removed[priv->port] = 0;
 
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
 		en_err(priv, "Failed Initializing port\n");
-		goto mac_err;
+		goto tx_err;
 	}
 
+	/* Attach rx QP to bradcast address */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+				  0, MLX4_PROT_ETH))
+		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
@@ -684,8 +749,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	netif_tx_start_all_queues(dev);
 	return 0;
 
-mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
 tx_err:
 	while (tx_index--) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -693,6 +756,8 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -708,6 +773,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int i;
+	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
 		en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -722,8 +788,23 @@ void mlx4_en_stop_port(struct net_device *dev)
 	/* Set port as not active */
 	priv->port_up = false;
 
+	/* Detach All multicasts */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+			      MLX4_PROT_ETH);
+	for (i = 0; i < priv->mc_addrs_cnt; i++) {
+		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+		mc_list[5] = priv->port;
+		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+				      mc_list, MLX4_PROT_ETH);
+	}
+	mlx4_en_clear_list(dev);
+	/* Flush multicast filter */
+	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+
 	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 	mdev->mac_removed[priv->port] = 1;
 
 	/* Free TX Rings */
@@ -801,7 +882,6 @@ static int mlx4_en_open(struct net_device *dev)
 		priv->rx_ring[i].packets = 0;
 	}
 
-	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
 		en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -828,7 +908,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
 {
 	int i;
 
@@ -836,14 +916,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 		if (priv->tx_ring[i].tx_info)
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
 		if (priv->tx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		if (priv->rx_ring[i].rx_info)
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
 		if (priv->rx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
 	}
 }
 
@@ -851,6 +931,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
+	int base_tx_qpn, err;
+
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	if (err) {
+		en_err(priv, "failed reserving range for TX rings\n");
+		return err;
+	}
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -858,7 +945,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -878,6 +965,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -905,7 +993,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mdev->pndev[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, false);
 	free_netdev(dev);
 }
 
@@ -932,7 +1020,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
 		mlx4_en_stop_port(dev);
-		mlx4_en_set_default_moderation(priv);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
@@ -1079,7 +1166,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
+	/* Configure port */
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    MLX4_EN_MIN_MTU,
+				    0, 0, 0, 0);
+	if (err) {
+		en_err(priv, "Failed setting port general configurations "
+		       "for port %d, with error %d\n", priv->port, err);
+		goto out;
+	}
+
+	/* Init port */
+	en_warn(priv, "Initializing port\n");
+	err = mlx4_INIT_PORT(mdev->dev, priv->port);
+	if (err) {
+		en_err(priv, "Failed Initializing port\n");
+		goto out;
+	}
 	priv->registered = 1;
+	mlx4_en_set_default_moderation(priv);
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
 