Diffstat (limited to 'drivers/net/mlx4/en_netdev.c')
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 274
1 file changed, 198 insertions(+), 76 deletions(-)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index a0d8a26f5a02..61850adae6f7 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -69,6 +69,7 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
+	int idx;
 
 	if (!priv->vlgrp)
 		return;
@@ -83,7 +84,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 		if (err)
 			en_err(priv, "Failed configuring VLAN filter\n");
 	}
+	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
+		en_err(priv, "failed adding vlan %d\n", vid);
 	mutex_unlock(&mdev->state_lock);
+
 }
 
 static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
@@ -91,6 +95,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
+	int idx;
 
 	if (!priv->vlgrp)
 		return;
@@ -101,6 +106,11 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 
 	/* Remove VID from port VLAN filter */
 	mutex_lock(&mdev->state_lock);
+	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
+		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
+	else
+		en_err(priv, "could not find vid %d in cache\n", vid);
+
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
@@ -109,7 +119,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	mutex_unlock(&mdev->state_lock);
 }
 
-static u64 mlx4_en_mac_to_u64(u8 *addr)
+u64 mlx4_en_mac_to_u64(u8 *addr)
 {
 	u64 mac = 0;
 	int i;
@@ -146,9 +156,8 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		/* Remove old MAC and insert the new one */
-		mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
-		err = mlx4_register_mac(mdev->dev, priv->port,
-					priv->mac, &priv->mac_index);
+		err = mlx4_replace_mac(mdev->dev, priv->port,
+				       priv->base_qpn, priv->mac, 0);
 		if (err)
 			en_err(priv, "Failed changing HW MAC address\n");
 	} else
@@ -204,6 +213,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 	u64 mcast_addr = 0;
+	u8 mc_list[16] = {0};
 	int err;
 
 	mutex_lock(&mdev->state_lock);
@@ -229,11 +239,15 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
-			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-						     priv->base_qpn, 1);
+			if (!mdev->dev->caps.vep_uc_steering)
+				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+							     priv->base_qpn, 1);
+			else
+				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+							       priv->port);
 			if (err)
 				en_err(priv, "Failed enabling "
-					"promiscous mode\n");
+					"promiscuous mode\n");
 
 			/* Disable port multicast filter (unconditionally) */
 			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -242,16 +256,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 				en_err(priv, "Failed disabling "
 					"multicast filter\n");
 
-			/* Disable port VLAN filter */
-			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
-			if (err)
-				en_err(priv, "Failed disabling VLAN filter\n");
+			/* Add the default qp number as multicast promisc */
+			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+								 priv->port);
+				if (err)
+					en_err(priv, "Failed entering multicast promisc mode\n");
+				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+			}
+
+			if (priv->vlgrp) {
+				/* Disable port VLAN filter */
+				err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+				if (err)
+					en_err(priv, "Failed disabling VLAN filter\n");
+			}
 		}
 		goto out;
 	}
 
 	/*
-	 * Not in promiscous mode
+	 * Not in promiscuous mode
 	 */
 
 	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
@@ -260,10 +285,23 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
-		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
-					     priv->base_qpn, 0);
+		if (!mdev->dev->caps.vep_uc_steering)
+			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+						     priv->base_qpn, 0);
+		else
+			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+							  priv->port);
 		if (err)
-			en_err(priv, "Failed disabling promiscous mode\n");
+			en_err(priv, "Failed disabling promiscuous mode\n");
+
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscuous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
 
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
@@ -277,14 +315,38 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
+
+		/* Add the default qp number as multicast promisc */
+		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
+							 priv->port);
+			if (err)
+				en_err(priv, "Failed entering multicast promisc mode\n");
+			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+		}
 	} else {
 		int i;
+		/* Disable Multicast promisc */
+		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
+							    priv->port);
+			if (err)
+				en_err(priv, "Failed disabling multicast promiscuous mode\n");
+			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+		}
 
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
 
+		/* Detach our qp from all the multicast addresses */
+		for (i = 0; i < priv->mc_addrs_cnt; i++) {
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, MLX4_PROT_ETH);
+		}
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
 				    1, MLX4_MCAST_CONFIG);
@@ -297,6 +359,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		for (i = 0; i < priv->mc_addrs_cnt; i++) {
 			mcast_addr =
 				mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
+			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+			mc_list[5] = priv->port;
+			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
+					      mc_list, 0, MLX4_PROT_ETH);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
 		}
@@ -304,8 +370,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
 			en_err(priv, "Failed enabling multicast filter\n");
-
-		mlx4_en_clear_list(dev);
 	}
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -407,7 +471,6 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	unsigned long avg_pkt_size;
 	unsigned long rx_packets;
 	unsigned long rx_bytes;
-	unsigned long rx_byte_diff;
 	unsigned long tx_packets;
 	unsigned long tx_pkt_diff;
 	unsigned long rx_pkt_diff;
@@ -431,25 +494,20 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 	rx_pkt_diff = ((unsigned long) (rx_packets -
 					priv->last_moder_packets));
 	packets = max(tx_pkt_diff, rx_pkt_diff);
-	rx_byte_diff = rx_bytes - priv->last_moder_bytes;
-	rx_byte_diff = rx_byte_diff ? rx_byte_diff : 1;
 	rate = packets * HZ / period;
 	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
 				 priv->last_moder_bytes)) / packets : 0;
 
 	/* Apply auto-moderation only when packet rate exceeds a rate that
 	 * it matters */
-	if (rate > MLX4_EN_RX_RATE_THRESH) {
+	if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
 		/* If tx and rx packet rates are not balanced, assume that
 		 * traffic is mainly BW bound and apply maximum moderation.
 		 * Otherwise, moderate according to packet rate */
-		if (2 * tx_pkt_diff > 3 * rx_pkt_diff &&
-		    rx_pkt_diff / rx_byte_diff <
-		    MLX4_EN_SMALL_PKT_SIZE)
-			moder_time = priv->rx_usecs_low;
-		else if (2 * rx_pkt_diff > 3 * tx_pkt_diff)
+		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
 			moder_time = priv->rx_usecs_high;
-		else {
+		} else {
 			if (rate < priv->pkt_rate_low)
 				moder_time = priv->rx_usecs_low;
 			else if (rate > priv->pkt_rate_high)
@@ -461,9 +519,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 					priv->rx_usecs_low;
 		}
 	} else {
-		/* When packet rate is low, use default moderation rather than
-		 * 0 to prevent interrupt storms if traffic suddenly increases */
-		moder_time = priv->rx_usecs;
+		moder_time = priv->rx_usecs_low;
 	}
 
 	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
@@ -513,6 +569,10 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	}
+	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
+		queue_work(mdev->workqueue, &priv->mac_task);
+		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
+	}
 	mutex_unlock(&mdev->state_lock);
 }
 
@@ -528,10 +588,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	 * report to system log */
 	if (priv->last_link_state != linkstate) {
 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			en_dbg(LINK, priv, "Link Down\n");
+			en_info(priv, "Link Down\n");
 			netif_carrier_off(priv->dev);
 		} else {
-			en_dbg(LINK, priv, "Link Up\n");
+			en_info(priv, "Link Up\n");
 			netif_carrier_on(priv->dev);
 		}
 	}
@@ -551,6 +611,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	int err = 0;
 	int i;
 	int j;
+	u8 mc_list[16] = {0};
+	char name[32];
 
 	if (priv->port_up) {
 		en_dbg(DRV, priv, "start port called while port already up\n");
@@ -589,16 +651,35 @@ int mlx4_en_start_port(struct net_device *dev)
 		++rx_index;
 	}
 
+	/* Set port mac number */
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	err = mlx4_register_mac(mdev->dev, priv->port,
+				priv->mac, &priv->base_qpn, 0);
+	if (err) {
+		en_err(priv, "Failed setting port mac\n");
+		goto cq_err;
+	}
+	mdev->mac_removed[priv->port] = 0;
+
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
 		en_err(priv, "Failed configuring rss steering\n");
-		goto cq_err;
+		goto mac_err;
 	}
 
+	if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
+		sprintf(name , "%s-tx", priv->dev->name);
+		if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
+			mlx4_warn(mdev, "Failed Assigning an EQ to "
+					"%s_tx ,Falling back to legacy "
+					"EQ's\n", priv->dev->name);
+		}
+	}
 	/* Configure tx cq's and rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
 		/* Configure cq */
 		cq = &priv->tx_cq[i];
+		cq->vector = priv->tx_vector;
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
 			en_err(priv, "Failed allocating Tx CQ\n");
@@ -645,23 +726,25 @@ int mlx4_en_start_port(struct net_device *dev)
 		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
-	/* Set port mac number */
-	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
-	err = mlx4_register_mac(mdev->dev, priv->port,
-				priv->mac, &priv->mac_index);
-	if (err) {
-		en_err(priv, "Failed setting port mac\n");
-		goto tx_err;
-	}
 
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
 		en_err(priv, "Failed Initializing port\n");
-		goto mac_err;
+		goto tx_err;
 	}
 
+	/* Attach rx QP to bradcast address */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+				  0, MLX4_PROT_ETH))
+		mlx4_warn(mdev, "Failed Attaching Broadcast\n");
+
+	/* Must redo promiscuous mode setup. */
+	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+
 	/* Schedule multicast task to populate multicast list */
 	queue_work(mdev->workqueue, &priv->mcast_task);
 
@@ -669,8 +752,6 @@ int mlx4_en_start_port(struct net_device *dev)
 	netif_tx_start_all_queues(dev);
 	return 0;
 
-mac_err:
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
 tx_err:
 	while (tx_index--) {
 		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
@@ -678,6 +759,8 @@ tx_err:
 	}
 
 	mlx4_en_release_rss_steer(priv);
+mac_err:
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
 cq_err:
 	while (rx_index--)
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -693,6 +776,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int i;
+	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
 		en_dbg(DRV, priv, "stop port called while port already down\n");
@@ -704,12 +788,27 @@ void mlx4_en_stop_port(struct net_device *dev)
 	netif_tx_stop_all_queues(dev);
 	netif_tx_unlock_bh(dev);
 
-	/* close port*/
+	/* Set port as not active */
 	priv->port_up = false;
-	mlx4_CLOSE_PORT(mdev->dev, priv->port);
+
+	/* Detach All multicasts */
+	memset(&mc_list[10], 0xff, ETH_ALEN);
+	mc_list[5] = priv->port;
+	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
+			      MLX4_PROT_ETH);
+	for (i = 0; i < priv->mc_addrs_cnt; i++) {
+		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+		mc_list[5] = priv->port;
+		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
+				      mc_list, MLX4_PROT_ETH);
+	}
+	mlx4_en_clear_list(dev);
+	/* Flush multicast filter */
+	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
 
 	/* Unregister Mac address for the port */
-	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+	mdev->mac_removed[priv->port] = 1;
 
 	/* Free TX Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -731,6 +830,9 @@ void mlx4_en_stop_port(struct net_device *dev)
 		msleep(1);
 		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
 	}
+
+	/* close port*/
+	mlx4_CLOSE_PORT(mdev->dev, priv->port);
 }
 
 static void mlx4_en_restart(struct work_struct *work)
@@ -783,7 +885,6 @@ static int mlx4_en_open(struct net_device *dev)
 		priv->rx_ring[i].packets = 0;
 	}
 
-	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
 		en_err(priv, "Failed starting port:%d\n", priv->port);
@@ -810,7 +911,7 @@ static int mlx4_en_close(struct net_device *dev)
 	return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors)
 {
 	int i;
 
@@ -818,14 +919,14 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 		if (priv->tx_ring[i].tx_info)
 			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
 		if (priv->tx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		if (priv->rx_ring[i].rx_info)
 			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
 		if (priv->rx_cq[i].buf)
-			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+			mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors);
 	}
 }
 
@@ -833,6 +934,13 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
+	int base_tx_qpn, err;
+
+	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn);
+	if (err) {
+		en_err(priv, "failed reserving range for TX rings\n");
+		return err;
+	}
 
 	/* Create tx Rings */
 	for (i = 0; i < priv->tx_ring_num; i++) {
@@ -840,7 +948,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 				      prof->tx_ring_size, i, TX))
 			goto err;
 
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i,
 					   prof->tx_ring_size, TXBB_SIZE))
 			goto err;
 	}
@@ -860,6 +968,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 
 err:
 	en_err(priv, "Failed to allocate NIC resources\n");
+	mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num);
 	return -ENOMEM;
 }
 
@@ -887,7 +996,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mdev->pndev[priv->port] = NULL;
 	mutex_unlock(&mdev->state_lock);
 
-	mlx4_en_free_resources(priv);
+	mlx4_en_free_resources(priv, false);
 	free_netdev(dev);
 }
 
@@ -914,7 +1023,6 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 	} else {
 		mlx4_en_stop_port(dev);
-		mlx4_en_set_default_moderation(priv);
 		err = mlx4_en_start_port(dev);
 		if (err) {
 			en_err(priv, "Failed restarting port:%d\n",
@@ -954,7 +1062,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	int i;
 	int err;
 
-	dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num);
+	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
+				 prof->tx_ring_num, prof->rx_ring_num);
 	if (dev == NULL) {
 		mlx4_err(mdev, "Net device allocation failed\n");
 		return -ENOMEM;
@@ -974,7 +1083,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->prof = prof;
 	priv->port = port;
 	priv->port_up = false;
-	priv->rx_csum = 1;
 	priv->flags = prof->flags;
 	priv->tx_ring_num = prof->tx_ring_num;
 	priv->rx_ring_num = prof->rx_ring_num;
@@ -1017,35 +1125,31 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	 */
 	dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
-	dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS;
+	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
 	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
 
 	/* Set defualt MAC */
 	dev->addr_len = ETH_ALEN;
-	for (i = 0; i < ETH_ALEN; i++)
-		dev->dev_addr[ETH_ALEN - 1 - i] =
-		(u8) (priv->mac >> (8 * i));
+	for (i = 0; i < ETH_ALEN; i++) {
+		dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+		dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i));
+	}
 
 	/*
 	 * Set driver features
 	 */
-	dev->features |= NETIF_F_SG;
-	dev->vlan_features |= NETIF_F_SG;
-	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-	dev->features |= NETIF_F_HIGHDMA;
-	dev->features |= NETIF_F_HW_VLAN_TX |
-			 NETIF_F_HW_VLAN_RX |
-			 NETIF_F_HW_VLAN_FILTER;
-	if (mdev->profile.num_lro)
-		dev->features |= NETIF_F_LRO;
-	if (mdev->LSO_support) {
-		dev->features |= NETIF_F_TSO;
-		dev->features |= NETIF_F_TSO6;
-		dev->vlan_features |= NETIF_F_TSO;
-		dev->vlan_features |= NETIF_F_TSO6;
-	}
+	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	if (mdev->LSO_support)
+		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+	dev->vlan_features = dev->hw_features;
+
+	dev->hw_features |= NETIF_F_RXCSUM;
+	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+			NETIF_F_HW_VLAN_FILTER;
 
 	mdev->pndev[port] = dev;
 
@@ -1059,7 +1163,25 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
+	/* Configure port */
+	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+				    MLX4_EN_MIN_MTU,
+				    0, 0, 0, 0);
+	if (err) {
+		en_err(priv, "Failed setting port general configurations "
+		       "for port %d, with error %d\n", priv->port, err);
+		goto out;
+	}
+
+	/* Init port */
+	en_warn(priv, "Initializing port\n");
+	err = mlx4_INIT_PORT(mdev->dev, priv->port);
+	if (err) {
+		en_err(priv, "Failed Initializing port\n");
+		goto out;
+	}
 	priv->registered = 1;
+	mlx4_en_set_default_moderation(priv);
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
 