Diffstat (limited to 'drivers/net/mlx4')
-rw-r--r--	drivers/net/mlx4/en_netdev.c	162
-rw-r--r--	drivers/net/mlx4/en_params.c	6
-rw-r--r--	drivers/net/mlx4/en_rx.c	78
-rw-r--r--	drivers/net/mlx4/en_tx.c	39
-rw-r--r--	drivers/net/mlx4/mlx4_en.h	30
5 files changed, 159 insertions(+), 156 deletions(-)
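
The diffs below convert the mlx4_en driver from the device-wide mlx4_err/mlx4_warn/mlx4_dbg logging macros, which tag every message with the PCI device name, to new per-port en_err/en_warn/en_dbg macros that take the interface's mlx4_en_priv and tag messages with the netdev name once the interface is registered. A minimal userspace sketch of the resulting message format follows (illustrative only: fake_priv and the sample names are stand-ins, not driver API):

#include <stdio.h>

#define DRV_NAME "mlx4_en"

/* Stand-in for the mlx4_en_priv fields the new macros read. */
struct fake_priv {
	int registered;        /* set once register_netdev() succeeds */
	const char *ifname;    /* netdev name, e.g. "eth0" */
	const char *pci_name;  /* PCI device name, e.g. "0000:07:00.0" */
	int port;              /* physical port number */
};

/* Userspace approximation of the en_print() dispatch added in mlx4_en.h:
 * a registered interface is tagged by netdev name, an unregistered one
 * by PCI device name plus port number. */
#define en_print(level, priv, fmt, ...)					\
	do {								\
		if ((priv)->registered)					\
			printf(level "%s: %s: " fmt, DRV_NAME,		\
			       (priv)->ifname, ##__VA_ARGS__);		\
		else							\
			printf(level "%s: %s: Port %d: " fmt, DRV_NAME,	\
			       (priv)->pci_name, (priv)->port,		\
			       ##__VA_ARGS__);				\
	} while (0)

int main(void)
{
	struct fake_priv priv = { 0, "eth0", "0000:07:00.0", 1 };

	/* Prints "ERR mlx4_en: 0000:07:00.0: Port 1: ..." */
	en_print("ERR ", &priv, "Failed activating Rx CQ\n");
	priv.registered = 1;
	/* Prints "ERR mlx4_en: eth0: ..." */
	en_print("ERR ", &priv, "Failed activating Rx CQ\n");
	return 0;
}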
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 0cd185a2e089..fea65e78d6de 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err;
 
-	mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+	en_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
 	priv->vlgrp = grp;
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+	en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
 	       vid, vlan_group_get_device(priv->vlgrp, vid));
 
 	/* Add VID to port VLAN filter */
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (!priv->vlgrp)
 		return;
 
-	mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
-		 "entry:%p)\n", vid, priv->vlgrp,
-		 vlan_group_get_device(priv->vlgrp, vid));
+	en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n",
+	       vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid));
 	vlan_group_set_device(priv->vlgrp, vid, NULL);
 
 	/* Remove VID from port VLAN filter */
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 	if (mdev->device_up && priv->port_up) {
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed configuring VLAN filter\n");
+			en_err(priv, "Failed configuring VLAN filter\n");
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 		err = mlx4_register_mac(mdev->dev, priv->port,
 					priv->mac, &priv->mac_index);
 		if (err)
-			mlx4_err(mdev, "Failed changing HW MAC address\n");
+			en_err(priv, "Failed changing HW MAC address\n");
 	} else
-		mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+		en_dbg(HW, priv, "Port is down while "
+				 "registering mac, exiting...\n");
 
 	mutex_unlock(&mdev->state_lock);
 }
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev)
 static void mlx4_en_cache_mclist(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct dev_mc_list *mclist;
 	struct dev_mc_list *tmp;
 	struct dev_mc_list *plist = NULL;
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
 		if (!tmp) {
-			mlx4_err(mdev, "failed to allocate multicast list\n");
+			en_err(priv, "failed to allocate multicast list\n");
 			mlx4_en_clear_list(dev);
 			return;
 		}
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	mutex_lock(&mdev->state_lock);
 	if (!mdev->device_up) {
-		mlx4_dbg(HW, priv, "Card is not up, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Card is not up, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 	if (!priv->port_up) {
-		mlx4_dbg(HW, priv, "Port is down, ignoring "
-				   "multicast change.\n");
+		en_dbg(HW, priv, "Port is down, "
+				 "ignoring multicast change.\n");
 		goto out;
 	}
 
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 	if (dev->flags & IFF_PROMISC) {
 		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
 			if (netif_msg_rx_status(priv))
-				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
-					  priv->port);
+				en_warn(priv, "Entering promiscuous mode\n");
 			priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 			/* Enable promiscouos mode */
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 1);
 			if (err)
-				mlx4_err(mdev, "Failed enabling "
+				en_err(priv, "Failed enabling "
 					     "promiscous mode\n");
 
 			/* Disable port multicast filter (unconditionally) */
 			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 						  0, MLX4_MCAST_DISABLE);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
+				en_err(priv, "Failed disabling "
 					     "multicast filter\n");
 
 			/* Disable port VLAN filter */
 			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
 			if (err)
-				mlx4_err(mdev, "Failed disabling "
-					 "VLAN filter\n");
+				en_err(priv, "Failed disabling VLAN filter\n");
 		}
 		goto out;
 	}
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 
 	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
 		if (netif_msg_rx_status(priv))
-			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
-				  priv->port);
+			en_warn(priv, "Leaving promiscuous mode\n");
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
 		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 					     priv->base_qpn, 0);
 		if (err)
-			mlx4_err(mdev, "Failed disabling promiscous mode\n");
+			en_err(priv, "Failed disabling promiscous mode\n");
 
 		/* Enable port VLAN filter */
 		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
 		if (err)
-			mlx4_err(mdev, "Failed enabling VLAN filter\n");
+			en_err(priv, "Failed enabling VLAN filter\n");
 	}
 
 	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 	} else {
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
-			mlx4_err(mdev, "Failed disabling multicast filter\n");
+			en_err(priv, "Failed disabling multicast filter\n");
 
 		/* Flush mcast filter and init it with broadcast address */
 		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_ENABLE);
 		if (err)
-			mlx4_err(mdev, "Failed enabling multicast filter\n");
+			en_err(priv, "Failed enabling multicast filter\n");
 
 		mlx4_en_clear_list(dev);
 	}
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 
 	if (netif_msg_timer(priv))
-		mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
 	priv->port_stats.tx_timeout++;
-	mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+	en_dbg(DRV, priv, "Scheduling watchdog\n");
 	queue_work(mdev->workqueue, &priv->watchdog_task);
 }
 
@@ -378,8 +374,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 	 */
 	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1;
 	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
-	mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+	en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
 		 "rx_frames:%d rx_usecs:%d\n",
 		 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
 
 	/* Setup cq moderation params */
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 {
 	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_cq *cq;
 	unsigned long packets;
 	unsigned long rate;
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 		moder_time = priv->rx_usecs;
 	}
 
-	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
 	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
 
-	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
 	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
 	       priv->last_moder_time, moder_time, period, packets,
 	       avg_pkt_size, rate);
 
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
 			cq->moder_time = moder_time;
 			err = mlx4_en_set_cq_moder(priv, cq);
 			if (err) {
-				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
-					 "on port:%d\n", i, priv->port);
+				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
 				break;
 			}
 		}
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
 	if (err)
-		mlx4_dbg(HW, priv, "Could not update stats for "
-			 "port:%d\n", priv->port);
+		en_dbg(HW, priv, "Could not update stats \n");
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	 * report to system log */
 	if (priv->last_link_state != linkstate) {
 		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link down\n", priv->port);
+			en_dbg(LINK, priv, "Link Down\n");
 			netif_carrier_off(priv->dev);
 		} else {
-			if (netif_msg_link(priv))
-				mlx4_info(mdev, "Port %d - link up\n", priv->port);
+			en_dbg(LINK, priv, "Link Up\n");
 			netif_carrier_on(priv->dev);
 		}
 	}
@@ -563,19 +554,19 @@ int mlx4_en_start_port(struct net_device *dev)
 	int j;
 
 	if (priv->port_up) {
-		mlx4_dbg(DRV, priv, "start port called while port already up\n");
+		en_dbg(DRV, priv, "start port called while port already up\n");
 		return 0;
 	}
 
 	/* Calculate Rx buf size */
 	dev->mtu = min(dev->mtu, priv->max_mtu);
 	mlx4_en_calc_rx_buf(dev);
-	mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
 
 	/* Configure rx cq's and rings */
 	err = mlx4_en_activate_rx_rings(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed to activate RX rings\n");
+		en_err(priv, "Failed to activate RX rings\n");
 		return err;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -583,14 +574,14 @@ int mlx4_en_start_port(struct net_device *dev)
 
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed activating Rx CQ\n");
+			en_err(priv, "Failed activating Rx CQ\n");
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
 			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto cq_err;
 		}
@@ -601,7 +592,7 @@ int mlx4_en_start_port(struct net_device *dev)
 
 	err = mlx4_en_config_rss_steer(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed configuring rss steering\n");
+		en_err(priv, "Failed configuring rss steering\n");
 		goto cq_err;
 	}
 
@@ -611,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev)
 		cq = &priv->tx_cq[i];
 		err = mlx4_en_activate_cq(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx CQ\n");
+			en_err(priv, "Failed allocating Tx CQ\n");
 			goto tx_err;
 		}
 		err = mlx4_en_set_cq_moder(priv, cq);
 		if (err) {
-			mlx4_err(mdev, "Failed setting cq moderation parameters");
+			en_err(priv, "Failed setting cq moderation parameters");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
-		mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
 		cq->buf->wqe_index = cpu_to_be16(0xffff);
 
 		/* Configure ring */
@@ -628,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
 					       priv->rx_ring[0].srq.srqn);
 		if (err) {
-			mlx4_err(mdev, "Failed allocating Tx ring\n");
+			en_err(priv, "Failed allocating Tx ring\n");
 			mlx4_en_deactivate_cq(priv, cq);
 			goto tx_err;
 		}
@@ -646,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev)
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port general configurations"
-			 " for port %d, with error %d\n", priv->port, err);
+		en_err(priv, "Failed setting port general configurations "
+			     "for port %d, with error %d\n", priv->port, err);
 		goto tx_err;
 	}
 	/* Set default qp number */
 	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
 	if (err) {
-		mlx4_err(mdev, "Failed setting default qp numbers\n");
+		en_err(priv, "Failed setting default qp numbers\n");
 		goto tx_err;
 	}
 	/* Set port mac number */
-	mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
 	err = mlx4_register_mac(mdev->dev, priv->port,
 				priv->mac, &priv->mac_index);
 	if (err) {
-		mlx4_err(mdev, "Failed setting port mac\n");
+		en_err(priv, "Failed setting port mac\n");
 		goto tx_err;
 	}
 
 	/* Init port */
-	mlx4_dbg(HW, priv, "Initializing port\n");
+	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
 	if (err) {
-		mlx4_err(mdev, "Failed Initializing port\n");
+		en_err(priv, "Failed Initializing port\n");
 		goto mac_err;
 	}
 
@@ -706,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev)
 	int i;
 
 	if (!priv->port_up) {
-		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
-			 priv->port);
+		en_dbg(DRV, priv, "stop port called while port already down\n");
 		return;
 	}
 	netif_stop_queue(dev);
@@ -752,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
 
-	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
 
 	mutex_lock(&mdev->state_lock);
 	if (priv->port_up) {
 		mlx4_en_stop_port(dev);
 		if (mlx4_en_start_port(dev))
-			mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+			en_err(priv, "Failed restarting port %d\n", priv->port);
 	}
 	mutex_unlock(&mdev->state_lock);
 }
@@ -774,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev)
 	mutex_lock(&mdev->state_lock);
 
 	if (!mdev->device_up) {
-		mlx4_err(mdev, "Cannot open - device down/disabled\n");
+		en_err(priv, "Cannot open - device down/disabled\n");
 		err = -EBUSY;
 		goto out;
 	}
 
 	/* Reset HW statistics and performance counters */
 	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
-		mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+		en_dbg(HW, priv, "Failed dumping statistics\n");
 
 	memset(&priv->stats, 0, sizeof(priv->stats));
 	memset(&priv->pstats, 0, sizeof(priv->pstats));
@@ -798,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev)
 	mlx4_en_set_default_moderation(priv);
 	err = mlx4_en_start_port(dev);
 	if (err)
-		mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+		en_err(priv, "Failed starting port:%d\n", priv->port);
 
 out:
 	mutex_unlock(&mdev->state_lock);
@@ -811,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	if (netif_msg_ifdown(priv))
-		mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+	en_dbg(IFDOWN, priv, "Close port called\n");
 
 	mutex_lock(&mdev->state_lock);
 
@@ -844,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile *prof = priv->prof;
 	int i;
 
@@ -873,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 	return 0;
 
 err:
-	mlx4_err(mdev, "Failed to allocate NIC resources\n");
+	en_err(priv, "Failed to allocate NIC resources\n");
 	return -ENOMEM;
 }
 
@@ -883,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
 
-	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
 	/* Unregister device - this will close the port if it was up */
 	if (priv->registered)
@@ -912,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	int err = 0;
 
-	mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
 		 dev->mtu, new_mtu);
 
 	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
-		mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
 		return -EPERM;
 	}
 	dev->mtu = new_mtu;
@@ -926,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 		if (!mdev->device_up) {
 			/* NIC is probably restarting - let watchdog task reset
 			 * the port */
-			mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
 		} else {
 			mlx4_en_stop_port(dev);
 			mlx4_en_set_default_moderation(priv);
 			err = mlx4_en_start_port(dev);
 			if (err) {
-				mlx4_err(mdev, "Failed restarting port:%d\n",
+				en_err(priv, "Failed restarting port:%d\n",
 					 priv->port);
 				queue_work(mdev->workqueue, &priv->watchdog_task);
 			}
@@ -1006,7 +994,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
 	priv->mac = mdev->dev->caps.def_mac[priv->port];
 	if (ILLEGAL_MAC(priv->mac)) {
-		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
 		       priv->port, priv->mac);
 		err = -EINVAL;
 		goto out;
@@ -1025,7 +1013,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
 				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+		en_err(priv, "Failed to allocate page for rx qps\n");
 		goto out;
 	}
 	priv->allocated = 1;
@@ -1068,9 +1056,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_carrier_off(dev);
 	err = register_netdev(dev);
 	if (err) {
-		mlx4_err(mdev, "Netdev registration failed\n");
+		en_err(priv, "Netdev registration failed for port %d\n", port);
 		goto out;
 	}
+
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
+
 	priv->registered = 1;
 	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 	return 0;
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
index c1bd040b9e05..3290fec64b2c 100644
--- a/drivers/net/mlx4/en_params.c
+++ b/drivers/net/mlx4/en_params.c
@@ -371,7 +371,7 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 				    priv->prof->rx_pause,
 				    priv->prof->rx_ppp);
 	if (err)
-		mlx4_err(mdev, "Failed setting pause params to\n");
+		en_err(priv, "Failed setting pause params\n");
 
 	return err;
 }
@@ -421,13 +421,13 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 
 	err = mlx4_en_alloc_resources(priv);
 	if (err) {
-		mlx4_err(mdev, "Failed reallocating port resources\n");
+		en_err(priv, "Failed reallocating port resources\n");
 		goto out;
 	}
 	if (port_up) {
 		err = mlx4_en_start_port(dev);
 		if (err)
-			mlx4_err(mdev, "Failed starting port\n");
+			en_err(priv, "Failed starting port\n");
 	}
 
 out:
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 6bfab6e5ba1d..5a14899c1e25 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -114,8 +114,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
 			goto out;
 
 		page_alloc->offset = priv->frag_info[i].frag_align;
-		mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+		en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
 			 i, page_alloc->page);
 	}
 	return 0;
 
@@ -136,8 +136,8 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
 
 	for (i = 0; i < priv->num_frags; i++) {
 		page_alloc = &ring->page_alloc[i];
-		mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+		en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
 			 i, page_count(page_alloc->page));
 
 		put_page(page_alloc->page);
 		page_alloc->page = NULL;
@@ -214,10 +214,10 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 	skb_frags = ring->rx_info + (index << priv->log_rx_info);
 	for (nr = 0; nr < priv->num_frags; nr++) {
-		mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
-		mlx4_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
+		en_dbg(DRV, priv, "Unmaping buffer at dma:0x%llx\n", (u64) dma);
 		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
@@ -226,7 +226,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 
 static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_rx_ring *ring;
 	int ring_ind;
 	int buf_ind;
@@ -239,14 +238,14 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
 			if (mlx4_en_prepare_rx_desc(priv, ring,
 						    ring->actual_size)) {
 				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
-					mlx4_err(mdev, "Failed to allocate "
+					en_err(priv, "Failed to allocate "
 						 "enough rx buffers\n");
 					return -ENOMEM;
 				} else {
 					new_size = rounddown_pow_of_two(ring->actual_size);
-					mlx4_warn(mdev, "Only %d buffers allocated "
+					en_warn(priv, "Only %d buffers allocated "
 						  "reducing ring size to %d",
 						  ring->actual_size, new_size);
 					goto reduce_rings;
 				}
 			}
@@ -282,8 +281,7 @@ static int mlx4_en_fill_rx_buf(struct net_device *dev,
 					  ring->size_mask);
 		if (err) {
 			if (netif_msg_rx_err(priv))
-				mlx4_warn(priv->mdev,
-					  "Failed preparing rx descriptor\n");
+				en_warn(priv, "Failed preparing rx descriptor\n");
 			priv->port_stats.rx_alloc_failed++;
 			break;
 		}
@@ -301,14 +299,14 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
 {
 	int index;
 
-	mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
 		 ring->cons, ring->prod);
 
 	/* Unmap and free Rx buffers */
 	BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
 	while (ring->cons != ring->prod) {
 		index = ring->cons & ring->size_mask;
-		mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
 		mlx4_en_free_rx_desc(priv, ring, index);
 		++ring->cons;
 	}
@@ -373,10 +371,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					sizeof(struct skb_frag_struct));
 	ring->rx_info = vmalloc(tmp);
 	if (!ring->rx_info) {
-		mlx4_err(mdev, "Failed allocating rx_info ring\n");
+		en_err(priv, "Failed allocating rx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
 		 ring->rx_info, tmp);
 
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
@@ -386,7 +384,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map RX buffer\n");
+		en_err(priv, "Failed to map RX buffer\n");
 		goto err_hwq;
 	}
 	ring->buf = ring->wqres.buf.direct.buf;
@@ -404,7 +402,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
 					  sizeof(struct net_lro_desc),
 					  GFP_KERNEL);
 	if (!ring->lro.lro_arr) {
-		mlx4_err(mdev, "Failed to allocate lro array\n");
+		en_err(priv, "Failed to allocate lro array\n");
 		goto err_map;
 	}
 	ring->lro.get_frag_header = mlx4_en_get_frag_header;
@@ -455,7 +453,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		/* Initialize page allocators */
 		err = mlx4_en_init_allocator(priv, ring);
 		if (err) {
-			mlx4_err(mdev, "Failed initializing ring allocator\n");
+			en_err(priv, "Failed initializing ring allocator\n");
 			ring_ind--;
 			goto err_allocator;
 		}
@@ -486,7 +484,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
 		err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
 				     ring->wqres.db.dma, &ring->srq);
 		if (err){
-			mlx4_err(mdev, "Failed to allocate srq\n");
+			en_err(priv, "Failed to allocate srq\n");
 			ring_ind--;
 			goto err_srq;
 		}
@@ -601,7 +599,7 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 
 		skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
 		if (!skb) {
-			mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+			en_dbg(RX_ERR, priv, "Failed allocating skb\n");
 			return NULL;
 		}
 		skb->dev = priv->dev;
@@ -680,7 +678,6 @@ static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
 int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_cqe *cqe;
 	struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
 	struct skb_frag_struct *skb_frags;
@@ -717,14 +714,14 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
 		/* Drop packet on bad receive or bad checksum */
 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 						MLX4_CQE_OPCODE_ERROR)) {
-			mlx4_err(mdev, "CQE completed in error - vendor "
+			en_err(priv, "CQE completed in error - vendor "
 				  "syndrom:%d syndrom:%d\n",
 				  ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
 				  ((struct mlx4_err_cqe *) cqe)->syndrome);
 			goto next;
 		}
 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
-			mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
 			goto next;
 		}
 
@@ -874,7 +871,7 @@ static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16
 	u16 res = MLX4_EN_ALLOC_SIZE % stride;
 	u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
 
-	mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+	en_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
 		"res:%d offset:%d\n", stride, align, res, offset);
 	return offset;
 }
@@ -919,10 +916,10 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
 	priv->rx_skb_size = eff_mtu;
 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
 
-	mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
 		  "num_frags:%d):\n", eff_mtu, priv->num_frags);
 	for (i = 0; i < priv->num_frags; i++) {
-		mlx4_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
+		en_dbg(DRV, priv, "  frag:%d - size:%d prefix:%d align:%d "
 				"stride:%d last_offset:%d\n", i,
 				priv->frag_info[i].frag_size,
 				priv->frag_info[i].frag_prefix_size,
@@ -942,12 +939,12 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
 	int i;
 
 	rss_map->size = roundup_pow_of_two(num_entries);
-	mlx4_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
+	en_dbg(DRV, priv, "Setting default RSS map of %d entires\n",
 		 rss_map->size);
 
 	for (i = 0; i < rss_map->size; i++) {
 		rss_map->map[i] = i % num_rings;
-		mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+		en_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
 	}
 }
 
@@ -962,13 +959,13 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
 
 	context = kmalloc(sizeof *context , GFP_KERNEL);
 	if (!context) {
-		mlx4_err(mdev, "Failed to allocate qp context\n");
+		en_err(priv, "Failed to allocate qp context\n");
 		return -ENOMEM;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+		en_err(priv, "Failed to allocate qp #%x\n", qpn);
 		goto out;
 	}
 	qp->event = mlx4_en_sqp_event;
@@ -1000,12 +997,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	int err = 0;
 	int good_qps = 0;
 
-	mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+	en_dbg(DRV, priv, "Configuring rss steering\n");
 	err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
 				    rss_map->size, &rss_map->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
-			 rss_map->size, priv->port);
+		en_err(priv, "Failed reserving %d qps\n", rss_map->size);
 		return err;
 	}
 
@@ -1025,13 +1021,13 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
 	/* Configure RSS indirection qp */
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed to reserve range for RSS "
+		en_err(priv, "Failed to reserve range for RSS "
 			 "indirection qp\n");
 		goto rss_err;
 	}
 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
 	if (err) {
-		mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+		en_err(priv, "Failed to allocate RSS indirection QP\n");
 		goto reserve_err;
 	}
 	rss_map->indir_qp.event = mlx4_en_sqp_event;
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 1c83a96fde35..95703f90c1b9 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -68,15 +68,15 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	tmp = size * sizeof(struct mlx4_en_tx_info);
 	ring->tx_info = vmalloc(tmp);
 	if (!ring->tx_info) {
-		mlx4_err(mdev, "Failed allocating tx_info ring\n");
+		en_err(priv, "Failed allocating tx_info ring\n");
 		return -ENOMEM;
 	}
-	mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+	en_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
 		 ring->tx_info, tmp);
 
 	ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
 	if (!ring->bounce_buf) {
-		mlx4_err(mdev, "Failed allocating bounce buffer\n");
+		en_err(priv, "Failed allocating bounce buffer\n");
 		err = -ENOMEM;
 		goto err_tx;
 	}
@@ -85,31 +85,31 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
 	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
 				 2 * PAGE_SIZE);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating hwq resources\n");
+		en_err(priv, "Failed allocating hwq resources\n");
 		goto err_bounce;
 	}
 
 	err = mlx4_en_map_buffer(&ring->wqres.buf);
 	if (err) {
-		mlx4_err(mdev, "Failed to map TX buffer\n");
+		en_err(priv, "Failed to map TX buffer\n");
 		goto err_hwq_res;
 	}
 
 	ring->buf = ring->wqres.buf.direct.buf;
 
-	mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+	en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
 		 "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
 		 ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
 
 	err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
 	if (err) {
-		mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+		en_err(priv, "Failed reserving qp for tx ring.\n");
 		goto err_map;
 	}
 
 	err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
 	if (err) {
-		mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+		en_err(priv, "Failed allocating qp %d\n", ring->qpn);
 		goto err_reserve;
 	}
 	ring->qp.event = mlx4_en_sqp_event;
@@ -135,7 +135,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
 			       struct mlx4_en_tx_ring *ring)
 {
 	struct mlx4_en_dev *mdev = priv->mdev;
-	mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+	en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
 	mlx4_qp_remove(mdev->dev, &ring->qp);
 	mlx4_qp_free(mdev->dev, &ring->qp);
@@ -274,12 +274,12 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 
 	/* Skip last polled descriptor */
 	ring->cons += ring->last_nr_txbb;
-	mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+	en_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
 		 ring->cons, ring->prod);
 
 	if ((u32) (ring->prod - ring->cons) > ring->size) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+			en_warn(priv, "Tx consumer passed producer!\n");
 		return 0;
 	}
 
@@ -292,7 +292,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
 	}
 
 	if (cnt)
-		mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+		en_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
 
 	return cnt;
 }
@@ -321,7 +321,7 @@ void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num
 			num = 0;
 		}
 		prio_map[prio] = ring;
-		mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
+		en_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
 		num++;
 	}
 }
@@ -539,7 +539,6 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			 int *lso_header_size)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_dev *mdev = priv->mdev;
 	int real_size;
 
 	if (skb_is_gso(skb)) {
@@ -553,14 +552,14 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
 			real_size += DS_SIZE;
 		else {
 			if (netif_msg_tx_err(priv))
-				mlx4_warn(mdev, "Non-linear headers\n");
+				en_warn(priv, "Non-linear headers\n");
 			dev_kfree_skb_any(skb);
 			return 0;
 		}
 	}
 	if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "LSO header size too big\n");
+			en_warn(priv, "LSO header size too big\n");
 		dev_kfree_skb_any(skb);
 		return 0;
 	}
@@ -669,7 +668,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	nr_txbb = desc_size / TXBB_SIZE;
 	if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "Oversized header or SG list\n");
+			en_warn(priv, "Oversized header or SG list\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
@@ -695,7 +694,7 @@ int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Now that we know what Tx ring to use */
 	if (unlikely(!priv->port_up)) {
 		if (netif_msg_tx_err(priv))
-			mlx4_warn(mdev, "xmit: port down!\n");
+			en_warn(priv, "xmit: port down!\n");
 		dev_kfree_skb_any(skb);
 		return NETDEV_TX_OK;
 	}
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index ef840abbcd39..c92b38247969 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -55,20 +55,36 @@
 
 #define MLX4_EN_MSG_LEVEL	(NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
 
-#define mlx4_dbg(mlevel, priv, format, arg...) \
-	if (NETIF_MSG_##mlevel & priv->msg_enable) \
-	printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
-		(dev_name(&priv->mdev->pdev->dev)) , ## arg)
+#define en_print(level, priv, format, arg...)			\
+	{							\
+	if ((priv)->registered)					\
+		printk(level "%s: %s: " format, DRV_NAME,	\
+			(priv->dev)->name, ## arg);		\
+	else							\
+		printk(level "%s: %s: Port %d: " format,	\
+			DRV_NAME, dev_name(&priv->mdev->pdev->dev), \
+			(priv)->port, ## arg);			\
+	}
+
+#define en_dbg(mlevel, priv, format, arg...)			\
+	{							\
+	if (NETIF_MSG_##mlevel & priv->msg_enable)		\
+		en_print(KERN_DEBUG, priv, format, ## arg)	\
+	}
+#define en_warn(priv, format, arg...)				\
+	en_print(KERN_WARNING, priv, format, ## arg)
+#define en_err(priv, format, arg...)				\
+	en_print(KERN_ERR, priv, format, ## arg)
 
 #define mlx4_err(mdev, format, arg...) \
 	printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_info(mdev, format, arg...) \
 	printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 #define mlx4_warn(mdev, format, arg...) \
 	printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
-		(dev_name(&mdev->pdev->dev)) , ## arg)
+		dev_name(&mdev->pdev->dev) , ## arg)
 
 /*
  * Device constants
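
Note that en_dbg() keeps the existing netif_msg gating: a debug message is emitted only when its NETIF_MSG_* category bit is set in priv->msg_enable, which defaults to MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) above. A small userspace sketch of that gating, with the two bit values copied from include/linux/netdevice.h and the rest simplified:

#include <stdio.h>

#define NETIF_MSG_DRV	0x0001
#define NETIF_MSG_HW	0x2000

struct fake_priv {
	unsigned int msg_enable;	/* bitmask of enabled categories */
};

/* Simplified en_dbg(): print only if the category bit is enabled. */
#define en_dbg(mlevel, priv, fmt, ...)					\
	do {								\
		if (NETIF_MSG_##mlevel & (priv)->msg_enable)		\
			printf("mlx4_en: " fmt, ##__VA_ARGS__);		\
	} while (0)

int main(void)
{
	struct fake_priv priv = { .msg_enable = NETIF_MSG_HW };

	en_dbg(HW, &priv, "Initializing port\n");	/* printed */
	en_dbg(DRV, &priv, "Scheduling watchdog\n");	/* suppressed */
	return 0;
}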