diff options
Diffstat (limited to 'drivers/net/mlx4/en_netdev.c')
-rw-r--r-- | drivers/net/mlx4/en_netdev.c | 175 |
1 file changed, 85 insertions(+), 90 deletions(-)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c index 0cd185a2e089..0a7e78ade63f 100644 --- a/drivers/net/mlx4/en_netdev.c +++ b/drivers/net/mlx4/en_netdev.c
@@ -51,14 +51,14 @@ static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group * | |||
51 | struct mlx4_en_dev *mdev = priv->mdev; | 51 | struct mlx4_en_dev *mdev = priv->mdev; |
52 | int err; | 52 | int err; |
53 | 53 | ||
54 | mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp); | 54 | en_dbg(HW, priv, "Registering VLAN group:%p\n", grp); |
55 | priv->vlgrp = grp; | 55 | priv->vlgrp = grp; |
56 | 56 | ||
57 | mutex_lock(&mdev->state_lock); | 57 | mutex_lock(&mdev->state_lock); |
58 | if (mdev->device_up && priv->port_up) { | 58 | if (mdev->device_up && priv->port_up) { |
59 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); | 59 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); |
60 | if (err) | 60 | if (err) |
61 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | 61 | en_err(priv, "Failed configuring VLAN filter\n"); |
62 | } | 62 | } |
63 | mutex_unlock(&mdev->state_lock); | 63 | mutex_unlock(&mdev->state_lock); |
64 | } | 64 | } |
@@ -72,15 +72,15 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
72 | if (!priv->vlgrp) | 72 | if (!priv->vlgrp) |
73 | return; | 73 | return; |
74 | 74 | ||
75 | mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", | 75 | en_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", |
76 | vid, vlan_group_get_device(priv->vlgrp, vid)); | 76 | vid, vlan_group_get_device(priv->vlgrp, vid)); |
77 | 77 | ||
78 | /* Add VID to port VLAN filter */ | 78 | /* Add VID to port VLAN filter */ |
79 | mutex_lock(&mdev->state_lock); | 79 | mutex_lock(&mdev->state_lock); |
80 | if (mdev->device_up && priv->port_up) { | 80 | if (mdev->device_up && priv->port_up) { |
81 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 81 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
82 | if (err) | 82 | if (err) |
83 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | 83 | en_err(priv, "Failed configuring VLAN filter\n"); |
84 | } | 84 | } |
85 | mutex_unlock(&mdev->state_lock); | 85 | mutex_unlock(&mdev->state_lock); |
86 | } | 86 | } |
@@ -94,9 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
94 | if (!priv->vlgrp) | 94 | if (!priv->vlgrp) |
95 | return; | 95 | return; |
96 | 96 | ||
97 | mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp " | 97 | en_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp entry:%p)\n", |
98 | "entry:%p)\n", vid, priv->vlgrp, | 98 | vid, priv->vlgrp, vlan_group_get_device(priv->vlgrp, vid)); |
99 | vlan_group_get_device(priv->vlgrp, vid)); | ||
100 | vlan_group_set_device(priv->vlgrp, vid, NULL); | 99 | vlan_group_set_device(priv->vlgrp, vid, NULL); |
101 | 100 | ||
102 | /* Remove VID from port VLAN filter */ | 101 | /* Remove VID from port VLAN filter */ |
@@ -104,7 +103,7 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | |||
104 | if (mdev->device_up && priv->port_up) { | 103 | if (mdev->device_up && priv->port_up) { |
105 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 104 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
106 | if (err) | 105 | if (err) |
107 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | 106 | en_err(priv, "Failed configuring VLAN filter\n"); |
108 | } | 107 | } |
109 | mutex_unlock(&mdev->state_lock); | 108 | mutex_unlock(&mdev->state_lock); |
110 | } | 109 | } |
@@ -150,9 +149,10 @@ static void mlx4_en_do_set_mac(struct work_struct *work) | |||
150 | err = mlx4_register_mac(mdev->dev, priv->port, | 149 | err = mlx4_register_mac(mdev->dev, priv->port, |
151 | priv->mac, &priv->mac_index); | 150 | priv->mac, &priv->mac_index); |
152 | if (err) | 151 | if (err) |
153 | mlx4_err(mdev, "Failed changing HW MAC address\n"); | 152 | en_err(priv, "Failed changing HW MAC address\n"); |
154 | } else | 153 | } else |
155 | mlx4_dbg(HW, priv, "Port is down, exiting...\n"); | 154 | en_dbg(HW, priv, "Port is down while " |
155 | "registering mac, exiting...\n"); | ||
156 | 156 | ||
157 | mutex_unlock(&mdev->state_lock); | 157 | mutex_unlock(&mdev->state_lock); |
158 | } | 158 | } |
@@ -174,7 +174,6 @@ static void mlx4_en_clear_list(struct net_device *dev) | |||
174 | static void mlx4_en_cache_mclist(struct net_device *dev) | 174 | static void mlx4_en_cache_mclist(struct net_device *dev) |
175 | { | 175 | { |
176 | struct mlx4_en_priv *priv = netdev_priv(dev); | 176 | struct mlx4_en_priv *priv = netdev_priv(dev); |
177 | struct mlx4_en_dev *mdev = priv->mdev; | ||
178 | struct dev_mc_list *mclist; | 177 | struct dev_mc_list *mclist; |
179 | struct dev_mc_list *tmp; | 178 | struct dev_mc_list *tmp; |
180 | struct dev_mc_list *plist = NULL; | 179 | struct dev_mc_list *plist = NULL; |
@@ -182,7 +181,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev) | |||
182 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | 181 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { |
183 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); | 182 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); |
184 | if (!tmp) { | 183 | if (!tmp) { |
185 | mlx4_err(mdev, "failed to allocate multicast list\n"); | 184 | en_err(priv, "failed to allocate multicast list\n"); |
186 | mlx4_en_clear_list(dev); | 185 | mlx4_en_clear_list(dev); |
187 | return; | 186 | return; |
188 | } | 187 | } |
@@ -219,13 +218,13 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
219 | 218 | ||
220 | mutex_lock(&mdev->state_lock); | 219 | mutex_lock(&mdev->state_lock); |
221 | if (!mdev->device_up) { | 220 | if (!mdev->device_up) { |
222 | mlx4_dbg(HW, priv, "Card is not up, ignoring " | 221 | en_dbg(HW, priv, "Card is not up, " |
223 | "multicast change.\n"); | 222 | "ignoring multicast change.\n"); |
224 | goto out; | 223 | goto out; |
225 | } | 224 | } |
226 | if (!priv->port_up) { | 225 | if (!priv->port_up) { |
227 | mlx4_dbg(HW, priv, "Port is down, ignoring " | 226 | en_dbg(HW, priv, "Port is down, " |
228 | "multicast change.\n"); | 227 | "ignoring multicast change.\n"); |
229 | goto out; | 228 | goto out; |
230 | } | 229 | } |
231 | 230 | ||
@@ -236,29 +235,27 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
236 | if (dev->flags & IFF_PROMISC) { | 235 | if (dev->flags & IFF_PROMISC) { |
237 | if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { | 236 | if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { |
238 | if (netif_msg_rx_status(priv)) | 237 | if (netif_msg_rx_status(priv)) |
239 | mlx4_warn(mdev, "Port:%d entering promiscuous mode\n", | 238 | en_warn(priv, "Entering promiscuous mode\n"); |
240 | priv->port); | ||
241 | priv->flags |= MLX4_EN_FLAG_PROMISC; | 239 | priv->flags |= MLX4_EN_FLAG_PROMISC; |
242 | 240 | ||
243 | /* Enable promiscouos mode */ | 241 | /* Enable promiscouos mode */ |
244 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 242 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
245 | priv->base_qpn, 1); | 243 | priv->base_qpn, 1); |
246 | if (err) | 244 | if (err) |
247 | mlx4_err(mdev, "Failed enabling " | 245 | en_err(priv, "Failed enabling " |
248 | "promiscous mode\n"); | 246 | "promiscous mode\n"); |
249 | 247 | ||
250 | /* Disable port multicast filter (unconditionally) */ | 248 | /* Disable port multicast filter (unconditionally) */ |
251 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 249 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
252 | 0, MLX4_MCAST_DISABLE); | 250 | 0, MLX4_MCAST_DISABLE); |
253 | if (err) | 251 | if (err) |
254 | mlx4_err(mdev, "Failed disabling " | 252 | en_err(priv, "Failed disabling " |
255 | "multicast filter\n"); | 253 | "multicast filter\n"); |
256 | 254 | ||
257 | /* Disable port VLAN filter */ | 255 | /* Disable port VLAN filter */ |
258 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); | 256 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL); |
259 | if (err) | 257 | if (err) |
260 | mlx4_err(mdev, "Failed disabling " | 258 | en_err(priv, "Failed disabling VLAN filter\n"); |
261 | "VLAN filter\n"); | ||
262 | } | 259 | } |
263 | goto out; | 260 | goto out; |
264 | } | 261 | } |
@@ -269,20 +266,19 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
269 | 266 | ||
270 | if (priv->flags & MLX4_EN_FLAG_PROMISC) { | 267 | if (priv->flags & MLX4_EN_FLAG_PROMISC) { |
271 | if (netif_msg_rx_status(priv)) | 268 | if (netif_msg_rx_status(priv)) |
272 | mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n", | 269 | en_warn(priv, "Leaving promiscuous mode\n"); |
273 | priv->port); | ||
274 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; | 270 | priv->flags &= ~MLX4_EN_FLAG_PROMISC; |
275 | 271 | ||
276 | /* Disable promiscouos mode */ | 272 | /* Disable promiscouos mode */ |
277 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, | 273 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, |
278 | priv->base_qpn, 0); | 274 | priv->base_qpn, 0); |
279 | if (err) | 275 | if (err) |
280 | mlx4_err(mdev, "Failed disabling promiscous mode\n"); | 276 | en_err(priv, "Failed disabling promiscous mode\n"); |
281 | 277 | ||
282 | /* Enable port VLAN filter */ | 278 | /* Enable port VLAN filter */ |
283 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | 279 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); |
284 | if (err) | 280 | if (err) |
285 | mlx4_err(mdev, "Failed enabling VLAN filter\n"); | 281 | en_err(priv, "Failed enabling VLAN filter\n"); |
286 | } | 282 | } |
287 | 283 | ||
288 | /* Enable/disable the multicast filter according to IFF_ALLMULTI */ | 284 | /* Enable/disable the multicast filter according to IFF_ALLMULTI */ |
@@ -290,12 +286,12 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
290 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 286 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
291 | 0, MLX4_MCAST_DISABLE); | 287 | 0, MLX4_MCAST_DISABLE); |
292 | if (err) | 288 | if (err) |
293 | mlx4_err(mdev, "Failed disabling multicast filter\n"); | 289 | en_err(priv, "Failed disabling multicast filter\n"); |
294 | } else { | 290 | } else { |
295 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 291 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
296 | 0, MLX4_MCAST_DISABLE); | 292 | 0, MLX4_MCAST_DISABLE); |
297 | if (err) | 293 | if (err) |
298 | mlx4_err(mdev, "Failed disabling multicast filter\n"); | 294 | en_err(priv, "Failed disabling multicast filter\n"); |
299 | 295 | ||
300 | /* Flush mcast filter and init it with broadcast address */ | 296 | /* Flush mcast filter and init it with broadcast address */ |
301 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, | 297 | mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, |
@@ -314,7 +310,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) | |||
314 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, | 310 | err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, |
315 | 0, MLX4_MCAST_ENABLE); | 311 | 0, MLX4_MCAST_ENABLE); |
316 | if (err) | 312 | if (err) |
317 | mlx4_err(mdev, "Failed enabling multicast filter\n"); | 313 | en_err(priv, "Failed enabling multicast filter\n"); |
318 | 314 | ||
319 | mlx4_en_clear_list(dev); | 315 | mlx4_en_clear_list(dev); |
320 | } | 316 | } |
@@ -346,10 +342,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev) | |||
346 | struct mlx4_en_dev *mdev = priv->mdev; | 342 | struct mlx4_en_dev *mdev = priv->mdev; |
347 | 343 | ||
348 | if (netif_msg_timer(priv)) | 344 | if (netif_msg_timer(priv)) |
349 | mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); | 345 | en_warn(priv, "Tx timeout called on port:%d\n", priv->port); |
350 | 346 | ||
351 | priv->port_stats.tx_timeout++; | 347 | priv->port_stats.tx_timeout++; |
352 | mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); | 348 | en_dbg(DRV, priv, "Scheduling watchdog\n"); |
353 | queue_work(mdev->workqueue, &priv->watchdog_task); | 349 | queue_work(mdev->workqueue, &priv->watchdog_task); |
354 | } | 350 | } |
355 | 351 | ||
@@ -376,10 +372,10 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | |||
376 | * satisfy our coelsing target. | 372 | * satisfy our coelsing target. |
377 | * - moder_time is set to a fixed value. | 373 | * - moder_time is set to a fixed value. |
378 | */ | 374 | */ |
379 | priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->mtu + 1; | 375 | priv->rx_frames = MLX4_EN_RX_COAL_TARGET; |
380 | priv->rx_usecs = MLX4_EN_RX_COAL_TIME; | 376 | priv->rx_usecs = MLX4_EN_RX_COAL_TIME; |
381 | mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - " | 377 | en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " |
382 | "rx_frames:%d rx_usecs:%d\n", | 378 | "rx_frames:%d rx_usecs:%d\n", |
383 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | 379 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); |
384 | 380 | ||
385 | /* Setup cq moderation params */ | 381 | /* Setup cq moderation params */ |
@@ -412,7 +408,6 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | |||
412 | static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | 408 | static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) |
413 | { | 409 | { |
414 | unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); | 410 | unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); |
415 | struct mlx4_en_dev *mdev = priv->mdev; | ||
416 | struct mlx4_en_cq *cq; | 411 | struct mlx4_en_cq *cq; |
417 | unsigned long packets; | 412 | unsigned long packets; |
418 | unsigned long rate; | 413 | unsigned long rate; |
@@ -472,11 +467,11 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
472 | moder_time = priv->rx_usecs; | 467 | moder_time = priv->rx_usecs; |
473 | } | 468 | } |
474 | 469 | ||
475 | mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", | 470 | en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n", |
476 | tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); | 471 | tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period); |
477 | 472 | ||
478 | mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu " | 473 | en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu " |
479 | "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n", | 474 | "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n", |
480 | priv->last_moder_time, moder_time, period, packets, | 475 | priv->last_moder_time, moder_time, period, packets, |
481 | avg_pkt_size, rate); | 476 | avg_pkt_size, rate); |
482 | 477 | ||
@@ -487,8 +482,7 @@ static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) | |||
487 | cq->moder_time = moder_time; | 482 | cq->moder_time = moder_time; |
488 | err = mlx4_en_set_cq_moder(priv, cq); | 483 | err = mlx4_en_set_cq_moder(priv, cq); |
489 | if (err) { | 484 | if (err) { |
490 | mlx4_err(mdev, "Failed modifying moderation for cq:%d " | 485 | en_err(priv, "Failed modifying moderation for cq:%d\n", i); |
491 | "on port:%d\n", i, priv->port); | ||
492 | break; | 486 | break; |
493 | } | 487 | } |
494 | } | 488 | } |
@@ -511,8 +505,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work) | |||
511 | 505 | ||
512 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); | 506 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); |
513 | if (err) | 507 | if (err) |
514 | mlx4_dbg(HW, priv, "Could not update stats for " | 508 | en_dbg(HW, priv, "Could not update stats \n"); |
515 | "port:%d\n", priv->port); | ||
516 | 509 | ||
517 | mutex_lock(&mdev->state_lock); | 510 | mutex_lock(&mdev->state_lock); |
518 | if (mdev->device_up) { | 511 | if (mdev->device_up) { |
@@ -536,12 +529,10 @@ static void mlx4_en_linkstate(struct work_struct *work) | |||
536 | * report to system log */ | 529 | * report to system log */ |
537 | if (priv->last_link_state != linkstate) { | 530 | if (priv->last_link_state != linkstate) { |
538 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | 531 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { |
539 | if (netif_msg_link(priv)) | 532 | en_dbg(LINK, priv, "Link Down\n"); |
540 | mlx4_info(mdev, "Port %d - link down\n", priv->port); | ||
541 | netif_carrier_off(priv->dev); | 533 | netif_carrier_off(priv->dev); |
542 | } else { | 534 | } else { |
543 | if (netif_msg_link(priv)) | 535 | en_dbg(LINK, priv, "Link Up\n"); |
544 | mlx4_info(mdev, "Port %d - link up\n", priv->port); | ||
545 | netif_carrier_on(priv->dev); | 536 | netif_carrier_on(priv->dev); |
546 | } | 537 | } |
547 | } | 538 | } |
@@ -563,19 +554,19 @@ int mlx4_en_start_port(struct net_device *dev) | |||
563 | int j; | 554 | int j; |
564 | 555 | ||
565 | if (priv->port_up) { | 556 | if (priv->port_up) { |
566 | mlx4_dbg(DRV, priv, "start port called while port already up\n"); | 557 | en_dbg(DRV, priv, "start port called while port already up\n"); |
567 | return 0; | 558 | return 0; |
568 | } | 559 | } |
569 | 560 | ||
570 | /* Calculate Rx buf size */ | 561 | /* Calculate Rx buf size */ |
571 | dev->mtu = min(dev->mtu, priv->max_mtu); | 562 | dev->mtu = min(dev->mtu, priv->max_mtu); |
572 | mlx4_en_calc_rx_buf(dev); | 563 | mlx4_en_calc_rx_buf(dev); |
573 | mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); | 564 | en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); |
574 | 565 | ||
575 | /* Configure rx cq's and rings */ | 566 | /* Configure rx cq's and rings */ |
576 | err = mlx4_en_activate_rx_rings(priv); | 567 | err = mlx4_en_activate_rx_rings(priv); |
577 | if (err) { | 568 | if (err) { |
578 | mlx4_err(mdev, "Failed to activate RX rings\n"); | 569 | en_err(priv, "Failed to activate RX rings\n"); |
579 | return err; | 570 | return err; |
580 | } | 571 | } |
581 | for (i = 0; i < priv->rx_ring_num; i++) { | 572 | for (i = 0; i < priv->rx_ring_num; i++) { |
@@ -583,14 +574,14 @@ int mlx4_en_start_port(struct net_device *dev) | |||
583 | 574 | ||
584 | err = mlx4_en_activate_cq(priv, cq); | 575 | err = mlx4_en_activate_cq(priv, cq); |
585 | if (err) { | 576 | if (err) { |
586 | mlx4_err(mdev, "Failed activating Rx CQ\n"); | 577 | en_err(priv, "Failed activating Rx CQ\n"); |
587 | goto cq_err; | 578 | goto cq_err; |
588 | } | 579 | } |
589 | for (j = 0; j < cq->size; j++) | 580 | for (j = 0; j < cq->size; j++) |
590 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | 581 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; |
591 | err = mlx4_en_set_cq_moder(priv, cq); | 582 | err = mlx4_en_set_cq_moder(priv, cq); |
592 | if (err) { | 583 | if (err) { |
593 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | 584 | en_err(priv, "Failed setting cq moderation parameters"); |
594 | mlx4_en_deactivate_cq(priv, cq); | 585 | mlx4_en_deactivate_cq(priv, cq); |
595 | goto cq_err; | 586 | goto cq_err; |
596 | } | 587 | } |
@@ -601,7 +592,7 @@ int mlx4_en_start_port(struct net_device *dev) | |||
601 | 592 | ||
602 | err = mlx4_en_config_rss_steer(priv); | 593 | err = mlx4_en_config_rss_steer(priv); |
603 | if (err) { | 594 | if (err) { |
604 | mlx4_err(mdev, "Failed configuring rss steering\n"); | 595 | en_err(priv, "Failed configuring rss steering\n"); |
605 | goto cq_err; | 596 | goto cq_err; |
606 | } | 597 | } |
607 | 598 | ||
@@ -611,16 +602,16 @@ int mlx4_en_start_port(struct net_device *dev) | |||
611 | cq = &priv->tx_cq[i]; | 602 | cq = &priv->tx_cq[i]; |
612 | err = mlx4_en_activate_cq(priv, cq); | 603 | err = mlx4_en_activate_cq(priv, cq); |
613 | if (err) { | 604 | if (err) { |
614 | mlx4_err(mdev, "Failed allocating Tx CQ\n"); | 605 | en_err(priv, "Failed allocating Tx CQ\n"); |
615 | goto tx_err; | 606 | goto tx_err; |
616 | } | 607 | } |
617 | err = mlx4_en_set_cq_moder(priv, cq); | 608 | err = mlx4_en_set_cq_moder(priv, cq); |
618 | if (err) { | 609 | if (err) { |
619 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | 610 | en_err(priv, "Failed setting cq moderation parameters"); |
620 | mlx4_en_deactivate_cq(priv, cq); | 611 | mlx4_en_deactivate_cq(priv, cq); |
621 | goto tx_err; | 612 | goto tx_err; |
622 | } | 613 | } |
623 | mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); | 614 | en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); |
624 | cq->buf->wqe_index = cpu_to_be16(0xffff); | 615 | cq->buf->wqe_index = cpu_to_be16(0xffff); |
625 | 616 | ||
626 | /* Configure ring */ | 617 | /* Configure ring */ |
@@ -628,7 +619,7 @@ int mlx4_en_start_port(struct net_device *dev) | |||
628 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, | 619 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, |
629 | priv->rx_ring[0].srq.srqn); | 620 | priv->rx_ring[0].srq.srqn); |
630 | if (err) { | 621 | if (err) { |
631 | mlx4_err(mdev, "Failed allocating Tx ring\n"); | 622 | en_err(priv, "Failed allocating Tx ring\n"); |
632 | mlx4_en_deactivate_cq(priv, cq); | 623 | mlx4_en_deactivate_cq(priv, cq); |
633 | goto tx_err; | 624 | goto tx_err; |
634 | } | 625 | } |
@@ -646,30 +637,30 @@ int mlx4_en_start_port(struct net_device *dev) | |||
646 | priv->prof->rx_pause, | 637 | priv->prof->rx_pause, |
647 | priv->prof->rx_ppp); | 638 | priv->prof->rx_ppp); |
648 | if (err) { | 639 | if (err) { |
649 | mlx4_err(mdev, "Failed setting port general configurations" | 640 | en_err(priv, "Failed setting port general configurations " |
650 | " for port %d, with error %d\n", priv->port, err); | 641 | "for port %d, with error %d\n", priv->port, err); |
651 | goto tx_err; | 642 | goto tx_err; |
652 | } | 643 | } |
653 | /* Set default qp number */ | 644 | /* Set default qp number */ |
654 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); | 645 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); |
655 | if (err) { | 646 | if (err) { |
656 | mlx4_err(mdev, "Failed setting default qp numbers\n"); | 647 | en_err(priv, "Failed setting default qp numbers\n"); |
657 | goto tx_err; | 648 | goto tx_err; |
658 | } | 649 | } |
659 | /* Set port mac number */ | 650 | /* Set port mac number */ |
660 | mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | 651 | en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); |
661 | err = mlx4_register_mac(mdev->dev, priv->port, | 652 | err = mlx4_register_mac(mdev->dev, priv->port, |
662 | priv->mac, &priv->mac_index); | 653 | priv->mac, &priv->mac_index); |
663 | if (err) { | 654 | if (err) { |
664 | mlx4_err(mdev, "Failed setting port mac\n"); | 655 | en_err(priv, "Failed setting port mac\n"); |
665 | goto tx_err; | 656 | goto tx_err; |
666 | } | 657 | } |
667 | 658 | ||
668 | /* Init port */ | 659 | /* Init port */ |
669 | mlx4_dbg(HW, priv, "Initializing port\n"); | 660 | en_dbg(HW, priv, "Initializing port\n"); |
670 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | 661 | err = mlx4_INIT_PORT(mdev->dev, priv->port); |
671 | if (err) { | 662 | if (err) { |
672 | mlx4_err(mdev, "Failed Initializing port\n"); | 663 | en_err(priv, "Failed Initializing port\n"); |
673 | goto mac_err; | 664 | goto mac_err; |
674 | } | 665 | } |
675 | 666 | ||
@@ -706,8 +697,7 @@ void mlx4_en_stop_port(struct net_device *dev) | |||
706 | int i; | 697 | int i; |
707 | 698 | ||
708 | if (!priv->port_up) { | 699 | if (!priv->port_up) { |
709 | mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n", | 700 | en_dbg(DRV, priv, "stop port called while port already down\n"); |
710 | priv->port); | ||
711 | return; | 701 | return; |
712 | } | 702 | } |
713 | netif_stop_queue(dev); | 703 | netif_stop_queue(dev); |
@@ -752,13 +742,13 @@ static void mlx4_en_restart(struct work_struct *work) | |||
752 | struct mlx4_en_dev *mdev = priv->mdev; | 742 | struct mlx4_en_dev *mdev = priv->mdev; |
753 | struct net_device *dev = priv->dev; | 743 | struct net_device *dev = priv->dev; |
754 | 744 | ||
755 | mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); | 745 | en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); |
756 | 746 | ||
757 | mutex_lock(&mdev->state_lock); | 747 | mutex_lock(&mdev->state_lock); |
758 | if (priv->port_up) { | 748 | if (priv->port_up) { |
759 | mlx4_en_stop_port(dev); | 749 | mlx4_en_stop_port(dev); |
760 | if (mlx4_en_start_port(dev)) | 750 | if (mlx4_en_start_port(dev)) |
761 | mlx4_err(mdev, "Failed restarting port %d\n", priv->port); | 751 | en_err(priv, "Failed restarting port %d\n", priv->port); |
762 | } | 752 | } |
763 | mutex_unlock(&mdev->state_lock); | 753 | mutex_unlock(&mdev->state_lock); |
764 | } | 754 | } |
@@ -774,14 +764,14 @@ static int mlx4_en_open(struct net_device *dev) | |||
774 | mutex_lock(&mdev->state_lock); | 764 | mutex_lock(&mdev->state_lock); |
775 | 765 | ||
776 | if (!mdev->device_up) { | 766 | if (!mdev->device_up) { |
777 | mlx4_err(mdev, "Cannot open - device down/disabled\n"); | 767 | en_err(priv, "Cannot open - device down/disabled\n"); |
778 | err = -EBUSY; | 768 | err = -EBUSY; |
779 | goto out; | 769 | goto out; |
780 | } | 770 | } |
781 | 771 | ||
782 | /* Reset HW statistics and performance counters */ | 772 | /* Reset HW statistics and performance counters */ |
783 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | 773 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) |
784 | mlx4_dbg(HW, priv, "Failed dumping statistics\n"); | 774 | en_dbg(HW, priv, "Failed dumping statistics\n"); |
785 | 775 | ||
786 | memset(&priv->stats, 0, sizeof(priv->stats)); | 776 | memset(&priv->stats, 0, sizeof(priv->stats)); |
787 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | 777 | memset(&priv->pstats, 0, sizeof(priv->pstats)); |
@@ -798,7 +788,7 @@ static int mlx4_en_open(struct net_device *dev) | |||
798 | mlx4_en_set_default_moderation(priv); | 788 | mlx4_en_set_default_moderation(priv); |
799 | err = mlx4_en_start_port(dev); | 789 | err = mlx4_en_start_port(dev); |
800 | if (err) | 790 | if (err) |
801 | mlx4_err(mdev, "Failed starting port:%d\n", priv->port); | 791 | en_err(priv, "Failed starting port:%d\n", priv->port); |
802 | 792 | ||
803 | out: | 793 | out: |
804 | mutex_unlock(&mdev->state_lock); | 794 | mutex_unlock(&mdev->state_lock); |
@@ -811,8 +801,7 @@ static int mlx4_en_close(struct net_device *dev) | |||
811 | struct mlx4_en_priv *priv = netdev_priv(dev); | 801 | struct mlx4_en_priv *priv = netdev_priv(dev); |
812 | struct mlx4_en_dev *mdev = priv->mdev; | 802 | struct mlx4_en_dev *mdev = priv->mdev; |
813 | 803 | ||
814 | if (netif_msg_ifdown(priv)) | 804 | en_dbg(IFDOWN, priv, "Close port called\n"); |
815 | mlx4_info(mdev, "Close called for port:%d\n", priv->port); | ||
816 | 805 | ||
817 | mutex_lock(&mdev->state_lock); | 806 | mutex_lock(&mdev->state_lock); |
818 | 807 | ||
@@ -844,7 +833,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) | |||
844 | 833 | ||
845 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | 834 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) |
846 | { | 835 | { |
847 | struct mlx4_en_dev *mdev = priv->mdev; | ||
848 | struct mlx4_en_port_profile *prof = priv->prof; | 836 | struct mlx4_en_port_profile *prof = priv->prof; |
849 | int i; | 837 | int i; |
850 | 838 | ||
@@ -873,7 +861,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | |||
873 | return 0; | 861 | return 0; |
874 | 862 | ||
875 | err: | 863 | err: |
876 | mlx4_err(mdev, "Failed to allocate NIC resources\n"); | 864 | en_err(priv, "Failed to allocate NIC resources\n"); |
877 | return -ENOMEM; | 865 | return -ENOMEM; |
878 | } | 866 | } |
879 | 867 | ||
@@ -883,7 +871,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
883 | struct mlx4_en_priv *priv = netdev_priv(dev); | 871 | struct mlx4_en_priv *priv = netdev_priv(dev); |
884 | struct mlx4_en_dev *mdev = priv->mdev; | 872 | struct mlx4_en_dev *mdev = priv->mdev; |
885 | 873 | ||
886 | mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); | 874 | en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); |
887 | 875 | ||
888 | /* Unregister device - this will close the port if it was up */ | 876 | /* Unregister device - this will close the port if it was up */ |
889 | if (priv->registered) | 877 | if (priv->registered) |
@@ -912,11 +900,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | |||
912 | struct mlx4_en_dev *mdev = priv->mdev; | 900 | struct mlx4_en_dev *mdev = priv->mdev; |
913 | int err = 0; | 901 | int err = 0; |
914 | 902 | ||
915 | mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", | 903 | en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", |
916 | dev->mtu, new_mtu); | 904 | dev->mtu, new_mtu); |
917 | 905 | ||
918 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { | 906 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { |
919 | mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu); | 907 | en_err(priv, "Bad MTU size:%d.\n", new_mtu); |
920 | return -EPERM; | 908 | return -EPERM; |
921 | } | 909 | } |
922 | dev->mtu = new_mtu; | 910 | dev->mtu = new_mtu; |
@@ -926,13 +914,13 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | |||
926 | if (!mdev->device_up) { | 914 | if (!mdev->device_up) { |
927 | /* NIC is probably restarting - let watchdog task reset | 915 | /* NIC is probably restarting - let watchdog task reset |
928 | * the port */ | 916 | * the port */ |
929 | mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n"); | 917 | en_dbg(DRV, priv, "Change MTU called with card down!?\n"); |
930 | } else { | 918 | } else { |
931 | mlx4_en_stop_port(dev); | 919 | mlx4_en_stop_port(dev); |
932 | mlx4_en_set_default_moderation(priv); | 920 | mlx4_en_set_default_moderation(priv); |
933 | err = mlx4_en_start_port(dev); | 921 | err = mlx4_en_start_port(dev); |
934 | if (err) { | 922 | if (err) { |
935 | mlx4_err(mdev, "Failed restarting port:%d\n", | 923 | en_err(priv, "Failed restarting port:%d\n", |
936 | priv->port); | 924 | priv->port); |
937 | queue_work(mdev->workqueue, &priv->watchdog_task); | 925 | queue_work(mdev->workqueue, &priv->watchdog_task); |
938 | } | 926 | } |
@@ -946,6 +934,7 @@ static const struct net_device_ops mlx4_netdev_ops = { | |||
946 | .ndo_open = mlx4_en_open, | 934 | .ndo_open = mlx4_en_open, |
947 | .ndo_stop = mlx4_en_close, | 935 | .ndo_stop = mlx4_en_close, |
948 | .ndo_start_xmit = mlx4_en_xmit, | 936 | .ndo_start_xmit = mlx4_en_xmit, |
937 | .ndo_select_queue = mlx4_en_select_queue, | ||
949 | .ndo_get_stats = mlx4_en_get_stats, | 938 | .ndo_get_stats = mlx4_en_get_stats, |
950 | .ndo_set_multicast_list = mlx4_en_set_multicast, | 939 | .ndo_set_multicast_list = mlx4_en_set_multicast, |
951 | .ndo_set_mac_address = mlx4_en_set_mac, | 940 | .ndo_set_mac_address = mlx4_en_set_mac, |
@@ -968,7 +957,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
968 | int i; | 957 | int i; |
969 | int err; | 958 | int err; |
970 | 959 | ||
971 | dev = alloc_etherdev(sizeof(struct mlx4_en_priv)); | 960 | dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); |
972 | if (dev == NULL) { | 961 | if (dev == NULL) { |
973 | mlx4_err(mdev, "Net device allocation failed\n"); | 962 | mlx4_err(mdev, "Net device allocation failed\n"); |
974 | return -ENOMEM; | 963 | return -ENOMEM; |
@@ -1006,7 +995,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1006 | priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; | 995 | priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; |
1007 | priv->mac = mdev->dev->caps.def_mac[priv->port]; | 996 | priv->mac = mdev->dev->caps.def_mac[priv->port]; |
1008 | if (ILLEGAL_MAC(priv->mac)) { | 997 | if (ILLEGAL_MAC(priv->mac)) { |
1009 | mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n", | 998 | en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", |
1010 | priv->port, priv->mac); | 999 | priv->port, priv->mac); |
1011 | err = -EINVAL; | 1000 | err = -EINVAL; |
1012 | goto out; | 1001 | goto out; |
@@ -1025,19 +1014,17 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1025 | err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, | 1014 | err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, |
1026 | MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); | 1015 | MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); |
1027 | if (err) { | 1016 | if (err) { |
1028 | mlx4_err(mdev, "Failed to allocate page for rx qps\n"); | 1017 | en_err(priv, "Failed to allocate page for rx qps\n"); |
1029 | goto out; | 1018 | goto out; |
1030 | } | 1019 | } |
1031 | priv->allocated = 1; | 1020 | priv->allocated = 1; |
1032 | 1021 | ||
1033 | /* Populate Tx priority mappings */ | ||
1034 | mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num); | ||
1035 | |||
1036 | /* | 1022 | /* |
1037 | * Initialize netdev entry points | 1023 | * Initialize netdev entry points |
1038 | */ | 1024 | */ |
1039 | dev->netdev_ops = &mlx4_netdev_ops; | 1025 | dev->netdev_ops = &mlx4_netdev_ops; |
1040 | dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; | 1026 | dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; |
1027 | dev->real_num_tx_queues = MLX4_EN_NUM_TX_RINGS; | ||
1041 | 1028 | ||
1042 | SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); | 1029 | SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); |
1043 | 1030 | ||
@@ -1051,7 +1038,9 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1051 | * Set driver features | 1038 | * Set driver features |
1052 | */ | 1039 | */ |
1053 | dev->features |= NETIF_F_SG; | 1040 | dev->features |= NETIF_F_SG; |
1041 | dev->vlan_features |= NETIF_F_SG; | ||
1054 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | 1042 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; |
1043 | dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
1055 | dev->features |= NETIF_F_HIGHDMA; | 1044 | dev->features |= NETIF_F_HIGHDMA; |
1056 | dev->features |= NETIF_F_HW_VLAN_TX | | 1045 | dev->features |= NETIF_F_HW_VLAN_TX | |
1057 | NETIF_F_HW_VLAN_RX | | 1046 | NETIF_F_HW_VLAN_RX | |
@@ -1061,6 +1050,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1061 | if (mdev->LSO_support) { | 1050 | if (mdev->LSO_support) { |
1062 | dev->features |= NETIF_F_TSO; | 1051 | dev->features |= NETIF_F_TSO; |
1063 | dev->features |= NETIF_F_TSO6; | 1052 | dev->features |= NETIF_F_TSO6; |
1053 | dev->vlan_features |= NETIF_F_TSO; | ||
1054 | dev->vlan_features |= NETIF_F_TSO6; | ||
1064 | } | 1055 | } |
1065 | 1056 | ||
1066 | mdev->pndev[port] = dev; | 1057 | mdev->pndev[port] = dev; |
@@ -1068,9 +1059,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
1068 | netif_carrier_off(dev); | 1059 | netif_carrier_off(dev); |
1069 | err = register_netdev(dev); | 1060 | err = register_netdev(dev); |
1070 | if (err) { | 1061 | if (err) { |
1071 | mlx4_err(mdev, "Netdev registration failed\n"); | 1062 | en_err(priv, "Netdev registration failed for port %d\n", port); |
1072 | goto out; | 1063 | goto out; |
1073 | } | 1064 | } |
1065 | |||
1066 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | ||
1067 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | ||
1068 | |||
1074 | priv->registered = 1; | 1069 | priv->registered = 1; |
1075 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | 1070 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); |
1076 | return 0; | 1071 | return 0; |