diff options
Diffstat (limited to 'drivers/net/mlx4/en_netdev.c')
-rw-r--r-- | drivers/net/mlx4/en_netdev.c | 1088 |
1 files changed, 1088 insertions, 0 deletions
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c new file mode 100644 index 000000000000..a339afbeed38 --- /dev/null +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -0,0 +1,1088 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/etherdevice.h> | ||
35 | #include <linux/tcp.h> | ||
36 | #include <linux/if_vlan.h> | ||
37 | #include <linux/delay.h> | ||
38 | |||
39 | #include <linux/mlx4/driver.h> | ||
40 | #include <linux/mlx4/device.h> | ||
41 | #include <linux/mlx4/cmd.h> | ||
42 | #include <linux/mlx4/cq.h> | ||
43 | |||
44 | #include "mlx4_en.h" | ||
45 | #include "en_port.h" | ||
46 | |||
47 | |||
48 | static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
49 | { | ||
50 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
51 | struct mlx4_en_dev *mdev = priv->mdev; | ||
52 | int err; | ||
53 | |||
54 | mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp); | ||
55 | priv->vlgrp = grp; | ||
56 | |||
57 | mutex_lock(&mdev->state_lock); | ||
58 | if (mdev->device_up && priv->port_up) { | ||
59 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp); | ||
60 | if (err) | ||
61 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
62 | } | ||
63 | mutex_unlock(&mdev->state_lock); | ||
64 | } | ||
65 | |||
66 | static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | ||
67 | { | ||
68 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
69 | struct mlx4_en_dev *mdev = priv->mdev; | ||
70 | int err; | ||
71 | |||
72 | if (!priv->vlgrp) | ||
73 | return; | ||
74 | |||
75 | mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n", | ||
76 | vid, vlan_group_get_device(priv->vlgrp, vid)); | ||
77 | |||
78 | /* Add VID to port VLAN filter */ | ||
79 | mutex_lock(&mdev->state_lock); | ||
80 | if (mdev->device_up && priv->port_up) { | ||
81 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | ||
82 | if (err) | ||
83 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
84 | } | ||
85 | mutex_unlock(&mdev->state_lock); | ||
86 | } | ||
87 | |||
88 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
89 | { | ||
90 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
91 | struct mlx4_en_dev *mdev = priv->mdev; | ||
92 | int err; | ||
93 | |||
94 | if (!priv->vlgrp) | ||
95 | return; | ||
96 | |||
97 | mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp " | ||
98 | "entry:%p)\n", vid, priv->vlgrp, | ||
99 | vlan_group_get_device(priv->vlgrp, vid)); | ||
100 | vlan_group_set_device(priv->vlgrp, vid, NULL); | ||
101 | |||
102 | /* Remove VID from port VLAN filter */ | ||
103 | mutex_lock(&mdev->state_lock); | ||
104 | if (mdev->device_up && priv->port_up) { | ||
105 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp); | ||
106 | if (err) | ||
107 | mlx4_err(mdev, "Failed configuring VLAN filter\n"); | ||
108 | } | ||
109 | mutex_unlock(&mdev->state_lock); | ||
110 | } | ||
111 | |||
112 | static u64 mlx4_en_mac_to_u64(u8 *addr) | ||
113 | { | ||
114 | u64 mac = 0; | ||
115 | int i; | ||
116 | |||
117 | for (i = 0; i < ETH_ALEN; i++) { | ||
118 | mac <<= 8; | ||
119 | mac |= addr[i]; | ||
120 | } | ||
121 | return mac; | ||
122 | } | ||
123 | |||
124 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
125 | { | ||
126 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
127 | struct mlx4_en_dev *mdev = priv->mdev; | ||
128 | struct sockaddr *saddr = addr; | ||
129 | |||
130 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
131 | return -EADDRNOTAVAIL; | ||
132 | |||
133 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
134 | priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); | ||
135 | queue_work(mdev->workqueue, &priv->mac_task); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
140 | { | ||
141 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
142 | mac_task); | ||
143 | struct mlx4_en_dev *mdev = priv->mdev; | ||
144 | int err = 0; | ||
145 | |||
146 | mutex_lock(&mdev->state_lock); | ||
147 | if (priv->port_up) { | ||
148 | /* Remove old MAC and insert the new one */ | ||
149 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
150 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
151 | priv->mac, &priv->mac_index); | ||
152 | if (err) | ||
153 | mlx4_err(mdev, "Failed changing HW MAC address\n"); | ||
154 | } else | ||
155 | mlx4_dbg(HW, priv, "Port is down, exiting...\n"); | ||
156 | |||
157 | mutex_unlock(&mdev->state_lock); | ||
158 | } | ||
159 | |||
160 | static void mlx4_en_clear_list(struct net_device *dev) | ||
161 | { | ||
162 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
163 | struct dev_mc_list *plist = priv->mc_list; | ||
164 | struct dev_mc_list *next; | ||
165 | |||
166 | while (plist) { | ||
167 | next = plist->next; | ||
168 | kfree(plist); | ||
169 | plist = next; | ||
170 | } | ||
171 | priv->mc_list = NULL; | ||
172 | } | ||
173 | |||
174 | static void mlx4_en_cache_mclist(struct net_device *dev) | ||
175 | { | ||
176 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
177 | struct mlx4_en_dev *mdev = priv->mdev; | ||
178 | struct dev_mc_list *mclist; | ||
179 | struct dev_mc_list *tmp; | ||
180 | struct dev_mc_list *plist = NULL; | ||
181 | |||
182 | for (mclist = dev->mc_list; mclist; mclist = mclist->next) { | ||
183 | tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); | ||
184 | if (!tmp) { | ||
185 | mlx4_err(mdev, "failed to allocate multicast list\n"); | ||
186 | mlx4_en_clear_list(dev); | ||
187 | return; | ||
188 | } | ||
189 | memcpy(tmp, mclist, sizeof(struct dev_mc_list)); | ||
190 | tmp->next = NULL; | ||
191 | if (plist) | ||
192 | plist->next = tmp; | ||
193 | else | ||
194 | priv->mc_list = tmp; | ||
195 | plist = tmp; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | |||
200 | static void mlx4_en_set_multicast(struct net_device *dev) | ||
201 | { | ||
202 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
203 | |||
204 | if (!priv->port_up) | ||
205 | return; | ||
206 | |||
207 | queue_work(priv->mdev->workqueue, &priv->mcast_task); | ||
208 | } | ||
209 | |||
/* Worker for mlx4_en_set_multicast(): bring the port RX filtering state
 * (promiscuous flag, multicast filter, VLAN filter) in line with the
 * netdev flags and multicast list.  All HW programming happens under
 * mdev->state_lock and is skipped if the device or port went down. */
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	u64 mcast_addr = 0;
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		mlx4_dbg(HW, priv, "Card is not up, ignoring "
				   "multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		mlx4_dbg(HW, priv, "Port is down, ignoring "
				   "multicast change.\n");
		goto out;
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
					  priv->port);
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode (qpn_calc flag = 1) */
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 1);
			if (err)
				mlx4_err(mdev, "Failed enabling "
					 "promiscous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				mlx4_err(mdev, "Failed disabling "
					 "multicast filter\n");

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
			if (err)
				mlx4_err(mdev, "Failed disabling "
					 "VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
				  priv->port);
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode (qpn_calc flag = 0) */
		err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
					     priv->base_qpn, 0);
		if (err)
			mlx4_err(mdev, "Failed disabling promiscous mode\n");

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
		if (err)
			mlx4_err(mdev, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			mlx4_err(mdev, "Failed disabling multicast filter\n");
	} else {
		/* Disable the filter while it is being rebuilt below */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			mlx4_err(mdev, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			mlx4_err(mdev, "Failed enabling multicast filter\n");

		mlx4_en_clear_list(dev);
	}
out:
	mutex_unlock(&mdev->state_lock);
}
324 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: process RX completions with the CQ lock held and IRQs
 * saved, after waiting out any in-flight NAPI poll on the same CQ.
 * NOTE(review): the third argument to mlx4_en_process_rx_cq() is 0 here
 * - presumably "no budget limit"; confirm against its definition. */
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		/* Make sure no NAPI poll is running on this CQ before we
		 * drain it ourselves */
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
342 | |||
343 | static void mlx4_en_tx_timeout(struct net_device *dev) | ||
344 | { | ||
345 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
346 | struct mlx4_en_dev *mdev = priv->mdev; | ||
347 | |||
348 | if (netif_msg_timer(priv)) | ||
349 | mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port); | ||
350 | |||
351 | if (netif_carrier_ok(dev)) { | ||
352 | priv->port_stats.tx_timeout++; | ||
353 | mlx4_dbg(DRV, priv, "Scheduling watchdog\n"); | ||
354 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
355 | } | ||
356 | } | ||
357 | |||
358 | |||
359 | static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev) | ||
360 | { | ||
361 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
362 | |||
363 | spin_lock_bh(&priv->stats_lock); | ||
364 | memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats)); | ||
365 | spin_unlock_bh(&priv->stats_lock); | ||
366 | |||
367 | return &priv->ret_stats; | ||
368 | } | ||
369 | |||
370 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | ||
371 | { | ||
372 | struct mlx4_en_dev *mdev = priv->mdev; | ||
373 | struct mlx4_en_cq *cq; | ||
374 | int i; | ||
375 | |||
376 | /* If we haven't received a specific coalescing setting | ||
377 | * (module param), we set the moderation paramters as follows: | ||
378 | * - moder_cnt is set to the number of mtu sized packets to | ||
379 | * satisfy our coelsing target. | ||
380 | * - moder_time is set to a fixed value. | ||
381 | */ | ||
382 | priv->rx_frames = (mdev->profile.rx_moder_cnt == | ||
383 | MLX4_EN_AUTO_CONF) ? | ||
384 | MLX4_EN_RX_COAL_TARGET / | ||
385 | priv->dev->mtu + 1 : | ||
386 | mdev->profile.rx_moder_cnt; | ||
387 | priv->rx_usecs = (mdev->profile.rx_moder_time == | ||
388 | MLX4_EN_AUTO_CONF) ? | ||
389 | MLX4_EN_RX_COAL_TIME : | ||
390 | mdev->profile.rx_moder_time; | ||
391 | mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - " | ||
392 | "rx_frames:%d rx_usecs:%d\n", | ||
393 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | ||
394 | |||
395 | /* Setup cq moderation params */ | ||
396 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
397 | cq = &priv->rx_cq[i]; | ||
398 | cq->moder_cnt = priv->rx_frames; | ||
399 | cq->moder_time = priv->rx_usecs; | ||
400 | } | ||
401 | |||
402 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
403 | cq = &priv->tx_cq[i]; | ||
404 | cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; | ||
405 | cq->moder_time = MLX4_EN_TX_COAL_TIME; | ||
406 | } | ||
407 | |||
408 | /* Reset auto-moderation params */ | ||
409 | priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; | ||
410 | priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; | ||
411 | priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; | ||
412 | priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; | ||
413 | priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; | ||
414 | priv->adaptive_rx_coal = mdev->profile.auto_moder; | ||
415 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
416 | priv->last_moder_jiffies = 0; | ||
417 | priv->last_moder_packets = 0; | ||
418 | priv->last_moder_tx_packets = 0; | ||
419 | priv->last_moder_bytes = 0; | ||
420 | } | ||
421 | |||
/* Adapt RX interrupt moderation to the observed traffic pattern.
 * Called from the periodic stats worker; compares packet/byte counters
 * against the values recorded on the previous run and reprograms the
 * RX CQ moderation time when the derived value changes. */
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long tx_packets;
	unsigned long tx_pkt_diff;
	unsigned long rx_pkt_diff;
	int moder_time;
	int i, err;

	/* Nothing to do if disabled or the sample window hasn't elapsed */
	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	/* Snapshot counters under stats_lock so the deltas are coherent */
	spin_lock_bh(&priv->stats_lock);
	rx_packets = priv->stats.rx_packets;
	rx_bytes = priv->stats.rx_bytes;
	tx_packets = priv->stats.tx_packets;
	spin_unlock_bh(&priv->stats_lock);

	/* First invocation (or zero-length period): only record a baseline */
	if (!priv->last_moder_jiffies || !period)
		goto out;

	tx_pkt_diff = ((unsigned long) (tx_packets -
					priv->last_moder_tx_packets));
	rx_pkt_diff = ((unsigned long) (rx_packets -
					priv->last_moder_packets));
	packets = max(tx_pkt_diff, rx_pkt_diff);
	rate = packets * HZ / period;
	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				  priv->last_moder_bytes)) / packets : 0;

	/* Apply auto-moderation only when packet rate exceeds a rate that
	 * it matters */
	if (rate > MLX4_EN_RX_RATE_THRESH) {
		/* If tx and rx packet rates are not balanced, assume that
		 * traffic is mainly BW bound and apply maximum moderation.
		 * Otherwise, moderate according to packet rate */
		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
			moder_time = priv->rx_usecs_high;
		} else {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				/* Linear interpolation between the low and
				 * high rate/usec endpoints */
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		}
	} else {
		/* When packet rate is low, use default moderation rather than
		 * 0 to prevent interrupt storms if traffic suddenly increases */
		moder_time = priv->rx_usecs;
	}

	mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
		 tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

	mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
		 "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
		 priv->last_moder_time, moder_time, period, packets,
		 avg_pkt_size, rate);

	/* Push the new moderation time to every RX CQ if it changed */
	if (moder_time != priv->last_moder_time) {
		priv->last_moder_time = moder_time;
		for (i = 0; i < priv->rx_ring_num; i++) {
			cq = &priv->rx_cq[i];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				mlx4_err(mdev, "Failed modifying moderation for cq:%d "
					 "on port:%d\n", i, priv->port);
				break;
			}
		}
	}

out:
	/* Record this sample as the baseline for the next run */
	priv->last_moder_packets = rx_packets;
	priv->last_moder_tx_packets = tx_packets;
	priv->last_moder_bytes = rx_bytes;
	priv->last_moder_jiffies = jiffies;
}
513 | |||
514 | static void mlx4_en_do_get_stats(struct work_struct *work) | ||
515 | { | ||
516 | struct delayed_work *delay = container_of(work, struct delayed_work, work); | ||
517 | struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, | ||
518 | stats_task); | ||
519 | struct mlx4_en_dev *mdev = priv->mdev; | ||
520 | int err; | ||
521 | |||
522 | err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); | ||
523 | if (err) | ||
524 | mlx4_dbg(HW, priv, "Could not update stats for " | ||
525 | "port:%d\n", priv->port); | ||
526 | |||
527 | mutex_lock(&mdev->state_lock); | ||
528 | if (mdev->device_up) { | ||
529 | if (priv->port_up) | ||
530 | mlx4_en_auto_moderation(priv); | ||
531 | |||
532 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
533 | } | ||
534 | mutex_unlock(&mdev->state_lock); | ||
535 | } | ||
536 | |||
537 | static void mlx4_en_linkstate(struct work_struct *work) | ||
538 | { | ||
539 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
540 | linkstate_task); | ||
541 | struct mlx4_en_dev *mdev = priv->mdev; | ||
542 | int linkstate = priv->link_state; | ||
543 | |||
544 | mutex_lock(&mdev->state_lock); | ||
545 | /* If observable port state changed set carrier state and | ||
546 | * report to system log */ | ||
547 | if (priv->last_link_state != linkstate) { | ||
548 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | ||
549 | if (netif_msg_link(priv)) | ||
550 | mlx4_info(mdev, "Port %d - link down\n", priv->port); | ||
551 | netif_carrier_off(priv->dev); | ||
552 | } else { | ||
553 | if (netif_msg_link(priv)) | ||
554 | mlx4_info(mdev, "Port %d - link up\n", priv->port); | ||
555 | netif_carrier_on(priv->dev); | ||
556 | } | ||
557 | } | ||
558 | priv->last_link_state = linkstate; | ||
559 | mutex_unlock(&mdev->state_lock); | ||
560 | } | ||
561 | |||
562 | |||
563 | static int mlx4_en_start_port(struct net_device *dev) | ||
564 | { | ||
565 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
566 | struct mlx4_en_dev *mdev = priv->mdev; | ||
567 | struct mlx4_en_cq *cq; | ||
568 | struct mlx4_en_tx_ring *tx_ring; | ||
569 | struct mlx4_en_rx_ring *rx_ring; | ||
570 | int rx_index = 0; | ||
571 | int tx_index = 0; | ||
572 | u16 stride; | ||
573 | int err = 0; | ||
574 | int i; | ||
575 | int j; | ||
576 | |||
577 | if (priv->port_up) { | ||
578 | mlx4_dbg(DRV, priv, "start port called while port already up\n"); | ||
579 | return 0; | ||
580 | } | ||
581 | |||
582 | /* Calculate Rx buf size */ | ||
583 | dev->mtu = min(dev->mtu, priv->max_mtu); | ||
584 | mlx4_en_calc_rx_buf(dev); | ||
585 | mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); | ||
586 | stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
587 | DS_SIZE * priv->num_frags); | ||
588 | /* Configure rx cq's and rings */ | ||
589 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
590 | cq = &priv->rx_cq[i]; | ||
591 | rx_ring = &priv->rx_ring[i]; | ||
592 | |||
593 | err = mlx4_en_activate_cq(priv, cq); | ||
594 | if (err) { | ||
595 | mlx4_err(mdev, "Failed activating Rx CQ\n"); | ||
596 | goto rx_err; | ||
597 | } | ||
598 | for (j = 0; j < cq->size; j++) | ||
599 | cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; | ||
600 | err = mlx4_en_set_cq_moder(priv, cq); | ||
601 | if (err) { | ||
602 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | ||
603 | mlx4_en_deactivate_cq(priv, cq); | ||
604 | goto cq_err; | ||
605 | } | ||
606 | mlx4_en_arm_cq(priv, cq); | ||
607 | |||
608 | ++rx_index; | ||
609 | } | ||
610 | |||
611 | err = mlx4_en_activate_rx_rings(priv); | ||
612 | if (err) { | ||
613 | mlx4_err(mdev, "Failed to activate RX rings\n"); | ||
614 | goto cq_err; | ||
615 | } | ||
616 | |||
617 | err = mlx4_en_config_rss_steer(priv); | ||
618 | if (err) { | ||
619 | mlx4_err(mdev, "Failed configuring rss steering\n"); | ||
620 | goto rx_err; | ||
621 | } | ||
622 | |||
623 | /* Configure tx cq's and rings */ | ||
624 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
625 | /* Configure cq */ | ||
626 | cq = &priv->tx_cq[i]; | ||
627 | err = mlx4_en_activate_cq(priv, cq); | ||
628 | if (err) { | ||
629 | mlx4_err(mdev, "Failed allocating Tx CQ\n"); | ||
630 | goto tx_err; | ||
631 | } | ||
632 | err = mlx4_en_set_cq_moder(priv, cq); | ||
633 | if (err) { | ||
634 | mlx4_err(mdev, "Failed setting cq moderation parameters"); | ||
635 | mlx4_en_deactivate_cq(priv, cq); | ||
636 | goto tx_err; | ||
637 | } | ||
638 | mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); | ||
639 | cq->buf->wqe_index = cpu_to_be16(0xffff); | ||
640 | |||
641 | /* Configure ring */ | ||
642 | tx_ring = &priv->tx_ring[i]; | ||
643 | err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, | ||
644 | priv->rx_ring[0].srq.srqn); | ||
645 | if (err) { | ||
646 | mlx4_err(mdev, "Failed allocating Tx ring\n"); | ||
647 | mlx4_en_deactivate_cq(priv, cq); | ||
648 | goto tx_err; | ||
649 | } | ||
650 | /* Set initial ownership of all Tx TXBBs to SW (1) */ | ||
651 | for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) | ||
652 | *((u32 *) (tx_ring->buf + j)) = 0xffffffff; | ||
653 | ++tx_index; | ||
654 | } | ||
655 | |||
656 | /* Configure port */ | ||
657 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
658 | priv->rx_skb_size + ETH_FCS_LEN, | ||
659 | mdev->profile.tx_pause, | ||
660 | mdev->profile.tx_ppp, | ||
661 | mdev->profile.rx_pause, | ||
662 | mdev->profile.rx_ppp); | ||
663 | if (err) { | ||
664 | mlx4_err(mdev, "Failed setting port general configurations" | ||
665 | " for port %d, with error %d\n", priv->port, err); | ||
666 | goto tx_err; | ||
667 | } | ||
668 | /* Set default qp number */ | ||
669 | err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); | ||
670 | if (err) { | ||
671 | mlx4_err(mdev, "Failed setting default qp numbers\n"); | ||
672 | goto tx_err; | ||
673 | } | ||
674 | /* Set port mac number */ | ||
675 | mlx4_dbg(DRV, priv, "Setting mac for port %d\n", priv->port); | ||
676 | err = mlx4_register_mac(mdev->dev, priv->port, | ||
677 | priv->mac, &priv->mac_index); | ||
678 | if (err) { | ||
679 | mlx4_err(mdev, "Failed setting port mac\n"); | ||
680 | goto tx_err; | ||
681 | } | ||
682 | |||
683 | /* Init port */ | ||
684 | mlx4_dbg(HW, priv, "Initializing port\n"); | ||
685 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
686 | if (err) { | ||
687 | mlx4_err(mdev, "Failed Initializing port\n"); | ||
688 | goto mac_err; | ||
689 | } | ||
690 | |||
691 | /* Schedule multicast task to populate multicast list */ | ||
692 | queue_work(mdev->workqueue, &priv->mcast_task); | ||
693 | |||
694 | priv->port_up = true; | ||
695 | netif_start_queue(dev); | ||
696 | return 0; | ||
697 | |||
698 | mac_err: | ||
699 | mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index); | ||
700 | tx_err: | ||
701 | while (tx_index--) { | ||
702 | mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); | ||
703 | mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); | ||
704 | } | ||
705 | |||
706 | mlx4_en_release_rss_steer(priv); | ||
707 | rx_err: | ||
708 | for (i = 0; i < priv->rx_ring_num; i++) | ||
709 | mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[rx_index]); | ||
710 | cq_err: | ||
711 | while (rx_index--) | ||
712 | mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]); | ||
713 | |||
714 | return err; /* need to close devices */ | ||
715 | } | ||
716 | |||
717 | |||
/* Tear down a running port: stop the TX queue, mark the port down in
 * sync with the TX path, close the HW port, release the MAC, then
 * drain and deactivate all TX/RX resources in that order.
 * Counterpart of mlx4_en_start_port(). */
static void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!priv->port_up) {
		mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
			 priv->port);
		return;
	}
	netif_stop_queue(dev);

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	priv->port_up = false;
	netif_tx_unlock_bh(dev);

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Unregister Mac address for the port */
	mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	/* NOTE(review): presumably this delay lets in-flight TX
	 * completions land before the buffers are freed below - confirm */
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		/* Wait for any scheduled NAPI poll on this CQ to finish
		 * before deactivating it */
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}
}
763 | |||
/* Watchdog worker, scheduled from mlx4_en_tx_timeout(): bounce the
 * port (stop + start) to recover from a TX stall.
 * NOTE(review): unlike the other port-state paths in this file this
 * runs without taking mdev->state_lock - confirm the single workqueue
 * serializes it against open/close. */
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
	mlx4_en_stop_port(dev);
	if (mlx4_en_start_port(dev))
		mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
}
776 | |||
777 | |||
778 | static int mlx4_en_open(struct net_device *dev) | ||
779 | { | ||
780 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
781 | struct mlx4_en_dev *mdev = priv->mdev; | ||
782 | int i; | ||
783 | int err = 0; | ||
784 | |||
785 | mutex_lock(&mdev->state_lock); | ||
786 | |||
787 | if (!mdev->device_up) { | ||
788 | mlx4_err(mdev, "Cannot open - device down/disabled\n"); | ||
789 | err = -EBUSY; | ||
790 | goto out; | ||
791 | } | ||
792 | |||
793 | /* Reset HW statistics and performance counters */ | ||
794 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | ||
795 | mlx4_dbg(HW, priv, "Failed dumping statistics\n"); | ||
796 | |||
797 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
798 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | ||
799 | |||
800 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
801 | priv->tx_ring[i].bytes = 0; | ||
802 | priv->tx_ring[i].packets = 0; | ||
803 | } | ||
804 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
805 | priv->rx_ring[i].bytes = 0; | ||
806 | priv->rx_ring[i].packets = 0; | ||
807 | } | ||
808 | |||
809 | mlx4_en_set_default_moderation(priv); | ||
810 | err = mlx4_en_start_port(dev); | ||
811 | if (err) | ||
812 | mlx4_err(mdev, "Failed starting port:%d\n", priv->port); | ||
813 | |||
814 | out: | ||
815 | mutex_unlock(&mdev->state_lock); | ||
816 | return err; | ||
817 | } | ||
818 | |||
819 | |||
820 | static int mlx4_en_close(struct net_device *dev) | ||
821 | { | ||
822 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
823 | struct mlx4_en_dev *mdev = priv->mdev; | ||
824 | |||
825 | if (netif_msg_ifdown(priv)) | ||
826 | mlx4_info(mdev, "Close called for port:%d\n", priv->port); | ||
827 | |||
828 | mutex_lock(&mdev->state_lock); | ||
829 | |||
830 | mlx4_en_stop_port(dev); | ||
831 | netif_carrier_off(dev); | ||
832 | |||
833 | mutex_unlock(&mdev->state_lock); | ||
834 | return 0; | ||
835 | } | ||
836 | |||
837 | static void mlx4_en_free_resources(struct mlx4_en_priv *priv) | ||
838 | { | ||
839 | int i; | ||
840 | |||
841 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
842 | if (priv->tx_ring[i].tx_info) | ||
843 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | ||
844 | if (priv->tx_cq[i].buf) | ||
845 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); | ||
846 | } | ||
847 | |||
848 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
849 | if (priv->rx_ring[i].rx_info) | ||
850 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | ||
851 | if (priv->rx_cq[i].buf) | ||
852 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); | ||
853 | } | ||
854 | } | ||
855 | |||
856 | static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | ||
857 | { | ||
858 | struct mlx4_en_dev *mdev = priv->mdev; | ||
859 | struct mlx4_en_port_profile *prof = priv->prof; | ||
860 | int i; | ||
861 | |||
862 | /* Create tx Rings */ | ||
863 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
864 | if (mlx4_en_create_cq(priv, &priv->tx_cq[i], | ||
865 | prof->tx_ring_size, i, TX)) | ||
866 | goto err; | ||
867 | |||
868 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], | ||
869 | prof->tx_ring_size, TXBB_SIZE)) | ||
870 | goto err; | ||
871 | } | ||
872 | |||
873 | /* Create rx Rings */ | ||
874 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
875 | if (mlx4_en_create_cq(priv, &priv->rx_cq[i], | ||
876 | prof->rx_ring_size, i, RX)) | ||
877 | goto err; | ||
878 | |||
879 | if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], | ||
880 | prof->rx_ring_size, priv->stride)) | ||
881 | goto err; | ||
882 | } | ||
883 | |||
884 | return 0; | ||
885 | |||
886 | err: | ||
887 | mlx4_err(mdev, "Failed to allocate NIC resources\n"); | ||
888 | return -ENOMEM; | ||
889 | } | ||
890 | |||
891 | |||
/* Tear down a netdev created by mlx4_en_init_netdev().
 *
 * Also used as the error path of mlx4_en_init_netdev(), so it must be
 * safe on a partially initialized device: the 'registered' and
 * 'allocated' flags gate the stages that may not have happened yet.
 */
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	/* Stop the periodic tasks, then drain anything still queued. */
	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->refill_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	/* Only now is it safe to free rings/CQs and the netdev itself. */
	mlx4_en_free_resources(priv);
	free_netdev(dev);
}
919 | |||
920 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | ||
921 | { | ||
922 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
923 | struct mlx4_en_dev *mdev = priv->mdev; | ||
924 | int err = 0; | ||
925 | |||
926 | mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n", | ||
927 | dev->mtu, new_mtu); | ||
928 | |||
929 | if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { | ||
930 | mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu); | ||
931 | return -EPERM; | ||
932 | } | ||
933 | dev->mtu = new_mtu; | ||
934 | |||
935 | if (netif_running(dev)) { | ||
936 | mutex_lock(&mdev->state_lock); | ||
937 | if (!mdev->device_up) { | ||
938 | /* NIC is probably restarting - let watchdog task reset | ||
939 | * the port */ | ||
940 | mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n"); | ||
941 | } else { | ||
942 | mlx4_en_stop_port(dev); | ||
943 | mlx4_en_set_default_moderation(priv); | ||
944 | err = mlx4_en_start_port(dev); | ||
945 | if (err) { | ||
946 | mlx4_err(mdev, "Failed restarting port:%d\n", | ||
947 | priv->port); | ||
948 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
949 | } | ||
950 | } | ||
951 | mutex_unlock(&mdev->state_lock); | ||
952 | } | ||
953 | return 0; | ||
954 | } | ||
955 | |||
/* Allocate, initialize and register an Ethernet netdev for one port.
 *
 * Sets up the private state, worker tasks, HW resources (CQs, rings,
 * a page for the RX QPs), netdev entry points and feature flags, then
 * registers the device and kicks off periodic statistics collection.
 *
 * Returns 0 on success or a negative errno; on failure the partially
 * constructed netdev is torn down via mlx4_en_destroy_netdev().
 */
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
	if (dev == NULL) {
		mlx4_err(mdev, "Net device allocation failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->rx_csum = 1;
	priv->flags = prof->flags;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->rx_ring_num = prof->rx_ring_num;
	priv->mc_list = NULL;
	/* -1 marks "no MAC index registered yet" */
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
		mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
			 priv->port, priv->mac);
		err = -EINVAL;
		goto out;
	}

	/* RX descriptor stride: room for the descriptor header plus one
	 * data segment per possible fragment, rounded to a power of two. */
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Populate Rx default RSS mappings */
	mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
				    RSS_FACTOR, priv->rx_ring_num);
	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		mlx4_err(mdev, "Failed to allocate page for rx qps\n");
		goto out;
	}
	/* Remember the hwq page exists so destroy can free it. */
	priv->allocated = 1;

	/* Populate Tx priority mappings */
	mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);

	/*
	 * Initialize netdev entry points
	 */

	dev->open = &mlx4_en_open;
	dev->stop = &mlx4_en_close;
	dev->hard_start_xmit = &mlx4_en_xmit;
	dev->get_stats = &mlx4_en_get_stats;
	dev->set_multicast_list = &mlx4_en_set_multicast;
	dev->set_mac_address = &mlx4_en_set_mac;
	dev->change_mtu = &mlx4_en_change_mtu;
	dev->tx_timeout = &mlx4_en_tx_timeout;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	dev->vlan_rx_register = mlx4_en_vlan_rx_register;
	dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = mlx4_en_netpoll;
#endif
	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/* Set default MAC: priv->mac holds it as a 64-bit integer with the
	 * last address byte in the least significant position. */
	dev->addr_len = ETH_ALEN;
	for (i = 0; i < ETH_ALEN; i++)
		dev->dev_addr[ETH_ALEN - 1 - i] =
		(u8) (priv->mac >> (8 * i));

	/*
	 * Set driver features
	 */
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_HW_VLAN_TX |
			 NETIF_F_HW_VLAN_RX |
			 NETIF_F_HW_VLAN_FILTER;
	if (mdev->profile.num_lro)
		dev->features |= NETIF_F_LRO;
	if (mdev->LSO_support) {
		dev->features |= NETIF_F_TSO;
		dev->features |= NETIF_F_TSO6;
	}

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		mlx4_err(mdev, "Netdev registration failed\n");
		goto out;
	}
	priv->registered = 1;
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
1088 | |||