diff options
Diffstat (limited to 'drivers/net/mlx4/en_netdev.c')
-rw-r--r-- | drivers/net/mlx4/en_netdev.c | 1166 |
1 file changed, 1166 insertions, 0 deletions
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c new file mode 100644 index 00000000000..4b0f32e568f --- /dev/null +++ b/drivers/net/mlx4/en_netdev.c | |||
@@ -0,0 +1,1166 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2007 Mellanox Technologies. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | * | ||
32 | */ | ||
33 | |||
34 | #include <linux/etherdevice.h> | ||
35 | #include <linux/tcp.h> | ||
36 | #include <linux/if_vlan.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/slab.h> | ||
39 | |||
40 | #include <linux/mlx4/driver.h> | ||
41 | #include <linux/mlx4/device.h> | ||
42 | #include <linux/mlx4/cmd.h> | ||
43 | #include <linux/mlx4/cq.h> | ||
44 | |||
45 | #include "mlx4_en.h" | ||
46 | #include "en_port.h" | ||
47 | |||
48 | static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | ||
49 | { | ||
50 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
51 | struct mlx4_en_dev *mdev = priv->mdev; | ||
52 | int err; | ||
53 | int idx; | ||
54 | |||
55 | en_dbg(HW, priv, "adding VLAN:%d\n", vid); | ||
56 | |||
57 | set_bit(vid, priv->active_vlans); | ||
58 | |||
59 | /* Add VID to port VLAN filter */ | ||
60 | mutex_lock(&mdev->state_lock); | ||
61 | if (mdev->device_up && priv->port_up) { | ||
62 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
63 | if (err) | ||
64 | en_err(priv, "Failed configuring VLAN filter\n"); | ||
65 | } | ||
66 | if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) | ||
67 | en_err(priv, "failed adding vlan %d\n", vid); | ||
68 | mutex_unlock(&mdev->state_lock); | ||
69 | |||
70 | } | ||
71 | |||
72 | static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) | ||
73 | { | ||
74 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
75 | struct mlx4_en_dev *mdev = priv->mdev; | ||
76 | int err; | ||
77 | int idx; | ||
78 | |||
79 | en_dbg(HW, priv, "Killing VID:%d\n", vid); | ||
80 | |||
81 | clear_bit(vid, priv->active_vlans); | ||
82 | |||
83 | /* Remove VID from port VLAN filter */ | ||
84 | mutex_lock(&mdev->state_lock); | ||
85 | if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx)) | ||
86 | mlx4_unregister_vlan(mdev->dev, priv->port, idx); | ||
87 | else | ||
88 | en_err(priv, "could not find vid %d in cache\n", vid); | ||
89 | |||
90 | if (mdev->device_up && priv->port_up) { | ||
91 | err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); | ||
92 | if (err) | ||
93 | en_err(priv, "Failed configuring VLAN filter\n"); | ||
94 | } | ||
95 | mutex_unlock(&mdev->state_lock); | ||
96 | } | ||
97 | |||
98 | u64 mlx4_en_mac_to_u64(u8 *addr) | ||
99 | { | ||
100 | u64 mac = 0; | ||
101 | int i; | ||
102 | |||
103 | for (i = 0; i < ETH_ALEN; i++) { | ||
104 | mac <<= 8; | ||
105 | mac |= addr[i]; | ||
106 | } | ||
107 | return mac; | ||
108 | } | ||
109 | |||
110 | static int mlx4_en_set_mac(struct net_device *dev, void *addr) | ||
111 | { | ||
112 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
113 | struct mlx4_en_dev *mdev = priv->mdev; | ||
114 | struct sockaddr *saddr = addr; | ||
115 | |||
116 | if (!is_valid_ether_addr(saddr->sa_data)) | ||
117 | return -EADDRNOTAVAIL; | ||
118 | |||
119 | memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN); | ||
120 | priv->mac = mlx4_en_mac_to_u64(dev->dev_addr); | ||
121 | queue_work(mdev->workqueue, &priv->mac_task); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | static void mlx4_en_do_set_mac(struct work_struct *work) | ||
126 | { | ||
127 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
128 | mac_task); | ||
129 | struct mlx4_en_dev *mdev = priv->mdev; | ||
130 | int err = 0; | ||
131 | |||
132 | mutex_lock(&mdev->state_lock); | ||
133 | if (priv->port_up) { | ||
134 | /* Remove old MAC and insert the new one */ | ||
135 | err = mlx4_replace_mac(mdev->dev, priv->port, | ||
136 | priv->base_qpn, priv->mac, 0); | ||
137 | if (err) | ||
138 | en_err(priv, "Failed changing HW MAC address\n"); | ||
139 | } else | ||
140 | en_dbg(HW, priv, "Port is down while " | ||
141 | "registering mac, exiting...\n"); | ||
142 | |||
143 | mutex_unlock(&mdev->state_lock); | ||
144 | } | ||
145 | |||
146 | static void mlx4_en_clear_list(struct net_device *dev) | ||
147 | { | ||
148 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
149 | |||
150 | kfree(priv->mc_addrs); | ||
151 | priv->mc_addrs_cnt = 0; | ||
152 | } | ||
153 | |||
154 | static void mlx4_en_cache_mclist(struct net_device *dev) | ||
155 | { | ||
156 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
157 | struct netdev_hw_addr *ha; | ||
158 | char *mc_addrs; | ||
159 | int mc_addrs_cnt = netdev_mc_count(dev); | ||
160 | int i; | ||
161 | |||
162 | mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC); | ||
163 | if (!mc_addrs) { | ||
164 | en_err(priv, "failed to allocate multicast list\n"); | ||
165 | return; | ||
166 | } | ||
167 | i = 0; | ||
168 | netdev_for_each_mc_addr(ha, dev) | ||
169 | memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); | ||
170 | priv->mc_addrs = mc_addrs; | ||
171 | priv->mc_addrs_cnt = mc_addrs_cnt; | ||
172 | } | ||
173 | |||
174 | |||
175 | static void mlx4_en_set_multicast(struct net_device *dev) | ||
176 | { | ||
177 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
178 | |||
179 | if (!priv->port_up) | ||
180 | return; | ||
181 | |||
182 | queue_work(priv->mdev->workqueue, &priv->mcast_task); | ||
183 | } | ||
184 | |||
/*
 * Work handler (mcast_task): reprogram the port RX filters -
 * promiscuous state, multicast filter and VLAN filter - to match the
 * current netdev flags and multicast list.  All HW updates are done
 * under mdev->state_lock and skipped when the device or port is down.
 */
static void mlx4_en_do_set_multicast(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mcast_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	u64 mcast_addr = 0;
	/* Steering address buffer: byte 5 carries the port number,
	 * bytes 10..15 the Ethernet MAC (see the memcpy's below) */
	u8 mc_list[16] = {0};
	int err;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, "
				 "ignoring multicast change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, "
				 "ignoring multicast change.\n");
		goto out;
	}

	/*
	 * Promiscuous mode: disable all filters
	 */

	if (dev->flags & IFF_PROMISC) {
		if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
			if (netif_msg_rx_status(priv))
				en_warn(priv, "Entering promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_PROMISC;

			/* Enable promiscuous mode; the mechanism differs
			 * depending on unicast-steering support */
			if (!(mdev->dev->caps.flags &
						MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
				err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
							     priv->base_qpn, 1);
			else
				err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
							       priv->port);
			if (err)
				en_err(priv, "Failed enabling "
					     "promiscuous mode\n");

			/* Disable port multicast filter (unconditionally) */
			err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
						  0, MLX4_MCAST_DISABLE);
			if (err)
				en_err(priv, "Failed disabling "
					     "multicast filter\n");

			/* Add the default qp number as multicast promisc */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed entering multicast promisc mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}

			/* Disable port VLAN filter */
			err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
			if (err)
				en_err(priv, "Failed disabling VLAN filter\n");
		}
		goto out;
	}

	/*
	 * Not in promiscuous mode: undo promiscuous setup if it was
	 * active, then program the multicast filter.
	 */

	if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Leaving promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode (mirror of the enable path) */
		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
						     priv->base_qpn, 0);
		else
			err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
							  priv->port);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		/* Enable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed enabling VLAN filter\n");
	}

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
							 priv->port);
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		int i;
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Detach our qp from all the multicast addresses */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, MLX4_PROT_ETH);
		}
		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		netif_tx_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_tx_unlock_bh(dev);
		/* Re-attach the qp and re-populate the filter from the
		 * freshly cached list */
		for (i = 0; i < priv->mc_addrs_cnt; i++) {
			mcast_addr =
			      mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
			memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
			mc_list[5] = priv->port;
			mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
					      mc_list, 0, MLX4_PROT_ETH);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
}
352 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller: service every RX CQ directly with its NAPI
 * context quiesced, so netconsole & co. can receive without interrupts. */
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int ring;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		struct mlx4_en_cq *cq = &priv->rx_cq[ring];

		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif
370 | |||
371 | static void mlx4_en_tx_timeout(struct net_device *dev) | ||
372 | { | ||
373 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
374 | struct mlx4_en_dev *mdev = priv->mdev; | ||
375 | |||
376 | if (netif_msg_timer(priv)) | ||
377 | en_warn(priv, "Tx timeout called on port:%d\n", priv->port); | ||
378 | |||
379 | priv->port_stats.tx_timeout++; | ||
380 | en_dbg(DRV, priv, "Scheduling watchdog\n"); | ||
381 | queue_work(mdev->workqueue, &priv->watchdog_task); | ||
382 | } | ||
383 | |||
384 | |||
/* ndo_get_stats: return a stable snapshot of the SW statistics.
 * stats_lock guards against a concurrent update of priv->stats; the
 * copy lives in priv->ret_stats so the returned pointer stays valid. */
static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
395 | |||
396 | static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) | ||
397 | { | ||
398 | struct mlx4_en_cq *cq; | ||
399 | int i; | ||
400 | |||
401 | /* If we haven't received a specific coalescing setting | ||
402 | * (module param), we set the moderation parameters as follows: | ||
403 | * - moder_cnt is set to the number of mtu sized packets to | ||
404 | * satisfy our coelsing target. | ||
405 | * - moder_time is set to a fixed value. | ||
406 | */ | ||
407 | priv->rx_frames = MLX4_EN_RX_COAL_TARGET; | ||
408 | priv->rx_usecs = MLX4_EN_RX_COAL_TIME; | ||
409 | en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " | ||
410 | "rx_frames:%d rx_usecs:%d\n", | ||
411 | priv->dev->mtu, priv->rx_frames, priv->rx_usecs); | ||
412 | |||
413 | /* Setup cq moderation params */ | ||
414 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
415 | cq = &priv->rx_cq[i]; | ||
416 | cq->moder_cnt = priv->rx_frames; | ||
417 | cq->moder_time = priv->rx_usecs; | ||
418 | } | ||
419 | |||
420 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
421 | cq = &priv->tx_cq[i]; | ||
422 | cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; | ||
423 | cq->moder_time = MLX4_EN_TX_COAL_TIME; | ||
424 | } | ||
425 | |||
426 | /* Reset auto-moderation params */ | ||
427 | priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; | ||
428 | priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; | ||
429 | priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; | ||
430 | priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; | ||
431 | priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; | ||
432 | priv->adaptive_rx_coal = 1; | ||
433 | priv->last_moder_time = MLX4_EN_AUTO_CONF; | ||
434 | priv->last_moder_jiffies = 0; | ||
435 | priv->last_moder_packets = 0; | ||
436 | priv->last_moder_tx_packets = 0; | ||
437 | priv->last_moder_bytes = 0; | ||
438 | } | ||
439 | |||
/*
 * Adaptive RX interrupt moderation: once per sample interval, derive
 * the packet rate and average packet size from the delta of the SW
 * counters and pick a new RX CQ moderation time accordingly.
 * Called from the periodic stats work while the port is up.
 */
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long tx_packets;
	unsigned long tx_pkt_diff;
	unsigned long rx_pkt_diff;
	int moder_time;
	int i, err;

	/* Nothing to do if auto-moderation is off or the sample
	 * interval has not elapsed yet */
	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	/* Snapshot the SW counters under stats_lock */
	spin_lock_bh(&priv->stats_lock);
	rx_packets = priv->stats.rx_packets;
	rx_bytes = priv->stats.rx_bytes;
	tx_packets = priv->stats.tx_packets;
	spin_unlock_bh(&priv->stats_lock);

	/* First invocation (or zero period): just record the baseline */
	if (!priv->last_moder_jiffies || !period)
		goto out;

	tx_pkt_diff = ((unsigned long) (tx_packets -
					priv->last_moder_tx_packets));
	rx_pkt_diff = ((unsigned long) (rx_packets -
					priv->last_moder_packets));
	packets = max(tx_pkt_diff, rx_pkt_diff);
	rate = packets * HZ / period;
	avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				 priv->last_moder_bytes)) / packets : 0;

	/* Apply auto-moderation only when packet rate exceeds a rate that
	 * it matters */
	if (rate > MLX4_EN_RX_RATE_THRESH && avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
		/* If tx and rx packet rates are not balanced, assume that
		 * traffic is mainly BW bound and apply maximum moderation.
		 * Otherwise, moderate according to packet rate */
		if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
		    2 * rx_pkt_diff > 3 * tx_pkt_diff) {
			moder_time = priv->rx_usecs_high;
		} else {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				/* Linear interpolation between the low and
				 * high (rate, usec) operating points */
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		}
	} else {
		moder_time = priv->rx_usecs_low;
	}

	en_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
	       tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);

	en_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
	       "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
	       priv->last_moder_time, moder_time, period, packets,
	       avg_pkt_size, rate);

	/* Push the new moderation time to HW only when it changed;
	 * stop at the first CQ that fails to update */
	if (moder_time != priv->last_moder_time) {
		priv->last_moder_time = moder_time;
		for (i = 0; i < priv->rx_ring_num; i++) {
			cq = &priv->rx_cq[i];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err) {
				en_err(priv, "Failed modifying moderation for cq:%d\n", i);
				break;
			}
		}
	}

out:
	/* Record the baseline for the next sampling period */
	priv->last_moder_packets = rx_packets;
	priv->last_moder_tx_packets = tx_packets;
	priv->last_moder_bytes = rx_bytes;
	priv->last_moder_jiffies = jiffies;
}
527 | |||
/*
 * Periodic stats work (stats_task): query HW ethernet statistics, run
 * RX interrupt auto-moderation and re-arm itself while the device is
 * up.  Also re-triggers the MAC work if the MAC was flagged as removed.
 */
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	/* Query HW stats (last arg 0: no reset - compare the reset=1
	 * call in mlx4_en_open) */
	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
	if (err)
		en_dbg(HW, priv, "Could not update stats\n");

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		/* Re-arm the periodic work */
		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	/* NOTE(review): mac_removed is indexed here with
	 * MLX4_MAX_PORTS + 1 - priv->port (i.e. the mirrored port slot),
	 * while mlx4_en_start_port/stop_port set/clear it at priv->port.
	 * Confirm this asymmetry is intentional. */
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}
553 | |||
554 | static void mlx4_en_linkstate(struct work_struct *work) | ||
555 | { | ||
556 | struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, | ||
557 | linkstate_task); | ||
558 | struct mlx4_en_dev *mdev = priv->mdev; | ||
559 | int linkstate = priv->link_state; | ||
560 | |||
561 | mutex_lock(&mdev->state_lock); | ||
562 | /* If observable port state changed set carrier state and | ||
563 | * report to system log */ | ||
564 | if (priv->last_link_state != linkstate) { | ||
565 | if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { | ||
566 | en_info(priv, "Link Down\n"); | ||
567 | netif_carrier_off(priv->dev); | ||
568 | } else { | ||
569 | en_info(priv, "Link Up\n"); | ||
570 | netif_carrier_on(priv->dev); | ||
571 | } | ||
572 | } | ||
573 | priv->last_link_state = linkstate; | ||
574 | mutex_unlock(&mdev->state_lock); | ||
575 | } | ||
576 | |||
577 | |||
/*
 * Bring the port up: activate RX rings/CQs, register the port MAC,
 * configure RSS steering, activate TX CQs/rings, configure and init
 * the port, attach broadcast steering and kick the multicast task.
 * Returns 0 on success; on failure everything acquired so far is
 * unwound in reverse order via the goto labels.
 * Called with mdev->state_lock held (see mlx4_en_open/mlx4_en_restart).
 */
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;	/* number of RX CQs successfully activated */
	int tx_index = 0;	/* number of TX ring/CQ pairs activated */
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};
	char name[32];

	/* Idempotent: starting an already-started port is a no-op */
	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		/* Initialize the ownership bit of every CQE */
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set port mac number */
	en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
	err = mlx4_register_mac(mdev->dev, priv->port,
				priv->mac, &priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting port mac\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	/* Best effort: try to dedicate a completion EQ vector to TX;
	 * on failure TX falls back to the legacy EQs */
	if (mdev->dev->caps.comp_pool && !priv->tx_vector) {
		sprintf(name , "%s-tx", priv->dev->name);
		if (mlx4_assign_eq(mdev->dev , name, &priv->tx_vector)) {
			mlx4_warn(mdev, "Failed Assigning an EQ to "
					"%s_tx ,Falling back to legacy "
					"EQ's\n", priv->dev->name);
		}
	}
	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		cq->vector = priv->tx_vector;
		err = mlx4_en_activate_cq(priv, cq);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port: MTU (incl. FCS) and pause/priority parameters */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
			     "for port %d, with error %d\n", priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address (byte 5 = port,
	 * bytes 10..15 = ff:ff:ff:ff:ff:ff) */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  0, MLX4_PROT_ETH))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->mcast_task);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	return 0;

tx_err:
	/* Unwind the TX ring/CQ pairs activated so far */
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}

	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
cq_err:
	/* Unwind the RX CQs activated so far, then all RX rings */
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}
747 | |||
748 | |||
/*
 * Bring the port down: stop the TX queues, detach broadcast/multicast
 * steering, unregister the port MAC, then tear down TX rings, RSS QPs
 * and RX rings/CQs, and finally close the port.
 * Counterpart of mlx4_en_start_port(); called with mdev->state_lock
 * held (see mlx4_en_close/mlx4_en_restart).
 */
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;
	u8 mc_list[16] = {0};

	/* Idempotent: stopping an already-stopped port is a no-op */
	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Detach broadcast steering (byte 5 = port, bytes 10..15 = MAC),
	 * then every cached multicast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port;
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH);
	for (i = 0; i < priv->mc_addrs_cnt; i++) {
		memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH);
	}
	mlx4_en_clear_list(dev);
	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Unregister Mac address for the port */
	mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
	mdev->mac_removed[priv->port] = 1;

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	/* NOTE(review): purpose of this delay is undocumented -
	 * presumably gives in-flight TX work time to drain before the
	 * buffers are freed below; confirm */
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		/* Wait until no NAPI poll is scheduled on this CQ */
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port*/
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
812 | |||
/*
 * Watchdog work (watchdog_task), scheduled from the TX timeout
 * handler: recovers the device by stopping and restarting the port.
 */
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	/* Only bounce the port if it is currently up */
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
830 | |||
831 | |||
832 | static int mlx4_en_open(struct net_device *dev) | ||
833 | { | ||
834 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
835 | struct mlx4_en_dev *mdev = priv->mdev; | ||
836 | int i; | ||
837 | int err = 0; | ||
838 | |||
839 | mutex_lock(&mdev->state_lock); | ||
840 | |||
841 | if (!mdev->device_up) { | ||
842 | en_err(priv, "Cannot open - device down/disabled\n"); | ||
843 | err = -EBUSY; | ||
844 | goto out; | ||
845 | } | ||
846 | |||
847 | /* Reset HW statistics and performance counters */ | ||
848 | if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) | ||
849 | en_dbg(HW, priv, "Failed dumping statistics\n"); | ||
850 | |||
851 | memset(&priv->stats, 0, sizeof(priv->stats)); | ||
852 | memset(&priv->pstats, 0, sizeof(priv->pstats)); | ||
853 | |||
854 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
855 | priv->tx_ring[i].bytes = 0; | ||
856 | priv->tx_ring[i].packets = 0; | ||
857 | } | ||
858 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
859 | priv->rx_ring[i].bytes = 0; | ||
860 | priv->rx_ring[i].packets = 0; | ||
861 | } | ||
862 | |||
863 | err = mlx4_en_start_port(dev); | ||
864 | if (err) | ||
865 | en_err(priv, "Failed starting port:%d\n", priv->port); | ||
866 | |||
867 | out: | ||
868 | mutex_unlock(&mdev->state_lock); | ||
869 | return err; | ||
870 | } | ||
871 | |||
872 | |||
873 | static int mlx4_en_close(struct net_device *dev) | ||
874 | { | ||
875 | struct mlx4_en_priv *priv = netdev_priv(dev); | ||
876 | struct mlx4_en_dev *mdev = priv->mdev; | ||
877 | |||
878 | en_dbg(IFDOWN, priv, "Close port called\n"); | ||
879 | |||
880 | mutex_lock(&mdev->state_lock); | ||
881 | |||
882 | mlx4_en_stop_port(dev); | ||
883 | netif_carrier_off(dev); | ||
884 | |||
885 | mutex_unlock(&mdev->state_lock); | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | void mlx4_en_free_resources(struct mlx4_en_priv *priv, bool reserve_vectors) | ||
890 | { | ||
891 | int i; | ||
892 | |||
893 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
894 | if (priv->tx_ring[i].tx_info) | ||
895 | mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); | ||
896 | if (priv->tx_cq[i].buf) | ||
897 | mlx4_en_destroy_cq(priv, &priv->tx_cq[i], reserve_vectors); | ||
898 | } | ||
899 | |||
900 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
901 | if (priv->rx_ring[i].rx_info) | ||
902 | mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]); | ||
903 | if (priv->rx_cq[i].buf) | ||
904 | mlx4_en_destroy_cq(priv, &priv->rx_cq[i], reserve_vectors); | ||
905 | } | ||
906 | } | ||
907 | |||
908 | int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) | ||
909 | { | ||
910 | struct mlx4_en_port_profile *prof = priv->prof; | ||
911 | int i; | ||
912 | int base_tx_qpn, err; | ||
913 | |||
914 | err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256, &base_tx_qpn); | ||
915 | if (err) { | ||
916 | en_err(priv, "failed reserving range for TX rings\n"); | ||
917 | return err; | ||
918 | } | ||
919 | |||
920 | /* Create tx Rings */ | ||
921 | for (i = 0; i < priv->tx_ring_num; i++) { | ||
922 | if (mlx4_en_create_cq(priv, &priv->tx_cq[i], | ||
923 | prof->tx_ring_size, i, TX)) | ||
924 | goto err; | ||
925 | |||
926 | if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], base_tx_qpn + i, | ||
927 | prof->tx_ring_size, TXBB_SIZE)) | ||
928 | goto err; | ||
929 | } | ||
930 | |||
931 | /* Create rx Rings */ | ||
932 | for (i = 0; i < priv->rx_ring_num; i++) { | ||
933 | if (mlx4_en_create_cq(priv, &priv->rx_cq[i], | ||
934 | prof->rx_ring_size, i, RX)) | ||
935 | goto err; | ||
936 | |||
937 | if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], | ||
938 | prof->rx_ring_size, priv->stride)) | ||
939 | goto err; | ||
940 | } | ||
941 | |||
942 | return 0; | ||
943 | |||
944 | err: | ||
945 | en_err(priv, "Failed to allocate NIC resources\n"); | ||
946 | mlx4_qp_release_range(priv->mdev->dev, base_tx_qpn, priv->tx_ring_num); | ||
947 | return -ENOMEM; | ||
948 | } | ||
949 | |||
950 | |||
/* Tear down a port netdev created by mlx4_en_init_netdev().
 * Also serves as that function's error-unwind path, which is why each
 * step is guarded by the flag recording whether it was reached
 * (priv->registered, priv->allocated).  Statement order matters:
 * unregister first (closes the port if up), then stop async work,
 * then detach from mdev before freeing the resources. */
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	/* Stop the periodic stats task, then drain the workqueue so no
	 * mcast/mac/watchdog/linkstate work touches the dying netdev. */
	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	/* Rings/CQs, then the netdev itself (which embeds priv) */
	mlx4_en_free_resources(priv, false);
	free_netdev(dev);
}
977 | |||
/* ndo_change_mtu handler: validate @new_mtu against the port's HW
 * limits, store it, and if the interface is running restart the port
 * so the RX rings are rebuilt for the new frame size. */
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
		 dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
					 priv->port);
				/* Hand recovery to the watchdog task, which
				 * will stop/start the port again. */
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	/* NOTE(review): returns 0 even if the restart above failed -
	 * presumably intentional because the watchdog was queued to
	 * retry, leaving the local 'err' effectively write-only.
	 * Confirm before changing. */
	return 0;
}
1012 | |||
/* net_device_ops vector wired into each mlx4_en netdev in
 * mlx4_en_init_netdev().  Netpoll support is compiled in only when
 * CONFIG_NET_POLL_CONTROLLER is set. */
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open = mlx4_en_open,
	.ndo_stop = mlx4_en_close,
	.ndo_start_xmit = mlx4_en_xmit,
	.ndo_select_queue = mlx4_en_select_queue,
	.ndo_get_stats = mlx4_en_get_stats,
	.ndo_set_multicast_list = mlx4_en_set_multicast,
	.ndo_set_mac_address = mlx4_en_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = mlx4_en_change_mtu,
	.ndo_tx_timeout = mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = mlx4_en_netpoll,
#endif
};
1030 | |||
1031 | int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | ||
1032 | struct mlx4_en_port_profile *prof) | ||
1033 | { | ||
1034 | struct net_device *dev; | ||
1035 | struct mlx4_en_priv *priv; | ||
1036 | int i; | ||
1037 | int err; | ||
1038 | |||
1039 | dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv), | ||
1040 | prof->tx_ring_num, prof->rx_ring_num); | ||
1041 | if (dev == NULL) { | ||
1042 | mlx4_err(mdev, "Net device allocation failed\n"); | ||
1043 | return -ENOMEM; | ||
1044 | } | ||
1045 | |||
1046 | SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev); | ||
1047 | dev->dev_id = port - 1; | ||
1048 | |||
1049 | /* | ||
1050 | * Initialize driver private data | ||
1051 | */ | ||
1052 | |||
1053 | priv = netdev_priv(dev); | ||
1054 | memset(priv, 0, sizeof(struct mlx4_en_priv)); | ||
1055 | priv->dev = dev; | ||
1056 | priv->mdev = mdev; | ||
1057 | priv->prof = prof; | ||
1058 | priv->port = port; | ||
1059 | priv->port_up = false; | ||
1060 | priv->flags = prof->flags; | ||
1061 | priv->tx_ring_num = prof->tx_ring_num; | ||
1062 | priv->rx_ring_num = prof->rx_ring_num; | ||
1063 | priv->mac_index = -1; | ||
1064 | priv->msg_enable = MLX4_EN_MSG_LEVEL; | ||
1065 | spin_lock_init(&priv->stats_lock); | ||
1066 | INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast); | ||
1067 | INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac); | ||
1068 | INIT_WORK(&priv->watchdog_task, mlx4_en_restart); | ||
1069 | INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); | ||
1070 | INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); | ||
1071 | |||
1072 | /* Query for default mac and max mtu */ | ||
1073 | priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; | ||
1074 | priv->mac = mdev->dev->caps.def_mac[priv->port]; | ||
1075 | if (ILLEGAL_MAC(priv->mac)) { | ||
1076 | en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", | ||
1077 | priv->port, priv->mac); | ||
1078 | err = -EINVAL; | ||
1079 | goto out; | ||
1080 | } | ||
1081 | |||
1082 | priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + | ||
1083 | DS_SIZE * MLX4_EN_MAX_RX_FRAGS); | ||
1084 | err = mlx4_en_alloc_resources(priv); | ||
1085 | if (err) | ||
1086 | goto out; | ||
1087 | |||
1088 | /* Allocate page for receive rings */ | ||
1089 | err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, | ||
1090 | MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); | ||
1091 | if (err) { | ||
1092 | en_err(priv, "Failed to allocate page for rx qps\n"); | ||
1093 | goto out; | ||
1094 | } | ||
1095 | priv->allocated = 1; | ||
1096 | |||
1097 | /* | ||
1098 | * Initialize netdev entry points | ||
1099 | */ | ||
1100 | dev->netdev_ops = &mlx4_netdev_ops; | ||
1101 | dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT; | ||
1102 | netif_set_real_num_tx_queues(dev, priv->tx_ring_num); | ||
1103 | netif_set_real_num_rx_queues(dev, priv->rx_ring_num); | ||
1104 | |||
1105 | SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); | ||
1106 | |||
1107 | /* Set defualt MAC */ | ||
1108 | dev->addr_len = ETH_ALEN; | ||
1109 | for (i = 0; i < ETH_ALEN; i++) { | ||
1110 | dev->dev_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); | ||
1111 | dev->perm_addr[ETH_ALEN - 1 - i] = (u8) (priv->mac >> (8 * i)); | ||
1112 | } | ||
1113 | |||
1114 | /* | ||
1115 | * Set driver features | ||
1116 | */ | ||
1117 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; | ||
1118 | if (mdev->LSO_support) | ||
1119 | dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; | ||
1120 | |||
1121 | dev->vlan_features = dev->hw_features; | ||
1122 | |||
1123 | dev->hw_features |= NETIF_F_RXCSUM; | ||
1124 | dev->features = dev->hw_features | NETIF_F_HIGHDMA | | ||
1125 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | | ||
1126 | NETIF_F_HW_VLAN_FILTER; | ||
1127 | |||
1128 | mdev->pndev[port] = dev; | ||
1129 | |||
1130 | netif_carrier_off(dev); | ||
1131 | err = register_netdev(dev); | ||
1132 | if (err) { | ||
1133 | en_err(priv, "Netdev registration failed for port %d\n", port); | ||
1134 | goto out; | ||
1135 | } | ||
1136 | |||
1137 | en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); | ||
1138 | en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); | ||
1139 | |||
1140 | /* Configure port */ | ||
1141 | err = mlx4_SET_PORT_general(mdev->dev, priv->port, | ||
1142 | MLX4_EN_MIN_MTU, | ||
1143 | 0, 0, 0, 0); | ||
1144 | if (err) { | ||
1145 | en_err(priv, "Failed setting port general configurations " | ||
1146 | "for port %d, with error %d\n", priv->port, err); | ||
1147 | goto out; | ||
1148 | } | ||
1149 | |||
1150 | /* Init port */ | ||
1151 | en_warn(priv, "Initializing port\n"); | ||
1152 | err = mlx4_INIT_PORT(mdev->dev, priv->port); | ||
1153 | if (err) { | ||
1154 | en_err(priv, "Failed Initializing port\n"); | ||
1155 | goto out; | ||
1156 | } | ||
1157 | priv->registered = 1; | ||
1158 | mlx4_en_set_default_moderation(priv); | ||
1159 | queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); | ||
1160 | return 0; | ||
1161 | |||
1162 | out: | ||
1163 | mlx4_en_destroy_netdev(dev); | ||
1164 | return err; | ||
1165 | } | ||
1166 | |||