Diffstat (limited to 'drivers/net/enic/enic_main.c')
-rw-r--r--	drivers/net/enic/enic_main.c	2513
1 file changed, 0 insertions, 2513 deletions
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
deleted file mode 100644
index 67a27cd304dd..000000000000
--- a/drivers/net/enic/enic_main.c
+++ /dev/null
@@ -1,2513 +0,0 @@
/*
 * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "cq_enet_desc.h"
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_vic.h"
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
#include "enic_pp.h"

#define ENIC_NOTIFY_TIMER_PERIOD	(2 * HZ)
#define WQ_ENET_MAX_DESC_LEN		(1 << WQ_ENET_LEN_BITS)
#define MAX_TSO				(1 << 16)
#define ENIC_DESC_MAX_SPLITS		(MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)

#define PCI_DEVICE_ID_CISCO_VIC_ENET		0x0043	/* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN	0x0044	/* enet dynamic vnic */

/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
	{ 0, }	/* end of table */
};

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Scott Feldman <scofeldm@cisco.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);

struct enic_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int offset;
};

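/* Device stats are read back as a flat array of u64 counters;
 * offsetof()/8 turns each field's byte offset into an index into
 * that array (the vnic_tx_stats/vnic_rx_stats fields are 8-byte
 * counters, as the indexing in enic_get_ethtool_stats() relies on).
 */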
#define ENIC_TX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
#define ENIC_RX_STAT(stat)	\
	{ .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }

static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};

static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};

static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);

static int enic_is_dynamic(struct enic *enic)
{
	return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
}

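/* Completion queues are laid out with one CQ per RQ first,
 * followed by one CQ per WQ.
 */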
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}

static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}

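/* Interrupt vector layout: with legacy INTx, vector 0 handles I/O
 * completions, vector 1 errors, and vector 2 notifications.  With
 * MSI-X, each CQ has its own vector, followed by one vector for
 * errors and one for notifications.
 */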
static inline unsigned int enic_legacy_io_intr(void)
{
	return 0;
}

static inline unsigned int enic_legacy_err_intr(void)
{
	return 1;
}

static inline unsigned int enic_legacy_notify_intr(void)
{
	return 2;
}

static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}

static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}

static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count;
}

static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return enic->rq_count + enic->wq_count + 1;
}

static int enic_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(netdev)) {
		ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;

	enic_dev_fw_info(enic, &fw_info);

	strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
	strncpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strncpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}

static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < enic_n_tx_stats; i++) {
			memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		for (i = 0; i < enic_n_rx_stats; i++) {
			memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int enic_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return enic_n_tx_stats + enic_n_rx_stats;
	default:
		return -EOPNOTSUPP;
	}
}

static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i;

	enic_dev_stats_dump(enic, &vstats);

	for (i = 0; i < enic_n_tx_stats; i++)
		*(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
	for (i = 0; i < enic_n_rx_stats; i++)
		*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}

static u32 enic_get_msglevel(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	return enic->msg_enable;
}

static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *enic = netdev_priv(netdev);
	enic->msg_enable = value;
}

static int enic_get_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);

	ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;

	return 0;
}

static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	unsigned int i, intr;

	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
		vnic_dev_get_intr_coal_timer_max(enic->vdev));

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				tx_coalesce_usecs);
		}

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				rx_coalesce_usecs);
		}

		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}

static const struct ethtool_ops enic_ethtool_ops = {
	.get_settings = enic_get_settings,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
};

static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(wq->vdev);

	if (buf->sop)
		pci_unmap_single(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);
	else
		pci_unmap_page(enic->pdev, buf->dma_addr,
			buf->len, PCI_DMA_TODEVICE);

	if (buf->os_buf)
		dev_kfree_skb_any(buf->os_buf);
}

static void enic_wq_free_buf(struct vnic_wq *wq,
	struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
{
	enic_free_wq_buf(wq, buf);
}

static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	spin_lock(&enic->wq_lock[q_number]);

	vnic_wq_service(&enic->wq[q_number], cq_desc,
		completed_index, enic_wq_free_buf,
		opaque);

	if (netif_queue_stopped(enic->netdev) &&
	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
		netif_wake_queue(enic->netdev);

	spin_unlock(&enic->wq_lock[q_number]);

	return 0;
}

static void enic_log_q_error(struct enic *enic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < enic->wq_count; i++) {
		error_status = vnic_wq_error_status(&enic->wq[i]);
		if (error_status)
			netdev_err(enic->netdev, "WQ[%d] error_status %d\n",
				i, error_status);
	}

	for (i = 0; i < enic->rq_count; i++) {
		error_status = vnic_rq_error_status(&enic->rq[i]);
		if (error_status)
			netdev_err(enic->netdev, "RQ[%d] error_status %d\n",
				i, error_status);
	}
}

static void enic_msglvl_check(struct enic *enic)
{
	u32 msg_enable = vnic_dev_msg_lvl(enic->vdev);

	if (msg_enable != enic->msg_enable) {
		netdev_info(enic->netdev, "msg lvl changed from 0x%x to 0x%x\n",
			enic->msg_enable, msg_enable);
		enic->msg_enable = msg_enable;
	}
}

static void enic_mtu_check(struct enic *enic)
{
	u32 mtu = vnic_dev_mtu(enic->vdev);
	struct net_device *netdev = enic->netdev;

	if (mtu && mtu != enic->port_mtu) {
		enic->port_mtu = mtu;
		if (enic_is_dynamic(enic)) {
			mtu = max_t(int, ENIC_MIN_MTU,
				min_t(int, ENIC_MAX_MTU, mtu));
			if (mtu != netdev->mtu)
				schedule_work(&enic->change_mtu_work);
		} else {
			if (mtu < netdev->mtu)
				netdev_warn(netdev,
					"interface MTU (%d) set higher "
					"than switch port MTU (%d)\n",
					netdev->mtu, mtu);
		}
	}
}

static void enic_link_check(struct enic *enic)
{
	int link_status = vnic_dev_link_status(enic->vdev);
	int carrier_ok = netif_carrier_ok(enic->netdev);

	if (link_status && !carrier_ok) {
		netdev_info(enic->netdev, "Link UP\n");
		netif_carrier_on(enic->netdev);
	} else if (!link_status && carrier_ok) {
		netdev_info(enic->netdev, "Link DOWN\n");
		netif_carrier_off(enic->netdev);
	}
}

static void enic_notify_check(struct enic *enic)
{
	enic_msglvl_check(enic);
	enic_mtu_check(enic);
	enic_link_check(enic);
}

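/* Test whether interrupt source 'i' is asserted in the legacy
 * interrupt PBA word read from the device.
 */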
#define ENIC_TEST_INTR(pba, i) (pba & (1 << i))

static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	unsigned int io_intr = enic_legacy_io_intr();
	unsigned int err_intr = enic_legacy_err_intr();
	unsigned int notify_intr = enic_legacy_notify_intr();
	u32 pba;

	vnic_intr_mask(&enic->intr[io_intr]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[io_intr]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, notify_intr)) {
		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, err_intr)) {
		vnic_intr_return_all_credits(&enic->intr[err_intr]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, io_intr)) {
		if (napi_schedule_prep(&enic->napi[0]))
			__napi_schedule(&enic->napi[0]);
	} else {
		vnic_intr_unmask(&enic->intr[io_intr]);
	}

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msi(int irq, void *data)
{
	struct enic *enic = data;

	/* With MSI, there is no sharing of interrupts, so this is
	 * our interrupt and there is no need to ack it.  The device
	 * is not providing per-vector masking, so the OS will not
	 * write to PCI config space to mask/unmask the interrupt.
	 * We're using mask_on_assertion for MSI, so the device
	 * automatically masks the interrupt when the interrupt is
	 * generated.  Later, when exiting polling, the interrupt
	 * will be unmasked (see enic_poll).
	 *
	 * Also, the device uses the same PCIe Traffic Class (TC)
	 * for Memory Write data and MSI, so there are no ordering
	 * issues; the MSI will always arrive at the Root Complex
	 * _after_ corresponding Memory Writes (i.e. descriptor
	 * writes).
	 */

	napi_schedule(&enic->napi[0]);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_rq(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* schedule NAPI polling for RQ cleanup */
	napi_schedule(napi);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int cq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_msix_wq_intr(enic, 0);
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int wq_work_done;

	wq_work_done = vnic_cq_service(&enic->cq[cq],
		wq_work_to_do, enic_wq_service, NULL);

	vnic_intr_return_credits(&enic->intr[intr],
		wq_work_done,
		1 /* unmask intr */,
		1 /* reset intr timer */);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_err(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_err_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);

	enic_log_q_error(enic);

	/* schedule recovery from WQ/RQ error */
	schedule_work(&enic->reset);

	return IRQ_HANDLED;
}

static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;
	unsigned int intr = enic_msix_notify_intr(enic);

	vnic_intr_return_all_credits(&enic->intr[intr]);
	enic_notify_check(enic);

	return IRQ_HANDLED;
}

static inline void enic_queue_wq_skb_cont(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	unsigned int len_left, int loopback)
{
	skb_frag_t *frag;

	/* Queue additional data fragments */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		enic_queue_wq_desc_cont(wq, skb,
			pci_map_page(enic->pdev, frag->page,
				frag->page_offset, frag->size,
				PCI_DMA_TODEVICE),
			frag->size,
			(len_left == 0),	/* EOP? */
			loopback);
	}
}

static inline void enic_queue_wq_skb_vlan(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int head_len = skb_headlen(skb);
	unsigned int len_left = skb->len - head_len;
	unsigned int hdr_len = skb_checksum_start_offset(skb);
	unsigned int csum_offset = hdr_len + skb->csum_offset;
	int eop = (len_left == 0);

	/* Queue the main skb fragment.  The fragments are no larger
	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
	 * than WQ_ENET_MAX_DESC_LEN length.  So only one descriptor
	 * per fragment is queued.
	 */
	enic_queue_wq_desc_csum_l4(wq, skb,
		pci_map_single(enic->pdev, skb->data,
			head_len, PCI_DMA_TODEVICE),
		head_len,
		csum_offset,
		hdr_len,
		vlan_tag_insert, vlan_tag,
		eop, loopback);

	if (!eop)
		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
}

static inline void enic_queue_wq_skb_tso(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
{
	unsigned int frag_len_left = skb_headlen(skb);
	unsigned int len_left = skb->len - frag_len_left;
	unsigned int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int eop = (len_left == 0);
	unsigned int len;
	dma_addr_t dma_addr;
	unsigned int offset = 0;
	skb_frag_t *frag;

	/* Preload TCP csum field with IP pseudo hdr calculated
	 * with IP length set to zero.  HW will later add in length
	 * to each TCP segment resulting from the TSO.
	 */

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
			ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
			&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for the main skb fragment
	 */
	while (frag_len_left) {
		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
			len, PCI_DMA_TODEVICE);
		enic_queue_wq_desc_tso(wq, skb,
			dma_addr,
			len,
			mss, hdr_len,
			vlan_tag_insert, vlan_tag,
			eop && (len == frag_len_left), loopback);
		frag_len_left -= len;
		offset += len;
	}

	if (eop)
		return;

	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
	 * for additional data fragments
	 */
	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		len_left -= frag->size;
		frag_len_left = frag->size;
		offset = frag->page_offset;

		while (frag_len_left) {
			len = min(frag_len_left,
				(unsigned int)WQ_ENET_MAX_DESC_LEN);
			dma_addr = pci_map_page(enic->pdev, frag->page,
				offset, len,
				PCI_DMA_TODEVICE);
			enic_queue_wq_desc_cont(wq, skb,
				dma_addr,
				len,
				(len_left == 0) &&
				(len == frag_len_left),	/* EOP? */
				loopback);
			frag_len_left -= len;
			offset += len;
		}
	}
}

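/* Pick the right descriptor format for this skb: TSO descriptors
 * when GSO is in play, checksum-offload descriptors for
 * CHECKSUM_PARTIAL, and plain VLAN descriptors otherwise.
 */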
static inline void enic_queue_wq_skb(struct enic *enic,
	struct vnic_wq *wq, struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int vlan_tag = 0;
	int vlan_tag_insert = 0;
	int loopback = 0;

	if (vlan_tx_tag_present(skb)) {
		/* VLAN tag from trunking driver */
		vlan_tag_insert = 1;
		vlan_tag = vlan_tx_tag_get(skb);
	} else if (enic->loop_enable) {
		vlan_tag = enic->loop_tag;
		loopback = 1;
	}

	if (mss)
		enic_queue_wq_skb_tso(enic, wq, skb, mss,
			vlan_tag_insert, vlan_tag, loopback);
	else if (skb->ip_summed == CHECKSUM_PARTIAL)
		enic_queue_wq_skb_csum_l4(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
	else
		enic_queue_wq_skb_vlan(enic, wq, skb,
			vlan_tag_insert, vlan_tag, loopback);
}

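/* The queue is stopped here once fewer than MAX_SKB_FRAGS +
 * ENIC_DESC_MAX_SPLITS descriptors remain, and is re-woken from
 * enic_wq_service() once completions free enough of them.
 */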
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_wq *wq = &enic->wq[0];
	unsigned long flags;

	if (skb->len <= 0) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
	 * which is very likely.  In the off chance it's going to take
	 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
	 */

	if (skb_shinfo(skb)->gso_size == 0 &&
	    skb_shinfo(skb)->nr_frags + 1 > ENIC_NON_TSO_MAX_DESC &&
	    skb_linearize(skb)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	spin_lock_irqsave(&enic->wq_lock[0], flags);

	if (vnic_wq_desc_avail(wq) <
	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
		netif_stop_queue(netdev);
		/* This is a hard error, log it */
		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
		return NETDEV_TX_BUSY;
	}

	enic_queue_wq_skb(enic, wq, skb);

	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&enic->wq_lock[0], flags);

	return NETDEV_TX_OK;
}

/* dev_base_lock rwlock held, nominally process context */
static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
	struct rtnl_link_stats64 *net_stats)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *stats;

	enic_dev_stats_dump(enic, &stats);

	net_stats->tx_packets = stats->tx.tx_frames_ok;
	net_stats->tx_bytes = stats->tx.tx_bytes_ok;
	net_stats->tx_errors = stats->tx.tx_errors;
	net_stats->tx_dropped = stats->tx.tx_drops;

	net_stats->rx_packets = stats->rx.rx_frames_ok;
	net_stats->rx_bytes = stats->rx.rx_bytes_ok;
	net_stats->rx_errors = stats->rx.rx_errors;
	net_stats->multicast = stats->rx.rx_multicast_frames_ok;
	net_stats->rx_over_errors = enic->rq_truncated_pkts;
	net_stats->rx_crc_errors = enic->rq_bad_fcs;
	net_stats->rx_dropped = stats->rx.rx_no_bufs + stats->rx.rx_drop;

	return net_stats;
}

void enic_reset_addr_lists(struct enic *enic)
{
	enic->mc_count = 0;
	enic->uc_count = 0;
	enic->flags = 0;
}

static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
	struct enic *enic = netdev_priv(netdev);

	if (enic_is_dynamic(enic)) {
		if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
			return -EADDRNOTAVAIL;
	} else {
		if (!is_valid_ether_addr(addr))
			return -EADDRNOTAVAIL;
	}

	memcpy(netdev->dev_addr, addr, netdev->addr_len);

	return 0;
}

static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
{
	struct enic *enic = netdev_priv(netdev);
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	int err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_del_station_addr(enic);
		if (err)
			return err;
	}

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	if (netif_running(enic->netdev)) {
		err = enic_dev_add_station_addr(enic);
		if (err)
			return err;
	}

	return err;
}

static int enic_set_mac_address(struct net_device *netdev, void *p)
{
	struct sockaddr *saddr = p;
	char *addr = saddr->sa_data;
	struct enic *enic = netdev_priv(netdev);
	int err;

	err = enic_dev_del_station_addr(enic);
	if (err)
		return err;

	err = enic_set_mac_addr(netdev, addr);
	if (err)
		return err;

	return enic_dev_add_station_addr(enic);
}

static void enic_update_multicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int mc_count = netdev_mc_count(netdev);
	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"multicast addresses\n",
			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del multicast addrs.  We keep the
	 * addrs from the last call in enic->mc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == mc_count)
			break;
		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->mc_count; i++) {
		for (j = 0; j < mc_count; j++)
			if (compare_ether_addr(enic->mc_addr[i],
				mc_addr[j]) == 0)
				break;
		if (j == mc_count)
			enic_dev_del_addr(enic, enic->mc_addr[i]);
	}

	for (i = 0; i < mc_count; i++) {
		for (j = 0; j < enic->mc_count; j++)
			if (compare_ether_addr(mc_addr[i],
				enic->mc_addr[j]) == 0)
				break;
		if (j == enic->mc_count)
			enic_dev_add_addr(enic, mc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < mc_count; i++)
		memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);

	enic->mc_count = mc_count;
}

static void enic_update_unicast_addr_list(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	struct netdev_hw_addr *ha;
	unsigned int uc_count = netdev_uc_count(netdev);
	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
	unsigned int i, j;

	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
		netdev_warn(netdev, "Registering only %d out of %d "
			"unicast addresses\n",
			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
	}

	/* Is there an easier way?  Trying to minimize the
	 * calls to add/del unicast addrs.  We keep the
	 * addrs from the last call in enic->uc_addr and
	 * look for changes to add/del.
	 */

	i = 0;
	netdev_for_each_uc_addr(ha, netdev) {
		if (i == uc_count)
			break;
		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
	}

	for (i = 0; i < enic->uc_count; i++) {
		for (j = 0; j < uc_count; j++)
			if (compare_ether_addr(enic->uc_addr[i],
				uc_addr[j]) == 0)
				break;
		if (j == uc_count)
			enic_dev_del_addr(enic, enic->uc_addr[i]);
	}

	for (i = 0; i < uc_count; i++) {
		for (j = 0; j < enic->uc_count; j++)
			if (compare_ether_addr(uc_addr[i],
				enic->uc_addr[j]) == 0)
				break;
		if (j == enic->uc_count)
			enic_dev_add_addr(enic, uc_addr[i]);
	}

	/* Save the list to compare against next time
	 */

	for (i = 0; i < uc_count; i++)
		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);

	enic->uc_count = uc_count;
}

/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	int directed = 1;
	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
	int promisc = (netdev->flags & IFF_PROMISC) ||
		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
	unsigned int flags = netdev->flags |
		(allmulti ? IFF_ALLMULTI : 0) |
		(promisc ? IFF_PROMISC : 0);

	if (enic->flags != flags) {
		enic->flags = flags;
		enic_dev_packet_filter(enic, directed,
			multicast, broadcast, promisc, allmulti);
	}

	if (!promisc) {
		enic_update_unicast_addr_list(enic);
		if (!allmulti)
			enic_update_multicast_addr_list(enic);
	}
}

/* netif_tx_lock held, BHs disabled */
static void enic_tx_timeout(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	schedule_work(&enic->reset);
}

static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct enic *enic = netdev_priv(netdev);

	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	/* Ignore the vf argument for now.  We can assume the request
	 * is coming on a vf.
	 */
	if (is_valid_ether_addr(mac)) {
		memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
		return 0;
	} else
		return -EINVAL;
}

static int enic_set_vf_port(struct net_device *netdev, int vf,
	struct nlattr *port[])
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_port_profile prev_pp;
	int err = 0, restore_pp = 1;

	/* don't support VFs, yet */
	if (vf != PORT_SELF_VF)
		return -EOPNOTSUPP;

	if (!port[IFLA_PORT_REQUEST])
		return -EOPNOTSUPP;

	memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
	memset(&enic->pp, 0, sizeof(enic->pp));

	enic->pp.set |= ENIC_SET_REQUEST;
	enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);

	if (port[IFLA_PORT_PROFILE]) {
		enic->pp.set |= ENIC_SET_NAME;
		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
			PORT_PROFILE_MAX);
	}

	if (port[IFLA_PORT_INSTANCE_UUID]) {
		enic->pp.set |= ENIC_SET_INSTANCE;
		memcpy(enic->pp.instance_uuid,
			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
	}

	if (port[IFLA_PORT_HOST_UUID]) {
		enic->pp.set |= ENIC_SET_HOST;
		memcpy(enic->pp.host_uuid,
			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
	}

	/* Special case handling: mac came from IFLA_VF_MAC */
	if (!is_zero_ether_addr(prev_pp.vf_mac))
		memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);

	if (is_zero_ether_addr(netdev->dev_addr))
		random_ether_addr(netdev->dev_addr);

	err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
	if (err) {
		if (restore_pp) {
			/* Things are still the way they were: Implicit
			 * DISASSOCIATE failed
			 */
			memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
		} else {
			memset(&enic->pp, 0, sizeof(enic->pp));
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	} else {
		/* Set flag to indicate that the port assoc/disassoc
		 * request has been sent out to fw
		 */
		enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;

		/* If DISASSOCIATE, clean up all assigned/saved macaddresses */
		if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
			memset(enic->pp.mac_addr, 0, ETH_ALEN);
			memset(netdev->dev_addr, 0, ETH_ALEN);
		}
	}

	memset(enic->pp.vf_mac, 0, ETH_ALEN);

	return err;
}

static int enic_get_vf_port(struct net_device *netdev, int vf,
	struct sk_buff *skb)
{
	struct enic *enic = netdev_priv(netdev);
	u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
	int err;

	if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
		return -ENODATA;

	err = enic_process_get_pp_request(enic, enic->pp.request, &response);
	if (err)
		return err;

	NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
	NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
	if (enic->pp.set & ENIC_SET_NAME)
		NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
			enic->pp.name);
	if (enic->pp.set & ENIC_SET_INSTANCE)
		NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
			enic->pp.instance_uuid);
	if (enic->pp.set & ENIC_SET_HOST)
		NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
			enic->pp.host_uuid);

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;

	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);
	dev_kfree_skb_any(buf->os_buf);
}

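/* Allocate a receive skb sized for the current MTU plus the VLAN
 * ethernet header, DMA-map it, and post it to the RQ.
 */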
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;
	unsigned int len = netdev->mtu + VLAN_ETH_HLEN;
	unsigned int os_buf_index = 0;
	dma_addr_t dma_addr;

	skb = netdev_alloc_skb_ip_align(netdev, len);
	if (!skb)
		return -ENOMEM;

	dma_addr = pci_map_single(enic->pdev, skb->data,
		len, PCI_DMA_FROMDEVICE);

	enic_queue_rq_desc(rq, skb, os_buf_index,
		dma_addr, len);

	return 0;
}

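/* Completion handler for one received buffer: unmap it, decode the
 * completion descriptor, and either pass the skb up the stack (via
 * GRO when enabled) or drop it on error/truncation.
 */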
static void enic_rq_indicate_buf(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	struct net_device *netdev = enic->netdev;
	struct sk_buff *skb;

	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
	u8 packet_error;
	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
	u32 rss_hash;

	if (skipped)
		return;

	skb = buf->os_buf;
	prefetch(skb->data - NET_IP_ALIGN);
	pci_unmap_single(enic->pdev, buf->dma_addr,
		buf->len, PCI_DMA_FROMDEVICE);

	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
		&type, &color, &q_number, &completed_index,
		&ingress_port, &fcoe, &eop, &sop, &rss_type,
		&csum_not_calc, &rss_hash, &bytes_written,
		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
		&fcs_ok);

	if (packet_error) {

		if (!fcs_ok) {
			if (bytes_written > 0)
				enic->rq_bad_fcs++;
			else if (bytes_written == 0)
				enic->rq_truncated_pkts++;
		}

		dev_kfree_skb_any(skb);

		return;
	}

	if (eop && bytes_written > 0) {

		/* Good receive
		 */

		skb_put(skb, bytes_written);
		skb->protocol = eth_type_trans(skb, netdev);

		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
			skb->csum = htons(checksum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		skb->dev = netdev;

		if (vlan_stripped)
			__vlan_hwaccel_put_tag(skb, vlan_tci);

		if (netdev->features & NETIF_F_GRO)
			napi_gro_receive(&enic->napi[q_number], skb);
		else
			netif_receive_skb(skb);
	} else {

		/* Buffer overflow
		 */

		dev_kfree_skb_any(skb);
	}
}

static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
	u8 type, u16 q_number, u16 completed_index, void *opaque)
{
	struct enic *enic = vnic_dev_priv(vdev);

	vnic_rq_service(&enic->rq[q_number], cq_desc,
		completed_index, VNIC_RQ_RETURN_DESC,
		enic_rq_indicate_buf, opaque);

	return 0;
}

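/* NAPI poll used in legacy INTx and MSI modes, where a single
 * vector covers both RQ and WQ completions.
 */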
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int cq_rq = enic_cq_rq(enic, 0);
	unsigned int cq_wq = enic_cq_wq(enic, 0);
	unsigned int intr = enic_legacy_io_intr();
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1;	/* no limit */
	unsigned int work_done, rq_work_done, wq_work_done;
	int err;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of a
	 * WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling
	 * mode so we can try to fill the ring again.
	 */

	if (err)
		rq_work_done = rq_work_to_do;

	if (rq_work_done < rq_work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return rq_work_done;
}

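/* NAPI poll used in MSI-X mode; one instance per RQ, with the RQ
 * index recovered from the napi pointer's offset in enic->napi[].
 */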
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct enic *enic = netdev_priv(netdev);
	unsigned int rq = (napi - &enic->napi[0]);
	unsigned int cq = enic_cq_rq(enic, rq);
	unsigned int intr = enic_msix_rq_intr(enic, rq);
	unsigned int work_to_do = budget;
	unsigned int work_done;
	int err;

	/* Service RQ
	 */

	work_done = vnic_cq_service(&enic->cq[cq],
		work_to_do, enic_rq_service, NULL);

	/* Return intr event credits for this polling
	 * cycle.  An intr event is the completion of an
	 * RQ packet.
	 */

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[intr],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	err = vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);

	/* Buffer allocation failed.  Stay in polling mode
	 * so we can try to fill the ring again.
	 */

	if (err)
		work_done = work_to_do;

	if (work_done < work_to_do) {

		/* Some work done, but not enough to stay in polling,
		 * exit polling
		 */

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[intr]);
	}

	return work_done;
}

static void enic_notify_timer(unsigned long data)
{
	struct enic *enic = (struct enic *)data;

	enic_notify_check(enic);

	mod_timer(&enic->notify_timer,
		round_jiffies(jiffies + ENIC_NOTIFY_TIMER_PERIOD));
}

static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					enic->msix[i].devid);
		break;
	default:
		break;
	}
}

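/* Request IRQs for the negotiated interrupt mode.  For MSI-X, a
 * per-vector table of names, handlers, and handler arguments is
 * built first, then each vector is requested in turn.
 */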
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i, intr;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:

		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;

	case VNIC_DEV_INTR_MODE_MSI:

		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-rx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_rq;
			enic->msix[intr].devid = &enic->napi[i];
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			sprintf(enic->msix[intr].devname,
				"%.11s-tx-%d", netdev->name, i);
			enic->msix[intr].isr = enic_isr_msix_wq;
			enic->msix[intr].devid = enic;
		}

		intr = enic_msix_err_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-err", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_err;
		enic->msix[intr].devid = enic;

		intr = enic_msix_notify_intr(enic);
		sprintf(enic->msix[intr].devname,
			"%.11s-notify", netdev->name);
		enic->msix[intr].isr = enic_isr_msix_notify;
		enic->msix[intr].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			enic->msix[i].requested = 0;

		for (i = 0; i < enic->intr_count; i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}

		break;

	default:
		break;
	}

	return err;
}

static void enic_synchronize_irqs(struct enic *enic)
{
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		synchronize_irq(enic->pdev->irq);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->intr_count; i++)
			synchronize_irq(enic->msix_entry[i].vector);
		break;
	default:
		break;
	}
}

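/* Tell the device which interrupt vector (if any) carries async
 * notifications; the devcmd call is serialized with devcmd_lock.
 */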
1518 | static int enic_dev_notify_set(struct enic *enic) | ||
1519 | { | ||
1520 | int err; | ||
1521 | |||
1522 | spin_lock(&enic->devcmd_lock); | ||
1523 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1524 | case VNIC_DEV_INTR_MODE_INTX: | ||
1525 | err = vnic_dev_notify_set(enic->vdev, | ||
1526 | enic_legacy_notify_intr()); | ||
1527 | break; | ||
1528 | case VNIC_DEV_INTR_MODE_MSIX: | ||
1529 | err = vnic_dev_notify_set(enic->vdev, | ||
1530 | enic_msix_notify_intr(enic)); | ||
1531 | break; | ||
1532 | default: | ||
1533 | err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */); | ||
1534 | break; | ||
1535 | } | ||
1536 | spin_unlock(&enic->devcmd_lock); | ||
1537 | |||
1538 | return err; | ||
1539 | } | ||
1540 | |||
1541 | static void enic_notify_timer_start(struct enic *enic) | ||
1542 | { | ||
1543 | switch (vnic_dev_get_intr_mode(enic->vdev)) { | ||
1544 | case VNIC_DEV_INTR_MODE_MSI: | ||
1545 | mod_timer(&enic->notify_timer, jiffies); | ||
1546 | break; | ||
1547 | default: | ||
1548 | /* Using intr for notification for INTx/MSI-X */ | ||
1549 | break; | ||
1550 | } | ||
1551 | } | ||
1552 | |||
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		netdev_err(netdev, "Unable to request irq.\n");
		return err;
	}

	err = enic_dev_notify_set(enic);
	if (err) {
		netdev_err(netdev,
			"Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		/* Need at least one buffer on ring to get going */
		if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
			netdev_err(netdev, "Unable to alloc receive buffers\n");
			err = -ENOMEM;
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_add_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_add_station_addr(enic);
	enic_set_rx_mode(netdev);

	netif_wake_queue(netdev);

	for (i = 0; i < enic->rq_count; i++)
		napi_enable(&enic->napi[i]);

	enic_dev_enable(enic);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	enic_dev_notify_unset(enic);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}

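/* Teardown mirrors enic_open in reverse: mask and synchronize
 * interrupts first so no handler races with the queue teardown, then
 * disable queues and NAPI before cleaning the rings.
 */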
/* rtnl lock is held, process context */
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_mask(&enic->intr[i]);
		(void)vnic_intr_masked(&enic->intr[i]); /* flush write */
	}

	enic_synchronize_irqs(enic);

	del_timer_sync(&enic->notify_timer);

	enic_dev_disable(enic);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
		enic_dev_del_addr(enic, enic->pp.mac_addr);
	else
		enic_dev_del_station_addr(enic);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

	enic_dev_notify_unset(enic);
	enic_free_intr(enic);

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}

static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct enic *enic = netdev_priv(netdev);
	int running = netif_running(netdev);

	if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
		return -EINVAL;

	if (enic_is_dynamic(enic))
		return -EOPNOTSUPP;

	if (running)
		enic_stop(netdev);

	netdev->mtu = new_mtu;

	if (netdev->mtu > enic->port_mtu)
		netdev_warn(netdev,
			"interface MTU (%d) set higher than port MTU (%d)\n",
			netdev->mtu, enic->port_mtu);

	if (running)
		enic_open(netdev);

	return 0;
}

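/* Worker to resize receive buffers when the firmware-reported port MTU
 * changes; drains and refills RQ 0 in place under rtnl.
 */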
static void enic_change_mtu_work(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, change_mtu_work);
	struct net_device *netdev = enic->netdev;
	int new_mtu = vnic_dev_mtu(enic->vdev);
	int err;
	unsigned int i;

	new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));

	rtnl_lock();

	/* Stop RQ */
	del_timer_sync(&enic->notify_timer);

	for (i = 0; i < enic->rq_count; i++)
		napi_disable(&enic->napi[i]);

	vnic_intr_mask(&enic->intr[0]);
	enic_synchronize_irqs(enic);
	err = vnic_rq_disable(&enic->rq[0]);
	if (err) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to disable RQ.\n");
		return;
	}
	vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
	vnic_cq_clean(&enic->cq[0]);
	vnic_intr_clean(&enic->intr[0]);

	/* Fill RQ with new_mtu-sized buffers */
	netdev->mtu = new_mtu;
	vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
	/* Need at least one buffer on ring to get going */
	if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
		rtnl_unlock();
		netdev_err(netdev, "Unable to alloc receive buffers.\n");
		return;
	}

	/* Start RQ */
	vnic_rq_enable(&enic->rq[0]);
	napi_enable(&enic->napi[0]);
	vnic_intr_unmask(&enic->intr[0]);
	enic_notify_timer_start(enic);

	rtnl_unlock();

	netdev_info(netdev, "interface MTU set to %d\n", netdev->mtu);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;
	unsigned int i, intr;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++) {
			intr = enic_msix_rq_intr(enic, i);
			enic_isr_msix_rq(enic->msix_entry[intr].vector,
				&enic->napi[i]);
		}

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
		}

		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
#endif

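/* Kick off a devcmd with start(), then poll finished() every 100 ms
 * until it reports done, for at most 2 seconds.
 */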
static int enic_dev_wait(struct vnic_dev *vdev,
	int (*start)(struct vnic_dev *, int),
	int (*finished)(struct vnic_dev *, int *),
	int arg)
{
	unsigned long time;
	int done;
	int err;

	BUG_ON(in_interrupt());

	err = start(vdev, arg);
	if (err)
		return err;

	/* Wait for func to complete...2 seconds max
	 */

	time = jiffies + (HZ * 2);
	do {

		err = finished(vdev, &done);
		if (err)
			return err;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);

	} while (time_after(time, jiffies));

	return -ETIMEDOUT;
}

static int enic_dev_open(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_open,
		vnic_dev_open_done, 0);
	if (err)
		dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
			err);

	return err;
}

static int enic_dev_hang_reset(struct enic *enic)
{
	int err;

	err = enic_dev_wait(enic->vdev, vnic_dev_hang_reset,
		vnic_dev_hang_reset_done, 0);
	if (err)
		netdev_err(enic->netdev, "vNIC hang reset failed, err %d\n",
			err);

	return err;
}

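/* The RSS key is a fixed 40-byte value (the bytes spell out
 * "UCSawesomePALOuniqueLINUXrocksENICiscool"). It is staged in
 * DMA-coherent memory because the devcmd takes a bus address.
 */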
static int enic_set_rsskey(struct enic *enic)
{
	dma_addr_t rss_key_buf_pa;
	union vnic_rss_key *rss_key_buf_va = NULL;
	union vnic_rss_key rss_key = {
		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
		.key[1].b = {80, 65, 76, 79, 117, 110, 105, 113, 117, 101},
		.key[2].b = {76, 73, 78, 85, 88, 114, 111, 99, 107, 115},
		.key[3].b = {69, 78, 73, 67, 105, 115, 99, 111, 111, 108},
	};
	int err;

	rss_key_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_key), &rss_key_buf_pa);
	if (!rss_key_buf_va)
		return -ENOMEM;

	memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_key(enic,
		rss_key_buf_pa,
		sizeof(union vnic_rss_key));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
		rss_key_buf_va, rss_key_buf_pa);

	return err;
}

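/* Build the RSS indirection table: 2^rss_hash_bits entries, spreading
 * hash buckets round-robin across the receive queues.
 */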
static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
{
	dma_addr_t rss_cpu_buf_pa;
	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
	unsigned int i;
	int err;

	rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
		sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
	if (!rss_cpu_buf_va)
		return -ENOMEM;

	for (i = 0; i < (1 << rss_hash_bits); i++)
		(*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = i % enic->rq_count;

	spin_lock(&enic->devcmd_lock);
	err = enic_set_rss_cpu(enic,
		rss_cpu_buf_pa,
		sizeof(union vnic_rss_cpu));
	spin_unlock(&enic->devcmd_lock);

	pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
		rss_cpu_buf_va, rss_cpu_buf_pa);

	return err;
}

static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
	u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable)
{
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;
	int err;

	/* Enable VLAN tag stripping.
	 */

	spin_lock(&enic->devcmd_lock);
	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	spin_unlock(&enic->devcmd_lock);

	return err;
}

static int enic_set_rss_nic_cfg(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
		NIC_CFG_RSS_HASH_TYPE_IPV6 |
		NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
	const u8 rss_hash_bits = 7;
	const u8 rss_base_cpu = 0;
	u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);

	if (rss_enable) {
		if (!enic_set_rsskey(enic)) {
			if (enic_set_rsscpu(enic, rss_hash_bits)) {
				rss_enable = 0;
				dev_warn(dev, "RSS disabled, failed to set "
					"RSS cpu indirection table.\n");
			}
		} else {
			rss_enable = 0;
			dev_warn(dev, "RSS disabled, failed to set RSS key.\n");
		}
	}

	return enic_set_niccfg(enic, rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu, rss_enable);
}

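/* Worker that recovers the vNIC after a hang: take the interface down,
 * hang-reset the device, re-provision it, and bring it back up, all
 * under rtnl.
 */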
static void enic_reset(struct work_struct *work)
{
	struct enic *enic = container_of(work, struct enic, reset);

	if (!netif_running(enic->netdev))
		return;

	rtnl_lock();

	enic_dev_hang_notify(enic);
	enic_stop(enic->netdev);
	enic_dev_hang_reset(enic);
	enic_reset_addr_lists(enic);
	enic_init_vnic_resources(enic);
	enic_set_rss_nic_cfg(enic);
	enic_dev_set_ig_vlan_rewrite_mode(enic);
	enic_open(enic->netdev);

	rtnl_unlock();
}

static int enic_set_intr_mode(struct enic *enic)
{
	unsigned int n = min_t(unsigned int, enic->rq_count, ENIC_RQ_MAX);
	unsigned int m = min_t(unsigned int, enic->wq_count, ENIC_WQ_MAX);
	unsigned int i;

	/* Set interrupt mode (INTx, MSI, MSI-X) depending
	 * on system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, n+m CQs, and n+m+2 INTRs
	 * (the second to last INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	BUG_ON(ARRAY_SIZE(enic->msix_entry) < n + m + 2);
	for (i = 0; i < n + m + 2; i++)
		enic->msix_entry[i].entry = i;

	/* Use multiple RQs if RSS is enabled
	 */

	if (ENIC_SETTING(enic, RSS) &&
	    enic->config.intr_mode < 1 &&
	    enic->rq_count >= n &&
	    enic->wq_count >= m &&
	    enic->cq_count >= n + m &&
	    enic->intr_count >= n + m + 2) {

		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {

			enic->rq_count = n;
			enic->wq_count = m;
			enic->cq_count = n + m;
			enic->intr_count = n + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

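	/* If multi-RQ MSI-X isn't possible, retry MSI-X with a single RQ
	 * (1 RQ, m WQs, 1+m CQs, 1+m+2 INTRs)
	 */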
	if (enic->config.intr_mode < 1 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= m &&
	    enic->cq_count >= 1 + m &&
	    enic->intr_count >= 1 + m + 2) {
		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {

			enic->rq_count = 1;
			enic->wq_count = m;
			enic->cq_count = 1 + m;
			enic->intr_count = 1 + m + 2;

			vnic_dev_set_intr_mode(enic->vdev,
				VNIC_DEV_INTR_MODE_MSIX);

			return 0;
		}
	}

	/* Next try MSI
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 1 INTR
	 */

	if (enic->config.intr_mode < 2 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 1 &&
	    !pci_enable_msi(enic->pdev)) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 1;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/* Next try INTx
	 *
	 * We need 1 RQ, 1 WQ, 2 CQs, and 3 INTRs
	 * (the first INTR is used for WQ/RQ)
	 * (the second INTR is used for WQ/RQ errors)
	 * (the last INTR is used for notifications)
	 */

	if (enic->config.intr_mode < 3 &&
	    enic->rq_count >= 1 &&
	    enic->wq_count >= 1 &&
	    enic->cq_count >= 2 &&
	    enic->intr_count >= 3) {

		enic->rq_count = 1;
		enic->wq_count = 1;
		enic->cq_count = 2;
		enic->intr_count = 3;

		vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}

static const struct net_device_ops enic_netdev_dynamic_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_multicast_list	= enic_set_rx_mode,
	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
	.ndo_set_vf_port	= enic_set_vf_port,
	.ndo_get_vf_port	= enic_get_vf_port,
	.ndo_set_vf_mac		= enic_set_vf_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};

static const struct net_device_ops enic_netdev_ops = {
	.ndo_open		= enic_open,
	.ndo_stop		= enic_stop,
	.ndo_start_xmit		= enic_hard_start_xmit,
	.ndo_get_stats64	= enic_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= enic_set_mac_address,
	.ndo_set_rx_mode	= enic_set_rx_mode,
	.ndo_set_multicast_list	= enic_set_rx_mode,
	.ndo_change_mtu		= enic_change_mtu,
	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= enic_vlan_rx_kill_vid,
	.ndo_tx_timeout		= enic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= enic_poll_controller,
#endif
};

static void enic_dev_deinit(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < enic->rq_count; i++)
		netif_napi_del(&enic->napi[i]);

	enic_free_vnic_resources(enic);
	enic_clear_intr_mode(enic);
}

static int enic_dev_init(struct enic *enic)
{
	struct device *dev = enic_get_dev(enic);
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err;

	/* Get interrupt coalesce timer info */
	err = enic_dev_intr_coal_timer_info(enic);
	if (err) {
		dev_warn(dev, "Using default conversion factor for "
			"interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(enic->vdev);
	}

	/* Get vNIC configuration
	 */

	err = enic_get_vnic_config(enic);
	if (err) {
		dev_err(dev, "Get vNIC configuration failed, aborting\n");
		return err;
	}

	/* Get available resource counts
	 */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		dev_err(dev, "Failed to set intr mode based on resource "
			"counts and system capabilities, aborting\n");
		return err;
	}

	/* Allocate and configure vNIC resources
	 */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		dev_err(dev, "Failed to alloc vNIC resources, aborting\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rss_nic_cfg(enic);
	if (err) {
		dev_err(dev, "Failed to config nic, aborting\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < enic->rq_count; i++)
			netif_napi_add(netdev, &enic->napi[i],
				enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}

static void enic_iounmap(struct enic *enic)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++)
		if (enic->bar[i].vaddr)
			iounmap(enic->bar[i].vaddr);
}

static int __devinit enic_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		pr_err("Etherdev alloc failed, aborting\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources
	 */

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device, aborting\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "Cannot request PCI regions, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fall back to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "No usable DMA configuration, aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 32);
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			dev_err(dev, "Unable to obtain %u-bit DMA "
				"for consistent allocations, aborting\n", 40);
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0-5
	 */

	for (i = 0; i < ARRAY_SIZE(enic->bar); i++) {
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
			continue;
		enic->bar[i].len = pci_resource_len(pdev, i);
		enic->bar[i].vaddr = pci_iomap(pdev, i, enic->bar[i].len);
		if (!enic->bar[i].vaddr) {
			dev_err(dev, "Cannot memory-map BAR %d, aborting\n", i);
			err = -ENODEV;
			goto err_out_iounmap;
		}
		enic->bar[i].bus_addr = pci_resource_start(pdev, i);
	}

	/* Register vNIC device
	 */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, enic->bar,
		ARRAY_SIZE(enic->bar));
	if (!enic->vdev) {
		dev_err(dev, "vNIC registration failed, aborting\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state
	 */

	err = enic_dev_open(enic);
	if (err) {
		dev_err(dev, "vNIC dev open failed, aborting\n");
		goto err_out_vnic_unregister;
	}

	/* Setup devcmd lock
	 */

	spin_lock_init(&enic->devcmd_lock);

	/*
	 * Set ingress vlan rewrite mode before vnic initialization
	 */

	err = enic_dev_set_ig_vlan_rewrite_mode(enic);
	if (err) {
		dev_err(dev,
			"Failed to set ingress vlan rewrite mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	/* Do not call dev_init for a dynamic vnic.
	 * For a dynamic vnic, init_prov_info will be
	 * called later by an upper layer.
	 */

	if (!enic_is_dynamic(enic)) {
		err = vnic_dev_init(enic->vdev, 0);
		if (err) {
			dev_err(dev, "vNIC dev init failed, aborting\n");
			goto err_out_dev_close;
		}
	}

	err = enic_dev_init(enic);
	if (err) {
		dev_err(dev, "Device initialization failed, aborting\n");
		goto err_out_dev_close;
	}

	/* Setup notification timer, HW reset task, and wq locks
	 */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);
	INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	/* Register net device
	 */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		dev_err(dev, "Invalid MAC address, aborting\n");
		goto err_out_dev_deinit;
	}

	enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
	enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;

	if (enic_is_dynamic(enic))
		netdev->netdev_ops = &enic_netdev_dynamic_ops;
	else
		netdev->netdev_ops = &enic_netdev_ops;

	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, LOOP)) {
		netdev->features &= ~NETIF_F_HW_VLAN_TX;
		enic->loop_enable = 1;
		enic->loop_tag = enic->config.loop_tag;
		dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
	}
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->hw_features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, RXCSUM))
		netdev->hw_features |= NETIF_F_RXCSUM;

	netdev->features |= netdev->hw_features;

	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	err = register_netdev(netdev);
	if (err) {
		dev_err(dev, "Cannot register net device, aborting\n");
		goto err_out_dev_deinit;
	}

	return 0;

err_out_dev_deinit:
	enic_dev_deinit(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}

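/* Tear down in the reverse order of enic_probe */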
static void __devexit enic_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netdev) {
		struct enic *enic = netdev_priv(netdev);

		cancel_work_sync(&enic->reset);
		cancel_work_sync(&enic->change_mtu_work);
		unregister_netdev(netdev);
		enic_dev_deinit(enic);
		vnic_dev_close(enic->vdev);
		vnic_dev_unregister(enic->vdev);
		enic_iounmap(enic);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(netdev);
	}
}

static struct pci_driver enic_driver = {
	.name = DRV_NAME,
	.id_table = enic_id_table,
	.probe = enic_probe,
	.remove = __devexit_p(enic_remove),
};

static int __init enic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);

	return pci_register_driver(&enic_driver);
}

static void __exit enic_cleanup_module(void)
{
	pci_unregister_driver(&enic_driver);
}

module_init(enic_init_module);
module_exit(enic_cleanup_module);