Diffstat (limited to 'drivers/net/netxen/netxen_nic_main.c')
 -rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 1116 ++++++++++++++++++++
 1 file changed, 1116 insertions(+), 0 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
new file mode 100644
index 000000000000..b54ea164e0ea
--- /dev/null
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -0,0 +1,1116 @@
/*
 * Copyright (C) 2003 - 2006 NetXen, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.
 *
 * Contact Information:
 *    info@netxen.com
 * NetXen,
 * 3965 Freedom Circle, Fourth floor,
 * Santa Clara, CA 95054
 *
 *
 * Main source file for NetXen NIC Driver on Linux
 *
 */

#include "netxen_nic_hw.h"

#include "netxen_nic.h"
#define DEFINE_GLOBAL_RECV_CRB
#include "netxen_nic_phan_reg.h"
#include "netxen_nic_ioctl.h"

MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);

char netxen_nic_driver_name[] = "netxen";
static char netxen_nic_driver_string[] = "NetXen Network Driver version "
    NETXEN_NIC_LINUX_VERSIONID "-" NETXEN_NIC_BUILD_NO;

#define NETXEN_NETDEV_WEIGHT 120
#define NETXEN_ADAPTER_UP_MAGIC 777
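/*
 * NETXEN_NETDEV_WEIGHT is the per-device NAPI poll weight.
 * NETXEN_ADAPTER_UP_MAGIC is a sentinel stored in adapter->is_up once the
 * firmware and hardware resources have been brought up, so open() and
 * remove() can tell whether that one-time initialization already happened.
 */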

/* Local functions to NetXen NIC driver */
static int __devinit netxen_nic_probe(struct pci_dev *pdev,
                                      const struct pci_device_id *ent);
static void __devexit netxen_nic_remove(struct pci_dev *pdev);
static int netxen_nic_open(struct net_device *netdev);
static int netxen_nic_close(struct net_device *netdev);
static int netxen_nic_xmit_frame(struct sk_buff *, struct net_device *);
static void netxen_tx_timeout(struct net_device *netdev);
static void netxen_tx_timeout_task(struct net_device *netdev);
static void netxen_watchdog(unsigned long);
static int netxen_handle_int(struct netxen_adapter *, struct net_device *);
static int netxen_nic_ioctl(struct net_device *netdev,
                            struct ifreq *ifr, int cmd);
static int netxen_nic_poll(struct net_device *dev, int *budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev);
#endif
static irqreturn_t netxen_intr(int irq, void *data, struct pt_regs *regs);

/* PCI Device ID Table */
static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
        {PCI_DEVICE(0x4040, 0x0001)},
        {PCI_DEVICE(0x4040, 0x0002)},
        {PCI_DEVICE(0x4040, 0x0003)},
        {PCI_DEVICE(0x4040, 0x0004)},
        {PCI_DEVICE(0x4040, 0x0005)},
        {0,}
};

MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);

/*
 * netxen_nic_probe()
 *
 * The Linux system will invoke this after identifying the vendor ID and
 * device ID in the pci_tbl supported by this module.
 *
 * A quad port card has one operational PCI config space (function 0),
 * which is used to access all four ports.
 *
 * This routine will initialize the adapter, and set up the global parameters
 * along with the port's specific structure.
 */
static int __devinit
netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct net_device *netdev = NULL;
        struct netxen_adapter *adapter = NULL;
        struct netxen_port *port = NULL;
        u8 __iomem *mem_ptr = NULL;
        unsigned long mem_base, mem_len;
        int pci_using_dac, i, err;
        int ring;
        struct netxen_recv_context *recv_ctx = NULL;
        struct netxen_rcv_desc_ctx *rcv_desc = NULL;
        struct netxen_cmd_buffer *cmd_buf_arr = NULL;
        u64 mac_addr[FLASH_NUM_PORTS + 1];
        int valid_mac;

        if ((err = pci_enable_device(pdev)))
                return err;
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                err = -ENODEV;
                goto err_out_disable_pdev;
        }

        if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
                goto err_out_disable_pdev;

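        /*
         * DMA mask selection: try 64-bit streaming and consistent masks
         * first; if either fails, fall back to 32-bit masks.  pci_using_dac
         * records the outcome and later gates NETIF_F_HIGHDMA on the netdevs.
         */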
        pci_set_master(pdev);
        if ((pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) &&
            (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) == 0))
                pci_using_dac = 1;
        else {
                if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
                    (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)))
                        goto err_out_free_res;

                pci_using_dac = 0;
        }

        /* remap phys address */
        mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
        mem_len = pci_resource_len(pdev, 0);

        /* 128 Meg of memory */
        mem_ptr = ioremap(mem_base, NETXEN_PCI_MAPSIZE_BYTES);
        if (mem_ptr == 0UL) {
                printk(KERN_ERR "%s: Cannot ioremap adapter memory aborting."
                       ":%p\n", netxen_nic_driver_name, mem_ptr);
                err = -EIO;
                goto err_out_free_res;
        }

        /*
         * Allocate an adapter structure which will manage all the
         * initialization as well as the common resources for all ports:
         * each port keeps a pointer to this adapter, and the adapter keeps
         * pointers to all of the port structures.
         */

        /* One adapter structure for all 4 ports.... */
        adapter = kzalloc(sizeof(struct netxen_adapter), GFP_KERNEL);
        if (adapter == NULL) {
                printk(KERN_ERR "%s: Could not allocate adapter memory:%d\n",
                       netxen_nic_driver_name,
                       (int)sizeof(struct netxen_adapter));
                err = -ENOMEM;
                goto err_out_iounmap;
        }

        adapter->max_tx_desc_count = MAX_CMD_DESCRIPTORS;
        adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS;
        adapter->max_jumbo_rx_desc_count = MAX_JUMBO_RCV_DESCRIPTORS;

        pci_set_drvdata(pdev, adapter);

        cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
        if (cmd_buf_arr == NULL) {
                err = -ENOMEM;
                goto err_out_free_adapter;
        }
        memset(cmd_buf_arr, 0, TX_RINGSIZE);

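        /*
         * Each receive context owns NUM_RCV_DESC_RINGS descriptor rings: a
         * normal ring sized for MAX_RX_BUFFER_LENGTH skbs and a jumbo ring
         * sized for MAX_RX_JUMBO_BUFFER_LENGTH skbs.  Only the software
         * buffer array (rx_buf_arr) is allocated here; the hardware-visible
         * rings are set up later via netxen_nic_hw_resources() in open().
         */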
        for (i = 0; i < MAX_RCV_CTX; ++i) {
                recv_ctx = &adapter->recv_ctx[i];
                for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
                        rcv_desc = &recv_ctx->rcv_desc[ring];
                        switch (RCV_DESC_TYPE(ring)) {
                        case RCV_DESC_NORMAL:
                                rcv_desc->max_rx_desc_count =
                                    adapter->max_rx_desc_count;
                                rcv_desc->flags = RCV_DESC_NORMAL;
                                rcv_desc->dma_size = RX_DMA_MAP_LEN;
                                rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
                                break;

                        case RCV_DESC_JUMBO:
                                rcv_desc->max_rx_desc_count =
                                    adapter->max_jumbo_rx_desc_count;
                                rcv_desc->flags = RCV_DESC_JUMBO;
                                rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
                                rcv_desc->skb_size = MAX_RX_JUMBO_BUFFER_LENGTH;
                                break;

                        }
                        rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
                            vmalloc(RCV_BUFFSIZE);

                        if (rcv_desc->rx_buf_arr == NULL) {
                                err = -ENOMEM;
                                goto err_out_free_rx_buffer;
                        }
                        memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
                }

        }

        adapter->ops = kzalloc(sizeof(struct netxen_drvops), GFP_KERNEL);
        if (adapter->ops == NULL) {
                printk(KERN_ERR
                       "%s: Could not allocate memory for adapter->ops:%d\n",
                       netxen_nic_driver_name,
                       (int)sizeof(struct netxen_drvops));
                err = -ENOMEM;
                goto err_out_free_rx_buffer;
        }

        adapter->cmd_buf_arr = cmd_buf_arr;
        adapter->ahw.pci_base = mem_ptr;
        spin_lock_init(&adapter->tx_lock);
        spin_lock_init(&adapter->lock);
        /* initialize the buffers in adapter */
        netxen_initialize_adapter_sw(adapter);
        /*
         * Set the CRB window to invalid. If any register in window 0 is
         * accessed it should set the window to 0 and then reset it to 1.
         */
        adapter->curr_window = 255;
        /*
         * Adapter in our case is quad port so initialize it before
         * initializing the ports
         */
        netxen_initialize_adapter_hw(adapter);  /* initialize the adapter */

        netxen_initialize_adapter_ops(adapter);

        init_timer(&adapter->watchdog_timer);
        adapter->ahw.xg_linkup = 0;
        adapter->watchdog_timer.function = &netxen_watchdog;
        adapter->watchdog_timer.data = (unsigned long)adapter;
        INIT_WORK(&adapter->watchdog_task,
                  (void (*)(void *))netxen_watchdog_task, adapter);
        adapter->ahw.pdev = pdev;
        adapter->proc_cmd_buf_counter = 0;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &adapter->ahw.revision_id);

        if (pci_enable_msi(pdev)) {
                adapter->flags &= ~NETXEN_NIC_MSI_ENABLED;
                printk(KERN_WARNING "%s: unable to allocate MSI interrupt"
                       " error\n", netxen_nic_driver_name);
        } else
                adapter->flags |= NETXEN_NIC_MSI_ENABLED;

        if (netxen_is_flash_supported(adapter) == 0 &&
            netxen_get_flash_mac_addr(adapter, mac_addr) == 0)
                valid_mac = 1;
        else
                valid_mac = 0;

        /* initialize all the ports */

        for (i = 0; i < adapter->ahw.max_ports; i++) {
                netdev = alloc_etherdev(sizeof(struct netxen_port));
                if (!netdev) {
                        printk(KERN_ERR "%s: could not allocate netdev for port"
                               " %d\n", netxen_nic_driver_name, i + 1);
                        goto err_out_free_dev;
                }

                SET_MODULE_OWNER(netdev);

                port = netdev_priv(netdev);
                port->netdev = netdev;
                port->pdev = pdev;
                port->adapter = adapter;
                port->portnum = i;      /* Gigabit port number from 0-3 */

                netdev->open = netxen_nic_open;
                netdev->stop = netxen_nic_close;
                netdev->hard_start_xmit = netxen_nic_xmit_frame;
                netdev->get_stats = netxen_nic_get_stats;
                netdev->set_multicast_list = netxen_nic_set_multi;
                netdev->set_mac_address = netxen_nic_set_mac;
                netdev->change_mtu = netxen_nic_change_mtu;
                netdev->do_ioctl = netxen_nic_ioctl;
                netdev->tx_timeout = netxen_tx_timeout;
                netdev->watchdog_timeo = HZ;

                SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
                netdev->poll = netxen_nic_poll;
                netdev->weight = NETXEN_NETDEV_WEIGHT;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = netxen_nic_poll_controller;
#endif
                /* ScatterGather support */
                netdev->features = NETIF_F_SG;
                netdev->features |= NETIF_F_IP_CSUM;
                netdev->features |= NETIF_F_TSO;

                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

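                /*
                 * The 64-bit value read from flash holds the station address
                 * with its bytes in reverse order, so copy it into dev_addr
                 * byte-swapped before validating it and programming it into
                 * the NIU.
                 */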
                if (valid_mac) {
                        unsigned char *p = (unsigned char *)&mac_addr[i];
                        netdev->dev_addr[0] = *(p + 5);
                        netdev->dev_addr[1] = *(p + 4);
                        netdev->dev_addr[2] = *(p + 3);
                        netdev->dev_addr[3] = *(p + 2);
                        netdev->dev_addr[4] = *(p + 1);
                        netdev->dev_addr[5] = *(p + 0);

                        memcpy(netdev->perm_addr, netdev->dev_addr,
                               netdev->addr_len);
                        if (!is_valid_ether_addr(netdev->perm_addr)) {
                                printk(KERN_ERR "%s: Bad MAC address "
                                       "%02x:%02x:%02x:%02x:%02x:%02x.\n",
                                       netxen_nic_driver_name,
                                       netdev->dev_addr[0],
                                       netdev->dev_addr[1],
                                       netdev->dev_addr[2],
                                       netdev->dev_addr[3],
                                       netdev->dev_addr[4],
                                       netdev->dev_addr[5]);
                        } else {
                                if (adapter->ops->macaddr_set)
                                        adapter->ops->macaddr_set(port,
                                                                  netdev->dev_addr);
                        }
                }
                INIT_WORK(&adapter->tx_timeout_task,
                          (void (*)(void *))netxen_tx_timeout_task, netdev);
                netif_carrier_off(netdev);
                netif_stop_queue(netdev);

                if ((err = register_netdev(netdev))) {
                        printk(KERN_ERR "%s: register_netdev failed port #%d"
                               " aborting\n", netxen_nic_driver_name, i + 1);
                        err = -EIO;
                        free_netdev(netdev);
                        goto err_out_free_dev;
                }
                adapter->port_count++;
                adapter->active_ports = 0;
                adapter->port[i] = port;
        }

        /*
         * Initialize all the CRB registers here.
         */
        /* Window = 1 */
        writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
        writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET));
        writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_CMD_ADDR_LO));

        netxen_phantom_init(adapter);
        /*
         * delay a while to ensure that the Pegs are up & running.
         * Otherwise, we might see some flaky behaviour.
         */
        udelay(100);

        switch (adapter->ahw.board_type) {
        case NETXEN_NIC_GBE:
                printk("%s: QUAD GbE board initialized\n",
                       netxen_nic_driver_name);
                break;

        case NETXEN_NIC_XGBE:
                printk("%s: XGbE board initialized\n", netxen_nic_driver_name);
                break;
        }

        adapter->driver_mismatch = 0;

        return 0;

err_out_free_dev:
        if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
                pci_disable_msi(pdev);
        for (i = 0; i < adapter->port_count; i++) {
                port = adapter->port[i];
                if ((port) && (port->netdev)) {
                        unregister_netdev(port->netdev);
                        free_netdev(port->netdev);
                }
        }
        kfree(adapter->ops);

err_out_free_rx_buffer:
        for (i = 0; i < MAX_RCV_CTX; ++i) {
                recv_ctx = &adapter->recv_ctx[i];
                for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
                        rcv_desc = &recv_ctx->rcv_desc[ring];
                        if (rcv_desc->rx_buf_arr != NULL) {
                                vfree(rcv_desc->rx_buf_arr);
                                rcv_desc->rx_buf_arr = NULL;
                        }
                }
        }

        vfree(cmd_buf_arr);

        kfree(adapter->port);

err_out_free_adapter:
        pci_set_drvdata(pdev, NULL);
        kfree(adapter);

err_out_iounmap:
        iounmap(mem_ptr);
err_out_free_res:
        pci_release_regions(pdev);
err_out_disable_pdev:
        pci_disable_device(pdev);
        return err;
}

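/*
 * netxen_nic_remove() tears things down roughly in reverse order of probe:
 * stop all ports, put the hardware back into its power-on state (reload the
 * boot-time register settings from ROM and the firmware), unregister and
 * free the per-port netdevs, release the PCI resources, and finally unmap
 * and free any outstanding receive buffers and the software rings.
 */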
static void __devexit netxen_nic_remove(struct pci_dev *pdev)
{
        struct netxen_adapter *adapter;
        struct netxen_port *port;
        struct netxen_rx_buffer *buffer;
        struct netxen_recv_context *recv_ctx;
        struct netxen_rcv_desc_ctx *rcv_desc;
        int i;
        int ctxid, ring;

        adapter = pci_get_drvdata(pdev);
        if (adapter == NULL)
                return;

        netxen_nic_stop_all_ports(adapter);
        /* leave the hw in the same state as reboot */
        netxen_pinit_from_rom(adapter, 0);
        udelay(500);
        netxen_load_firmware(adapter);

        if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
                netxen_nic_disable_int(adapter);

        udelay(500);            /* Delay for a while to drain the DMA engines */
        for (i = 0; i < adapter->port_count; i++) {
                port = adapter->port[i];
                if ((port) && (port->netdev)) {
                        unregister_netdev(port->netdev);
                        free_netdev(port->netdev);
                }
        }

        if ((adapter->flags & NETXEN_NIC_MSI_ENABLED))
                pci_disable_msi(pdev);
        pci_set_drvdata(pdev, NULL);
        if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
                netxen_free_hw_resources(adapter);

        iounmap(adapter->ahw.pci_base);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
                recv_ctx = &adapter->recv_ctx[ctxid];
                for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
                        rcv_desc = &recv_ctx->rcv_desc[ring];
                        for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
                                buffer = &(rcv_desc->rx_buf_arr[i]);
                                if (buffer->state == NETXEN_BUFFER_FREE)
                                        continue;
                                pci_unmap_single(pdev, buffer->dma,
                                                 rcv_desc->dma_size,
                                                 PCI_DMA_FROMDEVICE);
                                if (buffer->skb != NULL)
                                        dev_kfree_skb_any(buffer->skb);
                        }
                        vfree(rcv_desc->rx_buf_arr);
                }
        }

        vfree(adapter->cmd_buf_arr);
        kfree(adapter->ops);
        kfree(adapter);
}

/*
 * Called when a network interface is made active
 * @returns 0 on success, negative value on failure
 */
static int netxen_nic_open(struct net_device *netdev)
{
        struct netxen_port *port = netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;
        struct netxen_rcv_desc_ctx *rcv_desc;
        int err = 0;
        int ctx, ring;

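        /*
         * One-time adapter bring-up, shared by all ports and guarded by
         * NETXEN_ADAPTER_UP_MAGIC: initialize the firmware, allocate the
         * hardware descriptor rings, run the per-port and NIU init hooks,
         * and post the initial receive buffers.  The first port to be opened
         * also requests the (shared) IRQ and starts the watchdog timer.
         */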
        if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) {
                err = netxen_init_firmware(adapter);
                if (err != 0) {
                        printk(KERN_ERR "Failed to init firmware\n");
                        return -EIO;
                }
                netxen_nic_flash_print(adapter);

                /* setup all the resources for the Phantom... */
                /* this include the descriptors for rcv, tx, and status */
                netxen_nic_clear_stats(adapter);
                err = netxen_nic_hw_resources(adapter);
                if (err) {
                        printk(KERN_ERR "Error in setting hw resources:%d\n",
                               err);
                        return err;
                }
                if (adapter->ops->init_port
                    && adapter->ops->init_port(adapter, port->portnum) != 0) {
                        printk(KERN_ERR "%s: Failed to initialize port %d\n",
                               netxen_nic_driver_name, port->portnum);
                        netxen_free_hw_resources(adapter);
                        return -EIO;
                }
                if (adapter->ops->init_niu)
                        adapter->ops->init_niu(adapter);
                for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
                        for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
                                rcv_desc =
                                    &adapter->recv_ctx[ctx].rcv_desc[ring];
                                netxen_post_rx_buffers(adapter, ctx, ring);
                        }
                }
                adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
        }
        adapter->active_ports++;
        if (adapter->active_ports == 1) {
                err = request_irq(adapter->ahw.pdev->irq, &netxen_intr,
                                  SA_SHIRQ | SA_SAMPLE_RANDOM, netdev->name,
                                  adapter);
                if (err) {
                        printk(KERN_ERR "request_irq failed with: %d\n", err);
                        adapter->active_ports--;
                        return err;
                }
                adapter->irq = adapter->ahw.pdev->irq;
                if (!adapter->driver_mismatch)
                        mod_timer(&adapter->watchdog_timer, jiffies);

                netxen_nic_enable_int(adapter);
        }

        /* Done here again so that even if phantom sw overwrote it,
         * we set it */
        if (adapter->ops->macaddr_set)
                adapter->ops->macaddr_set(port, netdev->dev_addr);
        netxen_nic_set_link_parameters(port);

        netxen_nic_set_multi(netdev);
        if (!adapter->driver_mismatch)
                netif_start_queue(netdev);

        return 0;
}

/*
 * netxen_nic_close - Disables a network interface entry point
 */
static int netxen_nic_close(struct net_device *netdev)
{
        struct netxen_port *port = netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;
        int i, j;
        struct netxen_cmd_buffer *cmd_buff;
        struct netxen_skb_frag *buffrag;

        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        /* disable phy_ints */
        if (adapter->ops->disable_phy_interrupts)
                adapter->ops->disable_phy_interrupts(adapter, port->portnum);

        adapter->active_ports--;

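        /*
         * When the last active port goes down, release the shared IRQ, unmap
         * and free any command (TX) buffers that are still outstanding, and
         * stop the watchdog timer.  Earlier ports simply stop their queue.
         */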
        if (!adapter->active_ports) {
                netxen_nic_disable_int(adapter);
                if (adapter->irq)
                        free_irq(adapter->irq, adapter);
                cmd_buff = adapter->cmd_buf_arr;
                for (i = 0; i < adapter->max_tx_desc_count; i++) {
                        buffrag = cmd_buff->frag_array;
                        if (buffrag->dma) {
                                pci_unmap_single(port->pdev, buffrag->dma,
                                                 buffrag->length,
                                                 PCI_DMA_TODEVICE);
                                buffrag->dma = (u64) NULL;
                        }
                        for (j = 0; j < cmd_buff->frag_count; j++) {
                                buffrag++;
                                if (buffrag->dma) {
                                        pci_unmap_page(port->pdev,
                                                       buffrag->dma,
                                                       buffrag->length,
                                                       PCI_DMA_TODEVICE);
                                        buffrag->dma = (u64) NULL;
                                }
                        }
                        /* Free the skb we received in netxen_nic_xmit_frame */
                        if (cmd_buff->skb) {
                                dev_kfree_skb_any(cmd_buff->skb);
                                cmd_buff->skb = NULL;
                        }
                        cmd_buff++;
                }
                del_timer_sync(&adapter->watchdog_timer);
        }

        return 0;
}

static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct netxen_port *port = netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;
        struct netxen_hardware_context *hw = &adapter->ahw;
        unsigned int first_seg_len = skb->len - skb->data_len;
        struct netxen_skb_frag *buffrag;
        unsigned int i;

        u32 producer = 0;
        u32 saved_producer = 0;
        struct cmd_desc_type0 *hwdesc;
        int k;
        struct netxen_cmd_buffer *pbuf = NULL;
        unsigned int tries = 0;
        static int dropped_packet = 0;
        int frag_count;
        u32 local_producer = 0;
        u32 max_tx_desc_count = 0;
        u32 last_cmd_consumer = 0;
        int no_of_desc;

        port->stats.xmitcalled++;
        frag_count = skb_shinfo(skb)->nr_frags + 1;

        if (unlikely(skb->len <= 0)) {
                dev_kfree_skb_any(skb);
                port->stats.badskblen++;
                return NETDEV_TX_OK;
        }

        if (frag_count > MAX_BUFFERS_PER_CMD) {
                printk("%s: %s netxen_nic_xmit_frame: frag_count (%d) "
                       "too large, can handle only %d frags\n",
                       netxen_nic_driver_name, netdev->name,
                       frag_count, MAX_BUFFERS_PER_CMD);
                port->stats.txdropped++;
                if ((++dropped_packet & 0xff) == 0xff)
                        printk("%s: %s dropped packets = %d\n",
                               netxen_nic_driver_name, netdev->name,
                               dropped_packet);

                return NETDEV_TX_OK;
        }

        /*
         * Everything is set up. Now, we just need to transmit it out.
         * Note that we have to copy the contents of the buffer over to the
         * right place. Later on, this can be optimized out by de-coupling the
         * producer index from the buffer index.
         */
retry_getting_window:
        spin_lock_bh(&adapter->tx_lock);
        if (adapter->total_threads == MAX_XMIT_PRODUCERS) {
                spin_unlock_bh(&adapter->tx_lock);
                /*
                 * Yield CPU
                 */
                if (!in_atomic())
                        schedule();
                else {
                        for (i = 0; i < 20; i++)
                                cpu_relax();    /* This is a nop instruction on i386 */
                }
                goto retry_getting_window;
        }
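        /*
         * Descriptor accounting: each cmd descriptor carries up to four
         * buffer fragments, so a frame with frag_count fragments needs
         * ceil(frag_count / 4) descriptors (e.g. 6 fragments -> 2).  A TSO
         * frame reserves one extra descriptor for the copied headers, and a
         * second extra one if the MAC + IP + TCP headers do not fit in a
         * single descriptor (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN
         * bytes).  The ring-full check below compares the new producer
         * position against the last consumer, accounting for wrap-around.
         */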
        local_producer = adapter->cmd_producer;
        /* There are 4 fragments per descriptor */
        no_of_desc = (frag_count + 3) >> 2;
        if (skb_shinfo(skb)->gso_size > 0) {
                no_of_desc++;
                if (((skb->nh.iph)->ihl * sizeof(u32)) +
                    ((skb->h.th)->doff * sizeof(u32)) +
                    sizeof(struct ethhdr) >
                    (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
                        no_of_desc++;
                }
        }
        k = adapter->cmd_producer;
        max_tx_desc_count = adapter->max_tx_desc_count;
        last_cmd_consumer = adapter->last_cmd_consumer;
        if ((k + no_of_desc) >=
            ((last_cmd_consumer <= k) ? last_cmd_consumer + max_tx_desc_count :
             last_cmd_consumer)) {
                spin_unlock_bh(&adapter->tx_lock);
                if (tries == 0) {
                        local_bh_disable();
                        netxen_process_cmd_ring((unsigned long)adapter);
                        local_bh_enable();
                        ++tries;
                        goto retry_getting_window;
                } else {
                        port->stats.nocmddescriptor++;
                        DPRINTK(ERR, "No command descriptors available,"
                                " producer = %d, consumer = %d count=%llu,"
                                " dropping packet\n", producer,
                                adapter->last_cmd_consumer,
                                port->stats.nocmddescriptor);

                        spin_lock_bh(&adapter->tx_lock);
                        netif_stop_queue(netdev);
                        port->flags |= NETXEN_NETDEV_STATUS;
                        spin_unlock_bh(&adapter->tx_lock);
                        return NETDEV_TX_BUSY;
                }
        }
        k = get_index_range(k, max_tx_desc_count, no_of_desc);
        adapter->cmd_producer = k;
        adapter->total_threads++;
        adapter->num_threads++;

        spin_unlock_bh(&adapter->tx_lock);
        /* Copy the descriptors into the hardware */
        producer = local_producer;
        saved_producer = producer;
        hwdesc = &hw->cmd_desc_head[producer];
        memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
        /* Take skb->data itself */
        pbuf = &adapter->cmd_buf_arr[producer];
        if (skb_shinfo(skb)->gso_size > 0) {
                pbuf->mss = skb_shinfo(skb)->gso_size;
                hwdesc->mss = skb_shinfo(skb)->gso_size;
        } else {
                pbuf->mss = 0;
                hwdesc->mss = 0;
        }
        pbuf->no_of_descriptors = no_of_desc;
        pbuf->total_length = skb->len;
        pbuf->skb = skb;
        pbuf->cmd = TX_ETHER_PKT;
        pbuf->frag_count = frag_count;
        pbuf->port = port->portnum;
        buffrag = &pbuf->frag_array[0];
        buffrag->dma = pci_map_single(port->pdev, skb->data, first_seg_len,
                                      PCI_DMA_TODEVICE);
        buffrag->length = first_seg_len;
        CMD_DESC_TOTAL_LENGTH_WRT(hwdesc, skb->len);
        hwdesc->num_of_buffers = frag_count;
        hwdesc->opcode = TX_ETHER_PKT;

        CMD_DESC_PORT_WRT(hwdesc, port->portnum);
        hwdesc->buffer1_length = cpu_to_le16(first_seg_len);
        hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);

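        /*
         * Map each page fragment and spread the mappings across buffer slots
         * 1-4 of the current descriptor, starting a fresh (zeroed) descriptor
         * after every four fragments.
         */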
        for (i = 1, k = 1; i < frag_count; i++, k++) {
                struct skb_frag_struct *frag;
                int len, temp_len;
                unsigned long offset;
                dma_addr_t temp_dma;

                /* move to next desc. if there is a need */
                if ((i & 0x3) == 0) {
                        k = 0;
                        producer = get_next_index(producer,
                                                  adapter->max_tx_desc_count);
                        hwdesc = &hw->cmd_desc_head[producer];
                        memset(hwdesc, 0, sizeof(struct cmd_desc_type0));
                }
                frag = &skb_shinfo(skb)->frags[i - 1];
                len = frag->size;
                offset = frag->page_offset;

                temp_len = len;
                temp_dma = pci_map_page(port->pdev, frag->page, offset,
                                        len, PCI_DMA_TODEVICE);

                buffrag++;
                buffrag->dma = temp_dma;
                buffrag->length = temp_len;

                DPRINTK(INFO, "for loop. i=%d k=%d\n", i, k);
                switch (k) {
                case 0:
                        hwdesc->buffer1_length = cpu_to_le16(temp_len);
                        hwdesc->addr_buffer1 = cpu_to_le64(temp_dma);
                        break;
                case 1:
                        hwdesc->buffer2_length = cpu_to_le16(temp_len);
                        hwdesc->addr_buffer2 = cpu_to_le64(temp_dma);
                        break;
                case 2:
                        hwdesc->buffer3_length = cpu_to_le16(temp_len);
                        hwdesc->addr_buffer3 = cpu_to_le64(temp_dma);
                        break;
                case 3:
                        hwdesc->buffer4_length = temp_len;
                        hwdesc->addr_buffer4 = cpu_to_le64(temp_dma);
                        break;
                }
                frag++;
        }
        producer = get_next_index(producer, adapter->max_tx_desc_count);

        /* might change opcode to TX_TCP_LSO */
        netxen_tso_check(adapter, &hw->cmd_desc_head[saved_producer], skb);

        /* For LSO, we need to copy the MAC/IP/TCP headers into
         * the descriptor ring
         */
        if (hw->cmd_desc_head[saved_producer].opcode == TX_TCP_LSO) {
                int hdr_len, first_hdr_len, more_hdr;
                hdr_len = hw->cmd_desc_head[saved_producer].total_hdr_length;
                if (hdr_len > (sizeof(struct cmd_desc_type0) - NET_IP_ALIGN)) {
                        first_hdr_len =
                            sizeof(struct cmd_desc_type0) - NET_IP_ALIGN;
                        more_hdr = 1;
                } else {
                        first_hdr_len = hdr_len;
                        more_hdr = 0;
                }
                /* copy the MAC/IP/TCP headers to the cmd descriptor list */
                hwdesc = &hw->cmd_desc_head[producer];

                /* copy the first 64 bytes */
                memcpy(((void *)hwdesc) + NET_IP_ALIGN,
                       (void *)(skb->data), first_hdr_len);
                producer = get_next_index(producer, max_tx_desc_count);

                if (more_hdr) {
                        hwdesc = &hw->cmd_desc_head[producer];
                        /* copy the next 64 bytes - should be enough except
                         * for pathological case
                         */
                        memcpy((void *)hwdesc, (void *)(skb->data) +
                               first_hdr_len, hdr_len - first_hdr_len);
                        producer = get_next_index(producer, max_tx_desc_count);
                }
        }
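        /*
         * Producer update: several senders may be building descriptors at
         * once (num_threads counts them).  The last sender to finish writes
         * the final producer index to the CRB; any other sender only bumps
         * the CRB producer past its own descriptors if the CRB value still
         * matches the position it started from.
         */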
        spin_lock_bh(&adapter->tx_lock);
        port->stats.txbytes +=
            CMD_DESC_TOTAL_LENGTH(&hw->cmd_desc_head[saved_producer]);
        /* Code to update the adapter considering how many producer threads
           are currently working */
        if ((--adapter->num_threads) == 0) {
                /* This is the last thread */
                u32 crb_producer = adapter->cmd_producer;
                writel(crb_producer,
                       NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_PRODUCER_OFFSET));
                wmb();
                adapter->total_threads = 0;
        } else {
                u32 crb_producer = 0;
                crb_producer =
                    readl(NETXEN_CRB_NORMALIZE
                          (adapter, CRB_CMD_PRODUCER_OFFSET));
                if (crb_producer == local_producer) {
                        crb_producer = get_index_range(crb_producer,
                                                       max_tx_desc_count,
                                                       no_of_desc);
                        writel(crb_producer,
                               NETXEN_CRB_NORMALIZE(adapter,
                                                    CRB_CMD_PRODUCER_OFFSET));
                        wmb();
                }
        }

        port->stats.xmitfinished++;
        spin_unlock_bh(&adapter->tx_lock);

        netdev->trans_start = jiffies;

        DPRINTK(INFO, "wrote CMD producer %x to phantom\n", producer);

        DPRINTK(INFO, "Done. Send\n");
        return NETDEV_TX_OK;
}

static void netxen_watchdog(unsigned long v)
{
        struct netxen_adapter *adapter = (struct netxen_adapter *)v;
        schedule_work(&adapter->watchdog_task);
}

static void netxen_tx_timeout(struct net_device *netdev)
{
        struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;

        schedule_work(&adapter->tx_timeout_task);
}

static void netxen_tx_timeout_task(struct net_device *netdev)
{
        struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
        unsigned long flags;

        printk(KERN_ERR "%s %s: transmit timeout, resetting.\n",
               netxen_nic_driver_name, netdev->name);

        spin_lock_irqsave(&port->adapter->lock, flags);
        netxen_nic_close(netdev);
        netxen_nic_open(netdev);
        spin_unlock_irqrestore(&port->adapter->lock, flags);
        netdev->trans_start = jiffies;
        netif_wake_queue(netdev);
}

static int
netxen_handle_int(struct netxen_adapter *adapter, struct net_device *netdev)
{
        u32 ret = 0;

        DPRINTK(INFO, "Entered handle ISR\n");

        adapter->stats.ints++;

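        /*
         * For legacy (INTx) interrupts the status register must be cleared
         * explicitly: mask the interrupt, write the target status register
         * and poll the interrupt vector until the pending bit (0x80) clears
         * (up to 32 iterations).  MSI interrupts skip this.  If there is RX
         * or TX work, hand off to the NAPI poll routine; otherwise re-enable
         * the interrupt right away.
         */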
        if (!(adapter->flags & NETXEN_NIC_MSI_ENABLED)) {
                int count = 0;
                u32 mask;
                netxen_nic_disable_int(adapter);
                /* Window = 0 or 1 */
                do {
                        writel(0xffffffff, (void __iomem *)
                               (adapter->ahw.pci_base + ISR_INT_TARGET_STATUS));
                        mask = readl((void __iomem *)
                                     (adapter->ahw.pci_base + ISR_INT_VECTOR));
                } while (((mask & 0x80) != 0) && (++count < 32));
                if ((mask & 0x80) != 0)
                        printk("Could not disable interrupt completely\n");

        }
        adapter->stats.hostints++;

        if (netxen_nic_rx_has_work(adapter) || netxen_nic_tx_has_work(adapter)) {
                if (netif_rx_schedule_prep(netdev)) {
                        /*
                         * Interrupts are already disabled.
                         */
                        __netif_rx_schedule(netdev);
                } else {
                        static unsigned int intcount = 0;
                        if ((++intcount & 0xfff) == 0xfff)
                                printk(KERN_ERR
                                       "%s: %s interrupt %d while in poll\n",
                                       netxen_nic_driver_name, netdev->name,
                                       intcount);
                }
                ret = 1;
        }

        if (ret == 0) {
                netxen_nic_enable_int(adapter);
        }

        return ret;
}

/*
 * netxen_intr - Interrupt Handler
 * @irq: interrupt number
 * data points to the adapter structure (which may be handling more than one port)
 */
irqreturn_t netxen_intr(int irq, void *data, struct pt_regs *regs)
{
        struct netxen_adapter *adapter;
        struct netxen_port *port;
        struct net_device *netdev;
        int i;

        if (unlikely(!irq)) {
                return IRQ_NONE;        /* Not our interrupt */
        }

        adapter = (struct netxen_adapter *)data;
        for (i = 0; i < adapter->ahw.max_ports; i++) {
                port = adapter->port[i];
                netdev = port->netdev;

                /* process our status queue (for all 4 ports) */
                netxen_handle_int(adapter, netdev);
        }

        return IRQ_HANDLED;
}

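/*
 * NAPI poll routine (old-style ->poll interface): process at most
 * min(*budget, netdev->quota) receive descriptors, splitting that budget
 * evenly across the receive contexts, subtract the work done from both the
 * quota and the budget, reap completed TX descriptors, and return 0 (and
 * re-enable interrupts) only once no receive work is left.
 */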
static int netxen_nic_poll(struct net_device *netdev, int *budget)
{
        struct netxen_port *port = (struct netxen_port *)netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;
        int work_to_do = min(*budget, netdev->quota);
        int done = 1;
        int ctx;
        int this_work_done;

        DPRINTK(INFO, "polling for %d descriptors\n", *budget);
        port->stats.polled++;

        adapter->work_done = 0;
        for (ctx = 0; ctx < MAX_RCV_CTX; ++ctx) {
                /*
                 * Fairness issue. This will give undue weight to the
                 * receive context 0.
                 */

                /*
                 * To avoid starvation, we give each of our receivers,
                 * a fraction of the quota. Sometimes, it might happen that we
                 * have enough quota to process every packet, but since all the
                 * packets are on one context, it gets only half of the quota,
                 * and ends up not processing it.
                 */
                this_work_done = netxen_process_rcv_ring(adapter, ctx,
                                                         work_to_do /
                                                         MAX_RCV_CTX);
                adapter->work_done += this_work_done;
        }

        netdev->quota -= adapter->work_done;
        *budget -= adapter->work_done;

        if (adapter->work_done >= work_to_do
            && netxen_nic_rx_has_work(adapter) != 0)
                done = 0;

        netxen_process_cmd_ring((unsigned long)adapter);

        DPRINTK(INFO, "new work_done: %d work_to_do: %d\n",
                adapter->work_done, work_to_do);
        if (done) {
                netif_rx_complete(netdev);
                netxen_nic_enable_int(adapter);
        }

        return (done ? 0 : 1);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netxen_nic_poll_controller(struct net_device *netdev)
{
        struct netxen_port *port = netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;
        disable_irq(adapter->irq);
        netxen_intr(adapter->irq, adapter, NULL);
        enable_irq(adapter->irq);
}
#endif
/*
 * netxen_nic_ioctl() - We provide the tcl/phanmon support through these ioctls.
 */
static int
netxen_nic_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        int err = 0;
        struct netxen_port *port = netdev_priv(netdev);
        struct netxen_adapter *adapter = port->adapter;

        DPRINTK(INFO, "doing ioctl for %s\n", netdev->name);
        switch (cmd) {
        case NETXEN_NIC_CMD:
                err = netxen_nic_do_ioctl(adapter, (void *)ifr->ifr_data, port);
                break;

        case NETXEN_NIC_NAME:
                DPRINTK(INFO, "ioctl cmd for NetXen\n");
                if (ifr->ifr_data) {
                        put_user(port->portnum, (u16 __user *) ifr->ifr_data);
                }
                break;

        default:
                DPRINTK(INFO, "ioctl cmd %x not supported\n", cmd);
                err = -EOPNOTSUPP;
                break;
        }

        return err;
}

static struct pci_driver netxen_driver = {
        .name = netxen_nic_driver_name,
        .id_table = netxen_pci_tbl,
        .probe = netxen_nic_probe,
        .remove = __devexit_p(netxen_nic_remove)
};

/* Driver Registration on NetXen card */

static int __init netxen_init_module(void)
{
        printk(KERN_INFO "%s\n", netxen_nic_driver_string);

        return pci_module_init(&netxen_driver);
}

module_init(netxen_init_module);

static void __exit netxen_exit_module(void)
{
        /*
         * Wait for some time to allow the dma to drain, if any.
         */
        mdelay(5);
        pci_unregister_driver(&netxen_driver);
}

module_exit(netxen_exit_module);