diff options
Diffstat (limited to 'drivers/net/ethernet/3com')
-rw-r--r-- | drivers/net/ethernet/3com/3c501.c | 896 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c501.h | 91 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c509.c | 1594 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c515.c | 1584 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c574_cs.c | 1181 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c589_cs.c | 943 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/3c59x.c | 3326 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/Kconfig | 147 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/Makefile | 12 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/acenic.c | 3206 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/acenic.h | 790 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/typhoon.c | 2574 | ||||
-rw-r--r-- | drivers/net/ethernet/3com/typhoon.h | 624 |
13 files changed, 16968 insertions, 0 deletions
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c new file mode 100644 index 000000000000..5420f6de27df --- /dev/null +++ b/drivers/net/ethernet/3com/3c501.c | |||
@@ -0,0 +1,896 @@ | |||
1 | /* 3c501.c: A 3Com 3c501 Ethernet driver for Linux. */ | ||
2 | /* | ||
3 | Written 1992,1993,1994 Donald Becker | ||
4 | |||
5 | Copyright 1993 United States Government as represented by the | ||
6 | Director, National Security Agency. This software may be used and | ||
7 | distributed according to the terms of the GNU General Public License, | ||
8 | incorporated herein by reference. | ||
9 | |||
10 | This is a device driver for the 3Com Etherlink 3c501. | ||
	Do not purchase this card, even as a joke. Its performance is horrible,
12 | and it breaks in many ways. | ||
13 | |||
14 | The original author may be reached as becker@scyld.com, or C/O | ||
15 | Scyld Computing Corporation | ||
16 | 410 Severn Ave., Suite 210 | ||
17 | Annapolis MD 21403 | ||
18 | |||
19 | Fixed (again!) the missing interrupt locking on TX/RX shifting. | ||
20 | Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
21 | |||
22 | Removed calls to init_etherdev since they are no longer needed, and | ||
23 | cleaned up modularization just a bit. The driver still allows only | ||
24 | the default address for cards when loaded as a module, but that's | ||
25 | really less braindead than anyone using a 3c501 board. :) | ||
26 | 19950208 (invid@msen.com) | ||
27 | |||
28 | Added traps for interrupts hitting the window as we clear and TX load | ||
29 | the board. Now getting 150K/second FTP with a 3c501 card. Still playing | ||
30 | with a TX-TX optimisation to see if we can touch 180-200K/second as seems | ||
31 | theoretically maximum. | ||
32 | 19950402 Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
33 | |||
34 | Cleaned up for 2.3.x because we broke SMP now. | ||
35 | 20000208 Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
36 | |||
37 | Check up pass for 2.5. Nothing significant changed | ||
38 | 20021009 Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
39 | |||
40 | Fixed zero fill corner case | ||
41 | 20030104 Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
42 | |||
43 | |||
44 | For the avoidance of doubt the "preferred form" of this code is one which | ||
45 | is in an open non patent encumbered format. Where cryptographic key signing | ||
46 | forms part of the process of creating an executable the information | ||
47 | including keys needed to generate an equivalently functional executable | ||
48 | are deemed to be part of the source code. | ||
49 | |||
50 | */ | ||
51 | |||
52 | |||
53 | /** | ||
54 | * DOC: 3c501 Card Notes | ||
55 | * | ||
56 | * Some notes on this thing if you have to hack it. [Alan] | ||
57 | * | ||
 * Some documentation is available from 3Com. Due to the board's age
59 | * standard responses when you ask for this will range from 'be serious' | ||
60 | * to 'give it to a museum'. The documentation is incomplete and mostly | ||
61 | * of historical interest anyway. | ||
62 | * | ||
63 | * The basic system is a single buffer which can be used to receive or | ||
64 | * transmit a packet. A third command mode exists when you are setting | ||
65 | * things up. | ||
66 | * | ||
67 | * If it's transmitting it's not receiving and vice versa. In fact the | ||
68 | * time to get the board back into useful state after an operation is | ||
69 | * quite large. | ||
70 | * | ||
71 | * The driver works by keeping the board in receive mode waiting for a | ||
72 | * packet to arrive. When one arrives it is copied out of the buffer | ||
73 | * and delivered to the kernel. The card is reloaded and off we go. | ||
74 | * | ||
75 | * When transmitting lp->txing is set and the card is reset (from | ||
76 | * receive mode) [possibly losing a packet just received] to command | ||
77 | * mode. A packet is loaded and transmit mode triggered. The interrupt | ||
78 | * handler runs different code for transmit interrupts and can handle | ||
79 | * returning to receive mode or retransmissions (yes you have to help | ||
80 | * out with those too). | ||
81 | * | ||
82 | * DOC: Problems | ||
83 | * | ||
84 | * There are a wide variety of undocumented error returns from the card | ||
85 | * and you basically have to kick the board and pray if they turn up. Most | ||
86 | * only occur under extreme load or if you do something the board doesn't | ||
87 | * like (eg touching a register at the wrong time). | ||
88 | * | ||
89 | * The driver is less efficient than it could be. It switches through | ||
90 | * receive mode even if more transmits are queued. If this worries you buy | ||
91 | * a real Ethernet card. | ||
92 | * | ||
93 | * The combination of slow receive restart and no real multicast | ||
94 | * filter makes the board unusable with a kernel compiled for IP | ||
95 | * multicasting in a real multicast environment. That's down to the board, | ||
96 | * but even with no multicast programs running a multicast IP kernel is | ||
97 | * in group 224.0.0.1 and you will therefore be listening to all multicasts. | ||
98 | * One nv conference running over that Ethernet and you can give up. | ||
99 | * | ||
100 | */ | ||
101 | |||
/* Driver identity strings, reported via ethtool and the probe banner. */
#define DRV_NAME "3c501"
#define DRV_VERSION "2002/10/09"


/* Banner printed once at probe time when el_debug is enabled. */
static const char version[] =
	DRV_NAME ".c: " DRV_VERSION " Alan Cox (alan@lxorguk.ukuu.org.uk).\n";
108 | |||
109 | /* | ||
110 | * Braindamage remaining: | ||
111 | * The 3c501 board. | ||
112 | */ | ||
113 | |||
114 | #include <linux/module.h> | ||
115 | |||
116 | #include <linux/kernel.h> | ||
117 | #include <linux/fcntl.h> | ||
118 | #include <linux/ioport.h> | ||
119 | #include <linux/interrupt.h> | ||
120 | #include <linux/string.h> | ||
121 | #include <linux/errno.h> | ||
122 | #include <linux/spinlock.h> | ||
123 | #include <linux/ethtool.h> | ||
124 | #include <linux/delay.h> | ||
125 | #include <linux/bitops.h> | ||
126 | |||
127 | #include <asm/uaccess.h> | ||
128 | #include <asm/io.h> | ||
129 | |||
130 | #include <linux/netdevice.h> | ||
131 | #include <linux/etherdevice.h> | ||
132 | #include <linux/skbuff.h> | ||
133 | #include <linux/init.h> | ||
134 | |||
135 | #include "3c501.h" | ||
136 | |||
137 | /* | ||
138 | * The boilerplate probe code. | ||
139 | */ | ||
140 | |||
/*
 * Module / boot-line configuration.  io == 0 means "probe the default
 * port list"; a low non-zero io disables probing (see el1_probe()).
 * A non-zero low nibble of mem_start selects the debug level
 * (see el1_probe1()).
 */
static int io = 0x280;	/* Default I/O base */
static int irq = 5;	/* Default IRQ; < 2 requests auto-IRQ detection */
static int mem_start;	/* Low 3 bits double as the debug level */
144 | |||
145 | /** | ||
146 | * el1_probe: - probe for a 3c501 | ||
147 | * @dev: The device structure passed in to probe. | ||
148 | * | ||
149 | * This can be called from two places. The network layer will probe using | ||
150 | * a device structure passed in with the probe information completed. For a | ||
151 | * modular driver we use #init_module to fill in our own structure and probe | ||
152 | * for it. | ||
153 | * | ||
154 | * Returns 0 on success. ENXIO if asked not to probe and ENODEV if asked to | ||
155 | * probe and failing to find anything. | ||
156 | */ | ||
157 | |||
158 | struct net_device * __init el1_probe(int unit) | ||
159 | { | ||
160 | struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); | ||
161 | static const unsigned ports[] = { 0x280, 0x300, 0}; | ||
162 | const unsigned *port; | ||
163 | int err = 0; | ||
164 | |||
165 | if (!dev) | ||
166 | return ERR_PTR(-ENOMEM); | ||
167 | |||
168 | if (unit >= 0) { | ||
169 | sprintf(dev->name, "eth%d", unit); | ||
170 | netdev_boot_setup_check(dev); | ||
171 | io = dev->base_addr; | ||
172 | irq = dev->irq; | ||
173 | mem_start = dev->mem_start & 7; | ||
174 | } | ||
175 | |||
176 | if (io > 0x1ff) { /* Check a single specified location. */ | ||
177 | err = el1_probe1(dev, io); | ||
178 | } else if (io != 0) { | ||
179 | err = -ENXIO; /* Don't probe at all. */ | ||
180 | } else { | ||
181 | for (port = ports; *port && el1_probe1(dev, *port); port++) | ||
182 | ; | ||
183 | if (!*port) | ||
184 | err = -ENODEV; | ||
185 | } | ||
186 | if (err) | ||
187 | goto out; | ||
188 | err = register_netdev(dev); | ||
189 | if (err) | ||
190 | goto out1; | ||
191 | return dev; | ||
192 | out1: | ||
193 | release_region(dev->base_addr, EL1_IO_EXTENT); | ||
194 | out: | ||
195 | free_netdev(dev); | ||
196 | return ERR_PTR(err); | ||
197 | } | ||
198 | |||
/*
 * net_device callbacks: EL1-specific open/stop/xmit/timeout/multicast
 * handlers, plus the generic ethernet helpers for MTU, MAC address
 * setting and address validation.
 */
static const struct net_device_ops el_netdev_ops = {
	.ndo_open = el_open,
	.ndo_stop = el1_close,
	.ndo_start_xmit = el_start_xmit,
	.ndo_tx_timeout = el_timeout,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
209 | |||
210 | /** | ||
211 | * el1_probe1: | ||
212 | * @dev: The device structure to use | ||
213 | * @ioaddr: An I/O address to probe at. | ||
214 | * | ||
215 | * The actual probe. This is iterated over by #el1_probe in order to | ||
216 | * check all the applicable device locations. | ||
217 | * | ||
218 | * Returns 0 for a success, in which case the device is activated, | ||
219 | * EAGAIN if the IRQ is in use by another driver, and ENODEV if the | ||
220 | * board cannot be found. | ||
221 | */ | ||
222 | |||
static int __init el1_probe1(struct net_device *dev, int ioaddr)
{
	struct net_local *lp;
	const char *mname;		/* Vendor name */
	unsigned char station_addr[6];
	int autoirq = 0;
	int i;

	/*
	 * Reserve I/O resource for exclusive use by this driver
	 */

	if (!request_region(ioaddr, EL1_IO_EXTENT, DRV_NAME))
		return -ENODEV;

	/*
	 * Read the station address PROM data from the special port:
	 * write the byte index to the data pointer, read the byte back.
	 */

	for (i = 0; i < 6; i++) {
		outw(i, ioaddr + EL1_DATAPTR);
		station_addr[i] = inb(ioaddr + EL1_SAPROM);
	}
	/*
	 * Check the first three octets of the S.A. for 3Com's prefix, or
	 * for the Sager NP943 prefix.  Anything else means no board here.
	 */

	if (station_addr[0] == 0x02 && station_addr[1] == 0x60 &&
	    station_addr[2] == 0x8c)
		mname = "3c501";
	else if (station_addr[0] == 0x00 && station_addr[1] == 0x80 &&
		 station_addr[2] == 0xC8)
		mname = "NP943";
	else {
		release_region(ioaddr, EL1_IO_EXTENT);
		return -ENODEV;
	}

	/*
	 * We auto-IRQ by shutting off the interrupt line and letting it
	 * float high.
	 */

	dev->irq = irq;

	if (dev->irq < 2) {
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		inb(RX_STATUS);		/* Clear pending interrupts. */
		inb(TX_STATUS);
		/* NOTE(review): AX_LOOP + 1 presumably pulses the IRQ line
		 * while in loopback — confirm against the 3Com docs. */
		outb(AX_LOOP + 1, AX_CMD);

		outb(0x00, AX_CMD);

		mdelay(20);
		autoirq = probe_irq_off(irq_mask);

		if (autoirq == 0) {
			pr_warning("%s probe at %#x failed to detect IRQ line.\n",
				   mname, ioaddr);
			release_region(ioaddr, EL1_IO_EXTENT);
			return -EAGAIN;
		}
	}

	outb(AX_RESET+AX_LOOP, AX_CMD);		/* Loopback mode. */
	dev->base_addr = ioaddr;
	memcpy(dev->dev_addr, station_addr, ETH_ALEN);

	/* A non-zero low nibble of mem_start selects the debug level. */
	if (mem_start & 0xf)
		el_debug = mem_start & 0x7;
	if (autoirq)
		dev->irq = autoirq;

	pr_info("%s: %s EtherLink at %#lx, using %sIRQ %d.\n",
		dev->name, mname, dev->base_addr,
		autoirq ? "auto":"assigned ", dev->irq);

#ifdef CONFIG_IP_MULTICAST
	pr_warning("WARNING: Use of the 3c501 in a multicast kernel is NOT recommended.\n");
#endif

	if (el_debug)
		pr_debug("%s", version);

	/* Private state starts zeroed; only the lock needs real init. */
	lp = netdev_priv(dev);
	memset(lp, 0, sizeof(struct net_local));
	spin_lock_init(&lp->lock);

	/*
	 * The EL1-specific entries in the device structure.
	 */

	dev->netdev_ops = &el_netdev_ops;
	dev->watchdog_timeo = HZ;
	dev->ethtool_ops = &netdev_ethtool_ops;
	return 0;
}
323 | |||
324 | /** | ||
325 | * el1_open: | ||
326 | * @dev: device that is being opened | ||
327 | * | ||
328 | * When an ifconfig is issued which changes the device flags to include | ||
329 | * IFF_UP this function is called. It is only called when the change | ||
330 | * occurs, not when the interface remains up. #el1_close will be called | ||
331 | * when it goes down. | ||
332 | * | ||
333 | * Returns 0 for a successful open, or -EAGAIN if someone has run off | ||
334 | * with our interrupt line. | ||
335 | */ | ||
336 | |||
static int el_open(struct net_device *dev)
{
	int retval;
	int ioaddr = dev->base_addr;
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	if (el_debug > 2)
		pr_debug("%s: Doing el_open()...\n", dev->name);

	/* Claim the IRQ before touching the board so any interrupt it
	 * raises from here on has a handler installed. */
	retval = request_irq(dev->irq, el_interrupt, 0, dev->name, dev);
	if (retval)
		return retval;

	/* Reset under the lock: el_reset() cannot take it itself
	 * because it is also called from the interrupt handler. */
	spin_lock_irqsave(&lp->lock, flags);
	el_reset(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	lp->txing = 0;			/* Board in RX mode */
	outb(AX_RX, AX_CMD);		/* Aux control, irq and receive enabled */
	netif_start_queue(dev);
	return 0;
}
360 | |||
361 | /** | ||
362 | * el_timeout: | ||
363 | * @dev: The 3c501 card that has timed out | ||
364 | * | ||
365 | * Attempt to restart the board. This is basically a mixture of extreme | ||
366 | * violence and prayer | ||
367 | * | ||
368 | */ | ||
369 | |||
static void el_timeout(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	if (el_debug)
		pr_debug("%s: transmit timed out, txsr %#2x axsr=%02x rxsr=%02x.\n",
			 dev->name, inb(TX_STATUS),
			 inb(AX_STATUS), inb(RX_STATUS));
	dev->stats.tx_errors++;
	/* Reprogram both halves to their normal command state, then
	 * bounce the aux control to drop back into receive mode. */
	outb(TX_NORM, TX_CMD);
	outb(RX_NORM, RX_CMD);
	outb(AX_OFF, AX_CMD);	/* Just trigger a false interrupt. */
	outb(AX_RX, AX_CMD);	/* Aux control, irq and receive enabled */
	lp->txing = 0;		/* Ripped back in to RX */
	netif_wake_queue(dev);
}
387 | |||
388 | |||
389 | /** | ||
390 | * el_start_xmit: | ||
391 | * @skb: The packet that is queued to be sent | ||
392 | * @dev: The 3c501 card we want to throw it down | ||
393 | * | ||
394 | * Attempt to send a packet to a 3c501 card. There are some interesting | ||
395 | * catches here because the 3c501 is an extremely old and therefore | ||
396 | * stupid piece of technology. | ||
397 | * | ||
398 | * If we are handling an interrupt on the other CPU we cannot load a packet | ||
399 | * as we may still be attempting to retrieve the last RX packet buffer. | ||
400 | * | ||
401 | * When a transmit times out we dump the card into control mode and just | ||
402 | * start again. It happens enough that it isn't worth logging. | ||
403 | * | ||
404 | * We avoid holding the spin locks when doing the packet load to the board. | ||
405 | * The device is very slow, and its DMA mode is even slower. If we held the | ||
406 | * lock while loading 1500 bytes onto the controller we would drop a lot of | ||
407 | * serial port characters. This requires we do extra locking, but we have | ||
408 | * no real choice. | ||
409 | */ | ||
410 | |||
static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;

	/*
	 * Avoid incoming interrupts between us flipping txing and flipping
	 * mode as the driver assumes txing is a faithful indicator of card
	 * state
	 */

	spin_lock_irqsave(&lp->lock, flags);

	/*
	 * Avoid timer-based retransmission conflicts.
	 */

	netif_stop_queue(dev);

	do {
		int len = skb->len;
		int pad = 0;
		int gp_start;
		unsigned char *buf = skb->data;

		/* Pad short frames out to the ethernet minimum. */
		if (len < ETH_ZLEN)
			pad = ETH_ZLEN - len;

		/* Load the packet so it ends exactly at the top of the
		 * 2K (0x800) on-board buffer. */
		gp_start = 0x800 - (len + pad);

		/* Remembered so el_interrupt() can rewind GP_LOW here
		 * when retransmitting after a collision. */
		lp->tx_pkt_start = gp_start;
		lp->collisions = 0;

		dev->stats.tx_bytes += skb->len;

		/*
		 * Command mode with status cleared should [in theory]
		 * mean no more interrupts can be pending on the card.
		 */

		outb_p(AX_SYS, AX_CMD);
		inb_p(RX_STATUS);
		inb_p(TX_STATUS);

		lp->loading = 1;
		lp->txing = 1;

		/*
		 * Turn interrupts back on while we spend a pleasant
		 * afternoon loading bytes into the board
		 */

		spin_unlock_irqrestore(&lp->lock, flags);

		/* Set rx packet area to 0. */
		outw(0x00, RX_BUF_CLR);
		/* aim - packet will be loaded into buffer start */
		outw(gp_start, GP_LOW);
		/* load buffer (usual thing each byte increments the pointer) */
		outsb(DATAPORT, buf, len);
		if (pad) {
			while (pad--)		/* Zero fill buffer tail */
				outb(0, DATAPORT);
		}
		/* the board reuses the same register */
		outw(gp_start, GP_LOW);

		/* el_interrupt() sets lp->loading to 2 if a stray
		 * interrupt arrived while we were loading; in that case
		 * the buffer contents are suspect and we redo the load. */
		if (lp->loading != 2) {
			/* fire ... Trigger xmit. */
			outb(AX_XMIT, AX_CMD);
			lp->loading = 0;
			if (el_debug > 2)
				pr_debug(" queued xmit.\n");
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		/* A receive upset our load, despite our best efforts */
		if (el_debug > 2)
			pr_debug("%s: burped during tx load.\n", dev->name);
		spin_lock_irqsave(&lp->lock, flags);
	} while (1);
}
494 | |||
495 | /** | ||
496 | * el_interrupt: | ||
497 | * @irq: Interrupt number | ||
498 | * @dev_id: The 3c501 that burped | ||
499 | * | ||
500 | * Handle the ether interface interrupts. The 3c501 needs a lot more | ||
501 | * hand holding than most cards. In particular we get a transmit interrupt | ||
502 | * with a collision error because the board firmware isn't capable of rewinding | ||
503 | * its own transmit buffer pointers. It can however count to 16 for us. | ||
504 | * | ||
505 | * On the receive side the card is also very dumb. It has no buffering to | ||
506 | * speak of. We simply pull the packet out of its PIO buffer (which is slow) | ||
507 | * and queue it for the kernel. Then we reset the card for the next packet. | ||
508 | * | ||
509 | * We sometimes get surprise interrupts late both because the SMP IRQ delivery | ||
510 | * is message passing and because the card sometimes seems to deliver late. I | ||
511 | * think if it is part way through a receive and the mode is changed it carries | ||
512 | * on receiving and sends us an interrupt. We have to band aid all these cases | ||
513 | * to get a sensible 150kBytes/second performance. Even then you want a small | ||
514 | * TCP window. | ||
515 | */ | ||
516 | |||
static irqreturn_t el_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int ioaddr;
	int axsr;			/* Aux. status reg. */

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	spin_lock(&lp->lock);

	/*
	 * What happened ?
	 */

	axsr = inb(AX_STATUS);

	/*
	 * Log it
	 */

	if (el_debug > 3)
		pr_debug("%s: el_interrupt() aux=%#02x\n", dev->name, axsr);

	/* loading is only ever set on the transmit path, so seeing it
	 * without txing means the state machine went wrong somewhere. */
	if (lp->loading == 1 && !lp->txing)
		pr_warning("%s: Inconsistent state loading while not in tx\n",
			   dev->name);

	if (lp->txing) {
		/*
		 * Board in transmit mode. May be loading. If we are
		 * loading we shouldn't have got this.
		 */
		int txsr = inb(TX_STATUS);

		if (lp->loading == 1) {
			if (el_debug > 2)
				pr_debug("%s: Interrupt while loading [txsr=%02x gp=%04x rp=%04x]\n",
					 dev->name, txsr, inw(GP_LOW), inw(RX_LOW));

			/* Force a reload: el_start_xmit() checks for
			 * loading == 2 and redoes the buffer load.  Bail
			 * out without switching the board back to RX. */
			lp->loading = 2;
			spin_unlock(&lp->lock);
			goto out;
		}
		if (el_debug > 6)
			pr_debug("%s: txsr=%02x gp=%04x rp=%04x\n", dev->name,
				 txsr, inw(GP_LOW), inw(RX_LOW));

		if ((axsr & 0x80) && (txsr & TX_READY) == 0) {
			/*
			 * FIXME: is there a logic to whether to keep
			 * on trying or reset immediately ?
			 */
			if (el_debug > 1)
				pr_debug("%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x gp=%03x rp=%03x.\n",
					 dev->name, txsr, axsr,
					 inw(ioaddr + EL1_DATAPTR),
					 inw(ioaddr + EL1_RXPTR));
			lp->txing = 0;
			netif_wake_queue(dev);
		} else if (txsr & TX_16COLLISIONS) {
			/*
			 * Timed out: give up on this packet.
			 */
			if (el_debug)
				pr_debug("%s: Transmit failed 16 times, Ethernet jammed?\n", dev->name);
			outb(AX_SYS, AX_CMD);
			lp->txing = 0;
			dev->stats.tx_aborted_errors++;
			netif_wake_queue(dev);
		} else if (txsr & TX_COLLISION) {
			/*
			 * Retrigger xmit.
			 */

			if (el_debug > 6)
				pr_debug("%s: retransmitting after a collision.\n", dev->name);
			/*
			 * Poor little chip can't reset its own start
			 * pointer: rewind GP_LOW to the saved packet
			 * start and fire again.  Stay in TX mode, so
			 * skip the switch back to receive below.
			 */

			outb(AX_SYS, AX_CMD);
			outw(lp->tx_pkt_start, GP_LOW);
			outb(AX_XMIT, AX_CMD);
			dev->stats.collisions++;
			spin_unlock(&lp->lock);
			goto out;
		} else {
			/*
			 * It worked.. we will now fall through and receive
			 */
			dev->stats.tx_packets++;
			if (el_debug > 6)
				pr_debug("%s: Tx succeeded %s\n", dev->name,
					 (txsr & TX_RDY) ? "." : "but tx is busy!");
			/*
			 * This is safe the interrupt is atomic WRT itself.
			 */
			lp->txing = 0;
			/* In case more to transmit */
			netif_wake_queue(dev);
		}
	} else {
		/*
		 * In receive mode.
		 */

		int rxsr = inb(RX_STATUS);
		if (el_debug > 5)
			pr_debug("%s: rxsr=%02x txsr=%02x rp=%04x\n",
				 dev->name, rxsr, inb(TX_STATUS), inw(RX_LOW));
		/*
		 * Just reading rx_status fixes most errors.
		 */
		if (rxsr & RX_MISSED)
			dev->stats.rx_missed_errors++;
		else if (rxsr & RX_RUNT) {
			/* Handled to avoid board lock-up. */
			dev->stats.rx_length_errors++;
			if (el_debug > 5)
				pr_debug("%s: runt.\n", dev->name);
		} else if (rxsr & RX_GOOD) {
			/*
			 * Receive worked: pull the packet off the board.
			 */
			el_receive(dev);
		} else {
			/*
			 * Nothing? Something is broken!
			 */
			if (el_debug > 2)
				pr_debug("%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
					 dev->name, rxsr);
			el_reset(dev);
		}
	}

	/*
	 * Move into receive mode and clear any stale interrupt status.
	 */

	outb(AX_RX, AX_CMD);
	outw(0x00, RX_BUF_CLR);
	inb(RX_STATUS);		/* Be certain that interrupts are cleared. */
	inb(TX_STATUS);
	spin_unlock(&lp->lock);
out:
	return IRQ_HANDLED;
}
669 | |||
670 | |||
671 | /** | ||
672 | * el_receive: | ||
673 | * @dev: Device to pull the packets from | ||
674 | * | ||
675 | * We have a good packet. Well, not really "good", just mostly not broken. | ||
676 | * We must check everything to see if it is good. In particular we occasionally | ||
677 | * get wild packet sizes from the card. If the packet seems sane we PIO it | ||
678 | * off the card and queue it for the protocol layers. | ||
679 | */ | ||
680 | |||
/* Called from el_interrupt() with lp->lock held; the caller switches
 * the board back into receive mode after we return. */
static void el_receive(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	int pkt_len;
	struct sk_buff *skb;

	pkt_len = inw(RX_LOW);

	if (el_debug > 4)
		pr_debug(" el_receive %d.\n", pkt_len);

	/* Sanity-check the length the board claims: shorter than a
	 * minimum frame or longer than the buffer means garbage. */
	if (pkt_len < 60 || pkt_len > 1536) {
		if (el_debug)
			pr_debug("%s: bogus packet, length=%d\n",
				 dev->name, pkt_len);
		dev->stats.rx_over_errors++;
		return;
	}

	/*
	 * Command mode so we can empty the buffer
	 */

	outb(AX_SYS, AX_CMD);
	skb = dev_alloc_skb(pkt_len+2);

	/*
	 * Start of frame
	 */

	outw(0x00, GP_LOW);
	if (skb == NULL) {
		pr_info("%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	} else {
		skb_reserve(skb, 2);	/* Force 16 byte alignment */
		/*
		 * The read increments through the bytes. The interrupt
		 * handler will fix the pointer when it returns to
		 * receive mode.
		 */
		insb(DATAPORT, skb_put(skb, pkt_len), pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pkt_len;
	}
}
730 | |||
731 | /** | ||
732 | * el_reset: Reset a 3c501 card | ||
733 | * @dev: The 3c501 card about to get zapped | ||
734 | * | ||
735 | * Even resetting a 3c501 isn't simple. When you activate reset it loses all | ||
736 | * its configuration. You must hold the lock when doing this. The function | ||
737 | * cannot take the lock itself as it is callable from the irq handler. | ||
738 | */ | ||
739 | |||
static void el_reset(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	if (el_debug > 2)
		pr_info("3c501 reset...\n");
	outb(AX_RESET, AX_CMD);		/* Reset the chip */
	/* Aux control, irq and loopback enabled */
	outb(AX_LOOP, AX_CMD);
	{
		/* The reset wiped the board configuration; reprogram
		 * the station address a byte at a time. */
		int i;
		for (i = 0; i < 6; i++)		/* Set the station address. */
			outb(dev->dev_addr[i], ioaddr + i);
	}

	outw(0, RX_BUF_CLR);		/* Set rx packet area to 0. */
	outb(TX_NORM, TX_CMD);		/* tx irq on done, collision */
	outb(RX_NORM, RX_CMD);		/* Set Rx commands. */
	inb(RX_STATUS);			/* Clear status. */
	inb(TX_STATUS);
	lp->txing = 0;
}
763 | |||
764 | /** | ||
765 | * el1_close: | ||
766 | * @dev: 3c501 card to shut down | ||
767 | * | ||
768 | * Close a 3c501 card. The IFF_UP flag has been cleared by the user via | ||
769 | * the SIOCSIFFLAGS ioctl. We stop any further transmissions being queued, | ||
770 | * and then disable the interrupts. Finally we reset the chip. The effects | ||
771 | * of the rest will be cleaned up by #el1_open. Always returns 0 indicating | ||
772 | * a success. | ||
773 | */ | ||
774 | |||
static int el1_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	if (el_debug > 2)
		pr_info("%s: Shutting down Ethernet card at %#x.\n",
			dev->name, ioaddr);

	/* Stop further transmissions before tearing anything down. */
	netif_stop_queue(dev);

	/*
	 * Free and disable the IRQ.
	 */

	free_irq(dev->irq, dev);
	outb(AX_RESET, AX_CMD);		/* Reset the chip */

	return 0;
}
794 | |||
795 | /** | ||
796 | * set_multicast_list: | ||
797 | * @dev: The device to adjust | ||
798 | * | ||
799 | * Set or clear the multicast filter for this adaptor to use the best-effort | ||
800 | * filtering supported. The 3c501 supports only three modes of filtering. | ||
801 | * It always receives broadcasts and packets for itself. You can choose to | ||
802 | * optionally receive all packets, or all multicast packets on top of this. | ||
803 | */ | ||
804 | |||
805 | static void set_multicast_list(struct net_device *dev) | ||
806 | { | ||
807 | int ioaddr = dev->base_addr; | ||
808 | |||
809 | if (dev->flags & IFF_PROMISC) { | ||
810 | outb(RX_PROM, RX_CMD); | ||
811 | inb(RX_STATUS); | ||
812 | } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { | ||
813 | /* Multicast or all multicast is the same */ | ||
814 | outb(RX_MULT, RX_CMD); | ||
815 | inb(RX_STATUS); /* Clear status. */ | ||
816 | } else { | ||
817 | outb(RX_NORM, RX_CMD); | ||
818 | inb(RX_STATUS); | ||
819 | } | ||
820 | } | ||
821 | |||
822 | |||
823 | static void netdev_get_drvinfo(struct net_device *dev, | ||
824 | struct ethtool_drvinfo *info) | ||
825 | { | ||
826 | strcpy(info->driver, DRV_NAME); | ||
827 | strcpy(info->version, DRV_VERSION); | ||
828 | sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); | ||
829 | } | ||
830 | |||
831 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
832 | { | ||
833 | return debug; | ||
834 | } | ||
835 | |||
836 | static void netdev_set_msglevel(struct net_device *dev, u32 level) | ||
837 | { | ||
838 | debug = level; | ||
839 | } | ||
840 | |||
/* Minimal ethtool support: driver identification and debug level only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
};
846 | |||
847 | #ifdef MODULE | ||
848 | |||
/* The single device instance supported when built as a module. */
static struct net_device *dev_3c501;

module_param(io, int, 0);
module_param(irq, int, 0);
MODULE_PARM_DESC(io, "EtherLink I/O base address");
MODULE_PARM_DESC(irq, "EtherLink IRQ number");
855 | |||
856 | /** | ||
857 | * init_module: | ||
858 | * | ||
859 | * When the driver is loaded as a module this function is called. We fake up | ||
860 | * a device structure with the base I/O and interrupt set as if it were being | ||
861 | * called from Space.c. This minimises the extra code that would otherwise | ||
862 | * be required. | ||
863 | * | ||
864 | * Returns 0 for success or -EIO if a card is not found. Returning an error | ||
865 | * here also causes the module to be unloaded | ||
866 | */ | ||
867 | |||
868 | int __init init_module(void) | ||
869 | { | ||
870 | dev_3c501 = el1_probe(-1); | ||
871 | if (IS_ERR(dev_3c501)) | ||
872 | return PTR_ERR(dev_3c501); | ||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | /** | ||
877 | * cleanup_module: | ||
878 | * | ||
879 | * The module is being unloaded. We unhook our network device from the system | ||
880 | * and then free up the resources we took when the card was found. | ||
881 | */ | ||
882 | |||
883 | void __exit cleanup_module(void) | ||
884 | { | ||
885 | struct net_device *dev = dev_3c501; | ||
886 | unregister_netdev(dev); | ||
887 | release_region(dev->base_addr, EL1_IO_EXTENT); | ||
888 | free_netdev(dev); | ||
889 | } | ||
890 | |||
891 | #endif /* MODULE */ | ||
892 | |||
/* Module metadata. */
MODULE_AUTHOR("Donald Becker, Alan Cox");
MODULE_DESCRIPTION("Support for the ancient 3Com 3c501 ethernet card");
MODULE_LICENSE("GPL");
896 | |||
diff --git a/drivers/net/ethernet/3com/3c501.h b/drivers/net/ethernet/3com/3c501.h new file mode 100644 index 000000000000..183fd55f03cb --- /dev/null +++ b/drivers/net/ethernet/3com/3c501.h | |||
@@ -0,0 +1,91 @@ | |||
1 | |||
2 | /* | ||
3 | * Index to functions. | ||
4 | */ | ||
5 | |||
6 | static int el1_probe1(struct net_device *dev, int ioaddr); | ||
7 | static int el_open(struct net_device *dev); | ||
8 | static void el_timeout(struct net_device *dev); | ||
9 | static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
10 | static irqreturn_t el_interrupt(int irq, void *dev_id); | ||
11 | static void el_receive(struct net_device *dev); | ||
12 | static void el_reset(struct net_device *dev); | ||
13 | static int el1_close(struct net_device *dev); | ||
14 | static void set_multicast_list(struct net_device *dev); | ||
15 | static const struct ethtool_ops netdev_ethtool_ops; | ||
16 | |||
17 | #define EL1_IO_EXTENT 16 | ||
18 | |||
19 | #ifndef EL_DEBUG | ||
20 | #define EL_DEBUG 0 /* use 0 for production, 1 for devel., >2 for debug */ | ||
21 | #endif /* Anything above 5 is wordy death! */ | ||
22 | #define debug el_debug | ||
23 | static int el_debug = EL_DEBUG; | ||
24 | |||
25 | /* | ||
26 | * Board-specific info in netdev_priv(dev). | ||
27 | */ | ||
28 | |||
/* Per-board private state, stored in netdev_priv(dev). */
struct net_local
{
	int tx_pkt_start;	/* The length of the current Tx packet. */
	int collisions;		/* Tx collisions this packet */
	int loading;		/* Spot buffer load collisions */
	int txing;		/* True if card is in TX mode */
	spinlock_t lock;	/* Serializing lock */
};
37 | |||
38 | |||
39 | #define RX_STATUS (ioaddr + 0x06) | ||
40 | #define RX_CMD RX_STATUS | ||
41 | #define TX_STATUS (ioaddr + 0x07) | ||
42 | #define TX_CMD TX_STATUS | ||
43 | #define GP_LOW (ioaddr + 0x08) | ||
44 | #define GP_HIGH (ioaddr + 0x09) | ||
45 | #define RX_BUF_CLR (ioaddr + 0x0A) | ||
46 | #define RX_LOW (ioaddr + 0x0A) | ||
47 | #define RX_HIGH (ioaddr + 0x0B) | ||
48 | #define SAPROM (ioaddr + 0x0C) | ||
49 | #define AX_STATUS (ioaddr + 0x0E) | ||
50 | #define AX_CMD AX_STATUS | ||
51 | #define DATAPORT (ioaddr + 0x0F) | ||
52 | #define TX_RDY 0x08 /* In TX_STATUS */ | ||
53 | |||
54 | #define EL1_DATAPTR 0x08 | ||
55 | #define EL1_RXPTR 0x0A | ||
56 | #define EL1_SAPROM 0x0C | ||
57 | #define EL1_DATAPORT 0x0f | ||
58 | |||
59 | /* | ||
60 | * Writes to the ax command register. | ||
61 | */ | ||
62 | |||
63 | #define AX_OFF 0x00 /* Irq off, buffer access on */ | ||
64 | #define AX_SYS 0x40 /* Load the buffer */ | ||
65 | #define AX_XMIT 0x44 /* Transmit a packet */ | ||
66 | #define AX_RX 0x48 /* Receive a packet */ | ||
67 | #define AX_LOOP 0x0C /* Loopback mode */ | ||
68 | #define AX_RESET 0x80 | ||
69 | |||
70 | /* | ||
71 | * Normal receive mode written to RX_STATUS. We must intr on short packets | ||
72 | * to avoid bogus rx lockups. | ||
73 | */ | ||
74 | |||
75 | #define RX_NORM 0xA8 /* 0x68 == all addrs, 0xA8 only to me. */ | ||
76 | #define RX_PROM 0x68 /* Senior Prom, uhmm promiscuous mode. */ | ||
77 | #define RX_MULT 0xE8 /* Accept multicast packets. */ | ||
78 | #define TX_NORM 0x0A /* Interrupt on everything that might hang the chip */ | ||
79 | |||
80 | /* | ||
81 | * TX_STATUS register. | ||
82 | */ | ||
83 | |||
84 | #define TX_COLLISION 0x02 | ||
85 | #define TX_16COLLISIONS 0x04 | ||
86 | #define TX_READY 0x08 | ||
87 | |||
88 | #define RX_RUNT 0x08 | ||
89 | #define RX_MISSED 0x01 /* Missed a packet due to 3c501 braindamage. */ | ||
90 | #define RX_GOOD 0x30 /* Good packet 0x20, or simple overflow 0x10. */ | ||
91 | |||
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c new file mode 100644 index 000000000000..44b28b2d7003 --- /dev/null +++ b/drivers/net/ethernet/3com/3c509.c | |||
@@ -0,0 +1,1594 @@ | |||
1 | /* 3c509.c: A 3c509 EtherLink3 ethernet driver for linux. */ | ||
2 | /* | ||
3 | Written 1993-2000 by Donald Becker. | ||
4 | |||
5 | Copyright 1994-2000 by Donald Becker. | ||
6 | Copyright 1993 United States Government as represented by the | ||
7 | Director, National Security Agency. This software may be used and | ||
8 | distributed according to the terms of the GNU General Public License, | ||
9 | incorporated herein by reference. | ||
10 | |||
11 | This driver is for the 3Com EtherLinkIII series. | ||
12 | |||
13 | The author may be reached as becker@scyld.com, or C/O | ||
14 | Scyld Computing Corporation | ||
15 | 410 Severn Ave., Suite 210 | ||
16 | Annapolis MD 21403 | ||
17 | |||
18 | Known limitations: | ||
19 | Because of the way 3c509 ISA detection works it's difficult to predict | ||
20 | a priori which of several ISA-mode cards will be detected first. | ||
21 | |||
22 | This driver does not use predictive interrupt mode, resulting in higher | ||
23 | packet latency but lower overhead. If interrupts are disabled for an | ||
24 | unusually long time it could also result in missed packets, but in | ||
25 | practice this rarely happens. | ||
26 | |||
27 | |||
28 | FIXES: | ||
29 | Alan Cox: Removed the 'Unexpected interrupt' bug. | ||
30 | Michael Meskes: Upgraded to Donald Becker's version 1.07. | ||
31 | Alan Cox: Increased the eeprom delay. Regardless of | ||
32 | what the docs say some people definitely | ||
33 | get problems with lower (but in card spec) | ||
34 | delays | ||
35 | v1.10 4/21/97 Fixed module code so that multiple cards may be detected, | ||
36 | other cleanups. -djb | ||
37 | Andrea Arcangeli: Upgraded to Donald Becker's version 1.12. | ||
38 | Rick Payne: Fixed SMP race condition | ||
39 | v1.13 9/8/97 Made 'max_interrupt_work' an insmod-settable variable -djb | ||
40 | v1.14 10/15/97 Avoided waiting..discard message for fast machines -djb | ||
41 | v1.15 1/31/98 Faster recovery for Tx errors. -djb | ||
42 | v1.16 2/3/98 Different ID port handling to avoid sound cards. -djb | ||
43 | v1.18 12Mar2001 Andrew Morton | ||
44 | - Avoid bogus detect of 3c590's (Andrzej Krzysztofowicz) | ||
45 | - Reviewed against 1.18 from scyld.com | ||
46 | v1.18a 17Nov2001 Jeff Garzik <jgarzik@pobox.com> | ||
47 | - ethtool support | ||
48 | v1.18b 1Mar2002 Zwane Mwaikambo <zwane@commfireservices.com> | ||
49 | - Power Management support | ||
50 | v1.18c 1Mar2002 David Ruggiero <jdr@farfalle.com> | ||
51 | - Full duplex support | ||
52 | v1.19 16Oct2002 Zwane Mwaikambo <zwane@linuxpower.ca> | ||
53 | - Additional ethtool features | ||
54 | v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com> | ||
55 | - Increase *read_eeprom udelay to workaround oops with 2 cards. | ||
56 | v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org> | ||
57 | - Introduce driver model for EISA cards. | ||
58 | v1.20 04Feb2008 Ondrej Zary <linux@rainbow-software.org> | ||
59 | - convert to isa_driver and pnp_driver and some cleanups | ||
60 | */ | ||
61 | |||
62 | #define DRV_NAME "3c509" | ||
63 | #define DRV_VERSION "1.20" | ||
64 | #define DRV_RELDATE "04Feb2008" | ||
65 | |||
66 | /* A few values that may be tweaked. */ | ||
67 | |||
68 | /* Time in jiffies before concluding the transmitter is hung. */ | ||
69 | #define TX_TIMEOUT (400*HZ/1000) | ||
70 | |||
71 | #include <linux/module.h> | ||
72 | #include <linux/mca.h> | ||
73 | #include <linux/isa.h> | ||
74 | #include <linux/pnp.h> | ||
75 | #include <linux/string.h> | ||
76 | #include <linux/interrupt.h> | ||
77 | #include <linux/errno.h> | ||
78 | #include <linux/in.h> | ||
79 | #include <linux/ioport.h> | ||
80 | #include <linux/init.h> | ||
81 | #include <linux/netdevice.h> | ||
82 | #include <linux/etherdevice.h> | ||
83 | #include <linux/pm.h> | ||
84 | #include <linux/skbuff.h> | ||
85 | #include <linux/delay.h> /* for udelay() */ | ||
86 | #include <linux/spinlock.h> | ||
87 | #include <linux/ethtool.h> | ||
88 | #include <linux/device.h> | ||
89 | #include <linux/eisa.h> | ||
90 | #include <linux/bitops.h> | ||
91 | |||
92 | #include <asm/uaccess.h> | ||
93 | #include <asm/io.h> | ||
94 | #include <asm/irq.h> | ||
95 | |||
96 | static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; | ||
97 | |||
98 | #ifdef EL3_DEBUG | ||
99 | static int el3_debug = EL3_DEBUG; | ||
100 | #else | ||
101 | static int el3_debug = 2; | ||
102 | #endif | ||
103 | |||
104 | /* Used to do a global count of all the cards in the system. Must be | ||
105 | * a global variable so that the mca/eisa probe routines can increment | ||
106 | * it */ | ||
107 | static int el3_cards = 0; | ||
108 | #define EL3_MAX_CARDS 8 | ||
109 | |||
110 | /* To minimize the size of the driver source I only define operating | ||
111 | constants if they are used several times. You'll need the manual | ||
112 | anyway if you want to understand driver details. */ | ||
113 | /* Offsets from base I/O address. */ | ||
114 | #define EL3_DATA 0x00 | ||
115 | #define EL3_CMD 0x0e | ||
116 | #define EL3_STATUS 0x0e | ||
117 | #define EEPROM_READ 0x80 | ||
118 | |||
119 | #define EL3_IO_EXTENT 16 | ||
120 | |||
121 | #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) | ||
122 | |||
123 | |||
124 | /* The top five bits written to EL3_CMD are a command, the lower | ||
125 | 11 bits are the parameter, if applicable. */ | ||
126 | enum c509cmd { | ||
127 | TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, | ||
128 | RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, | ||
129 | TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, | ||
130 | FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, | ||
131 | SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, | ||
132 | SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, | ||
133 | StatsDisable = 22<<11, StopCoax = 23<<11, PowerUp = 27<<11, | ||
134 | PowerDown = 28<<11, PowerAuto = 29<<11}; | ||
135 | |||
136 | enum c509status { | ||
137 | IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, | ||
138 | TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, | ||
139 | IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000, }; | ||
140 | |||
141 | /* The SetRxFilter command accepts the following classes: */ | ||
142 | enum RxFilter { | ||
143 | RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 }; | ||
144 | |||
145 | /* Register window 1 offsets, the window used in normal operation. */ | ||
146 | #define TX_FIFO 0x00 | ||
147 | #define RX_FIFO 0x00 | ||
148 | #define RX_STATUS 0x08 | ||
149 | #define TX_STATUS 0x0B | ||
150 | #define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ | ||
151 | |||
152 | #define WN0_CONF_CTRL 0x04 /* Window 0: Configuration control register */ | ||
153 | #define WN0_ADDR_CONF 0x06 /* Window 0: Address configuration register */ | ||
154 | #define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ | ||
155 | #define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */ | ||
156 | #define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ | ||
157 | #define WN4_NETDIAG 0x06 /* Window 4: Net diagnostic */ | ||
158 | #define FD_ENABLE 0x8000 /* Enable full-duplex ("external loopback") */ | ||
159 | |||
160 | /* | ||
161 | * Must be a power of two (we use a binary and in the | ||
162 | * circular queue) | ||
163 | */ | ||
164 | #define SKB_QUEUE_SIZE 64 | ||
165 | |||
166 | enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; | ||
167 | |||
/* Per-adapter driver state, stored in netdev_priv(dev). */
struct el3_private {
	spinlock_t lock;
	/* skb send-queue (circular; SKB_QUEUE_SIZE is a power of two) */
	int head, size;
	struct sk_buff *queue[SKB_QUEUE_SIZE];
	enum el3_cardtype type;	/* which bus attachment found this card */
};
175 | static int id_port; | ||
176 | static int current_tag; | ||
177 | static struct net_device *el3_devs[EL3_MAX_CARDS]; | ||
178 | |||
179 | /* Parameters that may be passed into the module. */ | ||
180 | static int debug = -1; | ||
181 | static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1}; | ||
182 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
183 | static int max_interrupt_work = 10; | ||
184 | #ifdef CONFIG_PNP | ||
185 | static int nopnp; | ||
186 | #endif | ||
187 | |||
188 | static int __devinit el3_common_init(struct net_device *dev); | ||
189 | static void el3_common_remove(struct net_device *dev); | ||
190 | static ushort id_read_eeprom(int index); | ||
191 | static ushort read_eeprom(int ioaddr, int index); | ||
192 | static int el3_open(struct net_device *dev); | ||
193 | static netdev_tx_t el3_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
194 | static irqreturn_t el3_interrupt(int irq, void *dev_id); | ||
195 | static void update_stats(struct net_device *dev); | ||
196 | static struct net_device_stats *el3_get_stats(struct net_device *dev); | ||
197 | static int el3_rx(struct net_device *dev); | ||
198 | static int el3_close(struct net_device *dev); | ||
199 | static void set_multicast_list(struct net_device *dev); | ||
200 | static void el3_tx_timeout (struct net_device *dev); | ||
201 | static void el3_down(struct net_device *dev); | ||
202 | static void el3_up(struct net_device *dev); | ||
203 | static const struct ethtool_ops ethtool_ops; | ||
204 | #ifdef CONFIG_PM | ||
205 | static int el3_suspend(struct device *, pm_message_t); | ||
206 | static int el3_resume(struct device *); | ||
207 | #else | ||
208 | #define el3_suspend NULL | ||
209 | #define el3_resume NULL | ||
210 | #endif | ||
211 | |||
212 | |||
213 | /* generic device remove for all device types */ | ||
214 | static int el3_device_remove (struct device *device); | ||
215 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
216 | static void el3_poll_controller(struct net_device *dev); | ||
217 | #endif | ||
218 | |||
219 | /* Return 0 on success, 1 on error, 2 when found already detected PnP card */ | ||
static int el3_isa_id_sequence(__be16 *phys_addr)
{
	short lrs_state = 0xff;
	int i;

	/* ISA boards are detected by sending the ID sequence to the
	   ID_PORT.  We find cards past the first by setting the 'current_tag'
	   on cards as they are found.  Cards with their tag set will not
	   respond to subsequent ID sequences. */

	outb(0x00, id_port);
	outb(0x00, id_port);
	/* Drive the 255-step LFSR pattern the 3c509 watches for on the
	   ID port (polynomial feedback via the 0xcf XOR). */
	for (i = 0; i < 255; i++) {
		outb(lrs_state, id_port);
		lrs_state <<= 1;
		lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
	}
	/* For the first probe, clear all board's tag registers. */
	if (current_tag == 0)
		outb(0xd0, id_port);
	else			/* Otherwise kill off already-found boards. */
		outb(0xd8, id_port);
	/* EEPROM word 7 is the 3Com manufacturer ID (0x6d50). */
	if (id_read_eeprom(7) != 0x6d50)
		return 1;
	/* Read in EEPROM data, which does contention-select.
	   Only the lowest address board will stay "on-line".
	   3Com got the byte order backwards. */
	for (i = 0; i < 3; i++)
		phys_addr[i] = htons(id_read_eeprom(i));
#ifdef CONFIG_PNP
	if (!nopnp) {
		/* The ISA PnP 3c509 cards respond to the ID sequence too.
		   This check is needed in order not to register them twice. */
		for (i = 0; i < el3_cards; i++) {
			struct el3_private *lp = netdev_priv(el3_devs[i]);
			if (lp->type == EL3_PNP &&
			    !memcmp(phys_addr, el3_devs[i]->dev_addr,
				    ETH_ALEN)) {
				if (el3_debug > 3)
					pr_debug("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
						phys_addr[0] & 0xff, phys_addr[0] >> 8,
						phys_addr[1] & 0xff, phys_addr[1] >> 8,
						phys_addr[2] & 0xff, phys_addr[2] >> 8);
				/* Set the adaptor tag so that the next card can be found. */
				outb(0xd0 + ++current_tag, id_port);
				return 2;
			}
		}
	}
#endif /* CONFIG_PNP */
	return 0;

}
273 | |||
274 | static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr, | ||
275 | int ioaddr, int irq, int if_port, | ||
276 | enum el3_cardtype type) | ||
277 | { | ||
278 | struct el3_private *lp = netdev_priv(dev); | ||
279 | |||
280 | memcpy(dev->dev_addr, phys_addr, ETH_ALEN); | ||
281 | dev->base_addr = ioaddr; | ||
282 | dev->irq = irq; | ||
283 | dev->if_port = if_port; | ||
284 | lp->type = type; | ||
285 | } | ||
286 | |||
287 | static int __devinit el3_isa_match(struct device *pdev, | ||
288 | unsigned int ndev) | ||
289 | { | ||
290 | struct net_device *dev; | ||
291 | int ioaddr, isa_irq, if_port, err; | ||
292 | unsigned int iobase; | ||
293 | __be16 phys_addr[3]; | ||
294 | |||
295 | while ((err = el3_isa_id_sequence(phys_addr)) == 2) | ||
296 | ; /* Skip to next card when PnP card found */ | ||
297 | if (err == 1) | ||
298 | return 0; | ||
299 | |||
300 | iobase = id_read_eeprom(8); | ||
301 | if_port = iobase >> 14; | ||
302 | ioaddr = 0x200 + ((iobase & 0x1f) << 4); | ||
303 | if (irq[el3_cards] > 1 && irq[el3_cards] < 16) | ||
304 | isa_irq = irq[el3_cards]; | ||
305 | else | ||
306 | isa_irq = id_read_eeprom(9) >> 12; | ||
307 | |||
308 | dev = alloc_etherdev(sizeof(struct el3_private)); | ||
309 | if (!dev) | ||
310 | return -ENOMEM; | ||
311 | |||
312 | netdev_boot_setup_check(dev); | ||
313 | |||
314 | if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { | ||
315 | free_netdev(dev); | ||
316 | return 0; | ||
317 | } | ||
318 | |||
319 | /* Set the adaptor tag so that the next card can be found. */ | ||
320 | outb(0xd0 + ++current_tag, id_port); | ||
321 | |||
322 | /* Activate the adaptor at the EEPROM location. */ | ||
323 | outb((ioaddr >> 4) | 0xe0, id_port); | ||
324 | |||
325 | EL3WINDOW(0); | ||
326 | if (inw(ioaddr) != 0x6d50) { | ||
327 | free_netdev(dev); | ||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | /* Free the interrupt so that some other card can use it. */ | ||
332 | outw(0x0f00, ioaddr + WN0_IRQ); | ||
333 | |||
334 | el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA); | ||
335 | dev_set_drvdata(pdev, dev); | ||
336 | if (el3_common_init(dev)) { | ||
337 | free_netdev(dev); | ||
338 | return 0; | ||
339 | } | ||
340 | |||
341 | el3_devs[el3_cards++] = dev; | ||
342 | return 1; | ||
343 | } | ||
344 | |||
/* isa_driver remove hook: tear down one card and clear its driver data. */
static int __devexit el3_isa_remove(struct device *pdev,
				    unsigned int ndev)
{
	el3_device_remove(pdev);
	dev_set_drvdata(pdev, NULL);
	return 0;
}
352 | |||
353 | #ifdef CONFIG_PM | ||
/*
 * Suspend: forget all assigned ID tags so the resume path can rerun the
 * ISA ID sequence from scratch, then do the bus-independent suspend.
 */
static int el3_isa_suspend(struct device *dev, unsigned int n,
			   pm_message_t state)
{
	current_tag = 0;
	return el3_suspend(dev, state);
}
360 | |||
/*
 * Resume: rerun the ID sequence (tags were cleared in suspend),
 * re-activate the adaptor at its previous I/O base, then hand off to the
 * bus-independent resume.  Returns 1 if the card fails to answer with the
 * 3Com manufacturer ID.
 */
static int el3_isa_resume(struct device *dev, unsigned int n)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ioaddr = ndev->base_addr, err;
	__be16 phys_addr[3];

	while ((err = el3_isa_id_sequence(phys_addr)) == 2)
		;	/* Skip to next card when PnP card found */
	if (err == 1)
		return 0;
	/* Set the adaptor tag so that the next card can be found. */
	outb(0xd0 + ++current_tag, id_port);
	/* Enable the card */
	outb((ioaddr >> 4) | 0xe0, id_port);
	EL3WINDOW(0);
	if (inw(ioaddr) != 0x6d50)
		return 1;
	/* Free the interrupt so that some other card can use it. */
	outw(0x0f00, ioaddr + WN0_IRQ);
	return el3_resume(dev);
}
382 | #endif | ||
383 | |||
/* Device-model glue for legacy (non-PnP) ISA probing. */
static struct isa_driver el3_isa_driver = {
	.match		= el3_isa_match,
	.remove		= __devexit_p(el3_isa_remove),
#ifdef CONFIG_PM
	.suspend	= el3_isa_suspend,
	.resume		= el3_isa_resume,
#endif
	.driver		= {
		.name	= "3c509"
	},
};
395 | static int isa_registered; | ||
396 | |||
397 | #ifdef CONFIG_PNP | ||
398 | static struct pnp_device_id el3_pnp_ids[] = { | ||
399 | { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */ | ||
400 | { .id = "TCM5091" }, /* 3Com Etherlink III */ | ||
401 | { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */ | ||
402 | { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */ | ||
403 | { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */ | ||
404 | { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */ | ||
405 | { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */ | ||
406 | { .id = "" } | ||
407 | }; | ||
408 | MODULE_DEVICE_TABLE(pnp, el3_pnp_ids); | ||
409 | |||
/*
 * ISA PnP probe.  The PnP core has already assigned resources; claim the
 * I/O region, read the station address from the EEPROM (words 0-2,
 * byte-swapped via htons) and register the interface.
 * Returns 0 on success or a negative errno.
 */
static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
				   const struct pnp_device_id *id)
{
	short i;
	int ioaddr, irq, if_port;
	__be16 phys_addr[3];
	struct net_device *dev = NULL;
	int err;

	ioaddr = pnp_port_start(pdev, 0);
	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp"))
		return -EBUSY;
	irq = pnp_irq(pdev, 0);
	EL3WINDOW(0);
	for (i = 0; i < 3; i++)
		phys_addr[i] = htons(read_eeprom(ioaddr, i));
	/* EEPROM word 8, bits 14-15: default transceiver. */
	if_port = read_eeprom(ioaddr, 8) >> 14;
	dev = alloc_etherdev(sizeof(struct el3_private));
	if (!dev) {
		release_region(ioaddr, EL3_IO_EXTENT);
		return -ENOMEM;
	}
	SET_NETDEV_DEV(dev, &pdev->dev);
	netdev_boot_setup_check(dev);

	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
	pnp_set_drvdata(pdev, dev);
	err = el3_common_init(dev);

	if (err) {
		/* el3_common_init() already released the I/O region. */
		pnp_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return err;
	}

	el3_devs[el3_cards++] = dev;
	return 0;
}
448 | |||
449 | static void __devexit el3_pnp_remove(struct pnp_dev *pdev) | ||
450 | { | ||
451 | el3_common_remove(pnp_get_drvdata(pdev)); | ||
452 | pnp_set_drvdata(pdev, NULL); | ||
453 | } | ||
454 | |||
455 | #ifdef CONFIG_PM | ||
/* Thin pnp_driver PM wrappers around the bus-independent handlers. */
static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	return el3_suspend(&pdev->dev, state);
}

static int el3_pnp_resume(struct pnp_dev *pdev)
{
	return el3_resume(&pdev->dev);
}
465 | #endif | ||
466 | |||
/* ISA PnP glue; matched against the el3_pnp_ids table above. */
static struct pnp_driver el3_pnp_driver = {
	.name		= "3c509",
	.id_table	= el3_pnp_ids,
	.probe		= el3_pnp_probe,
	.remove		= __devexit_p(el3_pnp_remove),
#ifdef CONFIG_PM
	.suspend	= el3_pnp_suspend,
	.resume		= el3_pnp_resume,
#endif
};
477 | static int pnp_registered; | ||
478 | #endif /* CONFIG_PNP */ | ||
479 | |||
480 | #ifdef CONFIG_EISA | ||
481 | static struct eisa_device_id el3_eisa_ids[] = { | ||
482 | { "TCM5090" }, | ||
483 | { "TCM5091" }, | ||
484 | { "TCM5092" }, | ||
485 | { "TCM5093" }, | ||
486 | { "TCM5094" }, | ||
487 | { "TCM5095" }, | ||
488 | { "TCM5098" }, | ||
489 | { "" } | ||
490 | }; | ||
491 | MODULE_DEVICE_TABLE(eisa, el3_eisa_ids); | ||
492 | |||
493 | static int el3_eisa_probe (struct device *device); | ||
494 | |||
/* EISA (3c579) glue: probe/remove/PM via the generic device hooks. */
static struct eisa_driver el3_eisa_driver = {
	.id_table = el3_eisa_ids,
	.driver   = {
		.name    = "3c579",
		.probe   = el3_eisa_probe,
		.remove  = __devexit_p (el3_device_remove),
		.suspend = el3_suspend,
		.resume  = el3_resume,
	}
};
505 | static int eisa_registered; | ||
506 | #endif | ||
507 | |||
508 | #ifdef CONFIG_MCA | ||
509 | static int el3_mca_probe(struct device *dev); | ||
510 | |||
511 | static short el3_mca_adapter_ids[] __initdata = { | ||
512 | 0x627c, | ||
513 | 0x627d, | ||
514 | 0x62db, | ||
515 | 0x62f6, | ||
516 | 0x62f7, | ||
517 | 0x0000 | ||
518 | }; | ||
519 | |||
520 | static char *el3_mca_adapter_names[] __initdata = { | ||
521 | "3Com 3c529 EtherLink III (10base2)", | ||
522 | "3Com 3c529 EtherLink III (10baseT)", | ||
523 | "3Com 3c529 EtherLink III (test mode)", | ||
524 | "3Com 3c529 EtherLink III (TP or coax)", | ||
525 | "3Com 3c529 EtherLink III (TP)", | ||
526 | NULL | ||
527 | }; | ||
528 | |||
/* MCA (3c529) glue; matched against el3_mca_adapter_ids above. */
static struct mca_driver el3_mca_driver = {
	.id_table = el3_mca_adapter_ids,
	.driver = {
		.name = "3c529",
		.bus = &mca_bus_type,
		.probe = el3_mca_probe,
		.remove = __devexit_p(el3_device_remove),
		.suspend = el3_suspend,
		.resume = el3_resume,
	},
};
540 | static int mca_registered; | ||
541 | #endif /* CONFIG_MCA */ | ||
542 | |||
/* net_device callbacks shared by all 3c5x9 bus attachments. */
static const struct net_device_ops netdev_ops = {
	.ndo_open 		= el3_open,
	.ndo_stop	 	= el3_close,
	.ndo_start_xmit 	= el3_start_xmit,
	.ndo_get_stats 		= el3_get_stats,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_tx_timeout 	= el3_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= el3_poll_controller,
#endif
};
557 | |||
558 | static int __devinit el3_common_init(struct net_device *dev) | ||
559 | { | ||
560 | struct el3_private *lp = netdev_priv(dev); | ||
561 | int err; | ||
562 | const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; | ||
563 | |||
564 | spin_lock_init(&lp->lock); | ||
565 | |||
566 | if (dev->mem_start & 0x05) { /* xcvr codes 1/3/4/12 */ | ||
567 | dev->if_port = (dev->mem_start & 0x0f); | ||
568 | } else { /* xcvr codes 0/8 */ | ||
569 | /* use eeprom value, but save user's full-duplex selection */ | ||
570 | dev->if_port |= (dev->mem_start & 0x08); | ||
571 | } | ||
572 | |||
573 | /* The EL3-specific entries in the device structure. */ | ||
574 | dev->netdev_ops = &netdev_ops; | ||
575 | dev->watchdog_timeo = TX_TIMEOUT; | ||
576 | SET_ETHTOOL_OPS(dev, ðtool_ops); | ||
577 | |||
578 | err = register_netdev(dev); | ||
579 | if (err) { | ||
580 | pr_err("Failed to register 3c5x9 at %#3.3lx, IRQ %d.\n", | ||
581 | dev->base_addr, dev->irq); | ||
582 | release_region(dev->base_addr, EL3_IO_EXTENT); | ||
583 | return err; | ||
584 | } | ||
585 | |||
586 | pr_info("%s: 3c5x9 found at %#3.3lx, %s port, address %pM, IRQ %d.\n", | ||
587 | dev->name, dev->base_addr, if_names[(dev->if_port & 0x03)], | ||
588 | dev->dev_addr, dev->irq); | ||
589 | |||
590 | if (el3_debug > 0) | ||
591 | pr_info("%s", version); | ||
592 | return 0; | ||
593 | |||
594 | } | ||
595 | |||
/* Undo el3_common_init(): unregister, release the I/O window, free dev. */
static void el3_common_remove (struct net_device *dev)
{
	unregister_netdev (dev);
	release_region(dev->base_addr, EL3_IO_EXTENT);
	free_netdev (dev);
}
602 | |||
603 | #ifdef CONFIG_MCA | ||
604 | static int __init el3_mca_probe(struct device *device) | ||
605 | { | ||
606 | /* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch, | ||
607 | * heavily modified by Chris Beauregard | ||
608 | * (cpbeaure@csclub.uwaterloo.ca) to support standard MCA | ||
609 | * probing. | ||
610 | * | ||
611 | * redone for multi-card detection by ZP Gu (zpg@castle.net) | ||
612 | * now works as a module */ | ||
613 | |||
614 | short i; | ||
615 | int ioaddr, irq, if_port; | ||
616 | __be16 phys_addr[3]; | ||
617 | struct net_device *dev = NULL; | ||
618 | u_char pos4, pos5; | ||
619 | struct mca_device *mdev = to_mca_device(device); | ||
620 | int slot = mdev->slot; | ||
621 | int err; | ||
622 | |||
623 | pos4 = mca_device_read_stored_pos(mdev, 4); | ||
624 | pos5 = mca_device_read_stored_pos(mdev, 5); | ||
625 | |||
626 | ioaddr = ((short)((pos4&0xfc)|0x02)) << 8; | ||
627 | irq = pos5 & 0x0f; | ||
628 | |||
629 | |||
630 | pr_info("3c529: found %s at slot %d\n", | ||
631 | el3_mca_adapter_names[mdev->index], slot + 1); | ||
632 | |||
633 | /* claim the slot */ | ||
634 | strncpy(mdev->name, el3_mca_adapter_names[mdev->index], | ||
635 | sizeof(mdev->name)); | ||
636 | mca_device_set_claim(mdev, 1); | ||
637 | |||
638 | if_port = pos4 & 0x03; | ||
639 | |||
640 | irq = mca_device_transform_irq(mdev, irq); | ||
641 | ioaddr = mca_device_transform_ioport(mdev, ioaddr); | ||
642 | if (el3_debug > 2) { | ||
643 | pr_debug("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); | ||
644 | } | ||
645 | EL3WINDOW(0); | ||
646 | for (i = 0; i < 3; i++) | ||
647 | phys_addr[i] = htons(read_eeprom(ioaddr, i)); | ||
648 | |||
649 | dev = alloc_etherdev(sizeof (struct el3_private)); | ||
650 | if (dev == NULL) { | ||
651 | release_region(ioaddr, EL3_IO_EXTENT); | ||
652 | return -ENOMEM; | ||
653 | } | ||
654 | |||
655 | netdev_boot_setup_check(dev); | ||
656 | |||
657 | el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA); | ||
658 | dev_set_drvdata(device, dev); | ||
659 | err = el3_common_init(dev); | ||
660 | |||
661 | if (err) { | ||
662 | dev_set_drvdata(device, NULL); | ||
663 | free_netdev(dev); | ||
664 | return -ENOMEM; | ||
665 | } | ||
666 | |||
667 | el3_devs[el3_cards++] = dev; | ||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | #endif /* CONFIG_MCA */ | ||
672 | |||
673 | #ifdef CONFIG_EISA | ||
/*
 * EISA (3c579) probe, called by the EISA bus driver.  Claims the I/O
 * region, reads IRQ/transceiver/station address from the adapter and
 * registers the interface.  Returns 0 on success or a negative errno.
 */
static int __init el3_eisa_probe (struct device *device)
{
	short i;
	int ioaddr, irq, if_port;
	__be16 phys_addr[3];
	struct net_device *dev = NULL;
	struct eisa_device *edev;
	int err;

	/* Yeepee, The driver framework is calling us ! */
	edev = to_eisa_device (device);
	ioaddr = edev->base_addr;

	if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa"))
		return -EBUSY;

	/* Change the register set to the configuration window 0. */
	outw(SelectWindow | 0, ioaddr + 0xC80 + EL3_CMD);

	irq = inw(ioaddr + WN0_IRQ) >> 12;
	if_port = inw(ioaddr + 6)>>14;
	for (i = 0; i < 3; i++)
		phys_addr[i] = htons(read_eeprom(ioaddr, i));

	/* Restore the "Product ID" to the EEPROM read register. */
	read_eeprom(ioaddr, 3);

	dev = alloc_etherdev(sizeof (struct el3_private));
	if (dev == NULL) {
		release_region(ioaddr, EL3_IO_EXTENT);
		return -ENOMEM;
	}

	netdev_boot_setup_check(dev);

	el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
	eisa_set_drvdata (edev, dev);
	err = el3_common_init(dev);

	if (err) {
		/* el3_common_init() already released the I/O region. */
		eisa_set_drvdata (edev, NULL);
		free_netdev(dev);
		return err;
	}

	el3_devs[el3_cards++] = dev;
	return 0;
}
722 | #endif | ||
723 | |||
724 | /* This remove works for all device types. | ||
725 | * | ||
726 | * The net dev must be stored in the driver data field */ | ||
727 | static int __devexit el3_device_remove (struct device *device) | ||
728 | { | ||
729 | struct net_device *dev; | ||
730 | |||
731 | dev = dev_get_drvdata(device); | ||
732 | |||
733 | el3_common_remove (dev); | ||
734 | return 0; | ||
735 | } | ||
736 | |||
737 | /* Read a word from the EEPROM using the regular EEPROM access register. | ||
738 | Assume that we are in register window zero. | ||
739 | */ | ||
static ushort read_eeprom(int ioaddr, int index)
{
	/* Window 0, offset 10 is the EEPROM command register. */
	outw(EEPROM_READ + index, ioaddr + 10);
	/* Pause for at least 162 us. for the read to take place.
	   Some chips seem to require much longer */
	mdelay(2);
	/* Result appears in the EEPROM data register at offset 12. */
	return inw(ioaddr + 12);
}
748 | |||
/* Read a word from the EEPROM when in the ISA ID probe state.
 *
 * In the ID-probe state the card has no I/O window yet, so the word is
 * clocked out one bit at a time on bit 0 of the global id_port.
 *
 * @index: EEPROM word index to fetch
 * Returns the 16-bit word, MSB first.
 */
static ushort id_read_eeprom(int index)
{
	int bit, word = 0;

	/* Issue read command, and pause for at least 162 us. for it to complete.
	   Assume extra-fast 16Mhz bus. */
	outb(EEPROM_READ + index, id_port);

	/* Pause for at least 162 us. for the read to take place. */
	/* Some chips seem to require much longer */
	mdelay(4);

	/* Shift in the 16 result bits, most-significant bit first. */
	for (bit = 15; bit >= 0; bit--)
		word = (word << 1) + (inb(id_port) & 0x01);

	if (el3_debug > 3)
		pr_debug("  3c509 EEPROM word %d %#4.4x.\n", index, word);

	return word;
}
770 | |||
771 | |||
/* Open the interface: reset the chip, grab the IRQ and bring the
 * hardware up via el3_up().  Returns 0 or the request_irq() error. */
static int
el3_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	int i;

	/* Reset both FIFOs and mask all interrupt sources before the
	 * handler is installed, so no stale event can fire early. */
	outw(TxReset, ioaddr + EL3_CMD);
	outw(RxReset, ioaddr + EL3_CMD);
	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	i = request_irq(dev->irq, el3_interrupt, 0, dev->name, dev);
	if (i)
		return i;

	EL3WINDOW(0);
	if (el3_debug > 3)
		pr_debug("%s: Opening, IRQ %d	 status@%x %4.4x.\n", dev->name,
			 dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));

	/* Program address/media/stats and enable RX/TX + interrupts. */
	el3_up(dev);

	if (el3_debug > 3)
		pr_debug("%s: Opened 3c509  IRQ %d  status %4.4x.\n",
			 dev->name, dev->irq, inw(ioaddr + EL3_STATUS));

	return 0;
}
799 | |||
/* Transmit watchdog: called by the net core when a TX has stalled.
 * Logs the hardware state, resets and re-enables the transmitter, and
 * restarts the queue. */
static void
el3_tx_timeout (struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	pr_warning("%s: transmit timed out, Tx_status %2.2x status %4.4x Tx FIFO room %d.\n",
		   dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
		   inw(ioaddr + TX_FREE));
	dev->stats.tx_errors++;
	dev->trans_start = jiffies; /* prevent tx timeout */
	/* Issue TX_RESET and TX_START commands. */
	outw(TxReset, ioaddr + EL3_CMD);
	outw(TxEnable, ioaddr + EL3_CMD);
	netif_wake_queue(dev);
}
816 | |||
817 | |||
/* Queue one skb for transmission via programmed I/O.
 *
 * The packet is copied straight into the TX FIFO under lp->lock.  The
 * queue is stopped on entry and only restarted if the FIFO still has
 * room for a max-sized frame; otherwise a TxAvailable interrupt is
 * requested to wake it later.  Always returns NETDEV_TX_OK (the skb is
 * consumed unconditionally).
 */
static netdev_tx_t
el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;

	netif_stop_queue (dev);

	dev->stats.tx_bytes += skb->len;

	if (el3_debug > 4) {
		pr_debug("%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
			 dev->name, skb->len, inw(ioaddr + EL3_STATUS));
	}
#if 0
#ifndef final_version
	{	/* Error-checking code, delete someday. */
		ushort status = inw(ioaddr + EL3_STATUS);
		if (status & 0x0001 && 		/* IRQ line active, missed one. */
		    inw(ioaddr + EL3_STATUS) & 1) { 		/* Make sure. */
			pr_debug("%s: Missed interrupt, status then %04x now %04x"
				 "  Tx %2.2x Rx %4.4x.\n", dev->name, status,
				 inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
				 inw(ioaddr + RX_STATUS));
			/* Fake interrupt trigger by masking, acknowledge interrupts. */
			outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
			outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
			     ioaddr + EL3_CMD);
			outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
		}
	}
#endif
#endif
	/*
	 *	We lock the driver against other processors. Note
	 *	we don't need to lock versus the IRQ as we suspended
	 *	that. This means that we lose the ability to take
	 *	an RX during a TX upload. That sucks a bit with SMP
	 *	on an original 3c509 (2K buffer)
	 *
	 *	Using disable_irq stops us crapping on other
	 *	time sensitive devices.
	 */

	spin_lock_irqsave(&lp->lock, flags);

	/* Put out the doubleword header... */
	outw(skb->len, ioaddr + TX_FIFO);
	outw(0x00, ioaddr + TX_FIFO);
	/* ... and the packet rounded to a doubleword. */
	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);

	if (inw(ioaddr + TX_FREE) > 1536)
		netif_start_queue(dev);
	else
		/* Interrupt us when the FIFO has room for max-sized packet. */
		outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);

	spin_unlock_irqrestore(&lp->lock, flags);

	dev_kfree_skb (skb);

	/* Clear the Tx status stack. */
	{
		short tx_status;
		int i = 4;

		/* Drain up to 3 completion entries; 0x38 = abort-class
		 * errors, 0x30 needs a TxReset, 0x3C needs re-enable. */
		while (--i > 0	&& (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
			if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
			if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
			if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
			outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
895 | |||
/* The EL3 interrupt handler.
 *
 * Loops on the status register, servicing RX completions, TX-room
 * wakeups and the rarer stats/error events, acking each latched IRQ at
 * the bottom of the loop.  Bails out (acking everything) after
 * max_interrupt_work iterations to avoid a livelocked IRQ line.
 */
static irqreturn_t
el3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct el3_private *lp;
	int ioaddr, status;
	int i = max_interrupt_work;

	lp = netdev_priv(dev);
	spin_lock(&lp->lock);

	ioaddr = dev->base_addr;

	if (el3_debug > 4) {
		status = inw(ioaddr + EL3_STATUS);
		pr_debug("%s: interrupt, status %4.4x.\n", dev->name, status);
	}

	while ((status = inw(ioaddr + EL3_STATUS)) &
	       (IntLatch | RxComplete | StatsFull)) {

		if (status & RxComplete)
			el3_rx(dev);

		if (status & TxAvailable) {
			if (el3_debug > 5)
				pr_debug("	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue (dev);
		}
		if (status & (AdapterFailure | RxEarly | StatsFull | TxComplete)) {
			/* Handle all uncommon interrupts. */
			if (status & StatsFull)				/* Empty statistics. */
				update_stats(dev);
			if (status & RxEarly) {				/* Rx early is unused. */
				el3_rx(dev);
				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
			}
			if (status & TxComplete) {			/* Really Tx error. */
				short tx_status;
				int i = 4;

				/* Drain the TX status stack, resetting or
				 * re-enabling the transmitter as needed. */
				while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
					if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
					if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
					if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
					outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
				}
			}
			if (status & AdapterFailure) {
				/* Adapter failure requires Rx reset and reinit. */
				outw(RxReset, ioaddr + EL3_CMD);
				/* Set the Rx filter to the current state. */
				outw(SetRxFilter | RxStation | RxBroadcast
				     | (dev->flags & IFF_ALLMULTI ? RxMulticast : 0)
				     | (dev->flags & IFF_PROMISC ? RxProm : 0),
				     ioaddr + EL3_CMD);
				outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
				outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
			}
		}

		if (--i < 0) {
			pr_err("%s: Infinite loop in interrupt, status %4.4x.\n",
			       dev->name, status);
			/* Clear all interrupts. */
			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); /* Ack IRQ */
	}

	if (el3_debug > 4) {
		pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name,
			 inw(ioaddr + EL3_STATUS));
	}
	spin_unlock(&lp->lock);
	return IRQ_HANDLED;
}
978 | |||
979 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling receive - used by netconsole and other diagnostic tools
 * to allow network i/o with interrupts disabled.
 *
 * Synthesizes an interrupt with the real IRQ line masked so the
 * handler cannot re-enter itself.
 */
static void el3_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	el3_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif
992 | |||
/* Return the accumulated device statistics, pulling the latest counts
 * from the hardware counters under lp->lock first. */
static struct net_device_stats *
el3_get_stats(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	unsigned long flags;

	/*
	 *	This is fast enough not to bother with disable IRQ
	 *	stuff.
	 */

	spin_lock_irqsave(&lp->lock, flags);
	update_stats(dev);
	spin_unlock_irqrestore(&lp->lock, flags);
	return &dev->stats;
}
1009 | |||
/*  Update statistics.  We change to register window 6, so this should be run
	single-threaded if the device is active. This is expected to be a rare
	operation, and it's simpler for the rest of the driver to assume that
	window 1 is always valid rather than use a special window-state variable.

	The on-chip counters clear on read, so each register is read exactly
	once; values the driver does not track are read only to clear them.
	*/
static void update_stats(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	if (el3_debug > 5)
		pr_debug("   Updating the statistics.\n");
	/* Turn off statistics updates while reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	/* Switch to the stats window, and read everything. */
	EL3WINDOW(6);
	dev->stats.tx_carrier_errors 	+= inb(ioaddr + 0);
	dev->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
	/* Multiple collisions. */	   inb(ioaddr + 2);
	dev->stats.collisions		+= inb(ioaddr + 3);
	dev->stats.tx_window_errors	+= inb(ioaddr + 4);
	dev->stats.rx_fifo_errors	+= inb(ioaddr + 5);
	dev->stats.tx_packets		+= inb(ioaddr + 6);
	/* Rx packets	*/		   inb(ioaddr + 7);
	/* Tx deferrals */		   inb(ioaddr + 8);
	inw(ioaddr + 10);	/* Total Rx and Tx octets. */
	inw(ioaddr + 12);

	/* Back to window 1, and turn statistics back on. */
	EL3WINDOW(1);
	outw(StatsEnable, ioaddr + EL3_CMD);
}
1041 | |||
/* Drain the receive FIFO.
 *
 * For each packet: on error, classify it from the status bits and
 * discard; otherwise allocate an skb, copy the frame out of the FIFO
 * with insl(), pop it from the chip and hand it to the stack.  If skb
 * allocation fails the frame is discarded and rx_dropped counted.
 * Always returns 0.
 */
static int
el3_rx(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	short rx_status;

	if (el3_debug > 5)
		pr_debug("   In rx_packet(), status %4.4x, rx_status %4.4x.\n",
			 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
	while ((rx_status = inw(ioaddr + RX_STATUS)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			short error = rx_status & 0x3800;

			outw(RxDiscard, ioaddr + EL3_CMD);
			dev->stats.rx_errors++;
			switch (error) {
			case 0x0000:	dev->stats.rx_over_errors++; break;
			case 0x0800:	dev->stats.rx_length_errors++; break;
			case 0x1000:	dev->stats.rx_frame_errors++; break;
			case 0x1800:	dev->stats.rx_length_errors++; break;
			case 0x2000:	dev->stats.rx_frame_errors++; break;
			case 0x2800:	dev->stats.rx_crc_errors++; break;
			}
		} else {
			short pkt_len = rx_status & 0x7ff;
			struct sk_buff *skb;

			/* +5: 2 bytes of alignment slack plus doubleword
			 * rounding of the insl() copy below. */
			skb = dev_alloc_skb(pkt_len+5);
			if (el3_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					 pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);     /* Align IP on 16 byte */

				/* 'skb->data' points to the start of sk_buff data area. */
				insl(ioaddr + RX_FIFO, skb_put(skb,pkt_len),
				     (pkt_len + 3) >> 2);

				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_bytes += pkt_len;
				dev->stats.rx_packets++;
				continue;
			}
			outw(RxDiscard, ioaddr + EL3_CMD);
			dev->stats.rx_dropped++;
			if (el3_debug)
				pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n",
					 dev->name, pkt_len);
		}
		inw(ioaddr + EL3_STATUS); 				/* Delay. */
		/* Busy-wait until the chip finishes discarding the packet. */
		while (inw(ioaddr + EL3_STATUS) & 0x1000)
			pr_debug("	Waiting for 3c509 to discard packet, status %x.\n",
				 inw(ioaddr + EL3_STATUS) );
	}

	return 0;
}
1101 | |||
1102 | /* | ||
1103 | * Set or clear the multicast filter for this adaptor. | ||
1104 | */ | ||
1105 | static void | ||
1106 | set_multicast_list(struct net_device *dev) | ||
1107 | { | ||
1108 | unsigned long flags; | ||
1109 | struct el3_private *lp = netdev_priv(dev); | ||
1110 | int ioaddr = dev->base_addr; | ||
1111 | int mc_count = netdev_mc_count(dev); | ||
1112 | |||
1113 | if (el3_debug > 1) { | ||
1114 | static int old; | ||
1115 | if (old != mc_count) { | ||
1116 | old = mc_count; | ||
1117 | pr_debug("%s: Setting Rx mode to %d addresses.\n", | ||
1118 | dev->name, mc_count); | ||
1119 | } | ||
1120 | } | ||
1121 | spin_lock_irqsave(&lp->lock, flags); | ||
1122 | if (dev->flags&IFF_PROMISC) { | ||
1123 | outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, | ||
1124 | ioaddr + EL3_CMD); | ||
1125 | } | ||
1126 | else if (mc_count || (dev->flags&IFF_ALLMULTI)) { | ||
1127 | outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD); | ||
1128 | } | ||
1129 | else | ||
1130 | outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); | ||
1131 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1132 | } | ||
1133 | |||
/* Stop the interface: take the hardware down, release the IRQ and
 * park the chip in window 0 (which disables its IRQ output). */
static int
el3_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct el3_private *lp = netdev_priv(dev);

	if (el3_debug > 2)
		pr_debug("%s: Shutting down ethercard.\n", dev->name);

	el3_down(dev);

	free_irq(dev->irq, dev);
	/* Switching back to window 0 disables the IRQ. */
	EL3WINDOW(0);
	if (lp->type != EL3_EISA) {
		/* But we explicitly zero the IRQ line select anyway. Don't do
		 * it on EISA cards, it prevents the module from getting an
		 * IRQ after unload+reload... */
		outw(0x0f00, ioaddr + WN0_IRQ);
	}

	return 0;
}
1157 | |||
/* Sample the link-beat bit (bit 11 of the window-4 media register).
 * Returns non-zero when link beat is detected.  Caller must hold the
 * driver lock; the chip is left back in window 1. */
static int
el3_link_ok(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	u16 tmp;

	EL3WINDOW(4);
	tmp = inw(ioaddr + WN4_MEDIA);
	EL3WINDOW(1);
	return tmp & (1<<11);
}
1169 | |||
/* Fill @ecmd with the current media settings (port, transceiver,
 * duplex, supported modes) read from register windows 0 and 4.
 * Caller must hold the driver lock; the chip is left in window 1.
 * Always returns 0. */
static int
el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	u16 tmp;
	int ioaddr = dev->base_addr;

	EL3WINDOW(0);
	/* obtain current transceiver via WN4_MEDIA? */
	tmp = inw(ioaddr + WN0_ADDR_CONF);
	ecmd->transceiver = XCVR_INTERNAL;
	/* Top two bits of the address-configuration register select the
	 * active port: 0 = 10baseT, 1 = AUI, 3 = BNC. */
	switch (tmp >> 14) {
	case 0:
		ecmd->port = PORT_TP;
		break;
	case 1:
		ecmd->port = PORT_AUI;
		ecmd->transceiver = XCVR_EXTERNAL;
		break;
	case 3:
		ecmd->port = PORT_BNC;
		/* fall through */
	default:
		break;
	}

	ecmd->duplex = DUPLEX_HALF;
	ecmd->supported = 0;
	tmp = inw(ioaddr + WN0_CONF_CTRL);
	if (tmp & (1<<13))
		ecmd->supported |= SUPPORTED_AUI;
	if (tmp & (1<<12))
		ecmd->supported |= SUPPORTED_BNC;
	if (tmp & (1<<9)) {
		ecmd->supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full;	/* hmm... */
		EL3WINDOW(4);
		tmp = inw(ioaddr + WN4_NETDIAG);
		if (tmp & FD_ENABLE)
			ecmd->duplex = DUPLEX_FULL;
	}

	/* Hardware is 10 Mb/s only. */
	ethtool_cmd_speed_set(ecmd, SPEED_10);
	EL3WINDOW(1);
	return 0;
}
1214 | |||
/* Apply the media settings from @ecmd: select the port in window 0,
 * fire up the coax DC-DC converter for BNC, and set the duplex bit in
 * window 4.  Only 10 Mb/s half/full duplex is accepted.  Caller must
 * hold the driver lock; the chip is left in window 1.
 * Returns 0, -EINVAL for unsupported settings, or -EIO if the BNC
 * transceiver failed to select. */
static int
el3_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	u16 tmp;
	int ioaddr = dev->base_addr;

	if (ecmd->speed != SPEED_10)
		return -EINVAL;
	if ((ecmd->duplex != DUPLEX_HALF) && (ecmd->duplex != DUPLEX_FULL))
		return -EINVAL;
	if ((ecmd->transceiver != XCVR_INTERNAL) && (ecmd->transceiver != XCVR_EXTERNAL))
		return -EINVAL;

	/* change XCVR type */
	EL3WINDOW(0);
	tmp = inw(ioaddr + WN0_ADDR_CONF);
	/* Port select lives in the top two bits of WN0_ADDR_CONF. */
	switch (ecmd->port) {
	case PORT_TP:
		tmp &= ~(3<<14);
		dev->if_port = 0;
		break;
	case PORT_AUI:
		tmp |= (1<<14);
		dev->if_port = 1;
		break;
	case PORT_BNC:
		tmp |= (3<<14);
		dev->if_port = 3;
		break;
	default:
		return -EINVAL;
	}

	outw(tmp, ioaddr + WN0_ADDR_CONF);
	if (dev->if_port == 3) {
		/* fire up the DC-DC convertor if BNC gets enabled */
		tmp = inw(ioaddr + WN0_ADDR_CONF);
		if (tmp & (3 << 14)) {
			outw(StartCoax, ioaddr + EL3_CMD);
			udelay(800);
		} else
			return -EIO;
	}

	EL3WINDOW(4);
	tmp = inw(ioaddr + WN4_NETDIAG);
	if (ecmd->duplex == DUPLEX_FULL)
		tmp |= FD_ENABLE;
	else
		tmp &= ~FD_ENABLE;
	outw(tmp, ioaddr + WN4_NETDIAG);
	EL3WINDOW(1);

	return 0;
}
1270 | |||
1271 | static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1272 | { | ||
1273 | strcpy(info->driver, DRV_NAME); | ||
1274 | strcpy(info->version, DRV_VERSION); | ||
1275 | } | ||
1276 | |||
/* ethtool get_settings: read media settings under the driver lock.
 * Returns the el3_netdev_get_ecmd() result. */
static int el3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct el3_private *lp = netdev_priv(dev);
	int ret;

	spin_lock_irq(&lp->lock);
	ret = el3_netdev_get_ecmd(dev, ecmd);
	spin_unlock_irq(&lp->lock);
	return ret;
}
1287 | |||
/* ethtool set_settings: apply media settings under the driver lock.
 * Returns the el3_netdev_set_ecmd() result. */
static int el3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct el3_private *lp = netdev_priv(dev);
	int ret;

	spin_lock_irq(&lp->lock);
	ret = el3_netdev_set_ecmd(dev, ecmd);
	spin_unlock_irq(&lp->lock);
	return ret;
}
1298 | |||
/* ethtool get_link: sample link beat under the driver lock.
 * Returns non-zero when link is up. */
static u32 el3_get_link(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	u32 ret;

	spin_lock_irq(&lp->lock);
	ret = el3_link_ok(dev);
	spin_unlock_irq(&lp->lock);
	return ret;
}
1309 | |||
/* ethtool get_msglevel: the driver-wide debug level doubles as the
 * message level. */
static u32 el3_get_msglevel(struct net_device *dev)
{
	return el3_debug;
}
1314 | |||
/* ethtool set_msglevel: sets the driver-wide debug level (affects all
 * el3 devices, not just @dev). */
static void el3_set_msglevel(struct net_device *dev, u32 v)
{
	el3_debug = v;
}
1319 | |||
/* ethtool operations supported by this driver. */
static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo = el3_get_drvinfo,
	.get_settings = el3_get_settings,
	.set_settings = el3_set_settings,
	.get_link = el3_get_link,
	.get_msglevel = el3_get_msglevel,
	.set_msglevel = el3_set_msglevel,
};
1328 | |||
/* Quiesce the hardware: stop the queue, freeze and harvest statistics,
 * disable RX/TX, power down the active transceiver and mask all
 * interrupt sources.  Used by close, suspend and timeout paths. */
static void
el3_down(struct net_device *dev)
{
	int ioaddr = dev->base_addr;

	netif_stop_queue(dev);

	/* Turn off statistics ASAP.  We update lp->stats below. */
	outw(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	outw(RxDisable, ioaddr + EL3_CMD);
	outw(TxDisable, ioaddr + EL3_CMD);

	if (dev->if_port == 3)
		/* Turn off thinnet power.  Green! */
		outw(StopCoax, ioaddr + EL3_CMD);
	else if (dev->if_port == 0) {
		/* Disable link beat and jabber, if_port may change here next open(). */
		EL3WINDOW(4);
		outw(inw(ioaddr + WN4_MEDIA) & ~MEDIA_TP, ioaddr + WN4_MEDIA);
	}

	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(dev);
}
1356 | |||
/* Bring the hardware fully up: activate the board, program IRQ and
 * station address, start the selected transceiver (with duplex
 * negotiation from the EEPROM for 10baseT), clear the statistics
 * counters, enable RX/TX and unmask interrupts, then start the queue.
 * Shared by open() and resume(). */
static void
el3_up(struct net_device *dev)
{
	int i, sw_info, net_diag;
	int ioaddr = dev->base_addr;

	/* Activating the board required and does no harm otherwise */
	outw(0x0001, ioaddr + 4);

	/* Set the IRQ line. */
	outw((dev->irq << 12) | 0x0f00, ioaddr + WN0_IRQ);

	/* Set the station address in window 2 each time opened. */
	EL3WINDOW(2);

	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);

	if ((dev->if_port & 0x03) == 3) /* BNC interface */
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		outw(StartCoax, ioaddr + EL3_CMD);
	else if ((dev->if_port & 0x03) == 0) { /* 10baseT interface */
		/* Combine secondary sw_info word (the adapter level) and primary
			sw_info word (duplex setting plus other useless bits) */
		EL3WINDOW(0);
		sw_info = (read_eeprom(ioaddr, 0x14) & 0x400f) |
			(read_eeprom(ioaddr, 0x0d) & 0xBff0);

		EL3WINDOW(4);
		net_diag = inw(ioaddr + WN4_NETDIAG);
		net_diag = (net_diag | FD_ENABLE); /* temporarily assume full-duplex will be set */
		pr_info("%s: ", dev->name);
		/* Bits 2-3 of if_port select the duplex policy. */
		switch (dev->if_port & 0x0c) {
			case 12:
				/* force full-duplex mode if 3c5x9b */
				if (sw_info & 0x000f) {
					pr_cont("Forcing 3c5x9b full-duplex mode");
					break;
				}
				/* fall through -- not a "B" card, try EEPROM setting */
			case 8:
				/* set full-duplex mode based on eeprom config setting */
				if ((sw_info & 0x000f) && (sw_info & 0x8000)) {
					pr_cont("Setting 3c5x9b full-duplex mode (from EEPROM configuration bit)");
					break;
				}
				/* fall through -- EEPROM says half duplex */
			default:
				/* xcvr=(0 || 4) OR user has an old 3c5x9 non "B" model */
				pr_cont("Setting 3c5x9/3c5x9B half-duplex mode");
				net_diag = (net_diag & ~FD_ENABLE); /* disable full duplex */
		}

		outw(net_diag, ioaddr + WN4_NETDIAG);
		pr_cont(" if_port: %d, sw_info: %4.4x\n", dev->if_port, sw_info);
		if (el3_debug > 3)
			pr_debug("%s: 3c5x9 net diag word is now: %4.4x.\n", dev->name, net_diag);
		/* Enable link beat and jabber check. */
		outw(inw(ioaddr + WN4_MEDIA) | MEDIA_TP, ioaddr + WN4_MEDIA);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 9; i++)
		inb(ioaddr + i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);

	/* Switch to register set 1 for normal use. */
	EL3WINDOW(1);

	/* Accept b-case and phys addr only. */
	outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
	     ioaddr + EL3_CMD);
	outw(SetIntrEnb | IntLatch|TxAvailable|TxComplete|RxComplete|StatsFull,
	     ioaddr + EL3_CMD);

	netif_start_queue(dev);
}
1443 | |||
1444 | /* Power Management support functions */ | ||
1445 | #ifdef CONFIG_PM | ||
1446 | |||
/* PM suspend hook: detach the netdev, quiesce the hardware and put the
 * chip into its low-power state.  Always returns 0. */
static int
el3_suspend(struct device *pdev, pm_message_t state)
{
	unsigned long flags;
	struct net_device *dev;
	struct el3_private *lp;
	int ioaddr;

	dev = dev_get_drvdata(pdev);
	lp = netdev_priv(dev);
	ioaddr = dev->base_addr;

	spin_lock_irqsave(&lp->lock, flags);

	if (netif_running(dev))
		netif_device_detach(dev);

	el3_down(dev);
	/* Drop the chip into low-power mode last. */
	outw(PowerDown, ioaddr + EL3_CMD);

	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1470 | |||
/* PM resume hook: power the chip back up, reprogram it from scratch
 * via el3_up() and reattach the netdev.  Always returns 0. */
static int
el3_resume(struct device *pdev)
{
	unsigned long flags;
	struct net_device *dev;
	struct el3_private *lp;
	int ioaddr;

	dev = dev_get_drvdata(pdev);
	lp = netdev_priv(dev);
	ioaddr = dev->base_addr;

	spin_lock_irqsave(&lp->lock, flags);

	outw(PowerUp, ioaddr + EL3_CMD);
	/* el3_up() expects to start from register window 0. */
	EL3WINDOW(0);
	el3_up(dev);

	if (netif_running(dev))
		netif_device_attach(dev);

	spin_unlock_irqrestore(&lp->lock, flags);
	return 0;
}
1495 | |||
1496 | #endif /* CONFIG_PM */ | ||
1497 | |||
/* Module parameters and metadata. */
module_param(debug,int, 0);
module_param_array(irq, int, NULL, 0);
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(debug, "debug level (0-6)");
MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
#ifdef CONFIG_PNP
module_param(nopnp, int, 0);
MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
#endif	/* CONFIG_PNP */
MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver");
MODULE_LICENSE("GPL");
1510 | |||
/* Module init: register the driver with every supported bus (PnP, ISA,
 * EISA, MCA).  For the ISA contention probe, a free I/O port in
 * 0x110-0x1f0 is claimed as the ID port.  The module loads successfully
 * if at least one bus registration succeeded; otherwise the last
 * registration error is returned. */
static int __init el3_init_module(void)
{
	int ret = 0;

	if (debug >= 0)
		el3_debug = debug;

#ifdef CONFIG_PNP
	if (!nopnp) {
		ret = pnp_register_driver(&el3_pnp_driver);
		if (!ret)
			pnp_registered = 1;
	}
#endif
	/* Select an open I/O location at 0x1*0 to do ISA contention select. */
	/* Start with 0x110 to avoid some sound cards.*/
	for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) {
		if (!request_region(id_port, 1, "3c509-control"))
			continue;
		/* Write 0x00/0xff and read back: a usable ID port floats
		 * bit 0 high after the 0xff write. */
		outb(0x00, id_port);
		outb(0xff, id_port);
		if (inb(id_port) & 0x01)
			break;
		else
			release_region(id_port, 1);
	}
	if (id_port >= 0x200) {
		id_port = 0;
		pr_err("No I/O port available for 3c509 activation.\n");
	} else {
		ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
		if (!ret)
			isa_registered = 1;
	}
#ifdef CONFIG_EISA
	ret = eisa_driver_register(&el3_eisa_driver);
	if (!ret)
		eisa_registered = 1;
#endif
#ifdef CONFIG_MCA
	ret = mca_register_driver(&el3_mca_driver);
	if (!ret)
		mca_registered = 1;
#endif

	/* Succeed if any of the bus registrations succeeded; ret still
	 * holds the last failure code otherwise. */
#ifdef CONFIG_PNP
	if (pnp_registered)
		ret = 0;
#endif
	if (isa_registered)
		ret = 0;
#ifdef CONFIG_EISA
	if (eisa_registered)
		ret = 0;
#endif
#ifdef CONFIG_MCA
	if (mca_registered)
		ret = 0;
#endif
	return ret;
}
1572 | |||
/* Module exit: unregister whichever bus drivers init managed to
 * register (the *_registered flags gate each one) and release the ISA
 * ID port if it was claimed. */
static void __exit el3_cleanup_module(void)
{
#ifdef CONFIG_PNP
	if (pnp_registered)
		pnp_unregister_driver(&el3_pnp_driver);
#endif
	if (isa_registered)
		isa_unregister_driver(&el3_isa_driver);
	if (id_port)
		release_region(id_port, 1);
#ifdef CONFIG_EISA
	if (eisa_registered)
		eisa_driver_unregister(&el3_eisa_driver);
#endif
#ifdef CONFIG_MCA
	if (mca_registered)
		mca_unregister_driver(&el3_mca_driver);
#endif
}

module_init (el3_init_module);
module_exit (el3_cleanup_module);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c new file mode 100644 index 000000000000..d2bb4b254c57 --- /dev/null +++ b/drivers/net/ethernet/3com/3c515.c | |||
@@ -0,0 +1,1584 @@ | |||
1 | /* | ||
2 | Written 1997-1998 by Donald Becker. | ||
3 | |||
4 | This software may be used and distributed according to the terms | ||
5 | of the GNU General Public License, incorporated herein by reference. | ||
6 | |||
7 | This driver is for the 3Com ISA EtherLink XL "Corkscrew" 3c515 ethercard. | ||
8 | |||
9 | The author may be reached as becker@scyld.com, or C/O | ||
10 | Scyld Computing Corporation | ||
11 | 410 Severn Ave., Suite 210 | ||
12 | Annapolis MD 21403 | ||
13 | |||
14 | |||
15 | 2000/2/2- Added support for kernel-level ISAPnP | ||
16 | by Stephen Frost <sfrost@snowman.net> and Alessandro Zummo | ||
17 | Cleaned up for 2.3.x/softnet by Jeff Garzik and Alan Cox. | ||
18 | |||
19 | 2001/11/17 - Added ethtool support (jgarzik) | ||
20 | |||
21 | 2002/10/28 - Locking updates for 2.5 (alan@lxorguk.ukuu.org.uk) | ||
22 | |||
23 | */ | ||
24 | |||
#define DRV_NAME		"3c515"
#define DRV_VERSION		"0.99t-ac"
#define DRV_RELDATE		"28-Oct-2002"

/* Version banner, printed once at load time when debugging is enabled. */
static char *version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " becker@scyld.com and others\n";

#define CORKSCREW 1

/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1512 effectively disables this feature. */
static int rx_copybreak = 200;

/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Enable the automatic media selection code -- usually set. */
#define AUTOMEDIA 1

/* Allow the use of fragment bus master transfers instead of only
   programmed-I/O for Vortex cards.  Full-bus-master transfers are always
   enabled by default on Boomerang cards.  If VORTEX_BUS_MASTER is defined,
   the feature may be turned on using 'options'. */
#define VORTEX_BUS_MASTER

/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	16
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
59 | |||
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/dma.h>
78 | |||
#define NEW_MULTICAST
#include <linux/delay.h>

/* Upper bound on supported cards; also sizes the 'options' parameter array. */
#define MAX_UNITS 8

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c515 Corkscrew driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* "Knobs" for adjusting internal parameters. */
/* Put out somewhat more debugging messages. (0 - no msg, 1 minimal msgs). */
#define DRIVER_DEBUG 1
/* Some values here only for performance evaluation and path-coverage
   debugging. */
static int rx_nocopy, rx_copy, queued_packet;

/* Number of times to check to see if the Tx FIFO has space, used in some
   limited cases. */
#define WAIT_TX_AVAIL 200

/* Operational parameters that usually are not changed. */
#define TX_TIMEOUT  ((4*HZ)/10)	/* Time in jiffies before concluding Tx hung */

/* The size here is somewhat misleading: the Corkscrew also uses the ISA
   aliased registers at <base>+0x400.
   */
#define CORKSCREW_TOTAL_SIZE 0x20

/* Runtime verbosity; defaults to DRIVER_DEBUG, overridable via 'debug'. */
#ifdef DRIVER_DEBUG
static int corkscrew_debug = DRIVER_DEBUG;
#else
static int corkscrew_debug = 1;
#endif

#define CORKSCREW_ID 10
115 | |||
116 | /* | ||
117 | Theory of Operation | ||
118 | |||
119 | I. Board Compatibility | ||
120 | |||
121 | This device driver is designed for the 3Com 3c515 ISA Fast EtherLink XL, | ||
122 | 3Com's ISA bus adapter for Fast Ethernet. Due to the unique I/O port layout, | ||
123 | it's not practical to integrate this driver with the other EtherLink drivers. | ||
124 | |||
125 | II. Board-specific settings | ||
126 | |||
127 | The Corkscrew has an EEPROM for configuration, but no special settings are | ||
128 | needed for Linux. | ||
129 | |||
130 | III. Driver operation | ||
131 | |||
132 | The 3c515 series use an interface that's very similar to the 3c900 "Boomerang" | ||
133 | PCI cards, with the bus master interface extensively modified to work with | ||
134 | the ISA bus. | ||
135 | |||
136 | The card is capable of full-bus-master transfers with separate | ||
137 | lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, | ||
138 | DEC Tulip and Intel Speedo3. | ||
139 | |||
140 | This driver uses a "RX_COPYBREAK" scheme rather than a fixed intermediate | ||
141 | receive buffer. This scheme allocates full-sized skbuffs as receive | ||
142 | buffers. The value RX_COPYBREAK is used as the copying breakpoint: it is | ||
143 | chosen to trade-off the memory wasted by passing the full-sized skbuff to | ||
144 | the queue layer for all frames vs. the copying cost of copying a frame to a | ||
145 | correctly-sized skbuff. | ||
146 | |||
147 | |||
148 | IIIC. Synchronization | ||
149 | The driver runs as two independent, single-threaded flows of control. One | ||
150 | is the send-packet routine, which enforces single-threaded use by the netif | ||
151 | layer. The other thread is the interrupt handler, which is single | ||
152 | threaded by the hardware and other software. | ||
153 | |||
154 | IV. Notes | ||
155 | |||
156 | Thanks to Terry Murphy of 3Com for providing documentation and a development | ||
157 | board. | ||
158 | |||
159 | The names "Vortex", "Boomerang" and "Corkscrew" are the internal 3Com | ||
160 | project names. I use these names to eliminate confusion -- 3Com product | ||
161 | numbers and names are very similar and often confused. | ||
162 | |||
163 | The new chips support both ethernet (1.5K) and FDDI (4.5K) frame sizes! | ||
164 | This driver only supports ethernet frames because of the recent MTU limit | ||
165 | of 1.5K, but the changes to support 4.5K are minimal. | ||
166 | */ | ||
167 | |||
/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows.  There are eight register windows, with the command
   and status registers available in each.
   */
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameter bits was fine for ethernet, but the new chips
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */

enum corkscrew_cmd {
	TotalReset = 0 << 11, SelectWindow = 1 << 11, StartCoax = 2 << 11,
	RxDisable = 3 << 11, RxEnable = 4 << 11, RxReset = 5 << 11,
	UpStall = 6 << 11, UpUnstall = (6 << 11) + 1, DownStall = (6 << 11) + 2,
	DownUnstall = (6 << 11) + 3, RxDiscard = 8 << 11, TxEnable = 9 << 11,
	TxDisable = 10 << 11, TxReset = 11 << 11, FakeIntr = 12 << 11,
	AckIntr = 13 << 11, SetIntrEnb = 14 << 11, SetStatusEnb = 15 << 11,
	SetRxFilter = 16 << 11, SetRxThreshold = 17 << 11,
	SetTxThreshold = 18 << 11, SetTxStart = 19 << 11, StartDMAUp = 20 << 11,
	StartDMADown = (20 << 11) + 1, StatsEnable = 21 << 11,
	StatsDisable = 22 << 11, StopCoax = 23 << 11,
};
197 | |||
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8
};

/* Bits in the general status register. */
enum corkscrew_status {
	IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1 << 8, DownComplete = 1 << 9, UpComplete = 1 << 10,
	DMAInProgress = 1 << 11,	/* DMA controller is still busy. */
	CmdInProgress = 1 << 12,	/* EL3_CMD is still busy. */
};

/* Register window 1 offsets, the window used in normal operation.
   On the Corkscrew this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
	RxStatus = 0x18, Timer = 0x1A, TxStatus = 0x1B,
	TxFree = 0x1C,		/* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0IRQ = 0x08,
#if defined(CORKSCREW)
	Wn0EepromCmd = 0x200A,	/* Corkscrew EEPROM command register. */
	Wn0EepromData = 0x200C,	/* Corkscrew EEPROM results register. */
#else
	Wn0EepromCmd = 10,	/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,	/* Window 0: EEPROM results register. */
#endif
};
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,	/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,	/* Disable EWENB before 10 msec timeout. */
};

/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01 = 0, PhysAddr23 = 1, PhysAddr45 = 2, ModelID = 3,
	EtherLink3ID = 7,
};

enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config = 0, Wn3_MAC_Ctrl = 6, Wn3_Options = 8,
};
/* Bit fields of the Window 3 InternalConfig (Wn3_Config) register. */
enum wn3_config {
	Ram_size = 7,
	Ram_width = 8,
	Ram_speed = 0x30,
	Rom_size = 0xc0,
	Ram_split_shift = 16,
	Ram_split = 3 << Ram_split_shift,
	Xcvr_shift = 20,
	Xcvr = 7 << Xcvr_shift,
	Autoselect = 0x1000000,
};

enum Window4 {
	Wn4_NetDiag = 6, Wn4_Media = 10,	/* Window 4: Xcvr/media bits. */
};
enum Win4_Media_bits {
	Media_SQE = 0x0008,	/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,	/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {			/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
};

/* Boomerang-style bus master control registers.  Note ISA aliases! */
enum MasterCtrl {
	PktStatus = 0x400, DownListPtr = 0x404, FragAddr = 0x408, FragLen =
	    0x40c,
	TxFreeThreshold = 0x40f, UpPktStatus = 0x410, UpListPtr = 0x418,
};
276 | |||
/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment constraint on tx_ring[] and rx_ring[]. */
struct boom_rx_desc {
	u32 next;
	s32 status;
	u32 addr;
	s32 length;
};

/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete = 0x00008000, RxDError = 0x4000,
	/* See boomerang_rx() for actual error bits */
};

struct boom_tx_desc {
	u32 next;
	s32 status;
	u32 addr;
	s32 length;
};

/* Per-adapter private state, hung off the net_device. */
struct corkscrew_private {
	const char *product_name;
	struct list_head list;
	struct net_device *our_dev;
	/* The Rx and Tx rings are here to keep them quad-word-aligned. */
	struct boom_rx_desc rx_ring[RX_RING_SIZE];
	struct boom_tx_desc tx_ring[TX_RING_SIZE];
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;	/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct sk_buff *tx_skb;	/* Packet being eaten by bus master ctrl. */
	struct timer_list timer;	/* Media selection timer. */
	int capabilities;	/* Adapter capabilities word. */
	int options;		/* User-settable misc. driver options. */
	int last_rx_packets;	/* For media autoselection. */
	unsigned int available_media:8,	/* From Wn3_Options */
		media_override:3,	/* Passed-in media type. */
		default_media:3,	/* Read from the EEPROM. */
		full_duplex:1, autoselect:1, bus_master:1,	/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:1,	/* Boomerang */
		tx_full:1;
	spinlock_t lock;
	struct device *dev;
};

/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
 */
enum xcvr_types {
	XCVR_10baseT = 0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII = 6, XCVR_Default = 8,
};

/* Media-selection table, indexed by xcvr_types; 'next' chains the probe
   order used by the autoselection timer. */
static struct media_table {
	char *name;
	unsigned int media_bits:16,	/* Bits to set in Wn4_Media register. */
		mask:8,		/* The transceiver-present bit in Wn3_Config. */
		next:8;		/* The media type to try next. */
	short wait;		/* Time before we check media status. */
} media_tbl[] = {
	{ "10baseT", Media_10TP, 0x08, XCVR_10base2, (14 * HZ) / 10 },
	{ "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1 * HZ) / 10},
	{ "undefined", 0, 0x80, XCVR_10baseT, 10000},
	{ "10base2", 0, 0x10, XCVR_AUI, (1 * HZ) / 10},
	{ "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14 * HZ) / 10},
	{ "100baseFX", Media_Lnk, 0x04, XCVR_MII, (14 * HZ) / 10},
	{ "MII", 0, 0x40, XCVR_10baseT, 3 * HZ},
	{ "undefined", 0, 0x01, XCVR_10baseT, 10000},
	{ "Default", 0, 0xFF, XCVR_10baseT, 10000},
};
352 | |||
#ifdef __ISAPNP__
/* ISA PnP match table: any vendor, 3Com logical device TCM5051. */
static struct isapnp_device_id corkscrew_isapnp_adapters[] = {
	{	ISAPNP_ANY_ID, ISAPNP_ANY_ID,
		ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5051),
		(long) "3Com Fast EtherLink ISA" },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(isapnp, corkscrew_isapnp_adapters);

/* When set to 1, skip the PnP scan and probe legacy I/O ports only. */
static int nopnp;
#endif /* __ISAPNP__ */
365 | |||
/* Forward declarations for the driver entry points defined below. */
static struct net_device *corkscrew_scan(int unit);
static int corkscrew_setup(struct net_device *dev, int ioaddr,
			    struct pnp_dev *idev, int card_number);
static int corkscrew_open(struct net_device *dev);
static void corkscrew_timer(unsigned long arg);
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int corkscrew_rx(struct net_device *dev);
static void corkscrew_timeout(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t corkscrew_interrupt(int irq, void *dev_id);
static int corkscrew_close(struct net_device *dev);
static void update_stats(int addr, struct net_device *dev);
static struct net_device_stats *corkscrew_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static const struct ethtool_ops netdev_ethtool_ops;
382 | |||
383 | |||
384 | /* | ||
385 | Unfortunately maximizing the shared code between the integrated and | ||
386 | module version of the driver results in a complicated set of initialization | ||
387 | procedures. | ||
388 | init_module() -- modules / tc59x_init() -- built-in | ||
389 | The wrappers for corkscrew_scan() | ||
390 | corkscrew_scan() The common routine that scans for PCI and EISA cards | ||
391 | corkscrew_found_device() Allocate a device structure when we find a card. | ||
392 | Different versions exist for modules and built-in. | ||
393 | corkscrew_probe1() Fill in the device structure -- this is separated | ||
394 | so that the modules code can put it in dev->init. | ||
395 | */ | ||
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Note: this is the only limit on the number of cards supported!! */
static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1, };

#ifdef MODULE
static int debug = -1;		/* -1 means "keep the compiled-in default" */

module_param(debug, int, 0);
module_param_array(options, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
MODULE_PARM_DESC(debug, "3c515 debug level (0-6)");
MODULE_PARM_DESC(options, "3c515: Bits 0-2: media type, bit 3: full duplex, bit 4: bus mastering");
MODULE_PARM_DESC(rx_copybreak, "3c515 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt");

/* A list of all installed Vortex devices, for removing the driver module. */
/* we will need locking (and refcounting) if we ever use it for more */
static LIST_HEAD(root_corkscrew_dev);
415 | |||
416 | int init_module(void) | ||
417 | { | ||
418 | int found = 0; | ||
419 | if (debug >= 0) | ||
420 | corkscrew_debug = debug; | ||
421 | if (corkscrew_debug) | ||
422 | pr_debug("%s", version); | ||
423 | while (corkscrew_scan(-1)) | ||
424 | found++; | ||
425 | return found ? 0 : -ENODEV; | ||
426 | } | ||
427 | |||
428 | #else | ||
429 | struct net_device *tc515_probe(int unit) | ||
430 | { | ||
431 | struct net_device *dev = corkscrew_scan(unit); | ||
432 | static int printed; | ||
433 | |||
434 | if (!dev) | ||
435 | return ERR_PTR(-ENODEV); | ||
436 | |||
437 | if (corkscrew_debug > 0 && !printed) { | ||
438 | printed = 1; | ||
439 | pr_debug("%s", version); | ||
440 | } | ||
441 | |||
442 | return dev; | ||
443 | } | ||
444 | #endif /* not MODULE */ | ||
445 | |||
446 | static int check_device(unsigned ioaddr) | ||
447 | { | ||
448 | int timer; | ||
449 | |||
450 | if (!request_region(ioaddr, CORKSCREW_TOTAL_SIZE, "3c515")) | ||
451 | return 0; | ||
452 | /* Check the resource configuration for a matching ioaddr. */ | ||
453 | if ((inw(ioaddr + 0x2002) & 0x1f0) != (ioaddr & 0x1f0)) { | ||
454 | release_region(ioaddr, CORKSCREW_TOTAL_SIZE); | ||
455 | return 0; | ||
456 | } | ||
457 | /* Verify by reading the device ID from the EEPROM. */ | ||
458 | outw(EEPROM_Read + 7, ioaddr + Wn0EepromCmd); | ||
459 | /* Pause for at least 162 us. for the read to take place. */ | ||
460 | for (timer = 4; timer >= 0; timer--) { | ||
461 | udelay(162); | ||
462 | if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) | ||
463 | break; | ||
464 | } | ||
465 | if (inw(ioaddr + Wn0EepromData) != 0x6d50) { | ||
466 | release_region(ioaddr, CORKSCREW_TOTAL_SIZE); | ||
467 | return 0; | ||
468 | } | ||
469 | return 1; | ||
470 | } | ||
471 | |||
472 | static void cleanup_card(struct net_device *dev) | ||
473 | { | ||
474 | struct corkscrew_private *vp = netdev_priv(dev); | ||
475 | list_del_init(&vp->list); | ||
476 | if (dev->dma) | ||
477 | free_dma(dev->dma); | ||
478 | outw(TotalReset, dev->base_addr + EL3_CMD); | ||
479 | release_region(dev->base_addr, CORKSCREW_TOTAL_SIZE); | ||
480 | if (vp->dev) | ||
481 | pnp_device_detach(to_pnp_dev(vp->dev)); | ||
482 | } | ||
483 | |||
/* Probe for one adapter: first via ISA PnP (unless 'nopnp' is set), then by
 * brute-force scanning ISA I/O ports 0x100-0x3e0 in steps of 0x20.
 * Returns the newly registered net_device on success, NULL when the scan is
 * exhausted, or ERR_PTR(-ENOMEM) if the net_device cannot be allocated.
 * 'unit' >= 0 pins the interface name to eth<unit> for the built-in path. */
static struct net_device *corkscrew_scan(int unit)
{
	struct net_device *dev;
	static int cards_found = 0;
	static int ioaddr;
	int err;
#ifdef __ISAPNP__
	short i;
	static int pnp_cards;
#endif

	dev = alloc_etherdev(sizeof(struct corkscrew_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

#ifdef __ISAPNP__
	if(nopnp == 1)
		goto no_pnp;
	/* Walk the match table; for each entry iterate every PnP device
	 * with that vendor/function, attach and activate it, then verify
	 * the hardware with check_device() before committing. */
	for(i=0; corkscrew_isapnp_adapters[i].vendor != 0; i++) {
		struct pnp_dev *idev = NULL;
		int irq;
		while((idev = pnp_find_dev(NULL,
					   corkscrew_isapnp_adapters[i].vendor,
					   corkscrew_isapnp_adapters[i].function,
					   idev))) {

			if (pnp_device_attach(idev) < 0)
				continue;
			if (pnp_activate_dev(idev) < 0) {
				pr_warning("pnp activate failed (out of resources?)\n");
				pnp_device_detach(idev);
				continue;
			}
			if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0)) {
				pnp_device_detach(idev);
				continue;
			}
			ioaddr = pnp_port_start(idev, 0);
			irq = pnp_irq(idev, 0);
			if (!check_device(ioaddr)) {
				pnp_device_detach(idev);
				continue;
			}
			if(corkscrew_debug)
				pr_debug("ISAPNP reports %s at i/o 0x%x, irq %d\n",
					(char*) corkscrew_isapnp_adapters[i].driver_data, ioaddr, irq);
			pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
				inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
			/* irq = inw(ioaddr + 0x2002) & 15; */ /* Use the irq from isapnp */
			SET_NETDEV_DEV(dev, &idev->dev);
			pnp_cards++;
			err = corkscrew_setup(dev, ioaddr, idev, cards_found++);
			if (!err)
				return dev;
			cleanup_card(dev);
		}
	}
no_pnp:
#endif /* __ISAPNP__ */

	/* Check all locations on the ISA bus -- evil! */
	for (ioaddr = 0x100; ioaddr < 0x400; ioaddr += 0x20) {
		if (!check_device(ioaddr))
			continue;

		pr_info("3c515 Resource configuration register %#4.4x, DCR %4.4x.\n",
			inl(ioaddr + 0x2002), inw(ioaddr + 0x2000));
		err = corkscrew_setup(dev, ioaddr, NULL, cards_found++);
		if (!err)
			return dev;
		cleanup_card(dev);
	}
	free_netdev(dev);
	return NULL;
}
564 | |||
565 | |||
/* net_device callbacks; the generic eth_* helpers cover MTU changes and
 * MAC-address validation/assignment. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= corkscrew_open,
	.ndo_stop		= corkscrew_close,
	.ndo_start_xmit		= corkscrew_start_xmit,
	.ndo_tx_timeout		= corkscrew_timeout,
	.ndo_get_stats		= corkscrew_get_stats,
	.ndo_set_multicast_list	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
577 | |||
578 | |||
579 | static int corkscrew_setup(struct net_device *dev, int ioaddr, | ||
580 | struct pnp_dev *idev, int card_number) | ||
581 | { | ||
582 | struct corkscrew_private *vp = netdev_priv(dev); | ||
583 | unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */ | ||
584 | int i; | ||
585 | int irq; | ||
586 | |||
587 | #ifdef __ISAPNP__ | ||
588 | if (idev) { | ||
589 | irq = pnp_irq(idev, 0); | ||
590 | vp->dev = &idev->dev; | ||
591 | } else { | ||
592 | irq = inw(ioaddr + 0x2002) & 15; | ||
593 | } | ||
594 | #else | ||
595 | irq = inw(ioaddr + 0x2002) & 15; | ||
596 | #endif | ||
597 | |||
598 | dev->base_addr = ioaddr; | ||
599 | dev->irq = irq; | ||
600 | dev->dma = inw(ioaddr + 0x2000) & 7; | ||
601 | vp->product_name = "3c515"; | ||
602 | vp->options = dev->mem_start; | ||
603 | vp->our_dev = dev; | ||
604 | |||
605 | if (!vp->options) { | ||
606 | if (card_number >= MAX_UNITS) | ||
607 | vp->options = -1; | ||
608 | else | ||
609 | vp->options = options[card_number]; | ||
610 | } | ||
611 | |||
612 | if (vp->options >= 0) { | ||
613 | vp->media_override = vp->options & 7; | ||
614 | if (vp->media_override == 2) | ||
615 | vp->media_override = 0; | ||
616 | vp->full_duplex = (vp->options & 8) ? 1 : 0; | ||
617 | vp->bus_master = (vp->options & 16) ? 1 : 0; | ||
618 | } else { | ||
619 | vp->media_override = 7; | ||
620 | vp->full_duplex = 0; | ||
621 | vp->bus_master = 0; | ||
622 | } | ||
623 | #ifdef MODULE | ||
624 | list_add(&vp->list, &root_corkscrew_dev); | ||
625 | #endif | ||
626 | |||
627 | pr_info("%s: 3Com %s at %#3x,", dev->name, vp->product_name, ioaddr); | ||
628 | |||
629 | spin_lock_init(&vp->lock); | ||
630 | |||
631 | /* Read the station address from the EEPROM. */ | ||
632 | EL3WINDOW(0); | ||
633 | for (i = 0; i < 0x18; i++) { | ||
634 | __be16 *phys_addr = (__be16 *) dev->dev_addr; | ||
635 | int timer; | ||
636 | outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd); | ||
637 | /* Pause for at least 162 us. for the read to take place. */ | ||
638 | for (timer = 4; timer >= 0; timer--) { | ||
639 | udelay(162); | ||
640 | if ((inw(ioaddr + Wn0EepromCmd) & 0x0200) == 0) | ||
641 | break; | ||
642 | } | ||
643 | eeprom[i] = inw(ioaddr + Wn0EepromData); | ||
644 | checksum ^= eeprom[i]; | ||
645 | if (i < 3) | ||
646 | phys_addr[i] = htons(eeprom[i]); | ||
647 | } | ||
648 | checksum = (checksum ^ (checksum >> 8)) & 0xff; | ||
649 | if (checksum != 0x00) | ||
650 | pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum); | ||
651 | pr_cont(" %pM", dev->dev_addr); | ||
652 | if (eeprom[16] == 0x11c7) { /* Corkscrew */ | ||
653 | if (request_dma(dev->dma, "3c515")) { | ||
654 | pr_cont(", DMA %d allocation failed", dev->dma); | ||
655 | dev->dma = 0; | ||
656 | } else | ||
657 | pr_cont(", DMA %d", dev->dma); | ||
658 | } | ||
659 | pr_cont(", IRQ %d\n", dev->irq); | ||
660 | /* Tell them about an invalid IRQ. */ | ||
661 | if (corkscrew_debug && (dev->irq <= 0 || dev->irq > 15)) | ||
662 | pr_warning(" *** Warning: this IRQ is unlikely to work! ***\n"); | ||
663 | |||
664 | { | ||
665 | static const char * const ram_split[] = { | ||
666 | "5:3", "3:1", "1:1", "3:5" | ||
667 | }; | ||
668 | __u32 config; | ||
669 | EL3WINDOW(3); | ||
670 | vp->available_media = inw(ioaddr + Wn3_Options); | ||
671 | config = inl(ioaddr + Wn3_Config); | ||
672 | if (corkscrew_debug > 1) | ||
673 | pr_info(" Internal config register is %4.4x, transceivers %#x.\n", | ||
674 | config, inw(ioaddr + Wn3_Options)); | ||
675 | pr_info(" %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n", | ||
676 | 8 << config & Ram_size, | ||
677 | config & Ram_width ? "word" : "byte", | ||
678 | ram_split[(config & Ram_split) >> Ram_split_shift], | ||
679 | config & Autoselect ? "autoselect/" : "", | ||
680 | media_tbl[(config & Xcvr) >> Xcvr_shift].name); | ||
681 | vp->default_media = (config & Xcvr) >> Xcvr_shift; | ||
682 | vp->autoselect = config & Autoselect ? 1 : 0; | ||
683 | dev->if_port = vp->default_media; | ||
684 | } | ||
685 | if (vp->media_override != 7) { | ||
686 | pr_info(" Media override to transceiver type %d (%s).\n", | ||
687 | vp->media_override, | ||
688 | media_tbl[vp->media_override].name); | ||
689 | dev->if_port = vp->media_override; | ||
690 | } | ||
691 | |||
692 | vp->capabilities = eeprom[16]; | ||
693 | vp->full_bus_master_tx = (vp->capabilities & 0x20) ? 1 : 0; | ||
694 | /* Rx is broken at 10mbps, so we always disable it. */ | ||
695 | /* vp->full_bus_master_rx = 0; */ | ||
696 | vp->full_bus_master_rx = (vp->capabilities & 0x20) ? 1 : 0; | ||
697 | |||
698 | /* The 3c51x-specific entries in the device structure. */ | ||
699 | dev->netdev_ops = &netdev_ops; | ||
700 | dev->watchdog_timeo = (400 * HZ) / 1000; | ||
701 | dev->ethtool_ops = &netdev_ethtool_ops; | ||
702 | |||
703 | return register_netdev(dev); | ||
704 | } | ||
705 | |||
706 | |||
707 | static int corkscrew_open(struct net_device *dev) | ||
708 | { | ||
709 | int ioaddr = dev->base_addr; | ||
710 | struct corkscrew_private *vp = netdev_priv(dev); | ||
711 | __u32 config; | ||
712 | int i; | ||
713 | |||
714 | /* Before initializing select the active media port. */ | ||
715 | EL3WINDOW(3); | ||
716 | if (vp->full_duplex) | ||
717 | outb(0x20, ioaddr + Wn3_MAC_Ctrl); /* Set the full-duplex bit. */ | ||
718 | config = inl(ioaddr + Wn3_Config); | ||
719 | |||
720 | if (vp->media_override != 7) { | ||
721 | if (corkscrew_debug > 1) | ||
722 | pr_info("%s: Media override to transceiver %d (%s).\n", | ||
723 | dev->name, vp->media_override, | ||
724 | media_tbl[vp->media_override].name); | ||
725 | dev->if_port = vp->media_override; | ||
726 | } else if (vp->autoselect) { | ||
727 | /* Find first available media type, starting with 100baseTx. */ | ||
728 | dev->if_port = 4; | ||
729 | while (!(vp->available_media & media_tbl[dev->if_port].mask)) | ||
730 | dev->if_port = media_tbl[dev->if_port].next; | ||
731 | |||
732 | if (corkscrew_debug > 1) | ||
733 | pr_debug("%s: Initial media type %s.\n", | ||
734 | dev->name, media_tbl[dev->if_port].name); | ||
735 | |||
736 | init_timer(&vp->timer); | ||
737 | vp->timer.expires = jiffies + media_tbl[dev->if_port].wait; | ||
738 | vp->timer.data = (unsigned long) dev; | ||
739 | vp->timer.function = corkscrew_timer; /* timer handler */ | ||
740 | add_timer(&vp->timer); | ||
741 | } else | ||
742 | dev->if_port = vp->default_media; | ||
743 | |||
744 | config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift); | ||
745 | outl(config, ioaddr + Wn3_Config); | ||
746 | |||
747 | if (corkscrew_debug > 1) { | ||
748 | pr_debug("%s: corkscrew_open() InternalConfig %8.8x.\n", | ||
749 | dev->name, config); | ||
750 | } | ||
751 | |||
752 | outw(TxReset, ioaddr + EL3_CMD); | ||
753 | for (i = 20; i >= 0; i--) | ||
754 | if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) | ||
755 | break; | ||
756 | |||
757 | outw(RxReset, ioaddr + EL3_CMD); | ||
758 | /* Wait a few ticks for the RxReset command to complete. */ | ||
759 | for (i = 20; i >= 0; i--) | ||
760 | if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) | ||
761 | break; | ||
762 | |||
763 | outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD); | ||
764 | |||
765 | /* Use the now-standard shared IRQ implementation. */ | ||
766 | if (vp->capabilities == 0x11c7) { | ||
767 | /* Corkscrew: Cannot share ISA resources. */ | ||
768 | if (dev->irq == 0 || | ||
769 | dev->dma == 0 || | ||
770 | request_irq(dev->irq, corkscrew_interrupt, 0, | ||
771 | vp->product_name, dev)) | ||
772 | return -EAGAIN; | ||
773 | enable_dma(dev->dma); | ||
774 | set_dma_mode(dev->dma, DMA_MODE_CASCADE); | ||
775 | } else if (request_irq(dev->irq, corkscrew_interrupt, IRQF_SHARED, | ||
776 | vp->product_name, dev)) { | ||
777 | return -EAGAIN; | ||
778 | } | ||
779 | |||
780 | if (corkscrew_debug > 1) { | ||
781 | EL3WINDOW(4); | ||
782 | pr_debug("%s: corkscrew_open() irq %d media status %4.4x.\n", | ||
783 | dev->name, dev->irq, inw(ioaddr + Wn4_Media)); | ||
784 | } | ||
785 | |||
786 | /* Set the station address and mask in window 2 each time opened. */ | ||
787 | EL3WINDOW(2); | ||
788 | for (i = 0; i < 6; i++) | ||
789 | outb(dev->dev_addr[i], ioaddr + i); | ||
790 | for (; i < 12; i += 2) | ||
791 | outw(0, ioaddr + i); | ||
792 | |||
793 | if (dev->if_port == 3) | ||
794 | /* Start the thinnet transceiver. We should really wait 50ms... */ | ||
795 | outw(StartCoax, ioaddr + EL3_CMD); | ||
796 | EL3WINDOW(4); | ||
797 | outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP | Media_SQE)) | | ||
798 | media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media); | ||
799 | |||
800 | /* Switch to the stats window, and clear all stats by reading. */ | ||
801 | outw(StatsDisable, ioaddr + EL3_CMD); | ||
802 | EL3WINDOW(6); | ||
803 | for (i = 0; i < 10; i++) | ||
804 | inb(ioaddr + i); | ||
805 | inw(ioaddr + 10); | ||
806 | inw(ioaddr + 12); | ||
807 | /* New: On the Vortex we must also clear the BadSSD counter. */ | ||
808 | EL3WINDOW(4); | ||
809 | inb(ioaddr + 12); | ||
810 | /* ..and on the Boomerang we enable the extra statistics bits. */ | ||
811 | outw(0x0040, ioaddr + Wn4_NetDiag); | ||
812 | |||
813 | /* Switch to register set 7 for normal use. */ | ||
814 | EL3WINDOW(7); | ||
815 | |||
816 | if (vp->full_bus_master_rx) { /* Boomerang bus master. */ | ||
817 | vp->cur_rx = vp->dirty_rx = 0; | ||
818 | if (corkscrew_debug > 2) | ||
819 | pr_debug("%s: Filling in the Rx ring.\n", dev->name); | ||
820 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
821 | struct sk_buff *skb; | ||
822 | if (i < (RX_RING_SIZE - 1)) | ||
823 | vp->rx_ring[i].next = | ||
824 | isa_virt_to_bus(&vp->rx_ring[i + 1]); | ||
825 | else | ||
826 | vp->rx_ring[i].next = 0; | ||
827 | vp->rx_ring[i].status = 0; /* Clear complete bit. */ | ||
828 | vp->rx_ring[i].length = PKT_BUF_SZ | 0x80000000; | ||
829 | skb = dev_alloc_skb(PKT_BUF_SZ); | ||
830 | vp->rx_skbuff[i] = skb; | ||
831 | if (skb == NULL) | ||
832 | break; /* Bad news! */ | ||
833 | skb->dev = dev; /* Mark as being used by this device. */ | ||
834 | skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ | ||
835 | vp->rx_ring[i].addr = isa_virt_to_bus(skb->data); | ||
836 | } | ||
837 | if (i != 0) | ||
838 | vp->rx_ring[i - 1].next = | ||
839 | isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ | ||
840 | outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); | ||
841 | } | ||
842 | if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */ | ||
843 | vp->cur_tx = vp->dirty_tx = 0; | ||
844 | outb(PKT_BUF_SZ >> 8, ioaddr + TxFreeThreshold); /* Room for a packet. */ | ||
845 | /* Clear the Tx ring. */ | ||
846 | for (i = 0; i < TX_RING_SIZE; i++) | ||
847 | vp->tx_skbuff[i] = NULL; | ||
848 | outl(0, ioaddr + DownListPtr); | ||
849 | } | ||
850 | /* Set receiver mode: presumably accept b-case and phys addr only. */ | ||
851 | set_rx_mode(dev); | ||
852 | outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ | ||
853 | |||
854 | netif_start_queue(dev); | ||
855 | |||
856 | outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ | ||
857 | outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ | ||
858 | /* Allow status bits to be seen. */ | ||
859 | outw(SetStatusEnb | AdapterFailure | IntReq | StatsFull | | ||
860 | (vp->full_bus_master_tx ? DownComplete : TxAvailable) | | ||
861 | (vp->full_bus_master_rx ? UpComplete : RxComplete) | | ||
862 | (vp->bus_master ? DMADone : 0), ioaddr + EL3_CMD); | ||
863 | /* Ack all pending events, and set active indicator mask. */ | ||
864 | outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq, | ||
865 | ioaddr + EL3_CMD); | ||
866 | outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull | ||
867 | | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete, | ||
868 | ioaddr + EL3_CMD); | ||
869 | |||
870 | return 0; | ||
871 | } | ||
872 | |||
/*
 * Media-selection timer (compiled in only with AUTOMEDIA).
 *
 * Armed by corkscrew_open(); checks whether the currently selected
 * transceiver shows link beat and, if not, steps through the remaining
 * media in media_tbl[] (re-arming itself for each candidate) until one
 * works or we wrap around and fall back to the EEPROM default media.
 *
 * Fix: the debug messages used to read "Media %s is has no ..."; the
 * stray "is" is removed ("has no link beat" / "has no indication").
 */
static void corkscrew_timer(unsigned long data)
{
#ifdef AUTOMEDIA
	struct net_device *dev = (struct net_device *) data;
	struct corkscrew_private *vp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	unsigned long flags;
	int ok = 0;

	if (corkscrew_debug > 1)
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			dev->name, media_tbl[dev->if_port].name);

	spin_lock_irqsave(&vp->lock, flags);

	{
		/* Remember the register window so it can be restored below. */
		int old_window = inw(ioaddr + EL3_CMD) >> 13;
		int media_status;
		EL3WINDOW(4);
		media_status = inw(ioaddr + Wn4_Media);
		switch (dev->if_port) {
		case 0:
		case 4:
		case 5:	/* 10baseT, 100baseTX, 100baseFX */
			if (media_status & Media_LnkBeat) {
				ok = 1;
				if (corkscrew_debug > 1)
					pr_debug("%s: Media %s has link beat, %x.\n",
						dev->name,
						media_tbl[dev->if_port].name,
						media_status);
			} else if (corkscrew_debug > 1)
				pr_debug("%s: Media %s has no link beat, %x.\n",
					dev->name,
					media_tbl[dev->if_port].name,
					media_status);

			break;
		default:	/* Other media types handled by Tx timeouts. */
			if (corkscrew_debug > 1)
				pr_debug("%s: Media %s has no indication, %x.\n",
					dev->name,
					media_tbl[dev->if_port].name,
					media_status);
			ok = 1;
		}
		if (!ok) {
			__u32 config;

			/* Advance to the next media type this card supports. */
			do {
				dev->if_port =
				    media_tbl[dev->if_port].next;
			}
			while (!(vp->available_media & media_tbl[dev->if_port].mask));

			if (dev->if_port == 8) {	/* Go back to default. */
				dev->if_port = vp->default_media;
				if (corkscrew_debug > 1)
					pr_debug("%s: Media selection failing, using default %s port.\n",
						dev->name,
						media_tbl[dev->if_port].name);
			} else {
				if (corkscrew_debug > 1)
					pr_debug("%s: Media selection failed, now trying %s port.\n",
						dev->name,
						media_tbl[dev->if_port].name);
				/* Re-arm the timer to test the new media. */
				vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
				add_timer(&vp->timer);
			}
			/* Program the media bits for the new transceiver. */
			outw((media_status & ~(Media_10TP | Media_SQE)) |
			     media_tbl[dev->if_port].media_bits,
			     ioaddr + Wn4_Media);

			/* Reflect the new transceiver in InternalConfig. */
			EL3WINDOW(3);
			config = inl(ioaddr + Wn3_Config);
			config = (config & ~Xcvr) | (dev->if_port << Xcvr_shift);
			outl(config, ioaddr + Wn3_Config);

			outw(dev->if_port == 3 ? StartCoax : StopCoax,
			     ioaddr + EL3_CMD);
		}
		EL3WINDOW(old_window);
	}

	spin_unlock_irqrestore(&vp->lock, flags);
	if (corkscrew_debug > 1)
		pr_debug("%s: Media selection timer finished, %s.\n",
			dev->name, media_tbl[dev->if_port].name);

#endif				/* AUTOMEDIA */
}
964 | |||
965 | static void corkscrew_timeout(struct net_device *dev) | ||
966 | { | ||
967 | int i; | ||
968 | struct corkscrew_private *vp = netdev_priv(dev); | ||
969 | int ioaddr = dev->base_addr; | ||
970 | |||
971 | pr_warning("%s: transmit timed out, tx_status %2.2x status %4.4x.\n", | ||
972 | dev->name, inb(ioaddr + TxStatus), | ||
973 | inw(ioaddr + EL3_STATUS)); | ||
974 | /* Slight code bloat to be user friendly. */ | ||
975 | if ((inb(ioaddr + TxStatus) & 0x88) == 0x88) | ||
976 | pr_warning("%s: Transmitter encountered 16 collisions --" | ||
977 | " network cable problem?\n", dev->name); | ||
978 | #ifndef final_version | ||
979 | pr_debug(" Flags; bus-master %d, full %d; dirty %d current %d.\n", | ||
980 | vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, | ||
981 | vp->cur_tx); | ||
982 | pr_debug(" Down list %8.8x vs. %p.\n", inl(ioaddr + DownListPtr), | ||
983 | &vp->tx_ring[0]); | ||
984 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
985 | pr_debug(" %d: %p length %8.8x status %8.8x\n", i, | ||
986 | &vp->tx_ring[i], | ||
987 | vp->tx_ring[i].length, vp->tx_ring[i].status); | ||
988 | } | ||
989 | #endif | ||
990 | /* Issue TX_RESET and TX_START commands. */ | ||
991 | outw(TxReset, ioaddr + EL3_CMD); | ||
992 | for (i = 20; i >= 0; i--) | ||
993 | if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) | ||
994 | break; | ||
995 | outw(TxEnable, ioaddr + EL3_CMD); | ||
996 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
997 | dev->stats.tx_errors++; | ||
998 | dev->stats.tx_dropped++; | ||
999 | netif_wake_queue(dev); | ||
1000 | } | ||
1001 | |||
/*
 * Queue one packet for transmission.
 *
 * Two hardware paths:
 *  - full_bus_master_tx (Boomerang): append the skb to the bus-master
 *    Tx descriptor ring under DownStall/DownUnstall protection;
 *  - otherwise: push the packet through the Tx FIFO with PIO (or, when
 *    VORTEX_BUS_MASTER is defined and the card supports it, start a
 *    single-shot DMA transfer that completes in the DMADone interrupt).
 *
 * Returns NETDEV_TX_BUSY only when the ring is full, NETDEV_TX_OK
 * otherwise.
 */
static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct corkscrew_private *vp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Block a timer-based transmit from overlapping. */

	netif_stop_queue(dev);

	if (vp->full_bus_master_tx) {	/* BOOMERANG bus-master */
		/* Calculate the next Tx descriptor entry. */
		int entry = vp->cur_tx % TX_RING_SIZE;
		struct boom_tx_desc *prev_entry;
		unsigned long flags;
		int i;

		if (vp->tx_full)	/* No room to transmit with */
			return NETDEV_TX_BUSY;
		if (vp->cur_tx != 0)
			prev_entry = &vp->tx_ring[(vp->cur_tx - 1) % TX_RING_SIZE];
		else
			prev_entry = NULL;
		if (corkscrew_debug > 3)
			pr_debug("%s: Trying to send a packet, Tx index %d.\n",
				dev->name, vp->cur_tx);
		/* vp->tx_full = 1; */
		/* Fill the descriptor; 0x80000000 requests a per-packet
		   interrupt (cleared on the previous entry below when the
		   ring is not about to fill). */
		vp->tx_skbuff[entry] = skb;
		vp->tx_ring[entry].next = 0;
		vp->tx_ring[entry].addr = isa_virt_to_bus(skb->data);
		vp->tx_ring[entry].length = skb->len | 0x80000000;
		vp->tx_ring[entry].status = skb->len | 0x80000000;

		/* Stall the download engine before linking in the new
		   descriptor, so the NIC never follows a half-written list. */
		spin_lock_irqsave(&vp->lock, flags);
		outw(DownStall, ioaddr + EL3_CMD);
		/* Wait for the stall to complete. */
		for (i = 20; i >= 0; i--)
			if ((inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
				break;
		if (prev_entry)
			prev_entry->next = isa_virt_to_bus(&vp->tx_ring[entry]);
		if (inl(ioaddr + DownListPtr) == 0) {
			/* The list went idle: restart it at our descriptor. */
			outl(isa_virt_to_bus(&vp->tx_ring[entry]),
			     ioaddr + DownListPtr);
			queued_packet++;
		}
		outw(DownUnstall, ioaddr + EL3_CMD);
		spin_unlock_irqrestore(&vp->lock, flags);

		vp->cur_tx++;
		if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
			vp->tx_full = 1;
		else {		/* Clear previous interrupt enable. */
			if (prev_entry)
				prev_entry->status &= ~0x80000000;
			netif_wake_queue(dev);
		}
		return NETDEV_TX_OK;
	}
	/* PIO / single-shot DMA path. */
	/* Put out the doubleword header... */
	outl(skb->len, ioaddr + TX_FIFO);
	dev->stats.tx_bytes += skb->len;
#ifdef VORTEX_BUS_MASTER
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		outl((int) (skb->data), ioaddr + Wn7_MasterAddr);
		outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
		vp->tx_skb = skb;	/* freed in the DMADone interrupt */
		outw(StartDMADown, ioaddr + EL3_CMD);
		/* queue will be woken at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_kfree_skb(skb);
		if (inw(ioaddr + TxFree) > 1536) {
			netif_wake_queue(dev);
		} else
			/* Interrupt us when the FIFO has room for max-sized packet. */
			outw(SetTxThreshold + (1536 >> 2),
			     ioaddr + EL3_CMD);
	}
#else
	/* ... and the packet rounded to a doubleword. */
	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
	dev_kfree_skb(skb);
	if (inw(ioaddr + TxFree) > 1536) {
		netif_wake_queue(dev);
	} else
		/* Interrupt us when the FIFO has room for max-sized packet. */
		outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
#endif				/* bus master */


	/* Clear the Tx status stack; recover the transmitter from any
	   Tx-disabling error left behind by earlier packets. */
	{
		short tx_status;
		int i = 4;

		while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {	/* A Tx-disabling error occurred. */
				if (corkscrew_debug > 2)
					pr_debug("%s: Tx error, status %2.2x.\n",
						dev->name, tx_status);
				if (tx_status & 0x04)
					dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38)
					dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					/* Jabber or underrun needs a Tx reset. */
					int j;
					outw(TxReset, ioaddr + EL3_CMD);
					for (j = 20; j >= 0; j--)
						if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
							break;
				}
				outw(TxEnable, ioaddr + EL3_CMD);
			}
			outb(0x00, ioaddr + TxStatus);	/* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
1123 | |||
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread.

   Loops until IntLatch/RxComplete are clear or max_interrupt_work
   events have been handled, dispatching to corkscrew_rx()/boomerang_rx()
   and reaping finished bus-master Tx descriptors, acking each event as
   it is serviced. */

static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
{
	/* Use the now-standard shared IRQ implementation. */
	struct net_device *dev = dev_id;
	struct corkscrew_private *lp = netdev_priv(dev);
	int ioaddr, status;
	int latency;
	int i = max_interrupt_work;	/* event budget for this invocation */

	ioaddr = dev->base_addr;
	latency = inb(ioaddr + Timer);

	spin_lock(&lp->lock);

	status = inw(ioaddr + EL3_STATUS);

	if (corkscrew_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, timer %d.\n",
			dev->name, status, latency);
	if ((status & 0xE000) != 0xE000) {
		static int donedidthis;
		/* Some interrupt controllers store a bogus interrupt from boot-time.
		   Ignore a single early interrupt, but don't hang the machine for
		   other interrupt problems. */
		if (donedidthis++ > 100) {
			pr_err("%s: Bogus interrupt, bailing. Status %4.4x, start=%d.\n",
				dev->name, status, netif_running(dev));
			free_irq(dev->irq, dev);
			dev->irq = -1;
		}
	}

	do {
		if (corkscrew_debug > 5)
			pr_debug("%s: In interrupt loop, status %4.4x.\n",
				dev->name, status);
		if (status & RxComplete)
			corkscrew_rx(dev);	/* PIO receive path */

		if (status & TxAvailable) {
			if (corkscrew_debug > 5)
				pr_debug("	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue(dev);
		}
		if (status & DownComplete) {
			/* Reap transmitted bus-master descriptors. */
			unsigned int dirty_tx = lp->dirty_tx;

			while (lp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
				if (inl(ioaddr + DownListPtr) == isa_virt_to_bus(&lp->tx_ring[entry]))
					break;	/* It still hasn't been processed. */
				if (lp->tx_skbuff[entry]) {
					dev_kfree_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}
			lp->dirty_tx = dirty_tx;
			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
			if (lp->tx_full && (lp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
				/* Ring has space again: restart the queue. */
				lp->tx_full = 0;
				netif_wake_queue(dev);
			}
		}
#ifdef VORTEX_BUS_MASTER
		if (status & DMADone) {
			outw(0x1000, ioaddr + Wn7_MasterStatus);	/* Ack the event. */
			dev_kfree_skb_irq(lp->tx_skb);	/* Release the transferred buffer */
			netif_wake_queue(dev);
		}
#endif
		if (status & UpComplete) {
			boomerang_rx(dev);	/* bus-master receive path */
			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
		}
		if (status & (AdapterFailure | RxEarly | StatsFull)) {
			/* Handle all uncommon interrupts at once. */
			if (status & RxEarly) {	/* Rx early is unused. */
				corkscrew_rx(dev);
				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
			}
			if (status & StatsFull) {	/* Empty statistics. */
				static int DoneDidThat;
				if (corkscrew_debug > 4)
					pr_debug("%s: Updating stats.\n", dev->name);
				update_stats(ioaddr, dev);
				/* DEBUG HACK: Disable statistics as an interrupt source. */
				/* This occurs when we have the wrong media type! */
				if (DoneDidThat == 0 && inw(ioaddr + EL3_STATUS) & StatsFull) {
					int win, reg;
					pr_notice("%s: Updating stats failed, disabling stats as an interrupt source.\n",
						dev->name);
					for (win = 0; win < 8; win++) {
						EL3WINDOW(win);
						pr_notice("Vortex window %d:", win);
						for (reg = 0; reg < 16; reg++)
							pr_cont(" %2.2x", inb(ioaddr + reg));
						pr_cont("\n");
					}
					EL3WINDOW(7);
					outw(SetIntrEnb | TxAvailable |
					     RxComplete | AdapterFailure |
					     UpComplete | DownComplete |
					     TxComplete, ioaddr + EL3_CMD);
					DoneDidThat++;
				}
			}
			if (status & AdapterFailure) {
				/* Adapter failure requires Rx reset and reinit. */
				outw(RxReset, ioaddr + EL3_CMD);
				/* Set the Rx filter to the current state. */
				set_rx_mode(dev);
				outw(RxEnable, ioaddr + EL3_CMD);	/* Re-enable the receiver. */
				outw(AckIntr | AdapterFailure,
				     ioaddr + EL3_CMD);
			}
		}

		if (--i < 0) {
			/* Event budget exhausted: mask the offending sources
			   so a stuck interrupt cannot hang the machine. */
			pr_err("%s: Too much work in interrupt, status %4.4x. Disabling functions (%4.4x).\n",
				dev->name, status, SetStatusEnb | ((~status) & 0x7FE));
			/* Disable all pending interrupts. */
			outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
			outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);

	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	spin_unlock(&lp->lock);

	if (corkscrew_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n", dev->name, status);
	return IRQ_HANDLED;
}
1266 | |||
/*
 * PIO receive path: drain the Rx FIFO, one packet per RxStatus read.
 * Good frames are copied out with insl() into a fresh skb and handed to
 * the stack; bad or unallocatable frames are popped with RxDiscard.
 * Always returns 0.
 */
static int corkscrew_rx(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	int i;
	short rx_status;

	if (corkscrew_debug > 5)
		pr_debug("	 In rx_packet(), status %4.4x, rx_status %4.4x.\n",
			inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) {	/* Error, update stats. */
			unsigned char rx_error = inb(ioaddr + RxErrors);
			if (corkscrew_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n",
					rx_error);
			dev->stats.rx_errors++;
			/* Decode the per-frame error bits. */
			if (rx_error & 0x01)
				dev->stats.rx_over_errors++;
			if (rx_error & 0x02)
				dev->stats.rx_length_errors++;
			if (rx_error & 0x04)
				dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)
				dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)
				dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			short pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5 + 2);
			if (corkscrew_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				insl(ioaddr + RX_FIFO,
				     skb_put(skb, pkt_len),
				     (pkt_len + 3) >> 2);
				outw(RxDiscard, ioaddr + EL3_CMD);	/* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (corkscrew_debug)
				pr_debug("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
		}
		/* Error or allocation failure: drop the frame. */
		outw(RxDiscard, ioaddr + EL3_CMD);
		dev->stats.rx_dropped++;
		/* Wait a limited time to skip this packet. */
		for (i = 200; i >= 0; i--)
			if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
				break;
	}
	return 0;
}
1330 | |||
/*
 * Bus-master (Boomerang) receive path: walk the Rx descriptor ring from
 * cur_rx, handing completed frames to the stack.  Frames shorter than
 * rx_copybreak are copied into a small fresh skb (the ring buffer is
 * reused); larger frames pass their ring skb up directly.  Afterwards
 * the ring is refilled from dirty_rx.  Always returns 0.
 */
static int boomerang_rx(struct net_device *dev)
{
	struct corkscrew_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	int ioaddr = dev->base_addr;
	int rx_status;

	if (corkscrew_debug > 5)
		pr_debug("	 In boomerang_rx(), status %4.4x, rx_status %4.4x.\n",
			inw(ioaddr + EL3_STATUS), inw(ioaddr + RxStatus));
	while ((rx_status = vp->rx_ring[entry].status) & RxDComplete) {
		if (rx_status & RxDError) {	/* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (corkscrew_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n",
					rx_error);
			dev->stats.rx_errors++;
			/* Decode the per-frame error bits. */
			if (rx_error & 0x01)
				dev->stats.rx_over_errors++;
			if (rx_error & 0x02)
				dev->stats.rx_length_errors++;
			if (rx_error & 0x04)
				dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)
				dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)
				dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K! */
			short pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			dev->stats.rx_bytes += pkt_len;
			if (corkscrew_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = dev_alloc_skb(pkt_len + 4)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
				       isa_bus_to_virt(vp->rx_ring[entry].
						       addr), pkt_len);
				rx_copy++;
			} else {
				void *temp;
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				temp = skb_put(skb, pkt_len);
				/* Remove this checking code for final release. */
				if (isa_bus_to_virt(vp->rx_ring[entry].addr) != temp)
					pr_warning("%s: Warning -- the skbuff addresses do not match"
						   " in boomerang_rx: %p vs. %p / %p.\n",
						   dev->name,
						   isa_bus_to_virt(vp->
								   rx_ring[entry].
								   addr), skb->head,
						   temp);
				rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(PKT_BUF_SZ);
			if (skb == NULL)
				break;	/* Bad news!  Retried at the next refill. */
			skb->dev = dev;	/* Mark as being used by this device. */
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
	}
	return 0;
}
1418 | |||
1419 | static int corkscrew_close(struct net_device *dev) | ||
1420 | { | ||
1421 | struct corkscrew_private *vp = netdev_priv(dev); | ||
1422 | int ioaddr = dev->base_addr; | ||
1423 | int i; | ||
1424 | |||
1425 | netif_stop_queue(dev); | ||
1426 | |||
1427 | if (corkscrew_debug > 1) { | ||
1428 | pr_debug("%s: corkscrew_close() status %4.4x, Tx status %2.2x.\n", | ||
1429 | dev->name, inw(ioaddr + EL3_STATUS), | ||
1430 | inb(ioaddr + TxStatus)); | ||
1431 | pr_debug("%s: corkscrew close stats: rx_nocopy %d rx_copy %d tx_queued %d.\n", | ||
1432 | dev->name, rx_nocopy, rx_copy, queued_packet); | ||
1433 | } | ||
1434 | |||
1435 | del_timer(&vp->timer); | ||
1436 | |||
1437 | /* Turn off statistics ASAP. We update lp->stats below. */ | ||
1438 | outw(StatsDisable, ioaddr + EL3_CMD); | ||
1439 | |||
1440 | /* Disable the receiver and transmitter. */ | ||
1441 | outw(RxDisable, ioaddr + EL3_CMD); | ||
1442 | outw(TxDisable, ioaddr + EL3_CMD); | ||
1443 | |||
1444 | if (dev->if_port == XCVR_10base2) | ||
1445 | /* Turn off thinnet power. Green! */ | ||
1446 | outw(StopCoax, ioaddr + EL3_CMD); | ||
1447 | |||
1448 | free_irq(dev->irq, dev); | ||
1449 | |||
1450 | outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD); | ||
1451 | |||
1452 | update_stats(ioaddr, dev); | ||
1453 | if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */ | ||
1454 | outl(0, ioaddr + UpListPtr); | ||
1455 | for (i = 0; i < RX_RING_SIZE; i++) | ||
1456 | if (vp->rx_skbuff[i]) { | ||
1457 | dev_kfree_skb(vp->rx_skbuff[i]); | ||
1458 | vp->rx_skbuff[i] = NULL; | ||
1459 | } | ||
1460 | } | ||
1461 | if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */ | ||
1462 | outl(0, ioaddr + DownListPtr); | ||
1463 | for (i = 0; i < TX_RING_SIZE; i++) | ||
1464 | if (vp->tx_skbuff[i]) { | ||
1465 | dev_kfree_skb(vp->tx_skbuff[i]); | ||
1466 | vp->tx_skbuff[i] = NULL; | ||
1467 | } | ||
1468 | } | ||
1469 | |||
1470 | return 0; | ||
1471 | } | ||
1472 | |||
1473 | static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) | ||
1474 | { | ||
1475 | struct corkscrew_private *vp = netdev_priv(dev); | ||
1476 | unsigned long flags; | ||
1477 | |||
1478 | if (netif_running(dev)) { | ||
1479 | spin_lock_irqsave(&vp->lock, flags); | ||
1480 | update_stats(dev->base_addr, dev); | ||
1481 | spin_unlock_irqrestore(&vp->lock, flags); | ||
1482 | } | ||
1483 | return &dev->stats; | ||
1484 | } | ||
1485 | |||
/* Update statistics.
   Unlike with the EL3 we need not worry about interrupts changing
   the window setting from underneath us, but we must still guard
   against a race condition with a StatsUpdate interrupt updating the
   table.  This is done by checking that the ASM (!) code generated uses
   atomic updates with '+='.

   Note: the hardware counters in window 6 are clear-on-read, so every
   register must be read even when its value is discarded; the read
   order below must not be changed.
 */
static void update_stats(int ioaddr, struct net_device *dev)
{
	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	EL3WINDOW(6);
	dev->stats.tx_carrier_errors += inb(ioaddr + 0);
	dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
	/* Multiple collisions. */ inb(ioaddr + 2);
	dev->stats.collisions += inb(ioaddr + 3);
	dev->stats.tx_window_errors += inb(ioaddr + 4);
	dev->stats.rx_fifo_errors += inb(ioaddr + 5);
	dev->stats.tx_packets += inb(ioaddr + 6);
	/* Register 9 bits 4-5 extend the tx_packets counter. */
	dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
	/* Rx packets */ inb(ioaddr + 7);
	/* Must read to clear */
	/* Tx deferrals */ inb(ioaddr + 8);
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	inw(ioaddr + 10);	/* Total Rx and Tx octets. */
	inw(ioaddr + 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	EL3WINDOW(4);
	inb(ioaddr + 12);

	/* We change back to window 7 (not 1) with the Vortex. */
	EL3WINDOW(7);
}
1521 | |||
1522 | /* This new version of set_rx_mode() supports v1.4 kernels. | ||
1523 | The Vortex chip has no documented multicast filter, so the only | ||
1524 | multicast setting is to receive all multicast frames. At least | ||
1525 | the chip has a very clean way to set the mode, unlike many others. */ | ||
1526 | static void set_rx_mode(struct net_device *dev) | ||
1527 | { | ||
1528 | int ioaddr = dev->base_addr; | ||
1529 | short new_mode; | ||
1530 | |||
1531 | if (dev->flags & IFF_PROMISC) { | ||
1532 | if (corkscrew_debug > 3) | ||
1533 | pr_debug("%s: Setting promiscuous mode.\n", | ||
1534 | dev->name); | ||
1535 | new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm; | ||
1536 | } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { | ||
1537 | new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast; | ||
1538 | } else | ||
1539 | new_mode = SetRxFilter | RxStation | RxBroadcast; | ||
1540 | |||
1541 | outw(new_mode, ioaddr + EL3_CMD); | ||
1542 | } | ||
1543 | |||
1544 | static void netdev_get_drvinfo(struct net_device *dev, | ||
1545 | struct ethtool_drvinfo *info) | ||
1546 | { | ||
1547 | strcpy(info->driver, DRV_NAME); | ||
1548 | strcpy(info->version, DRV_VERSION); | ||
1549 | sprintf(info->bus_info, "ISA 0x%lx", dev->base_addr); | ||
1550 | } | ||
1551 | |||
1552 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
1553 | { | ||
1554 | return corkscrew_debug; | ||
1555 | } | ||
1556 | |||
1557 | static void netdev_set_msglevel(struct net_device *dev, u32 level) | ||
1558 | { | ||
1559 | corkscrew_debug = level; | ||
1560 | } | ||
1561 | |||
/* Only the generic driver-info and message-level ethtool hooks are
   implemented; everything else falls back to the ethtool defaults. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_msglevel = netdev_get_msglevel,
	.set_msglevel = netdev_set_msglevel,
};
1567 | |||
1568 | |||
#ifdef MODULE
/* Module unload: drain the global list of probed cards, unregistering
 * and freeing each device until the list is empty. */
void cleanup_module(void)
{
	while (!list_empty(&root_corkscrew_dev)) {
		struct corkscrew_private *priv =
			list_entry(root_corkscrew_dev.next,
				   struct corkscrew_private, list);
		struct net_device *dev = priv->our_dev;

		unregister_netdev(dev);
		cleanup_card(dev);
		free_netdev(dev);
	}
}
#endif				/* MODULE */
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c new file mode 100644 index 000000000000..34c5e1cbf65d --- /dev/null +++ b/drivers/net/ethernet/3com/3c574_cs.c | |||
@@ -0,0 +1,1181 @@ | |||
1 | /* 3c574.c: A PCMCIA ethernet driver for the 3com 3c574 "RoadRunner". | ||
2 | |||
3 | Written 1993-1998 by | ||
4 | Donald Becker, becker@scyld.com, (driver core) and | ||
5 | David Hinds, dahinds@users.sourceforge.net (from his PC card code). | ||
6 | Locking fixes (C) Copyright 2003 Red Hat Inc | ||
7 | |||
8 | This software may be used and distributed according to the terms of | ||
9 | the GNU General Public License, incorporated herein by reference. | ||
10 | |||
11 | This driver derives from Donald Becker's 3c509 core, which has the | ||
12 | following copyright: | ||
13 | Copyright 1993 United States Government as represented by the | ||
14 | Director, National Security Agency. | ||
15 | |||
16 | |||
17 | */ | ||
18 | |||
19 | /* | ||
20 | Theory of Operation | ||
21 | |||
22 | I. Board Compatibility | ||
23 | |||
24 | This device driver is designed for the 3Com 3c574 PC card Fast Ethernet | ||
25 | Adapter. | ||
26 | |||
27 | II. Board-specific settings | ||
28 | |||
29 | None -- PC cards are autoconfigured. | ||
30 | |||
31 | III. Driver operation | ||
32 | |||
33 | The 3c574 uses a Boomerang-style interface, without the bus-master capability. | ||
34 | See the Boomerang driver and documentation for most details. | ||
35 | |||
36 | IV. Notes and chip documentation. | ||
37 | |||
38 | Two added registers are used to enhance PIO performance, RunnerRdCtrl and | ||
39 | RunnerWrCtrl. These are 11 bit down-counters that are preloaded with the | ||
40 | count of word (16 bits) reads or writes the driver is about to do to the Rx | ||
41 | or Tx FIFO. The chip is then able to hide the internal-PCI-bus to PC-card | ||
42 | translation latency by buffering the I/O operations with an 8 word FIFO. | ||
43 | Note: No other chip accesses are permitted when this buffer is used. | ||
44 | |||
45 | A second enhancement is that both attribute and common memory space | ||
0x0800-0x0fff can be translated to the PIO FIFO.  Thus memory operations (faster
47 | with *some* PCcard bridges) may be used instead of I/O operations. | ||
48 | This is enabled by setting the 0x10 bit in the PCMCIA LAN COR. | ||
49 | |||
50 | Some slow PC card bridges work better if they never see a WAIT signal. | ||
51 | This is configured by setting the 0x20 bit in the PCMCIA LAN COR. | ||
52 | Only do this after testing that it is reliable and improves performance. | ||
53 | |||
54 | The upper five bits of RunnerRdCtrl are used to window into PCcard | ||
55 | configuration space registers. Window 0 is the regular Boomerang/Odie | ||
56 | register set, 1-5 are various PC card control registers, and 16-31 are | ||
57 | the (reversed!) CIS table. | ||
58 | |||
59 | A final note: writing the InternalConfig register in window 3 with an | ||
60 | invalid ramWidth is Very Bad. | ||
61 | |||
62 | V. References | ||
63 | |||
64 | http://www.scyld.com/expert/NWay.html | ||
65 | http://www.national.com/opf/DP/DP83840A.html | ||
66 | |||
67 | Thanks to Terry Murphy of 3Com for providing development information for | ||
68 | earlier 3Com products. | ||
69 | |||
70 | */ | ||
71 | |||
72 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
73 | |||
74 | #include <linux/module.h> | ||
75 | #include <linux/kernel.h> | ||
76 | #include <linux/init.h> | ||
77 | #include <linux/slab.h> | ||
78 | #include <linux/string.h> | ||
79 | #include <linux/timer.h> | ||
80 | #include <linux/interrupt.h> | ||
81 | #include <linux/in.h> | ||
82 | #include <linux/delay.h> | ||
83 | #include <linux/netdevice.h> | ||
84 | #include <linux/etherdevice.h> | ||
85 | #include <linux/skbuff.h> | ||
86 | #include <linux/if_arp.h> | ||
87 | #include <linux/ioport.h> | ||
88 | #include <linux/bitops.h> | ||
89 | #include <linux/mii.h> | ||
90 | |||
91 | #include <pcmcia/cistpl.h> | ||
92 | #include <pcmcia/cisreg.h> | ||
93 | #include <pcmcia/ciscode.h> | ||
94 | #include <pcmcia/ds.h> | ||
95 | |||
96 | #include <asm/uaccess.h> | ||
97 | #include <asm/io.h> | ||
98 | #include <asm/system.h> | ||
99 | |||
100 | /*====================================================================*/ | ||
101 | |||
102 | /* Module parameters */ | ||
103 | |||
104 | MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | ||
105 | MODULE_DESCRIPTION("3Com 3c574 series PCMCIA ethernet driver"); | ||
106 | MODULE_LICENSE("GPL"); | ||
107 | |||
108 | #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) | ||
109 | |||
110 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
111 | INT_MODULE_PARM(max_interrupt_work, 32); | ||
112 | |||
113 | /* Force full duplex modes? */ | ||
114 | INT_MODULE_PARM(full_duplex, 0); | ||
115 | |||
116 | /* Autodetect link polarity reversal? */ | ||
117 | INT_MODULE_PARM(auto_polarity, 1); | ||
118 | |||
119 | |||
120 | /*====================================================================*/ | ||
121 | |||
122 | /* Time in jiffies before concluding the transmitter is hung. */ | ||
123 | #define TX_TIMEOUT ((800*HZ)/1000) | ||
124 | |||
125 | /* To minimize the size of the driver source and make the driver more | ||
126 | readable not all constants are symbolically defined. | ||
127 | You'll need the manual if you want to understand driver details anyway. */ | ||
128 | /* Offsets from base I/O address. */ | ||
129 | #define EL3_DATA 0x00 | ||
130 | #define EL3_CMD 0x0e | ||
131 | #define EL3_STATUS 0x0e | ||
132 | |||
133 | #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) | ||
134 | |||
135 | /* The top five bits written to EL3_CMD are a command, the lower | ||
136 | 11 bits are the parameter, if applicable. */ | ||
137 | enum el3_cmds { | ||
138 | TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, | ||
139 | RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, | ||
140 | TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, | ||
141 | FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, | ||
142 | SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, | ||
143 | SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, | ||
144 | StatsDisable = 22<<11, StopCoax = 23<<11, | ||
145 | }; | ||
146 | |||
147 | enum elxl_status { | ||
148 | IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, | ||
149 | TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, | ||
150 | IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 }; | ||
151 | |||
152 | /* The SetRxFilter command accepts the following classes: */ | ||
153 | enum RxFilter { | ||
154 | RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 | ||
155 | }; | ||
156 | |||
157 | enum Window0 { | ||
158 | Wn0EepromCmd = 10, Wn0EepromData = 12, /* EEPROM command/address, data. */ | ||
159 | IntrStatus=0x0E, /* Valid in all windows. */ | ||
160 | }; | ||
161 | /* These assumes the larger EEPROM. */ | ||
162 | enum Win0_EEPROM_cmds { | ||
163 | EEPROM_Read = 0x200, EEPROM_WRITE = 0x100, EEPROM_ERASE = 0x300, | ||
164 | EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */ | ||
165 | EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */ | ||
166 | }; | ||
167 | |||
168 | /* Register window 1 offsets, the window used in normal operation. | ||
169 | On the "Odie" this window is always mapped at offsets 0x10-0x1f. | ||
170 | Except for TxFree, which is overlapped by RunnerWrCtrl. */ | ||
171 | enum Window1 { | ||
172 | TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14, | ||
173 | RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B, | ||
174 | TxFree = 0x0C, /* Remaining free bytes in Tx buffer. */ | ||
175 | RunnerRdCtrl = 0x16, RunnerWrCtrl = 0x1c, | ||
176 | }; | ||
177 | |||
178 | enum Window3 { /* Window 3: MAC/config bits. */ | ||
179 | Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8, | ||
180 | }; | ||
181 | enum wn3_config { | ||
182 | Ram_size = 7, | ||
183 | Ram_width = 8, | ||
184 | Ram_speed = 0x30, | ||
185 | Rom_size = 0xc0, | ||
186 | Ram_split_shift = 16, | ||
187 | Ram_split = 3 << Ram_split_shift, | ||
188 | Xcvr_shift = 20, | ||
189 | Xcvr = 7 << Xcvr_shift, | ||
190 | Autoselect = 0x1000000, | ||
191 | }; | ||
192 | |||
193 | enum Window4 { /* Window 4: Xcvr/media bits. */ | ||
194 | Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10, | ||
195 | }; | ||
196 | |||
197 | #define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ | ||
198 | |||
/* Per-device driver state, stored in the netdev private area. */
struct el3_private {
	struct pcmcia_device *p_dev;
	u16 advertising, partner;		/* NWay media advertisement */
	unsigned char phys;			/* MII device address */
	unsigned int autoselect:1, default_media:3;	/* Read from the EEPROM/Wn3_Config. */
	/* for transceiver monitoring */
	struct timer_list media;
	unsigned short media_status;
	unsigned short fast_poll;
	unsigned long last_irq;
	spinlock_t window_lock;			/* Guards the Window selection */
};
211 | |||
/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only set with the original DP83840 on older 3c905 boards, so the
   extra code size of a per-interface flag is not worthwhile. */
215 | static char mii_preamble_required = 0; | ||
216 | |||
217 | /* Index of functions. */ | ||
218 | |||
219 | static int tc574_config(struct pcmcia_device *link); | ||
220 | static void tc574_release(struct pcmcia_device *link); | ||
221 | |||
222 | static void mdio_sync(unsigned int ioaddr, int bits); | ||
223 | static int mdio_read(unsigned int ioaddr, int phy_id, int location); | ||
224 | static void mdio_write(unsigned int ioaddr, int phy_id, int location, | ||
225 | int value); | ||
226 | static unsigned short read_eeprom(unsigned int ioaddr, int index); | ||
227 | static void tc574_wait_for_completion(struct net_device *dev, int cmd); | ||
228 | |||
229 | static void tc574_reset(struct net_device *dev); | ||
230 | static void media_check(unsigned long arg); | ||
231 | static int el3_open(struct net_device *dev); | ||
232 | static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | ||
233 | struct net_device *dev); | ||
234 | static irqreturn_t el3_interrupt(int irq, void *dev_id); | ||
235 | static void update_stats(struct net_device *dev); | ||
236 | static struct net_device_stats *el3_get_stats(struct net_device *dev); | ||
237 | static int el3_rx(struct net_device *dev, int worklimit); | ||
238 | static int el3_close(struct net_device *dev); | ||
239 | static void el3_tx_timeout(struct net_device *dev); | ||
240 | static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
241 | static void set_rx_mode(struct net_device *dev); | ||
242 | static void set_multicast_list(struct net_device *dev); | ||
243 | |||
244 | static void tc574_detach(struct pcmcia_device *p_dev); | ||
245 | |||
246 | /* | ||
247 | tc574_attach() creates an "instance" of the driver, allocating | ||
248 | local data structures for one device. The device is registered | ||
249 | with Card Services. | ||
250 | */ | ||
/* Standard net_device callbacks; probe/remove are driven by the PCMCIA
 * core rather than by the network stack. */
static const struct net_device_ops el3_netdev_ops = {
	.ndo_open 		= el3_open,
	.ndo_stop 		= el3_close,
	.ndo_start_xmit		= el3_start_xmit,
	.ndo_tx_timeout 	= el3_tx_timeout,
	.ndo_get_stats		= el3_get_stats,
	.ndo_do_ioctl		= el3_ioctl,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
263 | |||
/* PCMCIA probe: allocate the net device and its private state, describe
 * the I/O window the card needs, then hand off to tc574_config() to claim
 * resources and register the netdev.  Returns 0 or a negative errno.
 */
static int tc574_probe(struct pcmcia_device *link)
{
	struct el3_private *lp;
	struct net_device *dev;

	dev_dbg(&link->dev, "3c574_attach()\n");

	/* Create the PC card device object. */
	dev = alloc_etherdev(sizeof(struct el3_private));
	if (!dev)
		return -ENOMEM;
	lp = netdev_priv(dev);
	/* Cross-link the PCMCIA device and the net device. */
	link->priv = dev;
	lp->p_dev = link;

	spin_lock_init(&lp->window_lock);
	/* Request a 32-byte, 16-bit-wide I/O window plus an IRQ. */
	link->resource[0]->end = 32;
	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
	link->config_flags |= CONF_ENABLE_IRQ;
	link->config_index = 1;

	dev->netdev_ops = &el3_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	return tc574_config(link);
}
290 | |||
/* PCMCIA remove: undo tc574_probe()/tc574_config().  Unregister the net
 * device first so the stack stops using it, release the card's resources,
 * then free the netdev (which embeds struct el3_private).
 */
static void tc574_detach(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	dev_dbg(&link->dev, "3c574_detach()\n");

	unregister_netdev(dev);

	tc574_release(link);

	free_netdev(dev);
} /* tc574_detach */
303 | |||
/* Printable names for the 2-bit Rx:Tx FIFO split field of Wn3_Config. */
static const char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
305 | |||
306 | static int tc574_config(struct pcmcia_device *link) | ||
307 | { | ||
308 | struct net_device *dev = link->priv; | ||
309 | struct el3_private *lp = netdev_priv(dev); | ||
310 | int ret, i, j; | ||
311 | unsigned int ioaddr; | ||
312 | __be16 *phys_addr; | ||
313 | char *cardname; | ||
314 | __u32 config; | ||
315 | u8 *buf; | ||
316 | size_t len; | ||
317 | |||
318 | phys_addr = (__be16 *)dev->dev_addr; | ||
319 | |||
320 | dev_dbg(&link->dev, "3c574_config()\n"); | ||
321 | |||
322 | link->io_lines = 16; | ||
323 | |||
324 | for (i = j = 0; j < 0x400; j += 0x20) { | ||
325 | link->resource[0]->start = j ^ 0x300; | ||
326 | i = pcmcia_request_io(link); | ||
327 | if (i == 0) | ||
328 | break; | ||
329 | } | ||
330 | if (i != 0) | ||
331 | goto failed; | ||
332 | |||
333 | ret = pcmcia_request_irq(link, el3_interrupt); | ||
334 | if (ret) | ||
335 | goto failed; | ||
336 | |||
337 | ret = pcmcia_enable_device(link); | ||
338 | if (ret) | ||
339 | goto failed; | ||
340 | |||
341 | dev->irq = link->irq; | ||
342 | dev->base_addr = link->resource[0]->start; | ||
343 | |||
344 | ioaddr = dev->base_addr; | ||
345 | |||
346 | /* The 3c574 normally uses an EEPROM for configuration info, including | ||
347 | the hardware address. The future products may include a modem chip | ||
348 | and put the address in the CIS. */ | ||
349 | |||
350 | len = pcmcia_get_tuple(link, 0x88, &buf); | ||
351 | if (buf && len >= 6) { | ||
352 | for (i = 0; i < 3; i++) | ||
353 | phys_addr[i] = htons(le16_to_cpu(buf[i * 2])); | ||
354 | kfree(buf); | ||
355 | } else { | ||
356 | kfree(buf); /* 0 < len < 6 */ | ||
357 | EL3WINDOW(0); | ||
358 | for (i = 0; i < 3; i++) | ||
359 | phys_addr[i] = htons(read_eeprom(ioaddr, i + 10)); | ||
360 | if (phys_addr[0] == htons(0x6060)) { | ||
361 | pr_notice("IO port conflict at 0x%03lx-0x%03lx\n", | ||
362 | dev->base_addr, dev->base_addr+15); | ||
363 | goto failed; | ||
364 | } | ||
365 | } | ||
366 | if (link->prod_id[1]) | ||
367 | cardname = link->prod_id[1]; | ||
368 | else | ||
369 | cardname = "3Com 3c574"; | ||
370 | |||
371 | { | ||
372 | u_char mcr; | ||
373 | outw(2<<11, ioaddr + RunnerRdCtrl); | ||
374 | mcr = inb(ioaddr + 2); | ||
375 | outw(0<<11, ioaddr + RunnerRdCtrl); | ||
376 | pr_info(" ASIC rev %d,", mcr>>3); | ||
377 | EL3WINDOW(3); | ||
378 | config = inl(ioaddr + Wn3_Config); | ||
379 | lp->default_media = (config & Xcvr) >> Xcvr_shift; | ||
380 | lp->autoselect = config & Autoselect ? 1 : 0; | ||
381 | } | ||
382 | |||
383 | init_timer(&lp->media); | ||
384 | |||
385 | { | ||
386 | int phy; | ||
387 | |||
388 | /* Roadrunner only: Turn on the MII transceiver */ | ||
389 | outw(0x8040, ioaddr + Wn3_Options); | ||
390 | mdelay(1); | ||
391 | outw(0xc040, ioaddr + Wn3_Options); | ||
392 | tc574_wait_for_completion(dev, TxReset); | ||
393 | tc574_wait_for_completion(dev, RxReset); | ||
394 | mdelay(1); | ||
395 | outw(0x8040, ioaddr + Wn3_Options); | ||
396 | |||
397 | EL3WINDOW(4); | ||
398 | for (phy = 1; phy <= 32; phy++) { | ||
399 | int mii_status; | ||
400 | mdio_sync(ioaddr, 32); | ||
401 | mii_status = mdio_read(ioaddr, phy & 0x1f, 1); | ||
402 | if (mii_status != 0xffff) { | ||
403 | lp->phys = phy & 0x1f; | ||
404 | dev_dbg(&link->dev, " MII transceiver at " | ||
405 | "index %d, status %x.\n", | ||
406 | phy, mii_status); | ||
407 | if ((mii_status & 0x0040) == 0) | ||
408 | mii_preamble_required = 1; | ||
409 | break; | ||
410 | } | ||
411 | } | ||
412 | if (phy > 32) { | ||
413 | pr_notice(" No MII transceivers found!\n"); | ||
414 | goto failed; | ||
415 | } | ||
416 | i = mdio_read(ioaddr, lp->phys, 16) | 0x40; | ||
417 | mdio_write(ioaddr, lp->phys, 16, i); | ||
418 | lp->advertising = mdio_read(ioaddr, lp->phys, 4); | ||
419 | if (full_duplex) { | ||
420 | /* Only advertise the FD media types. */ | ||
421 | lp->advertising &= ~0x02a0; | ||
422 | mdio_write(ioaddr, lp->phys, 4, lp->advertising); | ||
423 | } | ||
424 | } | ||
425 | |||
426 | SET_NETDEV_DEV(dev, &link->dev); | ||
427 | |||
428 | if (register_netdev(dev) != 0) { | ||
429 | pr_notice("register_netdev() failed\n"); | ||
430 | goto failed; | ||
431 | } | ||
432 | |||
433 | netdev_info(dev, "%s at io %#3lx, irq %d, hw_addr %pM\n", | ||
434 | cardname, dev->base_addr, dev->irq, dev->dev_addr); | ||
435 | netdev_info(dev, " %dK FIFO split %s Rx:Tx, %sMII interface.\n", | ||
436 | 8 << config & Ram_size, | ||
437 | ram_split[(config & Ram_split) >> Ram_split_shift], | ||
438 | config & Autoselect ? "autoselect " : ""); | ||
439 | |||
440 | return 0; | ||
441 | |||
442 | failed: | ||
443 | tc574_release(link); | ||
444 | return -ENODEV; | ||
445 | |||
446 | } /* tc574_config */ | ||
447 | |||
/* Release the I/O window, IRQ and configuration claimed in tc574_config(). */
static void tc574_release(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);
}
452 | |||
/* Power management: if the interface is up, detach it from the stack so
 * nothing touches the soon-to-be-powered-down hardware.
 */
static int tc574_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open)
		netif_device_detach(dev);

	return 0;
}
462 | |||
/* Power management: on resume the chip's registers were lost, so fully
 * re-initialize it before re-attaching the interface to the stack.
 */
static int tc574_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open) {
		tc574_reset(dev);
		netif_device_attach(dev);
	}

	return 0;
}
474 | |||
/* Log the main status and diagnostic registers (debug aid for timeouts
 * and adapter failures).  Temporarily selects window 4 for the
 * diagnostics and leaves the chip back in window 1, the normal operating
 * window.  Caller must ensure no concurrent window switching.
 */
static void dump_status(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	EL3WINDOW(1);
	netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x, tx free %04x\n",
		    inw(ioaddr+EL3_STATUS),
		    inw(ioaddr+RxStatus), inb(ioaddr+TxStatus),
		    inw(ioaddr+TxFree));
	EL3WINDOW(4);
	netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
		    inw(ioaddr+0x04), inw(ioaddr+0x06),
		    inw(ioaddr+0x08), inw(ioaddr+0x0a));
	EL3WINDOW(1);
}
489 | |||
490 | /* | ||
491 | Use this for commands that may take time to finish | ||
492 | */ | ||
493 | static void tc574_wait_for_completion(struct net_device *dev, int cmd) | ||
494 | { | ||
495 | int i = 1500; | ||
496 | outw(cmd, dev->base_addr + EL3_CMD); | ||
497 | while (--i > 0) | ||
498 | if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; | ||
499 | if (i == 0) | ||
500 | netdev_notice(dev, "command 0x%04x did not complete!\n", cmd); | ||
501 | } | ||
502 | |||
503 | /* Read a word from the EEPROM using the regular EEPROM access register. | ||
504 | Assume that we are in register window zero. | ||
505 | */ | ||
506 | static unsigned short read_eeprom(unsigned int ioaddr, int index) | ||
507 | { | ||
508 | int timer; | ||
509 | outw(EEPROM_Read + index, ioaddr + Wn0EepromCmd); | ||
510 | /* Pause for at least 162 usec for the read to take place. */ | ||
511 | for (timer = 1620; timer >= 0; timer--) { | ||
512 | if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0) | ||
513 | break; | ||
514 | } | ||
515 | return inw(ioaddr + Wn0EepromData); | ||
516 | } | ||
517 | |||
518 | /* MII transceiver control section. | ||
519 | Read and write the MII registers using software-generated serial | ||
520 | MDIO protocol. See the MII specifications or DP83840A data sheet | ||
521 | for details. | ||
   The maximum data clock rate is 2.5 MHz.  The timing is easily met by the
523 | slow PC card interface. */ | ||
524 | |||
525 | #define MDIO_SHIFT_CLK 0x01 | ||
526 | #define MDIO_DIR_WRITE 0x04 | ||
527 | #define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE) | ||
528 | #define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE) | ||
529 | #define MDIO_DATA_READ 0x02 | ||
530 | #define MDIO_ENB_IN 0x00 | ||
531 | |||
532 | /* Generate the preamble required for initial synchronization and | ||
533 | a few older transceivers. */ | ||
534 | static void mdio_sync(unsigned int ioaddr, int bits) | ||
535 | { | ||
536 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; | ||
537 | |||
538 | /* Establish sync by sending at least 32 logic ones. */ | ||
539 | while (-- bits >= 0) { | ||
540 | outw(MDIO_DATA_WRITE1, mdio_addr); | ||
541 | outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); | ||
542 | } | ||
543 | } | ||
544 | |||
545 | static int mdio_read(unsigned int ioaddr, int phy_id, int location) | ||
546 | { | ||
547 | int i; | ||
548 | int read_cmd = (0xf6 << 10) | (phy_id << 5) | location; | ||
549 | unsigned int retval = 0; | ||
550 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; | ||
551 | |||
552 | if (mii_preamble_required) | ||
553 | mdio_sync(ioaddr, 32); | ||
554 | |||
555 | /* Shift the read command bits out. */ | ||
556 | for (i = 14; i >= 0; i--) { | ||
557 | int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; | ||
558 | outw(dataval, mdio_addr); | ||
559 | outw(dataval | MDIO_SHIFT_CLK, mdio_addr); | ||
560 | } | ||
561 | /* Read the two transition, 16 data, and wire-idle bits. */ | ||
562 | for (i = 19; i > 0; i--) { | ||
563 | outw(MDIO_ENB_IN, mdio_addr); | ||
564 | retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); | ||
565 | outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | ||
566 | } | ||
567 | return (retval>>1) & 0xffff; | ||
568 | } | ||
569 | |||
570 | static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value) | ||
571 | { | ||
572 | int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value; | ||
573 | unsigned int mdio_addr = ioaddr + Wn4_PhysicalMgmt; | ||
574 | int i; | ||
575 | |||
576 | if (mii_preamble_required) | ||
577 | mdio_sync(ioaddr, 32); | ||
578 | |||
579 | /* Shift the command bits out. */ | ||
580 | for (i = 31; i >= 0; i--) { | ||
581 | int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0; | ||
582 | outw(dataval, mdio_addr); | ||
583 | outw(dataval | MDIO_SHIFT_CLK, mdio_addr); | ||
584 | } | ||
585 | /* Leave the interface idle. */ | ||
586 | for (i = 1; i >= 0; i--) { | ||
587 | outw(MDIO_ENB_IN, mdio_addr); | ||
588 | outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | ||
589 | } | ||
590 | } | ||
591 | |||
592 | /* Reset and restore all of the 3c574 registers. */ | ||
/* Full chip re-initialization: global reset, restore station address and
 * configuration, clear statistics, re-program the MII, restore the Rx
 * filter, and finally enable Rx/Tx and interrupts.  Register writes are
 * order-sensitive; window selection is guarded by window_lock, and the
 * lock is dropped around the long reset/MDIO waits.
 */
static void tc574_reset(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	int i;
	unsigned int ioaddr = dev->base_addr;
	unsigned long flags;

	/* Global reset; the 0x10 option presumably masks part of the reset
	   — confirm against the 3c574/Boomerang documentation. */
	tc574_wait_for_completion(dev, TotalReset|0x10);

	spin_lock_irqsave(&lp->window_lock, flags);
	/* Clear any transactions in progress. */
	outw(0, ioaddr + RunnerWrCtrl);
	outw(0, ioaddr + RunnerRdCtrl);

	/* Set the station address and mask. */
	EL3WINDOW(2);
	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);
	for (; i < 12; i+=2)
		outw(0, ioaddr + i);

	/* Reset config options */
	EL3WINDOW(3);
	outb((dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
	outl((lp->autoselect ? 0x01000000 : 0) | 0x0062001b,
	     ioaddr + Wn3_Config);
	/* Roadrunner only: Turn on the MII transceiver. */
	outw(0x8040, ioaddr + Wn3_Options);
	mdelay(1);
	outw(0xc040, ioaddr + Wn3_Options);
	EL3WINDOW(1);
	spin_unlock_irqrestore(&lp->window_lock, flags);

	/* Lock dropped: these waits can take a while. */
	tc574_wait_for_completion(dev, TxReset);
	tc574_wait_for_completion(dev, RxReset);
	mdelay(1);
	spin_lock_irqsave(&lp->window_lock, flags);
	EL3WINDOW(3);
	outw(0x8040, ioaddr + Wn3_Options);

	/* Switch to the stats window, and clear all stats by reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 10; i++)
		inb(ioaddr + i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);
	EL3WINDOW(4);
	inb(ioaddr + 12);
	inb(ioaddr + 13);

	/* .. enable any extra statistics bits.. */
	outw(0x0040, ioaddr + Wn4_NetDiag);

	EL3WINDOW(1);
	spin_unlock_irqrestore(&lp->window_lock, flags);

	/* .. re-sync MII and re-fill what NWay is advertising. */
	mdio_sync(ioaddr, 32);
	mdio_write(ioaddr, lp->phys, 4, lp->advertising);
	if (!auto_polarity) {
		/* works for TDK 78Q2120 series MII's */
		i = mdio_read(ioaddr, lp->phys, 16) | 0x20;
		mdio_write(ioaddr, lp->phys, 16, i);
	}

	spin_lock_irqsave(&lp->window_lock, flags);
	/* Switch to register set 1 for normal use, just for TxFree. */
	set_rx_mode(dev);
	spin_unlock_irqrestore(&lp->window_lock, flags);
	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
	     ioaddr + EL3_CMD);
	outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
	     | AdapterFailure | RxEarly, ioaddr + EL3_CMD);
}
674 | |||
/* ndo_open: bring the interface up.  Fails if the card has been ejected.
 * Bumps link->open so suspend/resume know the device is in use, resets
 * the chip, and arms the once-a-second media/missed-interrupt timer.
 */
static int el3_open(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	struct pcmcia_device *link = lp->p_dev;

	if (!pcmcia_dev_present(link))
		return -ENODEV;

	link->open++;
	netif_start_queue(dev);

	tc574_reset(dev);
	/* Poll media_check() every second; it also catches lost IRQs. */
	lp->media.function = media_check;
	lp->media.data = (unsigned long) dev;
	lp->media.expires = jiffies + HZ;
	add_timer(&lp->media);

	dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
		dev->name, inw(dev->base_addr + EL3_STATUS));

	return 0;
}
697 | |||
/* ndo_tx_timeout: the watchdog decided a transmit hung.  Dump diagnostic
 * state, reset and re-enable the transmitter, and let the stack queue
 * packets again.
 */
static void el3_tx_timeout(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;

	netdev_notice(dev, "Transmit timed out!\n");
	dump_status(dev);
	dev->stats.tx_errors++;
	dev->trans_start = jiffies; /* prevent tx timeout */
	/* Issue TX_RESET and TX_START commands. */
	tc574_wait_for_completion(dev, TxReset);
	outw(TxEnable, ioaddr + EL3_CMD);
	netif_wake_queue(dev);
}
711 | |||
712 | static void pop_tx_status(struct net_device *dev) | ||
713 | { | ||
714 | unsigned int ioaddr = dev->base_addr; | ||
715 | int i; | ||
716 | |||
717 | /* Clear the Tx status stack. */ | ||
718 | for (i = 32; i > 0; i--) { | ||
719 | u_char tx_status = inb(ioaddr + TxStatus); | ||
720 | if (!(tx_status & 0x84)) | ||
721 | break; | ||
722 | /* reset transmitter on jabber error or underrun */ | ||
723 | if (tx_status & 0x30) | ||
724 | tc574_wait_for_completion(dev, TxReset); | ||
725 | if (tx_status & 0x38) { | ||
726 | pr_debug("%s: transmit error: status 0x%02x\n", | ||
727 | dev->name, tx_status); | ||
728 | outw(TxEnable, ioaddr + EL3_CMD); | ||
729 | dev->stats.tx_aborted_errors++; | ||
730 | } | ||
731 | outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */ | ||
732 | } | ||
733 | } | ||
734 | |||
/* ndo_start_xmit: PIO the packet into the Tx FIFO under window_lock
 * (the FIFO write must not interleave with a window switch).  Stops the
 * queue when FIFO space runs low and asks the chip for a TxAvailable
 * interrupt once room for a max-size packet is back.  Always consumes
 * the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	struct el3_private *lp = netdev_priv(dev);
	unsigned long flags;

	pr_debug("%s: el3_start_xmit(length = %ld) called, "
		 "status %4.4x.\n", dev->name, (long)skb->len,
		 inw(ioaddr + EL3_STATUS));

	spin_lock_irqsave(&lp->window_lock, flags);

	dev->stats.tx_bytes += skb->len;

	/* Put out the doubleword header... */
	outw(skb->len, ioaddr + TX_FIFO);
	outw(0, ioaddr + TX_FIFO);
	/* ... and the packet rounded to a doubleword. */
	outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);

	/* TxFree appears only in Window 1, not offset 0x1c. */
	if (inw(ioaddr + TxFree) <= 1536) {
		netif_stop_queue(dev);
		/* Interrupt us when the FIFO has room for max-sized packet.
		   The threshold is in units of dwords. */
		outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
	}

	pop_tx_status(dev);
	spin_unlock_irqrestore(&lp->window_lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
769 | |||
770 | /* The EL3 interrupt handler. */ | ||
/* IRQ handler: service Rx/Tx/stats/failure events, bounded by
 * max_interrupt_work per invocation.  Takes window_lock because the
 * uncommon-event paths switch register windows.  Returns IRQ_HANDLED if
 * at least one event from this card was processed.
 */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr;
	unsigned status;
	int work_budget = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	ioaddr = dev->base_addr;

	pr_debug("%s: interrupt, status %4.4x.\n",
		 dev->name, inw(ioaddr + EL3_STATUS));

	spin_lock(&lp->window_lock);

	while ((status = inw(ioaddr + EL3_STATUS)) &
	       (IntLatch | RxComplete | RxEarly | StatsFull)) {
		/* The 0xe000 status bits presumably encode the selected
		   register window; anything other than 0x2000 (window 1)
		   means a dead/ejected card — confirm against the chip
		   documentation. */
		if (!netif_device_present(dev) ||
		    ((status & 0xe000) != 0x2000)) {
			pr_debug("%s: Interrupt from dead card\n", dev->name);
			break;
		}

		handled = 1;

		if (status & RxComplete)
			work_budget = el3_rx(dev, work_budget);

		if (status & TxAvailable) {
			pr_debug("  TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue(dev);
		}

		if (status & TxComplete)
			pop_tx_status(dev);

		if (status & (AdapterFailure | RxEarly | StatsFull)) {
			/* Handle all uncommon interrupts. */
			if (status & StatsFull)
				update_stats(dev);
			if (status & RxEarly) {
				work_budget = el3_rx(dev, work_budget);
				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
			}
			if (status & AdapterFailure) {
				u16 fifo_diag;
				EL3WINDOW(4);
				fifo_diag = inw(ioaddr + Wn4_FIFODiag);
				EL3WINDOW(1);
				netdev_notice(dev, "adapter failure, FIFO diagnostic register %04x\n",
					      fifo_diag);
				if (fifo_diag & 0x0400) {
					/* Tx overrun */
					tc574_wait_for_completion(dev, TxReset);
					outw(TxEnable, ioaddr + EL3_CMD);
				}
				if (fifo_diag & 0x2000) {
					/* Rx underrun */
					tc574_wait_for_completion(dev, RxReset);
					set_rx_mode(dev);
					outw(RxEnable, ioaddr + EL3_CMD);
				}
				outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
			}
		}

		if (--work_budget < 0) {
			pr_debug("%s: Too much work in interrupt, "
				 "status %4.4x.\n", dev->name, status);
			/* Clear all interrupts */
			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	}

	pr_debug("%s: exiting interrupt, status %4.4x.\n",
		 dev->name, inw(ioaddr + EL3_STATUS));

	spin_unlock(&lp->window_lock);
	return IRQ_RETVAL(handled);
}
859 | |||
860 | /* | ||
861 | This timer serves two purposes: to check for missed interrupts | ||
862 | (and as a last resort, poll the NIC for events), and to monitor | ||
863 | the MII, reporting changes in cable status. | ||
864 | */ | ||
static void media_check(unsigned long arg)
{
	struct net_device *dev = (struct net_device *) arg;
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr = dev->base_addr;
	unsigned long flags;
	unsigned short /* cable, */ media, partner;

	/* Card may have been ejected/suspended; just re-arm the timer. */
	if (!netif_device_present(dev))
		goto reschedule;

	/* Check for pending interrupt with expired latency timer: with
	   this, we can limp along even if the interrupt is blocked */
	if ((inw(ioaddr + EL3_STATUS) & IntLatch) && (inb(ioaddr + Timer) == 0xff)) {
		if (!lp->fast_poll)
			netdev_info(dev, "interrupt(s) dropped!\n");

		/* Run the ISR by hand with local interrupts masked. */
		local_irq_save(flags);
		el3_interrupt(dev->irq, dev);
		local_irq_restore(flags);

		/* Poll every 20ms for roughly the next second (see below). */
		lp->fast_poll = HZ;
	}
	if (lp->fast_poll) {
		lp->fast_poll--;
		lp->media.expires = jiffies + 2*HZ/100;
		add_timer(&lp->media);
		return;
	}

	/* Read MII status (reg 1) and link-partner ability (reg 5);
	   the MDIO registers live in window 4, so hold the window lock. */
	spin_lock_irqsave(&lp->window_lock, flags);
	EL3WINDOW(4);
	media = mdio_read(ioaddr, lp->phys, 1);
	partner = mdio_read(ioaddr, lp->phys, 5);
	EL3WINDOW(1);

	/* Report only *changes* in media status since the last poll. */
	if (media != lp->media_status) {
		if ((media ^ lp->media_status) & 0x0004)
			netdev_info(dev, "%s link beat\n",
				    (lp->media_status & 0x0004) ? "lost" : "found");
		if ((media ^ lp->media_status) & 0x0020) {
			lp->partner = 0;
			if (lp->media_status & 0x0020) {
				netdev_info(dev, "autonegotiation restarted\n");
			} else if (partner) {
				/* Keep only modes we also advertised. */
				partner &= lp->advertising;
				lp->partner = partner;
				netdev_info(dev, "autonegotiation complete: "
					    "%dbaseT-%cD selected\n",
					    (partner & 0x0180) ? 100 : 10,
					    (partner & 0x0140) ? 'F' : 'H');
			} else {
				netdev_info(dev, "link partner did not autonegotiate\n");
			}

			/* Set full-duplex (0x20) and large-frame (0x40) bits
			   in the MAC control register to match the result. */
			EL3WINDOW(3);
			outb((partner & 0x0140 ? 0x20 : 0) |
			     (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
			EL3WINDOW(1);

		}
		if (media & 0x0010)
			netdev_info(dev, "remote fault detected\n");
		if (media & 0x0002)
			netdev_info(dev, "jabber detected\n");
		lp->media_status = media;
	}
	spin_unlock_irqrestore(&lp->window_lock, flags);

reschedule:
	/* Normal cadence: check the media once per second. */
	lp->media.expires = jiffies + HZ;
	add_timer(&lp->media);
}
938 | |||
939 | static struct net_device_stats *el3_get_stats(struct net_device *dev) | ||
940 | { | ||
941 | struct el3_private *lp = netdev_priv(dev); | ||
942 | |||
943 | if (netif_device_present(dev)) { | ||
944 | unsigned long flags; | ||
945 | spin_lock_irqsave(&lp->window_lock, flags); | ||
946 | update_stats(dev); | ||
947 | spin_unlock_irqrestore(&lp->window_lock, flags); | ||
948 | } | ||
949 | return &dev->stats; | ||
950 | } | ||
951 | |||
952 | /* Update statistics. | ||
953 | Surprisingly this need not be run single-threaded, but it effectively is. | ||
954 | The counters clear when read, so the adds must merely be atomic. | ||
955 | */ | ||
956 | static void update_stats(struct net_device *dev) | ||
957 | { | ||
958 | unsigned int ioaddr = dev->base_addr; | ||
959 | u8 rx, tx, up; | ||
960 | |||
961 | pr_debug("%s: updating the statistics.\n", dev->name); | ||
962 | |||
963 | if (inw(ioaddr+EL3_STATUS) == 0xffff) /* No card. */ | ||
964 | return; | ||
965 | |||
966 | /* Unlike the 3c509 we need not turn off stats updates while reading. */ | ||
967 | /* Switch to the stats window, and read everything. */ | ||
968 | EL3WINDOW(6); | ||
969 | dev->stats.tx_carrier_errors += inb(ioaddr + 0); | ||
970 | dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); | ||
971 | /* Multiple collisions. */ inb(ioaddr + 2); | ||
972 | dev->stats.collisions += inb(ioaddr + 3); | ||
973 | dev->stats.tx_window_errors += inb(ioaddr + 4); | ||
974 | dev->stats.rx_fifo_errors += inb(ioaddr + 5); | ||
975 | dev->stats.tx_packets += inb(ioaddr + 6); | ||
976 | up = inb(ioaddr + 9); | ||
977 | dev->stats.tx_packets += (up&0x30) << 4; | ||
978 | /* Rx packets */ inb(ioaddr + 7); | ||
979 | /* Tx deferrals */ inb(ioaddr + 8); | ||
980 | rx = inw(ioaddr + 10); | ||
981 | tx = inw(ioaddr + 12); | ||
982 | |||
983 | EL3WINDOW(4); | ||
984 | /* BadSSD */ inb(ioaddr + 12); | ||
985 | up = inb(ioaddr + 13); | ||
986 | |||
987 | EL3WINDOW(1); | ||
988 | } | ||
989 | |||
/* Drain received packets from the Rx FIFO.
 * Returns the remaining work budget; each packet (good or bad) costs one
 * unit. Called from the interrupt handler with the window lock held.
 */
static int el3_rx(struct net_device *dev, int worklimit)
{
	unsigned int ioaddr = dev->base_addr;
	short rx_status;

	pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
		 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
	/* Bit 15 of RxStatus set means the FIFO is still filling/empty. */
	while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
	       worklimit > 0) {
		worklimit--;
		if (rx_status & 0x4000) { /* Error, update stats. */
			short error = rx_status & 0x3800;
			dev->stats.rx_errors++;
			switch (error) {
			case 0x0000: dev->stats.rx_over_errors++; break;
			case 0x0800: dev->stats.rx_length_errors++; break;
			case 0x1000: dev->stats.rx_frame_errors++; break;
			case 0x1800: dev->stats.rx_length_errors++; break;
			case 0x2000: dev->stats.rx_frame_errors++; break;
			case 0x2800: dev->stats.rx_crc_errors++; break;
			}
		} else {
			short pkt_len = rx_status & 0x7ff;
			struct sk_buff *skb;

			/* +5 = 2 bytes of IP-alignment reserve plus up to 3
			   bytes of insl() rounding below. */
			skb = dev_alloc_skb(pkt_len+5);

			pr_debug(" Receiving packet size %d status %4.4x.\n",
				 pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);
				/* Copy out whole 32-bit words; may write up
				   to 3 bytes past pkt_len (covered by the
				   over-allocation above). */
				insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
				     ((pkt_len+3)>>2));
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			} else {
				pr_debug("%s: couldn't allocate a sk_buff of"
					 " size %d.\n", dev->name, pkt_len);
				dev->stats.rx_dropped++;
			}
		}
		/* Pop this packet (or error entry) off the Rx FIFO. */
		tc574_wait_for_completion(dev, RxDiscard);
	}

	return worklimit;
}
1038 | |||
/* Provide ioctl() calls to examine the MII xcvr state. */
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr = dev->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	int phy = lp->phys & 0x1f;

	pr_debug("%s: In ioct(%-.6s, %#4.4x) %4.4x %4.4x %4.4x %4.4x.\n",
		 dev->name, rq->ifr_ifrn.ifrn_name, cmd,
		 data->phy_id, data->reg_num, data->val_in, data->val_out);

	switch(cmd) {
	case SIOCGMIIPHY: 		/* Get the address of the PHY in use. */
		data->phy_id = phy;
		/* fall through — also return the requested register. */
	case SIOCGMIIREG: 		/* Read the specified MII register. */
		{
			int saved_window;
			unsigned long flags;

			/* MDIO lives in window 4; remember the caller's
			   window (status bits 15:13) and restore it after. */
			spin_lock_irqsave(&lp->window_lock, flags);
			saved_window = inw(ioaddr + EL3_CMD) >> 13;
			EL3WINDOW(4);
			data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f,
						  data->reg_num & 0x1f);
			EL3WINDOW(saved_window);
			spin_unlock_irqrestore(&lp->window_lock, flags);
			return 0;
		}
	case SIOCSMIIREG: 		/* Write the specified MII register */
		{
			int saved_window;
			unsigned long flags;

			spin_lock_irqsave(&lp->window_lock, flags);
			saved_window = inw(ioaddr + EL3_CMD) >> 13;
			EL3WINDOW(4);
			mdio_write(ioaddr, data->phy_id & 0x1f,
				   data->reg_num & 0x1f, data->val_in);
			EL3WINDOW(saved_window);
			spin_unlock_irqrestore(&lp->window_lock, flags);
			return 0;
		}
	default:
		return -EOPNOTSUPP;
	}
}
1086 | |||
1087 | /* The Odie chip has a 64 bin multicast filter, but the bit layout is not | ||
1088 | documented. Until it is we revert to receiving all multicast frames when | ||
1089 | any multicast reception is desired. | ||
1090 | Note: My other drivers emit a log message whenever promiscuous mode is | ||
1091 | entered to help detect password sniffers. This is less desirable on | ||
1092 | typical PC card machines, so we omit the message. | ||
1093 | */ | ||
1094 | |||
1095 | static void set_rx_mode(struct net_device *dev) | ||
1096 | { | ||
1097 | unsigned int ioaddr = dev->base_addr; | ||
1098 | |||
1099 | if (dev->flags & IFF_PROMISC) | ||
1100 | outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm, | ||
1101 | ioaddr + EL3_CMD); | ||
1102 | else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) | ||
1103 | outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD); | ||
1104 | else | ||
1105 | outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); | ||
1106 | } | ||
1107 | |||
/* net_device multicast-list hook: apply the Rx filter under the
 * window lock, since set_rx_mode() issues a card command.
 */
static void set_multicast_list(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&lp->window_lock, flags);
	set_rx_mode(dev);
	spin_unlock_irqrestore(&lp->window_lock, flags);
}
1117 | |||
/* Shut the interface down: stop stats/Rx/Tx, harvest the final counter
 * values, mask interrupts, and kill the media-check timer.
 */
static int el3_close(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	struct el3_private *lp = netdev_priv(dev);
	struct pcmcia_device *link = lp->p_dev;

	dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);

	/* Only touch the hardware if the card is still plugged in. */
	if (pcmcia_dev_present(link)) {
		unsigned long flags;

		/* Turn off statistics ASAP.  We update lp->stats below. */
		outw(StatsDisable, ioaddr + EL3_CMD);

		/* Disable the receiver and transmitter. */
		outw(RxDisable, ioaddr + EL3_CMD);
		outw(TxDisable, ioaddr + EL3_CMD);

		/* Note: Switching to window 0 may disable the IRQ. */
		EL3WINDOW(0);
		spin_lock_irqsave(&lp->window_lock, flags);
		update_stats(dev);
		spin_unlock_irqrestore(&lp->window_lock, flags);

		/* force interrupts off */
		outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
	}

	link->open--;
	netif_stop_queue(dev);
	del_timer_sync(&lp->media);

	return 0;
}
1152 | |||
/* PCMCIA manufacturer/card IDs this driver binds to.  The second entry
 * is a multifunction card that references an external CIS image.
 */
static const struct pcmcia_device_id tc574_ids[] = {
	PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0574),
	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0556, "cis/3CCFEM556.cis"),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, tc574_ids);

/* PCMCIA driver glue: probe/remove plus power-management callbacks. */
static struct pcmcia_driver tc574_driver = {
	.owner		= THIS_MODULE,
	.name		= "3c574_cs",
	.probe		= tc574_probe,
	.remove		= tc574_detach,
	.id_table	= tc574_ids,
	.suspend	= tc574_suspend,
	.resume		= tc574_resume,
};
1169 | |||
/* Module load: register the PCMCIA driver with the core. */
static int __init init_tc574(void)
{
	return pcmcia_register_driver(&tc574_driver);
}

/* Module unload: unregister; the core detaches any bound devices. */
static void __exit exit_tc574(void)
{
	pcmcia_unregister_driver(&tc574_driver);
}

module_init(init_tc574);
module_exit(exit_tc574);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c new file mode 100644 index 000000000000..4a1a35809807 --- /dev/null +++ b/drivers/net/ethernet/3com/3c589_cs.c | |||
@@ -0,0 +1,943 @@ | |||
1 | /*====================================================================== | ||
2 | |||
3 | A PCMCIA ethernet driver for the 3com 3c589 card. | ||
4 | |||
5 | Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net | ||
6 | |||
7 | 3c589_cs.c 1.162 2001/10/13 00:08:50 | ||
8 | |||
9 | The network driver code is based on Donald Becker's 3c589 code: | ||
10 | |||
11 | Written 1994 by Donald Becker. | ||
12 | Copyright 1993 United States Government as represented by the | ||
13 | Director, National Security Agency. This software may be used and | ||
14 | distributed according to the terms of the GNU General Public License, | ||
15 | incorporated herein by reference. | ||
16 | Donald Becker may be reached at becker@scyld.com | ||
17 | |||
18 | Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
19 | |||
20 | ======================================================================*/ | ||
21 | |||
22 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
23 | |||
24 | #define DRV_NAME "3c589_cs" | ||
25 | #define DRV_VERSION "1.162-ac" | ||
26 | |||
27 | #include <linux/module.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/string.h> | ||
33 | #include <linux/timer.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/in.h> | ||
36 | #include <linux/delay.h> | ||
37 | #include <linux/ethtool.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/etherdevice.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <linux/if_arp.h> | ||
42 | #include <linux/ioport.h> | ||
43 | #include <linux/bitops.h> | ||
44 | #include <linux/jiffies.h> | ||
45 | |||
46 | #include <pcmcia/cistpl.h> | ||
47 | #include <pcmcia/cisreg.h> | ||
48 | #include <pcmcia/ciscode.h> | ||
49 | #include <pcmcia/ds.h> | ||
50 | |||
51 | #include <asm/uaccess.h> | ||
52 | #include <asm/io.h> | ||
53 | #include <asm/system.h> | ||
54 | |||
55 | /* To minimize the size of the driver source I only define operating | ||
56 | constants if they are used several times. You'll need the manual | ||
57 | if you want to understand driver details. */ | ||
58 | /* Offsets from base I/O address. */ | ||
59 | #define EL3_DATA 0x00 | ||
60 | #define EL3_TIMER 0x0a | ||
61 | #define EL3_CMD 0x0e | ||
62 | #define EL3_STATUS 0x0e | ||
63 | |||
64 | #define EEPROM_READ 0x0080 | ||
65 | #define EEPROM_BUSY 0x8000 | ||
66 | |||
67 | #define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD) | ||
68 | |||
69 | /* The top five bits written to EL3_CMD are a command, the lower | ||
70 | 11 bits are the parameter, if applicable. */ | ||
71 | enum c509cmd { | ||
72 | TotalReset = 0<<11, | ||
73 | SelectWindow = 1<<11, | ||
74 | StartCoax = 2<<11, | ||
75 | RxDisable = 3<<11, | ||
76 | RxEnable = 4<<11, | ||
77 | RxReset = 5<<11, | ||
78 | RxDiscard = 8<<11, | ||
79 | TxEnable = 9<<11, | ||
80 | TxDisable = 10<<11, | ||
81 | TxReset = 11<<11, | ||
82 | FakeIntr = 12<<11, | ||
83 | AckIntr = 13<<11, | ||
84 | SetIntrEnb = 14<<11, | ||
85 | SetStatusEnb = 15<<11, | ||
86 | SetRxFilter = 16<<11, | ||
87 | SetRxThreshold = 17<<11, | ||
88 | SetTxThreshold = 18<<11, | ||
89 | SetTxStart = 19<<11, | ||
90 | StatsEnable = 21<<11, | ||
91 | StatsDisable = 22<<11, | ||
92 | StopCoax = 23<<11 | ||
93 | }; | ||
94 | |||
95 | enum c509status { | ||
96 | IntLatch = 0x0001, | ||
97 | AdapterFailure = 0x0002, | ||
98 | TxComplete = 0x0004, | ||
99 | TxAvailable = 0x0008, | ||
100 | RxComplete = 0x0010, | ||
101 | RxEarly = 0x0020, | ||
102 | IntReq = 0x0040, | ||
103 | StatsFull = 0x0080, | ||
104 | CmdBusy = 0x1000 | ||
105 | }; | ||
106 | |||
107 | /* The SetRxFilter command accepts the following classes: */ | ||
108 | enum RxFilter { | ||
109 | RxStation = 1, | ||
110 | RxMulticast = 2, | ||
111 | RxBroadcast = 4, | ||
112 | RxProm = 8 | ||
113 | }; | ||
114 | |||
115 | /* Register window 1 offsets, the window used in normal operation. */ | ||
116 | #define TX_FIFO 0x00 | ||
117 | #define RX_FIFO 0x00 | ||
118 | #define RX_STATUS 0x08 | ||
119 | #define TX_STATUS 0x0B | ||
120 | #define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ | ||
121 | |||
122 | #define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ | ||
123 | #define WN4_MEDIA 0x0A /* Window 4: Various transcvr/media bits. */ | ||
124 | #define MEDIA_TP 0x00C0 /* Enable link beat and jabber for 10baseT. */ | ||
125 | #define MEDIA_LED 0x0001 /* Enable link light on 3C589E cards. */ | ||
126 | |||
127 | /* Time in jiffies before concluding Tx hung */ | ||
128 | #define TX_TIMEOUT ((400*HZ)/1000) | ||
129 | |||
/* Per-device private state for the 3c589. */
struct el3_private {
	struct pcmcia_device	*p_dev;		/* owning PCMCIA device */
	/* For transceiver monitoring */
	struct timer_list	media;		/* periodic media-check timer */
	u16 media_status;			/* seeded by tc589_set_xcvr() */
	u16 fast_poll;				/* presumably a fast-poll countdown after dropped IRQs — body not visible here, confirm */
	unsigned long last_irq;			/* NOTE(review): jiffies of last IRQ? usage not visible in this chunk */
	spinlock_t lock;			/* serializes hardware access */
};

/* Human-readable names for dev->if_port values 0-3. */
static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
141 | |||
142 | /*====================================================================*/ | ||
143 | |||
144 | /* Module parameters */ | ||
145 | |||
146 | MODULE_AUTHOR("David Hinds <dahinds@users.sourceforge.net>"); | ||
147 | MODULE_DESCRIPTION("3Com 3c589 series PCMCIA ethernet driver"); | ||
148 | MODULE_LICENSE("GPL"); | ||
149 | |||
150 | #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0) | ||
151 | |||
152 | /* Special hook for setting if_port when module is loaded */ | ||
153 | INT_MODULE_PARM(if_port, 0); | ||
154 | |||
155 | |||
156 | /*====================================================================*/ | ||
157 | |||
158 | static int tc589_config(struct pcmcia_device *link); | ||
159 | static void tc589_release(struct pcmcia_device *link); | ||
160 | |||
161 | static u16 read_eeprom(unsigned int ioaddr, int index); | ||
162 | static void tc589_reset(struct net_device *dev); | ||
163 | static void media_check(unsigned long arg); | ||
164 | static int el3_config(struct net_device *dev, struct ifmap *map); | ||
165 | static int el3_open(struct net_device *dev); | ||
166 | static netdev_tx_t el3_start_xmit(struct sk_buff *skb, | ||
167 | struct net_device *dev); | ||
168 | static irqreturn_t el3_interrupt(int irq, void *dev_id); | ||
169 | static void update_stats(struct net_device *dev); | ||
170 | static struct net_device_stats *el3_get_stats(struct net_device *dev); | ||
171 | static int el3_rx(struct net_device *dev); | ||
172 | static int el3_close(struct net_device *dev); | ||
173 | static void el3_tx_timeout(struct net_device *dev); | ||
174 | static void set_rx_mode(struct net_device *dev); | ||
175 | static void set_multicast_list(struct net_device *dev); | ||
176 | static const struct ethtool_ops netdev_ethtool_ops; | ||
177 | |||
178 | static void tc589_detach(struct pcmcia_device *p_dev); | ||
179 | |||
/* Standard net_device callbacks; handlers are defined later in the file. */
static const struct net_device_ops el3_netdev_ops = {
	.ndo_open 		= el3_open,
	.ndo_stop 		= el3_close,
	.ndo_start_xmit		= el3_start_xmit,
	.ndo_tx_timeout 	= el3_tx_timeout,
	.ndo_set_config		= el3_config,
	.ndo_get_stats		= el3_get_stats,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
192 | |||
/* PCMCIA probe: allocate the net_device, describe the I/O window and
 * IRQ requirements, then hand off to tc589_config() for the real setup.
 * Returns 0 on success or a negative errno.
 */
static int tc589_probe(struct pcmcia_device *link)
{
	struct el3_private *lp;
	struct net_device *dev;

	dev_dbg(&link->dev, "3c589_attach()\n");

	/* Create new ethernet device */
	dev = alloc_etherdev(sizeof(struct el3_private));
	if (!dev)
		return -ENOMEM;
	lp = netdev_priv(dev);
	link->priv = dev;
	lp->p_dev = link;

	spin_lock_init(&lp->lock);
	/* The card decodes a 16-byte, 16-bit-wide I/O window. */
	link->resource[0]->end = 16;
	link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;

	link->config_flags |= CONF_ENABLE_IRQ;
	link->config_index = 1;

	dev->netdev_ops = &el3_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);

	return tc589_config(link);
}
222 | |||
/* PCMCIA remove: unregister the netdev, release card resources,
 * then free the device (which also frees the private data).
 */
static void tc589_detach(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	dev_dbg(&link->dev, "3c589_detach\n");

	unregister_netdev(dev);

	tc589_release(link);

	free_netdev(dev);
} /* tc589_detach */
235 | |||
/* Configure the card: claim an I/O window and IRQ, recover the MAC
 * address (from the CIS on a 3c562, else from the on-board EEPROM),
 * initialize the resource registers, and register the netdev.
 * Returns 0 on success, -ENODEV on any failure (after releasing).
 */
static int tc589_config(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;
	__be16 *phys_addr;
	int ret, i, j, multi = 0, fifo;
	unsigned int ioaddr;
	static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
	u8 *buf;
	size_t len;

	dev_dbg(&link->dev, "3c589_config\n");

	phys_addr = (__be16 *)dev->dev_addr;
	/* Is this a 3c562? */
	if (link->manf_id != MANFID_3COM)
		dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
	multi = (link->card_id == PRODID_3COM_3C562);

	link->io_lines = 16;

	/* For the 3c562, the base address must be xx00-xx7f */
	/* Note: 'i' doubles as the request status; 0 means success. */
	for (i = j = 0; j < 0x400; j += 0x10) {
		if (multi && (j & 0x80)) continue;
		link->resource[0]->start = j ^ 0x300;
		i = pcmcia_request_io(link);
		if (i == 0)
			break;
	}
	if (i != 0)
		goto failed;

	ret = pcmcia_request_irq(link, el3_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	dev->irq = link->irq;
	dev->base_addr = link->resource[0]->start;
	ioaddr = dev->base_addr;
	EL3WINDOW(0);

	/* The 3c589 has an extra EEPROM for configuration info, including
	   the hardware address.  The 3c562 puts the address in the CIS. */
	len = pcmcia_get_tuple(link, 0x88, &buf);
	if (buf && len >= 6) {
		for (i = 0; i < 3; i++)
			/* NOTE(review): buf is u8*, so buf[i*2] reads a
			   single byte yet is passed through le16_to_cpu();
			   looks like it should read a 16-bit word — confirm
			   against the CIS tuple 0x88 layout. */
			phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
		kfree(buf);
	} else {
		kfree(buf); /* 0 < len < 6 */
		for (i = 0; i < 3; i++)
			phys_addr[i] = htons(read_eeprom(ioaddr, i));
		/* 0x6060 in word 0 indicates a failed/aliased read. */
		if (phys_addr[0] == htons(0x6060)) {
			dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
				dev->base_addr, dev->base_addr+15);
			goto failed;
		}
	}

	/* The address and resource configuration register aren't loaded from
	   the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
	outw(0x3f00, ioaddr + 8);
	fifo = inl(ioaddr);

	/* The if_port symbol can be set when the module is loaded */
	if ((if_port >= 0) && (if_port <= 3))
		dev->if_port = if_port;
	else
		dev_err(&link->dev, "invalid if_port requested\n");

	SET_NETDEV_DEV(dev, &link->dev);

	if (register_netdev(dev) != 0) {
		dev_err(&link->dev, "register_netdev() failed\n");
		goto failed;
	}

	netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
		    (multi ? "562" : "589"), dev->base_addr, dev->irq,
		    dev->dev_addr);
	netdev_info(dev, "  %dK FIFO split %s Rx:Tx, %s xcvr\n",
		    (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
		    if_names[dev->if_port]);
	return 0;

failed:
	tc589_release(link);
	return -ENODEV;
} /* tc589_config */
328 | |||
/* Release all PCMCIA resources claimed in tc589_config(). */
static void tc589_release(struct pcmcia_device *link)
{
	pcmcia_disable_device(link);
}

/* Suspend: detach the netdev from the stack if the interface is open. */
static int tc589_suspend(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open)
		netif_device_detach(dev);

	return 0;
}

/* Resume: re-initialize the hardware and re-attach the netdev. */
static int tc589_resume(struct pcmcia_device *link)
{
	struct net_device *dev = link->priv;

	if (link->open) {
		tc589_reset(dev);
		netif_device_attach(dev);
	}

	return 0;
}
355 | |||
356 | /*====================================================================*/ | ||
357 | |||
358 | /* | ||
359 | Use this for commands that may take time to finish | ||
360 | */ | ||
361 | static void tc589_wait_for_completion(struct net_device *dev, int cmd) | ||
362 | { | ||
363 | int i = 100; | ||
364 | outw(cmd, dev->base_addr + EL3_CMD); | ||
365 | while (--i > 0) | ||
366 | if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; | ||
367 | if (i == 0) | ||
368 | netdev_warn(dev, "command 0x%04x did not complete!\n", cmd); | ||
369 | } | ||
370 | |||
371 | /* | ||
372 | Read a word from the EEPROM using the regular EEPROM access register. | ||
373 | Assume that we are in register window zero. | ||
374 | */ | ||
375 | static u16 read_eeprom(unsigned int ioaddr, int index) | ||
376 | { | ||
377 | int i; | ||
378 | outw(EEPROM_READ + index, ioaddr + 10); | ||
379 | /* Reading the eeprom takes 162 us */ | ||
380 | for (i = 1620; i >= 0; i--) | ||
381 | if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0) | ||
382 | break; | ||
383 | return inw(ioaddr + 12); | ||
384 | } | ||
385 | |||
386 | /* | ||
387 | Set transceiver type, perhaps to something other than what the user | ||
388 | specified in dev->if_port. | ||
389 | */ | ||
/*
  Set transceiver type, perhaps to something other than what the user
  specified in dev->if_port.
*/
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr = dev->base_addr;

	/* Address-configuration register (window 0, offset 6): select the
	   transceiver in the top two bits. */
	EL3WINDOW(0);
	switch (if_port) {
	case 0: case 1: outw(0, ioaddr + 6); break;
	case 2: outw(3<<14, ioaddr + 6); break;
	case 3: outw(1<<14, ioaddr + 6); break;
	}
	/* On PCMCIA, this just turns on the LED */
	outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
	/* 10baseT interface, enable link beat and jabber check. */
	EL3WINDOW(4);
	outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
	EL3WINDOW(1);
	/* NOTE(review): these branches test dev->if_port, not the if_port
	   parameter just programmed — confirm that is intentional. */
	if (if_port == 2)
		lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
	else
		lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
}
412 | |||
/* Log the interrupt/Rx/Tx status and the window-4 diagnostic registers;
 * used for debugging from the Tx-timeout path.  Leaves window 1 selected.
 */
static void dump_status(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	EL3WINDOW(1);
	netdev_info(dev, "  irq status %04x, rx status %04x, tx status %02x  tx free %04x\n",
		    inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
		    inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
	EL3WINDOW(4);
	netdev_info(dev, "  diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
		    inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
		    inw(ioaddr+0x0a));
	EL3WINDOW(1);
}
426 | |||
/* Reset and restore all of the 3c589 registers.
 * The sequence (activate, IRQ setup, station address, transceiver,
 * stats clear, enables, interrupt mask) is order-sensitive.
 */
static void tc589_reset(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	int i;

	EL3WINDOW(0);
	outw(0x0001, ioaddr + 4);			/* Activate board. */
	outw(0x3f00, ioaddr + 8);			/* Set the IRQ line. */

	/* Set the station address in window 2. */
	EL3WINDOW(2);
	for (i = 0; i < 6; i++)
		outb(dev->dev_addr[i], ioaddr + i);

	tc589_set_xcvr(dev, dev->if_port);

	/* Switch to the stats window, and clear all stats by reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	EL3WINDOW(6);
	for (i = 0; i < 9; i++)
		inb(ioaddr+i);
	inw(ioaddr + 10);
	inw(ioaddr + 12);

	/* Switch to register set 1 for normal use. */
	EL3WINDOW(1);

	set_rx_mode(dev);
	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
	     ioaddr + EL3_CMD);
	outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
	     | AdapterFailure, ioaddr + EL3_CMD);
}
467 | |||
468 | static void netdev_get_drvinfo(struct net_device *dev, | ||
469 | struct ethtool_drvinfo *info) | ||
470 | { | ||
471 | strcpy(info->driver, DRV_NAME); | ||
472 | strcpy(info->version, DRV_VERSION); | ||
473 | sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr); | ||
474 | } | ||
475 | |||
/* Only get_drvinfo is supported via ethtool. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo	= netdev_get_drvinfo,
};
479 | |||
480 | static int el3_config(struct net_device *dev, struct ifmap *map) | ||
481 | { | ||
482 | if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { | ||
483 | if (map->port <= 3) { | ||
484 | dev->if_port = map->port; | ||
485 | netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]); | ||
486 | tc589_set_xcvr(dev, dev->if_port); | ||
487 | } else | ||
488 | return -EINVAL; | ||
489 | } | ||
490 | return 0; | ||
491 | } | ||
492 | |||
/* Bring the interface up: reset the hardware and start the periodic
 * media-check timer.  Returns 0, or -ENODEV if the card is gone.
 */
static int el3_open(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	struct pcmcia_device *link = lp->p_dev;

	if (!pcmcia_dev_present(link))
		return -ENODEV;

	link->open++;
	netif_start_queue(dev);

	tc589_reset(dev);
	/* Arm the media-check timer; first run in one second. */
	init_timer(&lp->media);
	lp->media.function = media_check;
	lp->media.data = (unsigned long) dev;
	lp->media.expires = jiffies + HZ;
	add_timer(&lp->media);

	dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
		dev->name, inw(dev->base_addr + EL3_STATUS));

	return 0;
}
516 | |||
517 | static void el3_tx_timeout(struct net_device *dev) | ||
518 | { | ||
519 | unsigned int ioaddr = dev->base_addr; | ||
520 | |||
521 | netdev_warn(dev, "Transmit timed out!\n"); | ||
522 | dump_status(dev); | ||
523 | dev->stats.tx_errors++; | ||
524 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
525 | /* Issue TX_RESET and TX_START commands. */ | ||
526 | tc589_wait_for_completion(dev, TxReset); | ||
527 | outw(TxEnable, ioaddr + EL3_CMD); | ||
528 | netif_wake_queue(dev); | ||
529 | } | ||
530 | |||
531 | static void pop_tx_status(struct net_device *dev) | ||
532 | { | ||
533 | unsigned int ioaddr = dev->base_addr; | ||
534 | int i; | ||
535 | |||
536 | /* Clear the Tx status stack. */ | ||
537 | for (i = 32; i > 0; i--) { | ||
538 | u_char tx_status = inb(ioaddr + TX_STATUS); | ||
539 | if (!(tx_status & 0x84)) break; | ||
540 | /* reset transmitter on jabber error or underrun */ | ||
541 | if (tx_status & 0x30) | ||
542 | tc589_wait_for_completion(dev, TxReset); | ||
543 | if (tx_status & 0x38) { | ||
544 | netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status); | ||
545 | outw(TxEnable, ioaddr + EL3_CMD); | ||
546 | dev->stats.tx_aborted_errors++; | ||
547 | } | ||
548 | outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ | ||
549 | } | ||
550 | } | ||
551 | |||
/* Transmit one packet by programmed I/O into the Tx FIFO.
 * Always consumes the skb and returns NETDEV_TX_OK; if FIFO space runs
 * low the queue is stopped and restarted later from el3_interrupt()
 * when the TxAvailable interrupt fires.
 */
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	struct el3_private *priv = netdev_priv(dev);
	unsigned long flags;

	netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
		   (long)skb->len, inw(ioaddr + EL3_STATUS));

	spin_lock_irqsave(&priv->lock, flags);

	dev->stats.tx_bytes += skb->len;

	/* Put out the doubleword header... */
	outw(skb->len, ioaddr + TX_FIFO);
	outw(0x00, ioaddr + TX_FIFO);
	/* ... and the packet rounded to a doubleword. */
	outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);

	/* Less than one max-sized frame of FIFO space left: stop the queue
	   until the card signals TxAvailable at the 1536-byte threshold. */
	if (inw(ioaddr + TX_FREE) <= 1536) {
		netif_stop_queue(dev);
		/* Interrupt us when the FIFO has room for max-sized packet. */
		outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
	}

	/* Reap any completed/errored transmit statuses while we hold the lock. */
	pop_tx_status(dev);
	spin_unlock_irqrestore(&priv->lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
584 | |||
/* The EL3 interrupt handler.  Services Rx, Tx-room, Tx-complete, stats
 * and adapter-failure events until the status register shows no more
 * latched work; bounded to 11 passes in case the interrupt line sticks.
 */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr;
	__u16 status;
	int i = 0, handled = 1;

	if (!netif_device_present(dev))
		return IRQ_NONE;

	ioaddr = dev->base_addr;

	netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));

	spin_lock(&lp->lock);
	while ((status = inw(ioaddr + EL3_STATUS)) &
	    (IntLatch | RxComplete | StatsFull)) {
		/* A live card reads 0x2000 in status bits 15:13 (the same
		   test el3_close() uses to see if the card still exists);
		   anything else means the card was ejected mid-interrupt. */
		if ((status & 0xe000) != 0x2000) {
			netdev_dbg(dev, "interrupt from dead card\n");
			handled = 0;
			break;
		}
		if (status & RxComplete)
			el3_rx(dev);
		if (status & TxAvailable) {
			netdev_dbg(dev, " TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue(dev);
		}
		if (status & TxComplete)
			pop_tx_status(dev);
		if (status & (AdapterFailure | RxEarly | StatsFull)) {
			/* Handle all uncommon interrupts. */
			if (status & StatsFull)		/* Empty statistics. */
				update_stats(dev);
			if (status & RxEarly) {		/* Rx early is unused. */
				el3_rx(dev);
				outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
			}
			if (status & AdapterFailure) {
				u16 fifo_diag;
				/* FIFO diagnostic lives in window 4; restore
				   window 1, the driver's normal state. */
				EL3WINDOW(4);
				fifo_diag = inw(ioaddr + 4);
				EL3WINDOW(1);
				netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
					    fifo_diag);
				if (fifo_diag & 0x0400) {
					/* Tx overrun */
					tc589_wait_for_completion(dev, TxReset);
					outw(TxEnable, ioaddr + EL3_CMD);
				}
				if (fifo_diag & 0x2000) {
					/* Rx underrun */
					tc589_wait_for_completion(dev, RxReset);
					set_rx_mode(dev);
					outw(RxEnable, ioaddr + EL3_CMD);
				}
				outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
			}
		}
		if (++i > 10) {
			netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
				   status);
			/* Clear all interrupts */
			outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
			break;
		}
		/* Acknowledge the IRQ. */
		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	}
	/* media_check() compares against this to decide whether interrupts
	   have gone missing. */
	lp->last_irq = jiffies;
	spin_unlock(&lp->lock);
	netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
		   inw(ioaddr + EL3_STATUS));
	return IRQ_RETVAL(handled);
}
664 | |||
/* Periodic timer routine (normally every second).  Two jobs:
 *  1. Detect dropped interrupts (status latched but latency timer
 *     expired) and service them by calling el3_interrupt() directly,
 *     then fast-poll at HZ/100 intervals for roughly one second.
 *  2. Watch the transceiver for link-beat / coax status changes and,
 *     in autoselect mode (if_port == 0), switch transceivers.
 * Always re-arms itself via lp->media.
 */
static void media_check(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)(arg);
	struct el3_private *lp = netdev_priv(dev);
	unsigned int ioaddr = dev->base_addr;
	u16 media, errs;
	unsigned long flags;

	if (!netif_device_present(dev)) goto reschedule;

	/* Check for pending interrupt with expired latency timer: with
	   this, we can limp along even if the interrupt is blocked */
	if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
	    (inb(ioaddr + EL3_TIMER) == 0xff)) {
		if (!lp->fast_poll)
			netdev_warn(dev, "interrupt(s) dropped!\n");

		local_irq_save(flags);
		el3_interrupt(dev->irq, dev);
		local_irq_restore(flags);

		/* HZ fast polls at HZ/100 spacing == ~1 second of polling. */
		lp->fast_poll = HZ;
	}
	if (lp->fast_poll) {
		lp->fast_poll--;
		lp->media.expires = jiffies + HZ/100;
		add_timer(&lp->media);
		return;
	}

	/* lp->lock guards the EL3 window. Window should always be 1 except
	   when the lock is held */
	spin_lock_irqsave(&lp->lock, flags);
	EL3WINDOW(4);
	media = inw(ioaddr+WN4_MEDIA) & 0xc810;

	/* Ignore collisions unless we've had no irq's recently */
	if (time_before(jiffies, lp->last_irq + HZ)) {
		media &= ~0x0010;
	} else {
		/* Try harder to detect carrier errors */
		EL3WINDOW(6);
		outw(StatsDisable, ioaddr + EL3_CMD);
		errs = inb(ioaddr + 0);
		outw(StatsEnable, ioaddr + EL3_CMD);
		dev->stats.tx_carrier_errors += errs;
		if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
	}

	if (media != lp->media_status) {
		/* Report link-beat (0x0800) or coax (0x0010) transitions. */
		if ((media & lp->media_status & 0x8000) &&
		    ((lp->media_status ^ media) & 0x0800))
			netdev_info(dev, "%s link beat\n",
				    (lp->media_status & 0x0800 ? "lost" : "found"));
		else if ((media & lp->media_status & 0x4000) &&
			 ((lp->media_status ^ media) & 0x0010))
			netdev_info(dev, "coax cable %s\n",
				    (lp->media_status & 0x0010 ? "ok" : "problem"));
		/* Autoselect: switch transceiver based on which medium
		   shows activity. */
		if (dev->if_port == 0) {
			if (media & 0x8000) {
				if (media & 0x0800)
					netdev_info(dev, "flipped to 10baseT\n");
				else
					tc589_set_xcvr(dev, 2);
			} else if (media & 0x4000) {
				if (media & 0x0010)
					tc589_set_xcvr(dev, 1);
				else
					netdev_info(dev, "flipped to 10base2\n");
			}
		}
		lp->media_status = media;
	}

	EL3WINDOW(1);
	spin_unlock_irqrestore(&lp->lock, flags);

reschedule:
	lp->media.expires = jiffies + HZ;
	add_timer(&lp->media);
}
746 | |||
747 | static struct net_device_stats *el3_get_stats(struct net_device *dev) | ||
748 | { | ||
749 | struct el3_private *lp = netdev_priv(dev); | ||
750 | unsigned long flags; | ||
751 | struct pcmcia_device *link = lp->p_dev; | ||
752 | |||
753 | if (pcmcia_dev_present(link)) { | ||
754 | spin_lock_irqsave(&lp->lock, flags); | ||
755 | update_stats(dev); | ||
756 | spin_unlock_irqrestore(&lp->lock, flags); | ||
757 | } | ||
758 | return &dev->stats; | ||
759 | } | ||
760 | |||
/*
  Update statistics. We change to register window 6, so this should be run
  single-threaded if the device is active. This is expected to be a rare
  operation, and it's simpler for the rest of the driver to assume that
  window 1 is always valid rather than use a special window-state variable.

  Caller must hold the lock for this
*/
static void update_stats(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;

	netdev_dbg(dev, "updating the statistics.\n");
	/* Turn off statistics updates while reading. */
	outw(StatsDisable, ioaddr + EL3_CMD);
	/* Switch to the stats window, and read everything.  NOTE(review):
	   the counters appear to be clear-on-read — every register is read
	   even when the value is discarded, which keeps them consistent. */
	EL3WINDOW(6);
	dev->stats.tx_carrier_errors 	+= inb(ioaddr + 0);
	dev->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
	/* Multiple collisions. */	   inb(ioaddr + 2);
	dev->stats.collisions		+= inb(ioaddr + 3);
	dev->stats.tx_window_errors	+= inb(ioaddr + 4);
	dev->stats.rx_fifo_errors	+= inb(ioaddr + 5);
	dev->stats.tx_packets		+= inb(ioaddr + 6);
	/* Rx packets	*/		   inb(ioaddr + 7);
	/* Tx deferrals */		   inb(ioaddr + 8);
	/* Rx octets */			   inw(ioaddr + 10);
	/* Tx octets */			   inw(ioaddr + 12);

	/* Back to window 1, and turn statistics back on. */
	EL3WINDOW(1);
	outw(StatsEnable, ioaddr + EL3_CMD);
}
794 | |||
/* Drain received frames from the Rx FIFO, at most 32 per call.
 * Error frames only bump the error counters; good frames are copied
 * into freshly allocated skbs and passed up via netif_rx().
 * Always returns 0.
 */
static int el3_rx(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;
	int worklimit = 32;
	short rx_status;

	netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
		   inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
	/* Loop while RX_STATUS reports another frame (bit 15 clear),
	   bounded by worklimit. */
	while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
		    worklimit > 0) {
		worklimit--;
		if (rx_status & 0x4000) { /* Error, update stats. */
			/* Bits 13:11 encode the error type. */
			short error = rx_status & 0x3800;
			dev->stats.rx_errors++;
			switch (error) {
			case 0x0000:	dev->stats.rx_over_errors++; break;
			case 0x0800:	dev->stats.rx_length_errors++; break;
			case 0x1000:	dev->stats.rx_frame_errors++; break;
			case 0x1800:	dev->stats.rx_length_errors++; break;
			case 0x2000:	dev->stats.rx_frame_errors++; break;
			case 0x2800:	dev->stats.rx_crc_errors++; break;
			}
		} else {
			short pkt_len = rx_status & 0x7ff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len+5);

			netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
				   pkt_len, rx_status);
			if (skb != NULL) {
				/* 2-byte offset so the IP header lands aligned. */
				skb_reserve(skb, 2);
				insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
					(pkt_len+3)>>2);
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			} else {
				netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
					   pkt_len);
				dev->stats.rx_dropped++;
			}
		}
		/* Pop the top of the Rx FIFO */
		tc589_wait_for_completion(dev, RxDiscard);
	}
	if (worklimit == 0)
		netdev_warn(dev, "too much work in el3_rx!\n");
	return 0;
}
846 | |||
847 | static void set_rx_mode(struct net_device *dev) | ||
848 | { | ||
849 | unsigned int ioaddr = dev->base_addr; | ||
850 | u16 opts = SetRxFilter | RxStation | RxBroadcast; | ||
851 | |||
852 | if (dev->flags & IFF_PROMISC) | ||
853 | opts |= RxMulticast | RxProm; | ||
854 | else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) | ||
855 | opts |= RxMulticast; | ||
856 | outw(opts, ioaddr + EL3_CMD); | ||
857 | } | ||
858 | |||
859 | static void set_multicast_list(struct net_device *dev) | ||
860 | { | ||
861 | struct el3_private *priv = netdev_priv(dev); | ||
862 | unsigned long flags; | ||
863 | |||
864 | spin_lock_irqsave(&priv->lock, flags); | ||
865 | set_rx_mode(dev); | ||
866 | spin_unlock_irqrestore(&priv->lock, flags); | ||
867 | } | ||
868 | |||
/* Take the interface down: disable Rx/Tx, power down the transceiver,
 * mask the card's IRQ, read out the final statistics if the card is
 * still present, and stop the media timer.
 */
static int el3_close(struct net_device *dev)
{
	struct el3_private *lp = netdev_priv(dev);
	struct pcmcia_device *link = lp->p_dev;
	unsigned int ioaddr = dev->base_addr;

	dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);

	if (pcmcia_dev_present(link)) {
		/* Turn off statistics ASAP. We update dev->stats below. */
		outw(StatsDisable, ioaddr + EL3_CMD);

		/* Disable the receiver and transmitter. */
		outw(RxDisable, ioaddr + EL3_CMD);
		outw(TxDisable, ioaddr + EL3_CMD);

		if (dev->if_port == 2)
			/* Turn off thinnet power. Green! */
			outw(StopCoax, ioaddr + EL3_CMD);
		else if (dev->if_port == 1) {
			/* Disable link beat and jabber */
			EL3WINDOW(4);
			outw(0, ioaddr + WN4_MEDIA);
		}

		/* Switching back to window 0 disables the IRQ. */
		EL3WINDOW(0);
		/* But we explicitly zero the IRQ line select anyway. */
		outw(0x0f00, ioaddr + WN0_IRQ);

		/* Check if the card still exists */
		if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
			update_stats(dev);
	}

	link->open--;
	netif_stop_queue(dev);
	/* Safe even if media_check() is mid-flight on another CPU. */
	del_timer_sync(&lp->media);

	return 0;
}
910 | |||
/* PCMCIA manufacturer/product IDs matched by this driver.  The 3CXEM556
 * multifunction entries load a replacement CIS from firmware. */
static const struct pcmcia_device_id tc589_ids[] = {
	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0101, 0x0562),
	PCMCIA_MFC_DEVICE_PROD_ID1(0, "Motorola MARQUIS", 0xf03e4e77),
	PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0589),
	PCMCIA_DEVICE_PROD_ID12("Farallon", "ENet", 0x58d93fc4, 0x992c2202),
	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x0035, "cis/3CXEM556.cis"),
	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0101, 0x003d, "cis/3CXEM556.cis"),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, tc589_ids);
921 | |||
/* PCMCIA driver glue: probe/remove and suspend/resume callbacks for the
 * 3c589 card (definitions not visible in this chunk). */
static struct pcmcia_driver tc589_driver = {
	.owner		= THIS_MODULE,
	.name		= "3c589_cs",
	.probe		= tc589_probe,
	.remove		= tc589_detach,
	.id_table	= tc589_ids,
	.suspend	= tc589_suspend,
	.resume		= tc589_resume,
};
931 | |||
/* Module load: register the PCMCIA driver with the core. */
static int __init init_tc589(void)
{
	return pcmcia_register_driver(&tc589_driver);
}

/* Module unload: unregister the PCMCIA driver. */
static void __exit exit_tc589(void)
{
	pcmcia_unregister_driver(&tc589_driver);
}

module_init(init_tc589);
module_exit(exit_tc589);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c new file mode 100644 index 000000000000..8cc22568ebd3 --- /dev/null +++ b/drivers/net/ethernet/3com/3c59x.c | |||
@@ -0,0 +1,3326 @@ | |||
1 | /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */ | ||
2 | /* | ||
3 | Written 1996-1999 by Donald Becker. | ||
4 | |||
5 | This software may be used and distributed according to the terms | ||
6 | of the GNU General Public License, incorporated herein by reference. | ||
7 | |||
8 | This driver is for the 3Com "Vortex" and "Boomerang" series ethercards. | ||
9 | Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597 | ||
10 | and the EtherLink XL 3c900 and 3c905 cards. | ||
11 | |||
12 | Problem reports and questions should be directed to | ||
13 | vortex@scyld.com | ||
14 | |||
15 | The author may be reached as becker@scyld.com, or C/O | ||
16 | Scyld Computing Corporation | ||
17 | 410 Severn Ave., Suite 210 | ||
18 | Annapolis MD 21403 | ||
19 | |||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * FIXME: This driver _could_ support MTU changing, but doesn't. See Don's hamachi.c implementation | ||
24 | * as well as other drivers | ||
25 | * | ||
26 | * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k | ||
27 | * due to dead code elimination. There will be some performance benefits from this due to | ||
28 | * elimination of all the tests and reduced cache footprint. | ||
29 | */ | ||
30 | |||
31 | |||
#define DRV_NAME	"3c59x"



/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1512 effectively disables this feature. */
#ifndef __arm__
static int rx_copybreak = 200;
#else
/* ARM systems perform better by disregarding the bus-master
   transfer capability of these cards. -- rmk */
static int rx_copybreak = 1513;
#endif
/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Tx timeout interval (millisecs) */
static int watchdog = 5000;

/* Allow aggregation of Tx interrupts. Saves CPU load at the cost
 * of possible Tx stalls if the system is blocking interrupts
 * somewhere else. Undefine this to disable.
 */
#define tx_interrupt_mitigation 1

/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
/* NOTE(review): 'vortex_debug' aliases a variable named 'debug' —
   presumably the module parameter declared later in the file; confirm. */
#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 1;
#endif
72 | |||
73 | #include <linux/module.h> | ||
74 | #include <linux/kernel.h> | ||
75 | #include <linux/string.h> | ||
76 | #include <linux/timer.h> | ||
77 | #include <linux/errno.h> | ||
78 | #include <linux/in.h> | ||
79 | #include <linux/ioport.h> | ||
80 | #include <linux/interrupt.h> | ||
81 | #include <linux/pci.h> | ||
82 | #include <linux/mii.h> | ||
83 | #include <linux/init.h> | ||
84 | #include <linux/netdevice.h> | ||
85 | #include <linux/etherdevice.h> | ||
86 | #include <linux/skbuff.h> | ||
87 | #include <linux/ethtool.h> | ||
88 | #include <linux/highmem.h> | ||
89 | #include <linux/eisa.h> | ||
90 | #include <linux/bitops.h> | ||
91 | #include <linux/jiffies.h> | ||
92 | #include <linux/gfp.h> | ||
93 | #include <asm/irq.h> /* For nr_irqs only. */ | ||
94 | #include <asm/io.h> | ||
95 | #include <asm/uaccess.h> | ||
96 | |||
97 | /* Kernel compatibility defines, some common to David Hinds' PCMCIA package. | ||
98 | This is only in the support-all-kernels source code. */ | ||
99 | |||
100 | #define RUN_AT(x) (jiffies + (x)) | ||
101 | |||
102 | #include <linux/delay.h> | ||
103 | |||
104 | |||
/* Driver identification banner and module metadata (shown by modinfo). */
static const char version[] __devinitconst =
	DRV_NAME ": Donald Becker and others.\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
MODULE_LICENSE("GPL");
111 | |||
112 | |||
/* Operational parameters that usually are not changed. */

/* The Vortex size is twice that of the original EtherLinkIII series: the
   runtime register window, window 1, is now always mapped in.
   The Boomerang size is twice as large as the Vortex -- it has additional
   bus master control registers. */
#define VORTEX_TOTAL_SIZE	0x20
#define BOOMERANG_TOTAL_SIZE	0x40

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This only set with the original DP83840 on older 3c905 boards, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required;

/* Prefix for printk-style messages. */
#define PFX DRV_NAME ": "
128 | |||
129 | |||
130 | |||
131 | /* | ||
132 | Theory of Operation | ||
133 | |||
134 | I. Board Compatibility | ||
135 | |||
136 | This device driver is designed for the 3Com FastEtherLink and FastEtherLink | ||
XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbps
138 | versions of the FastEtherLink cards. The supported product IDs are | ||
139 | 3c590, 3c592, 3c595, 3c597, 3c900, 3c905 | ||
140 | |||
141 | The related ISA 3c515 is supported with a separate driver, 3c515.c, included | ||
142 | with the kernel source or available from | ||
143 | cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html | ||
144 | |||
145 | II. Board-specific settings | ||
146 | |||
147 | PCI bus devices are configured by the system at boot time, so no jumpers | ||
148 | need to be set on the board. The system BIOS should be set to assign the | ||
149 | PCI INTA signal to an otherwise unused system IRQ line. | ||
150 | |||
151 | The EEPROM settings for media type and forced-full-duplex are observed. | ||
152 | The EEPROM media type should be left at the default "autoselect" unless using | ||
153 | 10base2 or AUI connections which cannot be reliably detected. | ||
154 | |||
155 | III. Driver operation | ||
156 | |||
157 | The 3c59x series use an interface that's very similar to the previous 3c5x9 | ||
158 | series. The primary interface is two programmed-I/O FIFOs, with an | ||
159 | alternate single-contiguous-region bus-master transfer (see next). | ||
160 | |||
161 | The 3c900 "Boomerang" series uses a full-bus-master interface with separate | ||
162 | lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet, | ||
163 | DEC Tulip and Intel Speedo3. The first chip version retains a compatible | ||
164 | programmed-I/O interface that has been removed in 'B' and subsequent board | ||
165 | revisions. | ||
166 | |||
167 | One extension that is advertised in a very large font is that the adapters | ||
168 | are capable of being bus masters. On the Vortex chip this capability was | ||
169 | only for a single contiguous region making it far less useful than the full | ||
170 | bus master capability. There is a significant performance impact of taking | ||
171 | an extra interrupt or polling for the completion of each transfer, as well | ||
172 | as difficulty sharing the single transfer engine between the transmit and | ||
173 | receive threads. Using DMA transfers is a win only with large blocks or | ||
174 | with the flawed versions of the Intel Orion motherboard PCI controller. | ||
175 | |||
176 | The Boomerang chip's full-bus-master interface is useful, and has the | ||
177 | currently-unused advantages over other similar chips that queued transmit | ||
178 | packets may be reordered and receive buffer groups are associated with a | ||
179 | single frame. | ||
180 | |||
181 | With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme. | ||
182 | Rather than a fixed intermediate receive buffer, this scheme allocates | ||
183 | full-sized skbuffs as receive buffers. The value RX_COPYBREAK is used as | ||
184 | the copying breakpoint: it is chosen to trade-off the memory wasted by | ||
185 | passing the full-sized skbuff to the queue layer for all frames vs. the | ||
186 | copying cost of copying a frame to a correctly-sized skbuff. | ||
187 | |||
188 | IIIC. Synchronization | ||
189 | The driver runs as two independent, single-threaded flows of control. One | ||
190 | is the send-packet routine, which enforces single-threaded use by the | ||
191 | dev->tbusy flag. The other thread is the interrupt handler, which is single | ||
192 | threaded by the hardware and other software. | ||
193 | |||
194 | IV. Notes | ||
195 | |||
196 | Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development | ||
197 | 3c590, 3c595, and 3c900 boards. | ||
198 | The name "Vortex" is the internal 3Com project name for the PCI ASIC, and | ||
199 | the EISA version is called "Demon". According to Terry these names come | ||
200 | from rides at the local amusement park. | ||
201 | |||
202 | The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes! | ||
203 | This driver only supports ethernet packets because of the skbuff allocation | ||
204 | limit of 4K. | ||
205 | */ | ||
206 | |||
/* This table drives the PCI probe routines. It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
*/
enum pci_flags_bit {
	PCI_USES_MASTER=4,
};

/* Per-chip capability and quirk flags, used in the drv_flags field of
   vortex_info_tbl[] below. */
enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
	EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
220 | |||
/* Chip indices: each value indexes vortex_info_tbl[] and is also used as
   driver_data in the PCI device table — all three must stay in sync. */
enum vortex_chips {
	CH_3C590 = 0,
	CH_3C592,
	CH_3C597,
	CH_3C595_1,
	CH_3C595_2,

	CH_3C595_3,
	CH_3C900_1,
	CH_3C900_2,
	CH_3C900_3,
	CH_3C900_4,

	CH_3C900_5,
	CH_3C900B_FL,
	CH_3C905_1,
	CH_3C905_2,
	CH_3C905B_TX,
	CH_3C905B_1,

	CH_3C905B_2,
	CH_3C905B_FX,
	CH_3C905C,
	CH_3C9202,
	CH_3C980,
	CH_3C9805,

	CH_3CSOHO100_TX,
	CH_3C555,
	CH_3C556,
	CH_3C556B,
	CH_3C575,

	CH_3C575_1,
	CH_3CCFE575,
	CH_3CCFE575CT,
	CH_3CCFE656,
	CH_3CCFEM656,

	CH_3CCFEM656_1,
	CH_3C450,
	CH_3C920,
	CH_3C982A,
	CH_3C982B,

	CH_905BT4,
	CH_920B_EMB_WNM,
};
269 | |||
270 | |||
/* note: this array directly indexed by above enums, and MUST
 * be kept in sync with both the enums above, and the PCI device
 * table below
 */
static struct vortex_chip_info {
	const char *name;	/* human-readable board name */
	int flags;		/* pci_flags_bit values */
	int drv_flags;		/* IS_*/HAS_* capability flags from the enum above */
	int io_size;		/* I/O region size to request */
} vortex_info_tbl[] __devinitdata = {
	{"3c590 Vortex 10Mbps",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseTx",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c595 Vortex 100baseT4",
	 PCI_USES_MASTER, IS_VORTEX, 32, },

	{"3c595 Vortex 100base-MII",
	 PCI_USES_MASTER, IS_VORTEX, 32, },
	{"3c900 Boomerang 10baseT",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Boomerang 10Mbps Combo",
	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900 Cyclone 10Mbps Combo",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },

	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c900B-FL Cyclone 10base-FL",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905 Boomerang 100baseTx",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3c905 Boomerang 100baseT4",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
	{"3C905B-TX Fast Etherlink XL PCI",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c905B Cyclone 100baseTx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },

	{"3c905B Cyclone 10/100/BNC",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c905B-FX Cyclone 100baseFx",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
	{"3c905C Tornado",
	PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
	{"3c980 Cyclone",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },

	{"3c980C Python-T",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3cSOHO100-TX Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c555 Laptop Hurricane",
	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
	{"3c556 Laptop Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
									HAS_HWCKSM, 128, },
	{"3c556B Laptop Hurricane",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
	                                WNO_XCVR_PWR|HAS_HWCKSM, 128, },

	{"3c575 [Megahertz] 10/100 LAN	CardBus",
	PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3c575 Boomerang CardBus",
	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
	{"3CCFE575BT Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CCFE575CT Tornado CardBus",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3CCFE656 Cyclone CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },

	{"3CCFEM656B Cyclone+Winmodem CardBus",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									INVERT_LED_PWR|HAS_HWCKSM, 128, },
	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c920 Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
	{"3c982 Hydra Dual Port A",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },

	{"3c982 Hydra Dual Port B",
	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
	{"3c905B-T4",
	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
	{"3c920B-EMB-WNM Tornado",
	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },

	{NULL,}, /* NULL terminated list. */
};
376 | |||
377 | |||
/* PCI match table.  Every entry is 3Com (vendor 0x10B7); the last field
 * (driver_data) is a CH_* index into vortex_info_tbl above, so the two
 * tables must stay in the same order. */
static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },

	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },

	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },

	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },

	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },

	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },

	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },

	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },

	{0,} /* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
429 | |||
430 | |||
/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows. There are eight register windows, with the command
   and status registers available in each.
   */
#define EL3_CMD 0x0e		/* Command register, valid in every window. */
#define EL3_STATUS 0x0e		/* Reads back the status at the same offset. */

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameters bits was fine for ethernet, but the new chip
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */

/* Command opcodes, pre-shifted into the top bits of EL3_CMD.  The
   stall/unstall and DMA up/down pairs share an opcode and are selected
   by the low argument bits. */
enum vortex_cmd {
	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
	UpStall = 6<<11, UpUnstall = (6<<11)+1,
	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
458 | |||
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };

/* Bits in the general status register. */
enum vortex_status {
	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
};
472 | |||
/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10, RX_FIFO = 0x10, RxErrors = 0x14,
	RxStatus = 0x18, Timer=0x1A, TxStatus = 0x1B,
	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
};
/* Window 0: EEPROM access and configuration. */
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus=0x0E,		/* Valid in all windows. */
};
/* Commands for Wn0EepromCmd; the low bits carry the EEPROM address. */
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
	DriverTune=13, Checksum=15};

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions=12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
};
503 | |||
/* Extract a `bitcount'-wide field starting at bit `offset' from `value'. */
#define BFEXT(value, offset, bitcount)  \
    ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

/* Insert `rhs' into the `bitcount'-wide field at bit `offset' of `lhs'
 * and yield the combined value (neither operand is modified). */
#define BFINS(lhs, rhs, offset, bitcount)					\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Field accessors for the Wn3_Config register image read from EEPROM. */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
#define RAM_SPEED(v)	BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)	BFEXT(v, 24, 1)
518 | |||
enum Window4 {		/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
};
/* Bits in the Wn4_Media register. */
enum Win4_Media_bits {
	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {		/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0,  Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};
537 | |||
/* The Rx and Tx descriptor lists.
   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
   alignment contraint on tx_ring[] and rx_ring[]. */
#define LAST_FRAG	0x80000000			/* Last Addr/Len pair in descriptor. */
#define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
/* Hardware Rx descriptor; all fields are little-endian as seen by the NIC. */
struct boom_rx_desc {
	__le32 next;					/* Last entry points to 0.   */
	__le32 status;
	__le32 addr;					/* Up to 63 addr/len pairs possible. */
	__le32 length;					/* Set LAST_FRAG to indicate last pair. */
};
/* Values for the Rx status entry. */
enum rx_desc_status {
	RxDComplete=0x00008000, RxDError=0x4000,
	/* See boomerang_rx() for actual error bits */
	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
};
556 | |||
/* Scatter/gather ("zerocopy") transmit is only possible when the kernel
 * exposes paged skb fragments (MAX_SKB_FRAGS defined). */
#ifdef MAX_SKB_FRAGS
#define DO_ZEROCOPY 1
#else
#define DO_ZEROCOPY 0
#endif

/* Hardware Tx descriptor; with DO_ZEROCOPY each descriptor carries one
 * addr/len pair per possible skb fragment plus one for the head. */
struct boom_tx_desc {
	__le32 next;					/* Last entry points to 0.   */
	__le32 status;					/* bits 0:12 length, others see below.  */
#if DO_ZEROCOPY
	struct {
		__le32 addr;
		__le32 length;
	} frag[1+MAX_SKB_FRAGS];
#else
		__le32 addr;
		__le32 length;
#endif
};

/* Values for the Tx status entry. */
enum tx_desc_status {
	CRCDisable=0x2000, TxDComplete=0x8000,
	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
};

/* Chip features we care about in vp->capabilities, read from the EEPROM. */
enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
586 | |||
/* NIC-specific counters reported through ethtool -S; the field order
 * must match ethtool_stats_keys[] below. */
struct vortex_extra_stats {
	unsigned long tx_deferred;
	unsigned long tx_max_collisions;
	unsigned long tx_multiple_collisions;
	unsigned long tx_single_collisions;
	unsigned long rx_bad_ssd;
};
594 | |||
/* Per-adapter driver state, reached via netdev_priv(dev). */
struct vortex_private {
	/* The Rx and Tx rings should be quad-word-aligned. */
	struct boom_rx_desc* rx_ring;
	struct boom_tx_desc* tx_ring;
	dma_addr_t rx_ring_dma;			/* Bus addresses of the two rings. */
	dma_addr_t tx_ring_dma;
	/* The addresses of transmit- and receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */
	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */

	/* PCI configuration space information. */
	struct device *gendev;
	void __iomem *ioaddr;			/* IO address space */
	void __iomem *cb_fn_base;		/* CardBus function status addr space. */

	/* Some values here only for performance evaluation and path-coverage */
	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
	int card_idx;

	/* The remainder are related to chip state, mostly media selection. */
	struct timer_list timer;		/* Media selection timer. */
	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
	int options;				/* User-settable misc. driver options. */
	unsigned int media_override:4,		/* Passed-in media type. */
		default_media:4,		/* Read from the EEPROM/Wn3_Config. */
		full_duplex:1, autoselect:1,
		bus_master:1,			/* Vortex can only do a fragment bus-m. */
		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
		flow_ctrl:1,			/* Use 802.3x flow control (PAUSE only) */
		partner_flow_ctrl:1,		/* Partner supports flow control */
		has_nway:1,
		enable_wol:1,			/* Wake-on-LAN is enabled */
		pm_state_valid:1,		/* pci_dev->saved_config_space has sane contents */
		open:1,
		medialock:1,
		must_free_region:1,		/* Flag: if zero, Cardbus owns the I/O region */
		large_frames:1,			/* accept large frames */
		handling_irq:1;			/* private in_irq indicator */
	/* {get|set}_wol operations are already serialized by rtnl.
	 * no additional locking is required for the enable_wol and acpi_set_WOL()
	 */
	int drv_flags;
	u16 status_enable;
	u16 intr_enable;
	u16 available_media;			/* From Wn3_Options. */
	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
	u16 advertising;			/* NWay media advertisement */
	unsigned char phys[2];			/* MII device addresses. */
	u16 deferred;				/* Resend these interrupts when we
						 * bale from the ISR */
	u16 io_size;				/* Size of PCI region (for release_region) */

	/* Serialises access to hardware other than MII and variables below.
	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
	spinlock_t lock;

	spinlock_t mii_lock;		/* Serialises access to MII */
	struct mii_if_info mii;		/* MII lib hooks/info */
	spinlock_t window_lock;		/* Serialises access to windowed regs */
	int window;			/* Register window */
};
661 | |||
662 | static void window_set(struct vortex_private *vp, int window) | ||
663 | { | ||
664 | if (window != vp->window) { | ||
665 | iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD); | ||
666 | vp->window = window; | ||
667 | } | ||
668 | } | ||
669 | |||
/* Generate window_read{8,16,32}() / window_write{8,16,32}() accessors:
 * each takes window_lock, selects the requested register window, then
 * performs a single ioread/iowrite of the given width. */
#define DEFINE_WINDOW_IO(size)						\
static u ## size							\
window_read ## size(struct vortex_private *vp, int window, int addr)	\
{									\
	unsigned long flags;						\
	u ## size ret;							\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	ret = ioread ## size(vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
	return ret;							\
}									\
static void								\
window_write ## size(struct vortex_private *vp, u ## size value,	\
		     int window, int addr)				\
{									\
	unsigned long flags;						\
	spin_lock_irqsave(&vp->window_lock, flags);			\
	window_set(vp, window);						\
	iowrite ## size(value, vp->ioaddr + addr);			\
	spin_unlock_irqrestore(&vp->window_lock, flags);		\
}
DEFINE_WINDOW_IO(8)
DEFINE_WINDOW_IO(16)
DEFINE_WINDOW_IO(32)
695 | |||
/* Map the generic struct device back to its PCI or EISA wrapper; each
 * macro yields NULL when the device is not on that bus (or when the bus
 * type is compiled out).  VORTEX_PCI/VORTEX_EISA additionally tolerate
 * a NULL gendev, which occurs for the Compaq BIOS work-around device. */
#ifdef CONFIG_PCI
#define DEVICE_PCI(dev) (((dev)->bus == &pci_bus_type) ? to_pci_dev((dev)) : NULL)
#else
#define DEVICE_PCI(dev) NULL
#endif

#define VORTEX_PCI(vp)							\
	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))

#ifdef CONFIG_EISA
#define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
#else
#define DEVICE_EISA(dev) NULL
#endif

#define VORTEX_EISA(vp)							\
	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
713 | |||
/* The action to take with a media selection timer tick.
   Note that we deviate from the 3Com order by checking 10base2 before AUI.
 */
enum xcvr_types {
	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
};

/* Per-media parameters, indexed by xcvr_types; `next' links the entries
 * into the autoselection probe order. */
static const struct media_table {
	char *name;
	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
		mask:8,				/* The transceiver-present bit in Wn3_Config.*/
		next:8;				/* The media type to try next. */
	int wait;				/* Time before we check media status. */
} media_tbl[] = {
  {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
  { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
  { "undefined", 0,			0x80, XCVR_10baseT, 10000},
  { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
  { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
  { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
  { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
  { "undefined", 0,			0x01, XCVR_10baseT, 10000},
  { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
  { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
  { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
};
741 | |||
742 | static struct { | ||
743 | const char str[ETH_GSTRING_LEN]; | ||
744 | } ethtool_stats_keys[] = { | ||
745 | { "tx_deferred" }, | ||
746 | { "tx_max_collisions" }, | ||
747 | { "tx_multiple_collisions" }, | ||
748 | { "tx_single_collisions" }, | ||
749 | { "rx_bad_ssd" }, | ||
750 | }; | ||
751 | |||
752 | /* number of ETHTOOL_GSTATS u64's */ | ||
753 | #define VORTEX_NUM_STATS 5 | ||
754 | |||
/* Forward declarations.  The *vortex* variants drive the older PIO-style
 * chips; the *boomerang* variants use the descriptor-based bus-master
 * path (see the two net_device_ops tables below). */
static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
				   int chip_idx, int card_idx);
static int vortex_up(struct net_device *dev);
static void vortex_down(struct net_device *dev, int final);
static int vortex_open(struct net_device *dev);
static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(unsigned long arg);
static void rx_oom_timer(unsigned long arg);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
				     struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
					struct net_device *dev);
static int vortex_rx(struct net_device *dev);
static int boomerang_rx(struct net_device *dev);
static irqreturn_t vortex_interrupt(int irq, void *dev_id);
static irqreturn_t boomerang_interrupt(int irq, void *dev_id);
static int vortex_close(struct net_device *dev);
static void dump_tx_ring(struct net_device *dev);
static void update_stats(void __iomem *ioaddr, struct net_device *dev);
static struct net_device_stats *vortex_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
#ifdef CONFIG_PCI
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
static void vortex_tx_timeout(struct net_device *dev);
static void acpi_set_WOL(struct net_device *dev);
static const struct ethtool_ops vortex_ethtool_ops;
static void set_8021q_mode(struct net_device *dev, int enable);
785 | |||
/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
/* Option count limit only -- unlimited interfaces are supported. */
#define MAX_UNITS 8
/* Per-card settings; -1 means "not set", in which case the matching
 * global_* value (or the hardware default) is used instead. */
static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
static int global_options = -1;
static int global_full_duplex = -1;
static int global_enable_wol = -1;
static int global_use_mmio = -1;

/* Variables to work-around the Compaq PCI BIOS32 problem. */
static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
static struct net_device *compaq_net_device;

/* Total adapters claimed so far, across PCI, EISA and Compaq paths. */
static int vortex_cards_found;
805 | |||
/* Module parameter registration; descriptions follow below. */
module_param(debug, int, 0);
module_param(global_options, int, 0);
module_param_array(options, int, NULL, 0);
module_param(global_full_duplex, int, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param_array(hw_checksums, int, NULL, 0);
module_param_array(flow_ctrl, int, NULL, 0);
module_param(global_enable_wol, int, 0);
module_param_array(enable_wol, int, NULL, 0);
module_param(rx_copybreak, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(compaq_ioaddr, int, 0);
module_param(compaq_irq, int, 0);
module_param(compaq_device_id, int, 0);
module_param(watchdog, int, 0);
module_param(global_use_mmio, int, 0);
module_param_array(use_mmio, int, NULL, 0);
MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
840 | |||
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll/netconsole "interrupt": invoke the ISR matching this chip's
 * generation (boomerang descriptor path vs. vortex PIO path) with local
 * interrupts masked. */
static void poll_vortex(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	irqreturn_t (*handler)(int, void *);
	unsigned long flags;

	handler = vp->full_bus_master_rx ? boomerang_interrupt
					 : vortex_interrupt;
	local_irq_save(flags);
	handler(dev->irq, dev);
	local_irq_restore(flags);
}
#endif
851 | |||
852 | #ifdef CONFIG_PM | ||
853 | |||
/* PM suspend hook: quiesce a running interface before the system (or
 * device) powers down.  A closed interface needs no action. */
static int vortex_suspend(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));

	if (ndev && netif_running(ndev)) {
		netif_device_detach(ndev);
		vortex_down(ndev, 1);
	}
	return 0;
}
867 | |||
/* PM resume hook: bring a previously-running interface back up and
 * re-attach it; propagates any vortex_up() failure to the PM core. */
static int vortex_resume(struct device *dev)
{
	struct net_device *ndev = pci_get_drvdata(to_pci_dev(dev));
	int err = 0;

	if (ndev && netif_running(ndev)) {
		err = vortex_up(ndev);
		if (!err)
			netif_device_attach(ndev);
	}
	return err;
}
885 | |||
/* All PM transitions map onto the same suspend/resume pair. */
static const struct dev_pm_ops vortex_pm_ops = {
	.suspend = vortex_suspend,
	.resume = vortex_resume,
	.freeze = vortex_suspend,
	.thaw = vortex_resume,
	.poweroff = vortex_suspend,
	.restore = vortex_resume,
};

#define VORTEX_PM_OPS (&vortex_pm_ops)

#else /* !CONFIG_PM */

#define VORTEX_PM_OPS NULL

#endif /* !CONFIG_PM */
902 | |||
903 | #ifdef CONFIG_EISA | ||
/* EISA IDs for the 3c592/3c597; driver_data is the chip table index. */
static struct eisa_device_id vortex_eisa_ids[] = {
	{ "TCM5920", CH_3C592 },
	{ "TCM5970", CH_3C597 },
	{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
910 | |||
911 | static int __init vortex_eisa_probe(struct device *device) | ||
912 | { | ||
913 | void __iomem *ioaddr; | ||
914 | struct eisa_device *edev; | ||
915 | |||
916 | edev = to_eisa_device(device); | ||
917 | |||
918 | if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) | ||
919 | return -EBUSY; | ||
920 | |||
921 | ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE); | ||
922 | |||
923 | if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, | ||
924 | edev->id.driver_data, vortex_cards_found)) { | ||
925 | release_region(edev->base_addr, VORTEX_TOTAL_SIZE); | ||
926 | return -ENODEV; | ||
927 | } | ||
928 | |||
929 | vortex_cards_found++; | ||
930 | |||
931 | return 0; | ||
932 | } | ||
933 | |||
/* Tear down an EISA-probed adapter: unregister the netdev, issue a full
 * hardware reset via EL3_CMD, release the I/O region, and free the
 * device.  The Compaq work-around device never registers EISA drvdata,
 * so reaching here without one is a bug. */
static int __devexit vortex_eisa_remove(struct device *device)
{
	struct eisa_device *edev;
	struct net_device *dev;
	struct vortex_private *vp;
	void __iomem *ioaddr;

	edev = to_eisa_device(device);
	dev = eisa_get_drvdata(edev);

	if (!dev) {
		pr_err("vortex_eisa_remove called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);
	ioaddr = vp->ioaddr;

	unregister_netdev(dev);
	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
	release_region(dev->base_addr, VORTEX_TOTAL_SIZE);

	free_netdev(dev);
	return 0;
}
959 | |||
/* EISA bus glue tying the ID table to the probe/remove callbacks. */
static struct eisa_driver vortex_eisa_driver = {
	.id_table = vortex_eisa_ids,
	.driver   = {
		.name    = "3c59x",
		.probe   = vortex_eisa_probe,
		.remove  = __devexit_p(vortex_eisa_remove)
	}
};
969 | #endif /* CONFIG_EISA */ | ||
970 | |||
/* returns count found (>= 0), or negative on error */
static int __init vortex_eisa_init(void)
{
	int eisa_found = 0;
	int orig_cards_found = vortex_cards_found;

#ifdef CONFIG_EISA
	int err;

	err = eisa_driver_register (&vortex_eisa_driver);
	if (!err) {
		/*
		 * Because of the way EISA bus is probed, we cannot assume
		 * any device have been found when we exit from
		 * eisa_driver_register (the bus root driver may not be
		 * initialized yet). So we blindly assume something was
		 * found, and let the sysfs magic happened...
		 */
		eisa_found = 1;
	}
#endif

	/* Special code to work-around the Compaq PCI BIOS32 problem. */
	if (compaq_ioaddr) {
		/* Probe directly from the user-supplied ioaddr/irq/id;
		 * failures are deliberately ignored (best-effort). */
		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
			      compaq_irq, compaq_device_id, vortex_cards_found++);
	}

	return vortex_cards_found - orig_cards_found + eisa_found;
}
1001 | |||
1002 | /* returns count (>= 0), or negative on error */ | ||
1003 | static int __devinit vortex_init_one(struct pci_dev *pdev, | ||
1004 | const struct pci_device_id *ent) | ||
1005 | { | ||
1006 | int rc, unit, pci_bar; | ||
1007 | struct vortex_chip_info *vci; | ||
1008 | void __iomem *ioaddr; | ||
1009 | |||
1010 | /* wake up and enable device */ | ||
1011 | rc = pci_enable_device(pdev); | ||
1012 | if (rc < 0) | ||
1013 | goto out; | ||
1014 | |||
1015 | unit = vortex_cards_found; | ||
1016 | |||
1017 | if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { | ||
1018 | /* Determine the default if the user didn't override us */ | ||
1019 | vci = &vortex_info_tbl[ent->driver_data]; | ||
1020 | pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0; | ||
1021 | } else if (unit < MAX_UNITS && use_mmio[unit] >= 0) | ||
1022 | pci_bar = use_mmio[unit] ? 1 : 0; | ||
1023 | else | ||
1024 | pci_bar = global_use_mmio ? 1 : 0; | ||
1025 | |||
1026 | ioaddr = pci_iomap(pdev, pci_bar, 0); | ||
1027 | if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ | ||
1028 | ioaddr = pci_iomap(pdev, 0, 0); | ||
1029 | if (!ioaddr) { | ||
1030 | pci_disable_device(pdev); | ||
1031 | rc = -ENOMEM; | ||
1032 | goto out; | ||
1033 | } | ||
1034 | |||
1035 | rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, | ||
1036 | ent->driver_data, unit); | ||
1037 | if (rc < 0) { | ||
1038 | pci_iounmap(pdev, ioaddr); | ||
1039 | pci_disable_device(pdev); | ||
1040 | goto out; | ||
1041 | } | ||
1042 | |||
1043 | vortex_cards_found++; | ||
1044 | |||
1045 | out: | ||
1046 | return rc; | ||
1047 | } | ||
1048 | |||
/* netdev ops for descriptor-based (Boomerang and later) chips; differs
 * from vortex_netdev_ops only in the xmit routine. */
static const struct net_device_ops boomrang_netdev_ops = {
	.ndo_open		= vortex_open,
	.ndo_stop		= vortex_close,
	.ndo_start_xmit		= boomerang_start_xmit,
	.ndo_tx_timeout		= vortex_tx_timeout,
	.ndo_get_stats		= vortex_get_stats,
#ifdef CONFIG_PCI
	.ndo_do_ioctl 		= vortex_ioctl,
#endif
	.ndo_set_multicast_list = set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_vortex,
#endif
};
1066 | |||
1067 | static const struct net_device_ops vortex_netdev_ops = { | ||
1068 | .ndo_open = vortex_open, | ||
1069 | .ndo_stop = vortex_close, | ||
1070 | .ndo_start_xmit = vortex_start_xmit, | ||
1071 | .ndo_tx_timeout = vortex_tx_timeout, | ||
1072 | .ndo_get_stats = vortex_get_stats, | ||
1073 | #ifdef CONFIG_PCI | ||
1074 | .ndo_do_ioctl = vortex_ioctl, | ||
1075 | #endif | ||
1076 | .ndo_set_multicast_list = set_rx_mode, | ||
1077 | .ndo_change_mtu = eth_change_mtu, | ||
1078 | .ndo_set_mac_address = eth_mac_addr, | ||
1079 | .ndo_validate_addr = eth_validate_addr, | ||
1080 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1081 | .ndo_poll_controller = poll_vortex, | ||
1082 | #endif | ||
1083 | }; | ||
1084 | |||
/*
 * Start up the PCI/EISA device which is described by *gendev.
 * Return 0 on success.
 *
 * Maps the registers, reads the EEPROM (station address, capabilities),
 * scans for an MII PHY when applicable, selects the netdev_ops variant
 * (bus-master vs. PIO) and registers the net device.
 *
 * NOTE: pdev can be NULL, for the case of a Compaq device
 * (gendev == NULL means the built-in Compaq NIC; the device is then
 * remembered in compaq_net_device instead of driver data).
 */
static int __devinit vortex_probe1(struct device *gendev,
				   void __iomem *ioaddr, int irq,
				   int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;	/* EEPROM contents */
	int i, step;
	struct net_device *dev;
	static int printed_version;
	int retval, print_info;
	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
	const char *print_name = "3c59x";
	struct pci_dev *pdev = NULL;
	struct eisa_device *edev = NULL;

	/* Print the driver banner only once, on the first probed card. */
	if (!printed_version) {
		pr_info("%s", version);
		printed_version = 1;
	}

	/* Pick the most descriptive name available for log messages. */
	if (gendev) {
		if ((pdev = DEVICE_PCI(gendev))) {
			print_name = pci_name(pdev);
		}

		if ((edev = DEVICE_EISA(gendev))) {
			print_name = dev_name(&edev->dev);
		}
	}

	dev = alloc_etherdev(sizeof(*vp));
	retval = -ENOMEM;
	if (!dev) {
		pr_err(PFX "unable to allocate etherdev, aborting\n");
		goto out;
	}
	SET_NETDEV_DEV(dev, gendev);
	vp = netdev_priv(dev);

	option = global_options;

	/* The lower four bits are the media type. */
	if (dev->mem_start) {
		/*
		 * The 'options' param is passed in as the third arg to the
		 * LILO 'ether=' argument for non-modular use
		 */
		option = dev->mem_start;
	}
	else if (card_idx < MAX_UNITS) {
		/* Per-card module parameter overrides the global default. */
		if (options[card_idx] >= 0)
			option = options[card_idx];
	}

	/* Decode the option bit-flags (debug level, wake-on-LAN). */
	if (option > 0) {
		if (option & 0x8000)
			vortex_debug = 7;
		if (option & 0x4000)
			vortex_debug = 2;
		if (option & 0x0400)
			vp->enable_wol = 1;
	}

	print_info = (vortex_debug > 1);
	if (print_info)
		pr_info("See Documentation/networking/vortex.txt\n");

	pr_info("%s: 3Com %s %s at %p.\n",
	       print_name,
	       pdev ? "PCI" : "EISA",
	       vci->name,
	       ioaddr);

	/* Record the basic device parameters in the net_device/private. */
	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;
	vp->ioaddr = ioaddr;
	vp->large_frames = mtu > 1500;
	vp->drv_flags = vci->drv_flags;
	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
	vp->io_size = vci->io_size;
	vp->card_idx = card_idx;
	vp->window = -1;	/* no register window selected yet */

	/* module list only for Compaq device */
	if (gendev == NULL) {
		compaq_net_device = dev;
	}

	/* PCI-only startup logic */
	if (pdev) {
		/* EISA resources already marked, so only PCI needs to do this here */
		/* Ignore return value, because Cardbus drivers already allocate for us */
		if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
			vp->must_free_region = 1;

		/* enable bus-mastering if necessary */
		if (vci->flags & PCI_USES_MASTER)
			pci_set_master(pdev);

		if (vci->drv_flags & IS_VORTEX) {
			u8 pci_latency;
			u8 new_latency = 248;

			/* Check the PCI latency value.  On the 3c590 series the latency timer
			   must be set to the maximum value to avoid data corruption that occurs
			   when the timer expires during a transfer.  This bug exists the Vortex
			   chip only. */
			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
			if (pci_latency < new_latency) {
				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
					print_name, pci_latency, new_latency);
				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
			}
		}
	}

	/* Initialize locks and the MII management interface glue. */
	spin_lock_init(&vp->lock);
	spin_lock_init(&vp->mii_lock);
	spin_lock_init(&vp->window_lock);
	vp->gendev = gendev;
	vp->mii.dev = dev;
	vp->mii.mdio_read = mdio_read;
	vp->mii.mdio_write = mdio_write;
	vp->mii.phy_id_mask = 0x1f;
	vp->mii.reg_num_mask = 0x1f;

	/* Makes sure rings are at least 16 byte aligned. */
	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
					   &vp->rx_ring_dma);
	retval = -ENOMEM;
	if (!vp->rx_ring)
		goto free_region;

	/* TX ring lives directly after the RX ring in the same allocation. */
	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;

	/* if we are a PCI driver, we store info in pdev->driver_data
	 * instead of a module list */
	if (pdev)
		pci_set_drvdata(pdev, dev);
	if (edev)
		eisa_set_drvdata(edev, dev);

	/* 7 means "no media override"; any other value locks the media. */
	vp->media_override = 7;
	if (option >= 0) {
		vp->media_override = ((option & 7) == 2) ? 0 : option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;
		vp->full_duplex = (option & 0x200) ? 1 : 0;
		vp->bus_master = (option & 16) ? 1 : 0;
	}

	/* Global module parameters override the per-option defaults... */
	if (global_full_duplex > 0)
		vp->full_duplex = 1;
	if (global_enable_wol > 0)
		vp->enable_wol = 1;

	/* ...and per-card array parameters override those in turn. */
	if (card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] > 0)
			vp->full_duplex = 1;
		if (flow_ctrl[card_idx] > 0)
			vp->flow_ctrl = 1;
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
	}

	vp->mii.force_media = vp->full_duplex;
	vp->options = option;
	/* Read the station address from the EEPROM. */
	{
		int base;

		/* EEPROM command base depends on the chip generation. */
		if (vci->drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (vci->drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			window_write16(vp, base + i, 0, Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				if ((window_read16(vp, 0, Wn0EepromCmd) &
				     0x8000) == 0)
					break;
			}
			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
		}
	}
	/* XOR-fold the first 0x18 words into a one-byte checksum. */
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		/* Newer cards extend the checksummed range to 0x21 words. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
	/* Station address is stored big-endian at EEPROM words 10..12. */
	for (i = 0; i < 3; i++)
		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	if (print_info)
		pr_cont(" %pM", dev->dev_addr);
	/* Unfortunately an all zero eeprom passes the checksum and this
	   gets found in the wild in failure cases. Crypto is hard 8) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		retval = -EINVAL;
		pr_err("*** EEPROM MAC address is invalid.\n");
		goto free_ring;	/* With every pack */
	}
	/* Program the station address into window 2 of the chip. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);

	if (print_info)
		pr_cont(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (dev->irq <= 0 || dev->irq >= nr_irqs)
		pr_warning(" *** Warning: IRQ %d is unlikely to work! ***\n",
			   dev->irq);

	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
	if (print_info) {
		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
	}


	/* CardBus parts have an extra function window in BAR 2 that controls
	 * LED and MII power polarity. */
	if (pdev && vci->drv_flags & HAS_CB_FNS) {
		unsigned short n;

		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
		if (!vp->cb_fn_base) {
			retval = -ENOMEM;
			goto free_ring;
		}

		if (print_info) {
			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
				print_name,
				(unsigned long long)pci_resource_start(pdev, 2),
				vp->cb_fn_base);
		}

		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
		if (vp->drv_flags & WNO_XCVR_PWR) {
			window_write16(vp, 0x0800, 0, 0);
		}
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	if (vp->info1 & 0x8000) {
		vp->full_duplex = 1;
		if (print_info)
			pr_info("Full duplex capable\n");
	}

	/* Read the internal configuration register to learn RAM layout and
	 * available media. */
	{
		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		vp->available_media = window_read16(vp, 3, Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = window_read32(vp, 3, Wn3_Config);
		if (print_info) {
			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
				config, window_read16(vp, 3, Wn3_Options));
			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
				8 << RAM_SIZE(config),
				RAM_WIDTH(config) ? "word" : "byte",
				ram_split[RAM_SPLIT(config)],
				AUTOSELECT(config) ? "autoselect/" : "",
				XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
					media_tbl[XCVR(config)].name);
		}
		vp->default_media = XCVR(config);
		if (vp->default_media == XCVR_NWAY)
			vp->has_nway = 1;
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		pr_info("%s:  Media override to transceiver type %d (%s).\n",
				print_name, vp->media_override,
				media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	/* Scan the MII bus when the media requires (or may require) a PHY. */
	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
		dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		mii_preamble_required++;
		if (vp->drv_flags & EXTRA_PREAMBLE)
			mii_preamble_required++;
		mdio_sync(vp, 32);
		mdio_read(dev, 24, MII_BMSR);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(dev, phyx, MII_BMSR);
			if (mii_status && mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				if (print_info) {
					pr_info("  MII transceiver found at address %d, status %4x.\n",
						phyx, mii_status);
				}
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			pr_warning("  ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;
		} else {
			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(dev, vp->phys[0], 4, vp->advertising);
			}
		}
		vp->mii.phy_id = vp->phys[0];
	}

	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		if (print_info) {
			pr_info("  Enabling bus-master transmits and %s receives.\n",
			(vp->info2 & 1) ? "early" : "whole-frame" );
		}
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
		vp->bus_master = 0;		/* AKPM: vortex only */
	}

	/* The 3c59x-specific entries in the device structure. */
	if (vp->full_bus_master_tx) {
		dev->netdev_ops = &boomrang_netdev_ops;
		/* Actually, it still should work with iommu. */
		if (card_idx < MAX_UNITS &&
		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
				hw_checksums[card_idx] == 1)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		}
	} else
		dev->netdev_ops = &vortex_netdev_ops;

	if (print_info) {
		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
				print_name,
				(dev->features & NETIF_F_SG) ? "en":"dis",
				(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
	}

	dev->ethtool_ops = &vortex_ethtool_ops;
	dev->watchdog_timeo = (watchdog * HZ) / 1000;

	if (pdev) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
	retval = register_netdev(dev);
	if (retval == 0)
		return 0;

	/* Error unwinding: release resources in reverse acquisition order. */
free_ring:
	pci_free_consistent(pdev,
						sizeof(struct boom_rx_desc) * RX_RING_SIZE
							+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
						vp->rx_ring,
						vp->rx_ring_dma);
free_region:
	if (vp->must_free_region)
		release_region(dev->base_addr, vci->io_size);
	free_netdev(dev);
	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
out:
	return retval;
}
1498 | |||
1499 | static void | ||
1500 | issue_and_wait(struct net_device *dev, int cmd) | ||
1501 | { | ||
1502 | struct vortex_private *vp = netdev_priv(dev); | ||
1503 | void __iomem *ioaddr = vp->ioaddr; | ||
1504 | int i; | ||
1505 | |||
1506 | iowrite16(cmd, ioaddr + EL3_CMD); | ||
1507 | for (i = 0; i < 2000; i++) { | ||
1508 | if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) | ||
1509 | return; | ||
1510 | } | ||
1511 | |||
1512 | /* OK, that didn't work. Do it the slow way. One second */ | ||
1513 | for (i = 0; i < 100000; i++) { | ||
1514 | if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) { | ||
1515 | if (vortex_debug > 1) | ||
1516 | pr_info("%s: command 0x%04x took %d usecs\n", | ||
1517 | dev->name, cmd, i * 10); | ||
1518 | return; | ||
1519 | } | ||
1520 | udelay(10); | ||
1521 | } | ||
1522 | pr_err("%s: command 0x%04x did not complete! Status=0x%x\n", | ||
1523 | dev->name, cmd, ioread16(ioaddr + EL3_STATUS)); | ||
1524 | } | ||
1525 | |||
1526 | static void | ||
1527 | vortex_set_duplex(struct net_device *dev) | ||
1528 | { | ||
1529 | struct vortex_private *vp = netdev_priv(dev); | ||
1530 | |||
1531 | pr_info("%s: setting %s-duplex.\n", | ||
1532 | dev->name, (vp->full_duplex) ? "full" : "half"); | ||
1533 | |||
1534 | /* Set the full-duplex bit. */ | ||
1535 | window_write16(vp, | ||
1536 | ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | | ||
1537 | (vp->large_frames ? 0x40 : 0) | | ||
1538 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? | ||
1539 | 0x100 : 0), | ||
1540 | 3, Wn3_MAC_Ctrl); | ||
1541 | } | ||
1542 | |||
1543 | static void vortex_check_media(struct net_device *dev, unsigned int init) | ||
1544 | { | ||
1545 | struct vortex_private *vp = netdev_priv(dev); | ||
1546 | unsigned int ok_to_print = 0; | ||
1547 | |||
1548 | if (vortex_debug > 3) | ||
1549 | ok_to_print = 1; | ||
1550 | |||
1551 | if (mii_check_media(&vp->mii, ok_to_print, init)) { | ||
1552 | vp->full_duplex = vp->mii.full_duplex; | ||
1553 | vortex_set_duplex(dev); | ||
1554 | } else if (init) { | ||
1555 | vortex_set_duplex(dev); | ||
1556 | } | ||
1557 | } | ||
1558 | |||
/*
 * Bring the hardware to a running state: power up (PCI), pick the
 * active media port, start the media-watch timers, reset TX/RX,
 * program the station address, prime the DMA rings (bus-master parts)
 * and unmask interrupts.  Returns 0 on success or a negative errno if
 * the PCI device could not be enabled.  Called from vortex_open() and
 * on resume; does not touch the PHY (see comment at the resets below).
 */
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg1, mii_reg5, err = 0;

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warning("%s: Could not enable device\n",
				dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port. */
	config = window_read32(vp, 3, Wn3_Config);

	if (vp->media_override != 7) {
		/* User forced a specific transceiver via module options. */
		pr_info("%s: Media override to transceiver %d (%s).\n",
		       dev->name, vp->media_override,
		       media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
								dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (! (vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	/* Media-watch timer: re-checks link / walks the media table. */
	init_timer(&vp->timer);
	vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
	vp->timer.data = (unsigned long)dev;
	vp->timer.function = vortex_timer;		/* timer handler */
	add_timer(&vp->timer);

	/* RX out-of-memory retry timer; armed later when allocation fails. */
	init_timer(&vp->rx_oom_timer);
	vp->rx_oom_timer.data = (unsigned long)dev;
	vp->rx_oom_timer.function = rx_oom_timer;

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;
	/* Write the selected port into bits 20..23 of InternalConfig. */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR);
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);


	/* Mask all status interrupts while we finish configuration. */
	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			   dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	/* Re-apply CardBus LED/MII power polarity after the resets above. */
	if (vp->cb_fn_base) {
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = vp->dirty_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
err_out:
	return err;
}
1737 | |||
1738 | static int | ||
1739 | vortex_open(struct net_device *dev) | ||
1740 | { | ||
1741 | struct vortex_private *vp = netdev_priv(dev); | ||
1742 | int i; | ||
1743 | int retval; | ||
1744 | |||
1745 | /* Use the now-standard shared IRQ implementation. */ | ||
1746 | if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ? | ||
1747 | boomerang_interrupt : vortex_interrupt, IRQF_SHARED, dev->name, dev))) { | ||
1748 | pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq); | ||
1749 | goto err; | ||
1750 | } | ||
1751 | |||
1752 | if (vp->full_bus_master_rx) { /* Boomerang bus master. */ | ||
1753 | if (vortex_debug > 2) | ||
1754 | pr_debug("%s: Filling in the Rx ring.\n", dev->name); | ||
1755 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1756 | struct sk_buff *skb; | ||
1757 | vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1)); | ||
1758 | vp->rx_ring[i].status = 0; /* Clear complete bit. */ | ||
1759 | vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG); | ||
1760 | |||
1761 | skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN, | ||
1762 | GFP_KERNEL); | ||
1763 | vp->rx_skbuff[i] = skb; | ||
1764 | if (skb == NULL) | ||
1765 | break; /* Bad news! */ | ||
1766 | |||
1767 | skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */ | ||
1768 | vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); | ||
1769 | } | ||
1770 | if (i != RX_RING_SIZE) { | ||
1771 | int j; | ||
1772 | pr_emerg("%s: no memory for rx ring\n", dev->name); | ||
1773 | for (j = 0; j < i; j++) { | ||
1774 | if (vp->rx_skbuff[j]) { | ||
1775 | dev_kfree_skb(vp->rx_skbuff[j]); | ||
1776 | vp->rx_skbuff[j] = NULL; | ||
1777 | } | ||
1778 | } | ||
1779 | retval = -ENOMEM; | ||
1780 | goto err_free_irq; | ||
1781 | } | ||
1782 | /* Wrap the ring. */ | ||
1783 | vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); | ||
1784 | } | ||
1785 | |||
1786 | retval = vortex_up(dev); | ||
1787 | if (!retval) | ||
1788 | goto out; | ||
1789 | |||
1790 | err_free_irq: | ||
1791 | free_irq(dev->irq, dev); | ||
1792 | err: | ||
1793 | if (vortex_debug > 1) | ||
1794 | pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval); | ||
1795 | out: | ||
1796 | return retval; | ||
1797 | } | ||
1798 | |||
/*
 * Media-selection timer.  Periodically checks whether the current media
 * port has link (link beat for twisted-pair ports, mii_check_media for
 * MII/NWAY), updates the carrier state, and — unless the media is
 * locked by a user override — steps through the media table looking for
 * a working port when the current one has no link.  Re-arms itself;
 * the interval drops from 60 s to 5 s while the carrier is down.
 * @data is the net_device pointer, as armed in vortex_up().
 */
static void
vortex_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int next_tick = 60*HZ;
	int ok = 0;		/* non-zero when the current media is usable */
	int media_status;

	if (vortex_debug > 2) {
		pr_debug("%s: Media selection timer tick happened, %s.\n",
			   dev->name, media_tbl[dev->if_port].name);
		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
	}

	media_status = window_read16(vp, 4, Wn4_Media);
	switch (dev->if_port) {
	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
		/* Twisted-pair/fiber: link beat bit tells us directly. */
		if (media_status & Media_LnkBeat) {
			netif_carrier_on(dev);
			ok = 1;
			if (vortex_debug > 1)
				pr_debug("%s: Media %s has link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
		} else {
			netif_carrier_off(dev);
			if (vortex_debug > 1) {
				pr_debug("%s: Media %s has no link beat, %x.\n",
					   dev->name, media_tbl[dev->if_port].name, media_status);
			}
		}
		break;
	case XCVR_MII: case XCVR_NWAY:
		/* PHY-managed ports: delegate to the MII helper. */
		{
			ok = 1;
			vortex_check_media(dev, 0);
		}
		break;
	  default:					/* Other media types handled by Tx timeouts. */
		if (vortex_debug > 1)
		  pr_debug("%s: Media %s has no indication, %x.\n",
			     dev->name, media_tbl[dev->if_port].name, media_status);
		ok = 1;
	}

	if (!netif_carrier_ok(dev))
		next_tick = 5*HZ;	/* poll faster while link is down */

	/* User pinned the media with an override: never switch ports. */
	if (vp->medialock)
		goto leave_media_alone;

	if (!ok) {
		unsigned int config;

		/* Registers are shared with the interrupt path. */
		spin_lock_irq(&vp->lock);

		/* Advance to the next media the card actually supports. */
		do {
			dev->if_port = media_tbl[dev->if_port].next;
		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
		if (dev->if_port == XCVR_Default) { /* Go back to default. */
		  dev->if_port = vp->default_media;
		  if (vortex_debug > 1)
			pr_debug("%s: Media selection failing, using default %s port.\n",
				   dev->name, media_tbl[dev->if_port].name);
		} else {
			if (vortex_debug > 1)
				pr_debug("%s: Media selection failed, now trying %s port.\n",
					   dev->name, media_tbl[dev->if_port].name);
			next_tick = media_tbl[dev->if_port].wait;
		}
		/* Program the new port's media bits... */
		window_write16(vp,
			       (media_status & ~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);

		/* ...and select it in InternalConfig (bits 20..23). */
		config = window_read32(vp, 3, Wn3_Config);
		config = BFINS(config, dev->if_port, 20, 4);
		window_write32(vp, config, 3, Wn3_Config);

		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
			 ioaddr + EL3_CMD);
		if (vortex_debug > 1)
			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */

		spin_unlock_irq(&vp->lock);
	}

leave_media_alone:
	if (vortex_debug > 2)
	  pr_debug("%s: Media selection timer finished, %s.\n",
			 dev->name, media_tbl[dev->if_port].name);

	mod_timer(&vp->timer, RUN_AT(next_tick));
	/* Re-trigger the interrupt handler if events were deferred. */
	if (vp->deferred)
		iowrite16(FakeIntr, ioaddr + EL3_CMD);
}
1897 | |||
/*
 * Transmit watchdog, invoked by the network stack when a transmit is
 * judged to have hung.  Dumps the Tx/FIFO diagnostic registers, services
 * any latched-but-undelivered interrupt by calling the ISR directly,
 * resets the transmitter, and restarts the Tx path: for bus-master
 * (boomerang-style) chips by re-arming the download ring pointer, for
 * PIO chips by dropping the packet and waking the queue.
 */
static void vortex_tx_timeout(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
		   dev->name, ioread8(ioaddr + TxStatus),
		   ioread16(ioaddr + EL3_STATUS));
	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
			window_read16(vp, 4, Wn4_NetDiag),
			window_read16(vp, 4, Wn4_Media),
			ioread32(ioaddr + PktStatus),
			window_read16(vp, 4, Wn4_FIFODiag));
	/* Slight code bloat to be user friendly. */
	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
		pr_err("%s: Transmitter encountered 16 collisions --"
			   " network cable problem?\n", dev->name);
	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
		/* An interrupt is pending but was never delivered; run the
		 * handler by hand so the device can make progress. */
		pr_err("%s: Interrupt posted but not delivered --"
			   " IRQ blocked by another device?\n", dev->name);
		/* Bad idea here.. but we might as well handle a few events. */
		{
			/*
			 * Block interrupts because vortex_interrupt does a bare spin_lock()
			 */
			unsigned long flags;
			local_irq_save(flags);
			if (vp->full_bus_master_tx)
				boomerang_interrupt(dev->irq, dev);
			else
				vortex_interrupt(dev->irq, dev);
			local_irq_restore(flags);
		}
	}

	if (vortex_debug > 0)
		dump_tx_ring(dev);

	issue_and_wait(dev, TxReset);

	dev->stats.tx_errors++;
	if (vp->full_bus_master_tx) {
		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
		/* If packets are queued but the download engine lost its list
		 * pointer, point it back at the first unreaped descriptor. */
		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
				 ioaddr + DownListPtr);
		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
			netif_wake_queue (dev);
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
		iowrite16(DownUnstall, ioaddr + EL3_CMD);
	} else {
		/* PIO path: the in-flight packet is lost; count it and let the
		 * stack send the next one. */
		dev->stats.tx_dropped++;
		netif_wake_queue(dev);
	}

	/* Issue Tx Enable */
	iowrite16(TxEnable, ioaddr + EL3_CMD);
	dev->trans_start = jiffies; /* prevent tx timeout */
}
1958 | |||
1959 | /* | ||
1960 | * Handle uncommon interrupt sources. This is a separate routine to minimize | ||
1961 | * the cache impact. | ||
1962 | */ | ||
static void
vortex_error(struct net_device *dev, int status)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int do_tx_reset = 0, reset_mask = 0;
	unsigned char tx_status = 0;

	if (vortex_debug > 2) {
		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
	}

	if (status & TxComplete) {			/* Really "TxError" for us. */
		tx_status = ioread8(ioaddr + TxStatus);
		/* Presumably a tx-timeout. We must merely re-enable. */
		if (vortex_debug > 2 ||
		    (tx_status != 0x88 && vortex_debug > 0)) {
			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
			   dev->name, tx_status);
			if (tx_status == 0x82) {
				pr_err("Probably a duplex mismatch. See "
						"Documentation/networking/vortex.txt\n");
			}
			dump_tx_ring(dev);
		}
		/* Translate the TxStatus error bits into stats counters.
		 * NOTE(review): bit meanings assumed from the adjacent
		 * comments (jabber/underrun = 0x30) -- confirm vs. 3c90x docs. */
		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
		iowrite8(0, ioaddr + TxStatus);	/* pop the status stack entry */
		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
			do_tx_reset = 1;
		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
			do_tx_reset = 1;
			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
		} else {				/* Merely re-enable the transmitter. */
			iowrite16(TxEnable, ioaddr + EL3_CMD);
		}
	}

	if (status & RxEarly)				/* Rx early is unused. */
		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);

	if (status & StatsFull) {			/* Empty statistics. */
		static int DoneDidThat;
		if (vortex_debug > 4)
			pr_debug("%s: Updating stats.\n", dev->name);
		update_stats(ioaddr, dev);
		/* HACK: Disable statistics as an interrupt source. */
		/* This occurs when we have the wrong media type! */
		if (DoneDidThat == 0  &&
			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
			pr_warning("%s: Updating statistics failed, disabling "
				   "stats as an interrupt source.\n", dev->name);
			iowrite16(SetIntrEnb |
				  (window_read16(vp, 5, 10) & ~StatsFull),
				  ioaddr + EL3_CMD);
			vp->intr_enable &= ~StatsFull;
			DoneDidThat++;
		}
	}
	if (status & IntReq) {		/* Restore all interrupt sources.  */
		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	}
	if (status & HostError) {
		u16 fifo_diag;
		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
			   dev->name, fifo_diag);
		/* Adapter failure requires Tx/Rx reset and reinit. */
		if (vp->full_bus_master_tx) {
			int bus_status = ioread32(ioaddr + PktStatus);
			/* 0x80000000 PCI master abort. */
			/* 0x40000000 PCI target abort. */
			if (vortex_debug)
				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);

			/* In this case, blow the card away */
			/* Must not enter D3 or we can't legally issue the reset! */
			vortex_down(dev, 0);
			issue_and_wait(dev, TotalReset | 0xff);
			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
		} else if (fifo_diag & 0x0400)
			do_tx_reset = 1;
		if (fifo_diag & 0x3000) {
			/* Reset Rx fifo and upload logic */
			issue_and_wait(dev, RxReset|0x07);
			/* Set the Rx filter to the current state. */
			set_rx_mode(dev);
			/* enable 802.1q VLAN tagged frames */
			set_8021q_mode(dev, 1);
			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
		}
	}

	if (do_tx_reset) {
		issue_and_wait(dev, TxReset|reset_mask);
		iowrite16(TxEnable, ioaddr + EL3_CMD);
		/* The PIO queue was stopped by the stack; only the non-DMA
		 * path needs an explicit wake after a Tx reset. */
		if (!vp->full_bus_master_tx)
			netif_wake_queue(dev);
	}
}
2066 | |||
/*
 * Transmit entry point for vortex-generation (non-ring) chips.  The
 * packet is pushed into the Tx FIFO either by simple bus-master DMA
 * (single packet in flight, completed at the DMADone interrupt) or by
 * programmed I/O.  Always consumes the skb and returns NETDEV_TX_OK.
 */
static netdev_tx_t
vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	/* Put out the doubleword header... */
	iowrite32(skb->len, ioaddr + TX_FIFO);
	if (vp->bus_master) {
		/* Set the bus-master controller to transfer the packet. */
		int len = (skb->len + 3) & ~3;	/* DMA length rounded up to a dword */
		vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
						PCI_DMA_TODEVICE);
		/* Window 7 holds the master-DMA address/length registers;
		 * window_lock serializes the window switch. */
		spin_lock_irq(&vp->window_lock);
		window_set(vp, 7);
		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
		iowrite16(len, ioaddr + Wn7_MasterLen);
		spin_unlock_irq(&vp->window_lock);
		vp->tx_skb = skb;	/* freed in the DMADone handler */
		iowrite16(StartDMADown, ioaddr + EL3_CMD);
		/* netif_wake_queue() will be called at the DMADone interrupt. */
	} else {
		/* ... and the packet rounded to a doubleword. */
		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
		dev_kfree_skb (skb);
		if (ioread16(ioaddr + TxFree) > 1536) {
			netif_start_queue (dev);	/* AKPM: redundant? */
		} else {
			/* Interrupt us when the FIFO has room for max-sized packet. */
			netif_stop_queue(dev);
			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
		}
	}


	/* Clear the Tx status stack. */
	{
		int tx_status;
		int i = 32;	/* bound the drain loop: at most 32 stacked statuses */

		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
				if (vortex_debug > 2)
				  pr_debug("%s: Tx error, status %2.2x.\n",
						 dev->name, tx_status);
				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
				if (tx_status & 0x30) {
					issue_and_wait(dev, TxReset);
				}
				iowrite16(TxEnable, ioaddr + EL3_CMD);
			}
			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
		}
	}
	return NETDEV_TX_OK;
}
2124 | |||
/*
 * Transmit entry point for boomerang/cyclone (descriptor-ring) chips.
 * Builds a download descriptor (optionally scatter-gather with hardware
 * checksum when DO_ZEROCOPY), links it into the Tx ring under DownStall,
 * and restarts the download engine.  Returns NETDEV_TX_BUSY if called
 * re-entrantly from the ISR or if the ring is full.
 */
static netdev_tx_t
boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	/* Calculate the next Tx descriptor entry. */
	int entry = vp->cur_tx % TX_RING_SIZE;
	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
	unsigned long flags;

	if (vortex_debug > 6) {
		pr_debug("boomerang_start_xmit()\n");
		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
			   dev->name, vp->cur_tx);
	}

	/*
	 * We can't allow a recursion from our interrupt handler back into the
	 * tx routine, as they take the same spin lock, and that causes
	 * deadlock. Just return NETDEV_TX_BUSY and let the stack try again in
	 * a bit
	 */
	if (vp->handling_irq)
		return NETDEV_TX_BUSY;

	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
		if (vortex_debug > 0)
			pr_warning("%s: BUG! Tx Ring full, refusing to send buffer.\n",
				   dev->name);
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}

	vp->tx_skbuff[entry] = skb;	/* remembered for unmap/free at completion */

	vp->tx_ring[entry].next = 0;
#if DO_ZEROCOPY
	/* Request hardware TCP/UDP checksum insertion when the stack asked
	 * for CHECKSUM_PARTIAL. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
	else
			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);

	if (!skb_shinfo(skb)->nr_frags) {
		/* Linear skb: one DMA fragment covers the whole packet. */
		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
										skb->len, PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
	} else {
		int i;

		/* Scatter-gather: head in frag[0], page fragments after. */
		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
										skb_headlen(skb), PCI_DMA_TODEVICE));
		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			vp->tx_ring[entry].frag[i+1].addr =
					cpu_to_le32(pci_map_single(VORTEX_PCI(vp),
											   (void*)page_address(frag->page) + frag->page_offset,
											   frag->size, PCI_DMA_TODEVICE));

			if (i == skb_shinfo(skb)->nr_frags-1)
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
			else
					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
		}
	}
#else
	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
#endif

	spin_lock_irqsave(&vp->lock, flags);
	/* Wait for the stall to complete. */
	issue_and_wait(dev, DownStall);
	/* Link the new descriptor behind its predecessor while the download
	 * engine is stalled, then re-arm the engine if it had gone idle. */
	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
	if (ioread32(ioaddr + DownListPtr) == 0) {
		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
		vp->queued_packet++;
	}

	vp->cur_tx++;
	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
		netif_stop_queue (dev);
	} else {					/* Clear previous interrupt enable. */
#if defined(tx_interrupt_mitigation)
		/* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
		 * were selected, this would corrupt DN_COMPLETE. No?
		 */
		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
#endif
	}
	iowrite16(DownUnstall, ioaddr + EL3_CMD);
	spin_unlock_irqrestore(&vp->lock, flags);
	return NETDEV_TX_OK;
}
2222 | |||
2223 | /* The interrupt handler does all of the Rx thread work and cleans up | ||
2224 | after the Tx thread. */ | ||
2225 | |||
2226 | /* | ||
2227 | * This is the ISR for the vortex series chips. | ||
2228 | * full_bus_master_tx == 0 && full_bus_master_rx == 0 | ||
2229 | */ | ||
2230 | |||
static irqreturn_t
vortex_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* budget of events per IRQ */
	int handled = 0;

	ioaddr = vp->ioaddr;
	spin_lock(&vp->lock);

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("vortex_interrupt(). status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs cause this */
	handled = 1;

	if (status & IntReq) {
		/* Fold back in the sources we masked off on a previous
		 * too-much-work pass. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
		goto handler_exit;

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));

	/* Hold window 7 across the loop; vortex_error() temporarily drops
	 * the window_lock because it switches windows itself. */
	spin_lock(&vp->window_lock);
	window_set(vp, 7);

	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		if (status & RxComplete)
			vortex_rx(dev);

		if (status & TxAvailable) {
			if (vortex_debug > 5)
				pr_debug("	TX room bit was handled.\n");
			/* There's room in the FIFO for a full-sized packet. */
			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
			netif_wake_queue (dev);
		}

		if (status & DMADone) {
			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
				pci_unmap_single(VORTEX_PCI(vp), vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(vp->tx_skb); /* Release the transferred buffer */
				if (ioread16(ioaddr + TxFree) > 1536) {
					/*
					 * AKPM: FIXME: I don't think we need this. If the queue was stopped due to
					 * insufficient FIFO room, the TxAvailable test will succeed and call
					 * netif_wake_queue()
					 */
					netif_wake_queue(dev);
				} else { /* Interrupt when FIFO has room for max-sized packet. */
					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
					netif_stop_queue(dev);
				}
			}
		}
		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
			if (status == 0xffff)
				break;
			if (status & RxEarly)
				vortex_rx(dev);
			spin_unlock(&vp->window_lock);
			vortex_error(dev, status);
			spin_lock(&vp->window_lock);
			window_set(vp, 7);
		}

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				   dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			/* NOTE(review): reads EL3_CMD here rather than EL3_STATUS --
			 * presumably the command port reads back status on this
			 * hardware; confirm before changing. */
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));

	spin_unlock(&vp->window_lock);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	spin_unlock(&vp->lock);
	return IRQ_RETVAL(handled);
}
2340 | |||
2341 | /* | ||
2342 | * This is the ISR for the boomerang series chips. | ||
2343 | * full_bus_master_tx == 1 && full_bus_master_rx == 1 | ||
2344 | */ | ||
2345 | |||
static irqreturn_t
boomerang_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* budget of events per IRQ */

	ioaddr = vp->ioaddr;


	/*
	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
	 * and boomerang_start_xmit
	 */
	spin_lock(&vp->lock);
	/* handling_irq tells boomerang_start_xmit to back off rather than
	 * deadlock on vp->lock. */
	vp->handling_irq = 1;

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */

	if (status == 0xffff) { /* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	if (status & IntReq) {
		/* Fold back in the sources we masked off on a previous
		 * too-much-work pass. */
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		if (status & DownComplete) {
			/* Reap completed Tx descriptors: unmap DMA, free skbs,
			 * advance dirty_tx. */
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				if (ioread32(ioaddr + DownListPtr) ==
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;			/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;			/* It still hasn't been processed. */
#endif

				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					/* One mapping per fragment plus the head (<= nr_frags). */
					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
							pci_unmap_single(VORTEX_PCI(vp),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
											 PCI_DMA_TODEVICE);
#else
					pci_unmap_single(VORTEX_PCI(vp),
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
#endif
					dev_kfree_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warning("%s: Too much work in interrupt, status %4.4x.\n",
				   dev->name, status);
			/* Disable all pending interrupts. */
			do {
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			/* NOTE(review): reads EL3_CMD here rather than EL3_STATUS --
			 * presumably the command port reads back status on this
			 * hardware; confirm before changing. */
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	spin_unlock(&vp->lock);
	return IRQ_HANDLED;
}
2475 | |||
/*
 * Receive path for vortex-generation chips: drain the Rx FIFO one packet
 * at a time, copying each into a freshly allocated skb either via a
 * single bus-master DMA transfer or by programmed I/O, then hand it to
 * the stack.  Errored or unallocatable packets are discarded in place.
 * Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = dev_alloc_skb(pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* Master DMA: program address/length, kick the
					 * upload, then busy-wait for completion. */
					dma_addr_t dma = pci_map_single(VORTEX_PCI(vp), skb_put(skb, pkt_len),
									   pkt_len, PCI_DMA_FROMDEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					pci_unmap_single(VORTEX_PCI(vp), dma, pkt_len, PCI_DMA_FROMDEVICE);
				} else {
					ioread32_rep(ioaddr + RX_FIFO,
						     skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		/* Error or allocation failure: drop the packet from the FIFO. */
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
2543 | |||
/*
 * Receive path for boomerang/cyclone chips: walk the Rx descriptor ring,
 * passing completed packets to the stack (copying small ones below
 * rx_copybreak, handing off the ring skb otherwise), then refill the
 * ring.  On total allocation failure the rx_oom_timer is armed to retry.
 * Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	/* Never service more entries than the ring currently holds. */
	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* Copy out of the still-mapped ring buffer; sync for
				 * CPU before the copy and back for the device after. */
				pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				memcpy(skb_put(skb, pkt_len),
					   vp->rx_skbuff[entry]->data,
					   pkt_len);
				pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_copy++;
			} else {
				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = NULL;
				skb_put(skb, pkt_len);
				pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	/* Refill the Rx ring buffers. */
	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
		struct sk_buff *skb;
		entry = vp->dirty_rx % RX_RING_SIZE;
		if (vp->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
			if (skb == NULL) {
				/* Rate-limit the shortage warning to once per 10s. */
				static unsigned long last_jif;
				if (time_after(jiffies, last_jif + 10 * HZ)) {
					pr_warning("%s: memory shortage\n", dev->name);
					last_jif = jiffies;
				}
				/* Ring completely empty: poll for memory later. */
				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
				break;			/* Bad news!  */
			}

			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
			vp->rx_skbuff[entry] = skb;
		}
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
	}
	return 0;
}
2638 | |||
2639 | /* | ||
2640 | * If we've hit a total OOM refilling the Rx ring we poll once a second | ||
2641 | * for some memory. Otherwise there is no way to restart the rx process. | ||
2642 | */ | ||
2643 | static void | ||
2644 | rx_oom_timer(unsigned long arg) | ||
2645 | { | ||
2646 | struct net_device *dev = (struct net_device *)arg; | ||
2647 | struct vortex_private *vp = netdev_priv(dev); | ||
2648 | |||
2649 | spin_lock_irq(&vp->lock); | ||
2650 | if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */ | ||
2651 | boomerang_rx(dev); | ||
2652 | if (vortex_debug > 1) { | ||
2653 | pr_debug("%s: rx_oom_timer %s\n", dev->name, | ||
2654 | ((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying"); | ||
2655 | } | ||
2656 | spin_unlock_irq(&vp->lock); | ||
2657 | } | ||
2658 | |||
/*
 * Quiesce the NIC: stop the Tx queue and driver timers, disable the
 * receiver/transmitter and all interrupt sources, and fold the final
 * hardware statistics into dev->stats.  When @final_down is set (the
 * device is being closed or suspended) the PCI config state is saved
 * and Wake-on-LAN is armed via acpi_set_WOL().
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	netif_stop_queue (dev);

	/* Both timers can rearm themselves; _sync waits for a running handler. */
	del_timer_sync(&vp->rx_oom_timer);
	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);

	/* Mask every interrupt source (zero enable mask). */
	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	update_stats(ioaddr, dev);
	/* Detach the bus-master DMA rings from the hardware. */
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);

	if (final_down && VORTEX_PCI(vp)) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}
2698 | |||
/*
 * net_device close entry point: shut the hardware down, release the IRQ
 * and free all bus-master Rx/Tx DMA buffers.  Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	/* Skip the hardware shutdown if the device was already detached. */
	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
			" tx_queued %d Rx pre-checksummed %d.\n",
			dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	/* Nag if the chip could have checksummed for us but wasn't asked to. */
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warning("%s supports hardware checksums, and we're not using them!\n", dev->name);
	}
#endif

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				/* One mapping per fragment plus the head (<=, not <). */
				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
					pci_unmap_single(VORTEX_PCI(vp),
							 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
							 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
							 PCI_DMA_TODEVICE);
#else
				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}
2759 | |||
/*
 * Debug aid: dump the bus-master Tx ring (descriptor addresses, lengths
 * and status words) when vortex_debug > 0.  The downlink DMA engine is
 * stalled around the dump and only unstalled if it was running on entry.
 */
static void
dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
		struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;

		if (vp->full_bus_master_tx) {
			int i;
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */

			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
				vp->full_bus_master_tx,
				vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
				vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err("  Transmit list %8.8x vs. %p.\n",
			       ioread32(ioaddr + DownListPtr),
			       &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;

#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
#endif
				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
					i, &vp->tx_ring[i], length,
					le32_to_cpu(vp->tx_ring[i].status));
			}
			/* Only restart the engine if we stalled a running one. */
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
		}
	}
}
2796 | |||
2797 | static struct net_device_stats *vortex_get_stats(struct net_device *dev) | ||
2798 | { | ||
2799 | struct vortex_private *vp = netdev_priv(dev); | ||
2800 | void __iomem *ioaddr = vp->ioaddr; | ||
2801 | unsigned long flags; | ||
2802 | |||
2803 | if (netif_device_present(dev)) { /* AKPM: Used to be netif_running */ | ||
2804 | spin_lock_irqsave (&vp->lock, flags); | ||
2805 | update_stats(ioaddr, dev); | ||
2806 | spin_unlock_irqrestore (&vp->lock, flags); | ||
2807 | } | ||
2808 | return &dev->stats; | ||
2809 | } | ||
2810 | |||
/* Update statistics.
	Unlike with the EL3 we need not worry about interrupts changing
	the window setting from underneath us, but we must still guard
	against a race condition with a StatsUpdate interrupt updating the
	table.  This is done by checking that the ASM (!) code generated uses
	atomic updates with '+='.
	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything. */
	dev->stats.tx_carrier_errors += window_read8(vp, 6, 0);
	dev->stats.tx_heartbeat_errors += window_read8(vp, 6, 1);
	dev->stats.tx_window_errors += window_read8(vp, 6, 4);
	dev->stats.rx_fifo_errors += window_read8(vp, 6, 5);
	dev->stats.tx_packets += window_read8(vp, 6, 6);
	/* Bits 5:4 of register 9 extend the Tx-packets counter upward. */
	dev->stats.tx_packets += (window_read8(vp, 6, 9) &
				  0x30) << 4;
	/* Rx packets	*/ window_read8(vp, 6, 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes += window_read16(vp, 6, 10);
	dev->stats.tx_bytes += window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions += window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions += window_read8(vp, 6, 3);
	vp->xstats.tx_deferred += window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd += window_read8(vp, 4, 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Window 4 reg 13 carries the high nibbles of both byte counters. */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}
2853 | |||
/* ethtool: restart MII autonegotiation on the PHY. */
static int vortex_nway_reset(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	return mii_nway_restart(&vp->mii);
}
2860 | |||
/* ethtool: report link settings straight from the generic MII layer. */
static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);

	return mii_ethtool_gset(&vp->mii, cmd);
}
2867 | |||
/* ethtool: apply link settings via the generic MII layer. */
static int vortex_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct vortex_private *vp = netdev_priv(dev);

	return mii_ethtool_sset(&vp->mii, cmd);
}
2874 | |||
/* ethtool: the driver-wide debug level doubles as the message level. */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}
2879 | |||
/* ethtool: setting the message level adjusts the global debug level. */
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}
2884 | |||
2885 | static int vortex_get_sset_count(struct net_device *dev, int sset) | ||
2886 | { | ||
2887 | switch (sset) { | ||
2888 | case ETH_SS_STATS: | ||
2889 | return VORTEX_NUM_STATS; | ||
2890 | default: | ||
2891 | return -EOPNOTSUPP; | ||
2892 | } | ||
2893 | } | ||
2894 | |||
2895 | static void vortex_get_ethtool_stats(struct net_device *dev, | ||
2896 | struct ethtool_stats *stats, u64 *data) | ||
2897 | { | ||
2898 | struct vortex_private *vp = netdev_priv(dev); | ||
2899 | void __iomem *ioaddr = vp->ioaddr; | ||
2900 | unsigned long flags; | ||
2901 | |||
2902 | spin_lock_irqsave(&vp->lock, flags); | ||
2903 | update_stats(ioaddr, dev); | ||
2904 | spin_unlock_irqrestore(&vp->lock, flags); | ||
2905 | |||
2906 | data[0] = vp->xstats.tx_deferred; | ||
2907 | data[1] = vp->xstats.tx_max_collisions; | ||
2908 | data[2] = vp->xstats.tx_multiple_collisions; | ||
2909 | data[3] = vp->xstats.tx_single_collisions; | ||
2910 | data[4] = vp->xstats.rx_bad_ssd; | ||
2911 | } | ||
2912 | |||
2913 | |||
2914 | static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data) | ||
2915 | { | ||
2916 | switch (stringset) { | ||
2917 | case ETH_SS_STATS: | ||
2918 | memcpy(data, ðtool_stats_keys, sizeof(ethtool_stats_keys)); | ||
2919 | break; | ||
2920 | default: | ||
2921 | WARN_ON(1); | ||
2922 | break; | ||
2923 | } | ||
2924 | } | ||
2925 | |||
2926 | static void vortex_get_drvinfo(struct net_device *dev, | ||
2927 | struct ethtool_drvinfo *info) | ||
2928 | { | ||
2929 | struct vortex_private *vp = netdev_priv(dev); | ||
2930 | |||
2931 | strcpy(info->driver, DRV_NAME); | ||
2932 | if (VORTEX_PCI(vp)) { | ||
2933 | strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); | ||
2934 | } else { | ||
2935 | if (VORTEX_EISA(vp)) | ||
2936 | strcpy(info->bus_info, dev_name(vp->gendev)); | ||
2937 | else | ||
2938 | sprintf(info->bus_info, "EISA 0x%lx %d", | ||
2939 | dev->base_addr, dev->irq); | ||
2940 | } | ||
2941 | } | ||
2942 | |||
2943 | static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2944 | { | ||
2945 | struct vortex_private *vp = netdev_priv(dev); | ||
2946 | |||
2947 | if (!VORTEX_PCI(vp)) | ||
2948 | return; | ||
2949 | |||
2950 | wol->supported = WAKE_MAGIC; | ||
2951 | |||
2952 | wol->wolopts = 0; | ||
2953 | if (vp->enable_wol) | ||
2954 | wol->wolopts |= WAKE_MAGIC; | ||
2955 | } | ||
2956 | |||
2957 | static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2958 | { | ||
2959 | struct vortex_private *vp = netdev_priv(dev); | ||
2960 | |||
2961 | if (!VORTEX_PCI(vp)) | ||
2962 | return -EOPNOTSUPP; | ||
2963 | |||
2964 | if (wol->wolopts & ~WAKE_MAGIC) | ||
2965 | return -EINVAL; | ||
2966 | |||
2967 | if (wol->wolopts & WAKE_MAGIC) | ||
2968 | vp->enable_wol = 1; | ||
2969 | else | ||
2970 | vp->enable_wol = 0; | ||
2971 | acpi_set_WOL(dev); | ||
2972 | |||
2973 | return 0; | ||
2974 | } | ||
2975 | |||
/* ethtool entry points; statistics are refreshed from hardware on demand. */
static const struct ethtool_ops vortex_ethtool_ops = {
	.get_drvinfo		= vortex_get_drvinfo,
	.get_strings            = vortex_get_strings,
	.get_msglevel           = vortex_get_msglevel,
	.set_msglevel           = vortex_set_msglevel,
	.get_ethtool_stats      = vortex_get_ethtool_stats,
	.get_sset_count		= vortex_get_sset_count,
	.get_settings           = vortex_get_settings,
	.set_settings           = vortex_set_settings,
	.get_link               = ethtool_op_get_link,
	.nway_reset             = vortex_nway_reset,
	.get_wol                = vortex_get_wol,
	.set_wol                = vortex_set_wol,
};
2990 | |||
#ifdef CONFIG_PCI
/*
 * Must power the device up to do MDIO operations
 */
static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int err;
	struct vortex_private *vp = netdev_priv(dev);
	pci_power_t state = 0;

	if(VORTEX_PCI(vp))
		state = VORTEX_PCI(vp)->current_state;

	/* The kernel core really should have pci_get_power_state() */

	/* Temporarily wake a sleeping card, run the MII ioctl, then put
	   the card back in whatever power state it was in. */
	if(state != 0)
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
	if(state != 0)
		pci_set_power_state(VORTEX_PCI(vp), state);

	return err;
}
#endif
3015 | |||
3016 | |||
3017 | /* Pre-Cyclone chips have no documented multicast filter, so the only | ||
3018 | multicast setting is to receive all multicast frames. At least | ||
3019 | the chip has a very clean way to set the mode, unlike many others. */ | ||
3020 | static void set_rx_mode(struct net_device *dev) | ||
3021 | { | ||
3022 | struct vortex_private *vp = netdev_priv(dev); | ||
3023 | void __iomem *ioaddr = vp->ioaddr; | ||
3024 | int new_mode; | ||
3025 | |||
3026 | if (dev->flags & IFF_PROMISC) { | ||
3027 | if (vortex_debug > 3) | ||
3028 | pr_notice("%s: Setting promiscuous mode.\n", dev->name); | ||
3029 | new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm; | ||
3030 | } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) { | ||
3031 | new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast; | ||
3032 | } else | ||
3033 | new_mode = SetRxFilter | RxStation | RxBroadcast; | ||
3034 | |||
3035 | iowrite16(new_mode, ioaddr + EL3_CMD); | ||
3036 | } | ||
3037 | |||
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* Setup the card so that it can receive frames with an 802.1q VLAN tag.
   Note that this must be done after each RxReset due to some backwards
   compatibility logic in the Cyclone and Tornado ASICs */

/* The Ethernet Type used for 802.1q tagged frames */
#define VLAN_ETHER_TYPE 0x8100

static void set_8021q_mode(struct net_device *dev, int enable)
{
	struct vortex_private *vp = netdev_priv(dev);
	int mac_ctrl;

	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
		/* cyclone and tornado chipsets can recognize 802.1q
		 * tagged frames and treat them correctly */

		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */
		if (enable)
			max_pkt_size += 4;	/* 802.1Q VLAN tag */

		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);

		/* set VlanEtherType to let the hardware checksumming
		   treat tagged frames correctly */
		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
	} else {
		/* on older cards we have to enable large frames */

		vp->large_frames = dev->mtu > 1500 || enable;

		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
		if (vp->large_frames)
			mac_ctrl |= 0x40;	/* large-frame enable bit */
		else
			mac_ctrl &= ~0x40;
		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
	}
}
#else

/* 802.1Q support compiled out: nothing to configure. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
}


#endif
3085 | |||
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol.  See the MII specifications or DP83840A data sheet
   for details. */

/* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
   "overclocking" issues. */
static void mdio_delay(struct vortex_private *vp)
{
	/* A dummy register read serves as the required settle time. */
	window_read32(vp, 4, Wn4_PhysicalMgmt);
}

/* Bit positions in Wn4_PhysicalMgmt used for bit-banged MDIO. */
#define MDIO_SHIFT_CLK	0x01
#define MDIO_DIR_WRITE	0x04
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
#define MDIO_DATA_READ	0x02
#define MDIO_ENB_IN		0x00
3105 | |||
/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(struct vortex_private *vp, int bits)
{
	/* Establish sync by sending at least 32 logic ones. */
	while (-- bits >= 0) {
		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		/* Raise the clock to latch the bit, then settle. */
		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
}
3119 | |||
/*
 * Bit-bang one MII read transaction: shift out the 14-bit read command,
 * then clock in two turnaround bits, 16 data bits and a wire-idle bit.
 * Returns the 16-bit register value, or 0xffff when no PHY drove the
 * turnaround bit low (no device at that address).
 */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	struct vortex_private *vp = netdev_priv(dev);
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		retval = (retval << 1) |
			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			  MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* Bit 17 is the second turnaround bit; set means no PHY answered. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
3157 | |||
/*
 * Bit-bang one MII write transaction: shift out the full 32-bit
 * command/data word, then clock two idle cycles to release the bus.
 */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct vortex_private *vp = netdev_priv(dev);
	/* 0x5002xxxx = start + write opcode + turnaround bits. */
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);
}
3189 | |||
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state.
   If the chip cannot wake from D3hot, WOL is disabled and the board is
   left powered; vp->enable_wol is cleared so later calls are no-ops. */
static void acpi_set_WOL(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	device_set_wakeup_enable(vp->gendev, vp->enable_wol);

	if (vp->enable_wol) {
		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
		window_write16(vp, 2, 7, 0x0c);
		/* The RxFilter must accept the WOL frames. */
		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
		iowrite16(RxEnable, ioaddr + EL3_CMD);

		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));

			vp->enable_wol = 0;
			return;
		}

		/* Nothing to do if the card is already at or below D3hot. */
		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
			return;

		/* Change the power state to D3; RxEnable doesn't take effect. */
		pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
	}
}
3220 | |||
3221 | |||
/*
 * PCI device removal / driver unload: unregister the interface, reset
 * the chip, and release every resource acquired at probe time (CardBus
 * function mapping, register mapping, DMA rings, I/O region, netdev).
 */
static void __devexit vortex_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct vortex_private *vp;

	/* Compaq built-in boards are handled by the EISA path, never here. */
	if (!dev) {
		pr_err("vortex_remove_one called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);

	if (vp->cb_fn_base)
		pci_iounmap(VORTEX_PCI(vp), vp->cb_fn_base);

	unregister_netdev(dev);

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		pci_disable_device(VORTEX_PCI(vp));
	}
	/* Should really use issue_and_wait() here */
	iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
	     vp->ioaddr + EL3_CMD);

	pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);

	/* Rx and Tx descriptor rings share one coherent allocation. */
	pci_free_consistent(pdev,
			sizeof(struct boom_rx_desc) * RX_RING_SIZE
				+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
			vp->rx_ring,
			vp->rx_ring_dma);
	if (vp->must_free_region)
		release_region(dev->base_addr, vp->io_size);
	free_netdev(dev);
}
3260 | |||
3261 | |||
/* PCI glue for all Vortex/Boomerang/Cyclone/Tornado boards. */
static struct pci_driver vortex_driver = {
	.name		= "3c59x",
	.probe		= vortex_init_one,
	.remove		= __devexit_p(vortex_remove_one),
	.id_table	= vortex_pci_tbl,
	.driver.pm	= VORTEX_PM_OPS,
};


/* Set by vortex_init() so vortex_cleanup() only tears down the buses
   that actually registered. */
static int vortex_have_pci;
static int vortex_have_eisa;
3273 | |||
3274 | |||
3275 | static int __init vortex_init(void) | ||
3276 | { | ||
3277 | int pci_rc, eisa_rc; | ||
3278 | |||
3279 | pci_rc = pci_register_driver(&vortex_driver); | ||
3280 | eisa_rc = vortex_eisa_init(); | ||
3281 | |||
3282 | if (pci_rc == 0) | ||
3283 | vortex_have_pci = 1; | ||
3284 | if (eisa_rc > 0) | ||
3285 | vortex_have_eisa = 1; | ||
3286 | |||
3287 | return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV; | ||
3288 | } | ||
3289 | |||
3290 | |||
/*
 * Tear down the EISA-registered boards and the built-in Compaq device,
 * which is probed directly and is not represented by an eisa_device.
 */
static void __exit vortex_eisa_cleanup(void)
{
	struct vortex_private *vp;
	void __iomem *ioaddr;

#ifdef CONFIG_EISA
	/* Take care of the EISA devices */
	eisa_driver_unregister(&vortex_eisa_driver);
#endif

	if (compaq_net_device) {
		vp = netdev_priv(compaq_net_device);
		ioaddr = ioport_map(compaq_net_device->base_addr,
		                    VORTEX_TOTAL_SIZE);

		unregister_netdev(compaq_net_device);
		/* Quiesce the chip before releasing its I/O region. */
		iowrite16(TotalReset, ioaddr + EL3_CMD);
		release_region(compaq_net_device->base_addr,
			       VORTEX_TOTAL_SIZE);

		free_netdev(compaq_net_device);
	}
}
3314 | |||
3315 | |||
/* Module unload: undo whichever bus registrations vortex_init() made. */
static void __exit vortex_cleanup(void)
{
	if (vortex_have_pci)
		pci_unregister_driver(&vortex_driver);
	if (vortex_have_eisa)
		vortex_eisa_cleanup();
}


module_init(vortex_init);
module_exit(vortex_cleanup);
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig new file mode 100644 index 000000000000..497f038dcd47 --- /dev/null +++ b/drivers/net/ethernet/3com/Kconfig | |||
@@ -0,0 +1,147 @@ | |||
1 | # | ||
2 | # 3Com Ethernet device configuration | ||
3 | # | ||
4 | |||
5 | config NET_VENDOR_3COM | ||
6 | bool "3Com devices" | ||
7 | depends on ISA || EISA || MCA || PCI || PCMCIA | ||
8 | ---help--- | ||
9 | If you have a network (Ethernet) card belonging to this class, say Y | ||
10 | and read the Ethernet-HOWTO, available from | ||
11 | <http://www.tldp.org/docs.html#howto>. | ||
12 | |||
13 | Note that the answer to this question doesn't directly affect the | ||
14 | kernel: saying N will just cause the configurator to skip all | ||
15 | the questions about 3Com cards. If you say Y, you will be asked for | ||
16 | your specific card in the following questions. | ||
17 | |||
18 | if NET_VENDOR_3COM | ||
19 | |||
20 | config EL1 | ||
21 | tristate "3c501 \"EtherLink\" support" | ||
22 | depends on ISA | ||
23 | ---help--- | ||
24 | If you have a network (Ethernet) card of this type, say Y and read | ||
25 | the Ethernet-HOWTO, available from | ||
26 | <http://www.tldp.org/docs.html#howto>. Also, consider buying a | ||
27 | new card, since the 3c501 is slow, broken, and obsolete: you will | ||
	  have problems. Some people suggest pinging ("man ping") a nearby
29 | machine every minute ("man cron") when using this card. | ||
30 | |||
31 | To compile this driver as a module, choose M here. The module | ||
32 | will be called 3c501. | ||
33 | |||
34 | config EL3 | ||
35 | tristate "3c509/3c529 (MCA)/3c579 \"EtherLink III\" support" | ||
36 | depends on (ISA || EISA || MCA) | ||
37 | ---help--- | ||
38 | If you have a network (Ethernet) card belonging to the 3Com | ||
39 | EtherLinkIII series, say Y and read the Ethernet-HOWTO, available | ||
40 | from <http://www.tldp.org/docs.html#howto>. | ||
41 | |||
42 | If your card is not working you may need to use the DOS | ||
43 | setup disk to disable Plug & Play mode, and to select the default | ||
44 | media type. | ||
45 | |||
46 | To compile this driver as a module, choose M here. The module | ||
47 | will be called 3c509. | ||
48 | |||
49 | config 3C515 | ||
50 | tristate "3c515 ISA \"Fast EtherLink\"" | ||
51 | depends on (ISA || EISA) && ISA_DMA_API | ||
52 | ---help--- | ||
53 | If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet | ||
54 | network card, say Y and read the Ethernet-HOWTO, available from | ||
55 | <http://www.tldp.org/docs.html#howto>. | ||
56 | |||
57 | To compile this driver as a module, choose M here. The module | ||
58 | will be called 3c515. | ||
59 | |||
60 | config PCMCIA_3C574 | ||
61 | tristate "3Com 3c574 PCMCIA support" | ||
62 | depends on PCMCIA | ||
63 | ---help--- | ||
64 | Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA | ||
65 | (PC-card) Fast Ethernet card to your computer. | ||
66 | |||
67 | To compile this driver as a module, choose M here: the module will be | ||
68 | called 3c574_cs. If unsure, say N. | ||
69 | |||
70 | config PCMCIA_3C589 | ||
71 | tristate "3Com 3c589 PCMCIA support" | ||
72 | depends on PCMCIA | ||
73 | ---help--- | ||
74 | Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA | ||
75 | (PC-card) Ethernet card to your computer. | ||
76 | |||
77 | To compile this driver as a module, choose M here: the module will be | ||
78 | called 3c589_cs. If unsure, say N. | ||
79 | |||
80 | config VORTEX | ||
81 | tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support" | ||
82 | depends on (PCI || EISA) | ||
83 | select MII | ||
84 | ---help--- | ||
85 | This option enables driver support for a large number of 10Mbps and | ||
86 | 10/100Mbps EISA, PCI and PCMCIA 3Com network cards: | ||
87 | |||
88 | "Vortex" (Fast EtherLink 3c590/3c592/3c595/3c597) EISA and PCI | ||
89 | "Boomerang" (EtherLink XL 3c900 or 3c905) PCI | ||
90 | "Cyclone" (3c540/3c900/3c905/3c980/3c575/3c656) PCI and Cardbus | ||
91 | "Tornado" (3c905) PCI | ||
92 | "Hurricane" (3c555/3cSOHO) PCI | ||
93 | |||
94 | If you have such a card, say Y and read the Ethernet-HOWTO, | ||
95 | available from <http://www.tldp.org/docs.html#howto>. More | ||
96 | specific information is in | ||
97 | <file:Documentation/networking/vortex.txt> and in the comments at | ||
98 | the beginning of <file:drivers/net/3c59x.c>. | ||
99 | |||
100 | To compile this support as a module, choose M here. | ||
101 | |||
102 | config TYPHOON | ||
103 | tristate "3cr990 series \"Typhoon\" support" | ||
104 | depends on PCI | ||
105 | select CRC32 | ||
106 | ---help--- | ||
107 | This option enables driver support for the 3cr990 series of cards: | ||
108 | |||
109 | 3C990-TX, 3CR990-TX-95, 3CR990-TX-97, 3CR990-FX-95, 3CR990-FX-97, | ||
110 | 3CR990SVR, 3CR990SVR95, 3CR990SVR97, 3CR990-FX-95 Server, | ||
111 | 3CR990-FX-97 Server, 3C990B-TX-M, 3C990BSVR | ||
112 | |||
113 | If you have a network (Ethernet) card of this type, say Y and read | ||
114 | the Ethernet-HOWTO, available from | ||
115 | <http://www.tldp.org/docs.html#howto>. | ||
116 | |||
117 | To compile this driver as a module, choose M here. The module | ||
118 | will be called typhoon. | ||
119 | |||
120 | config ACENIC | ||
121 | tristate "Alteon AceNIC/3Com 3C985/NetGear GA620 Gigabit support" | ||
122 | depends on PCI | ||
123 | ---help--- | ||
124 | Say Y here if you have an Alteon AceNIC, 3Com 3C985(B), NetGear | ||
125 | GA620, SGI Gigabit or Farallon PN9000-SX PCI Gigabit Ethernet | ||
126 | adapter. The driver allows for using the Jumbo Frame option (9000 | ||
127 | bytes/frame) however it requires that your switches can handle this | ||
128 | as well. To enable Jumbo Frames, add `mtu 9000' to your ifconfig | ||
129 | line. | ||
130 | |||
131 | To compile this driver as a module, choose M here: the | ||
132 | module will be called acenic. | ||
133 | |||
134 | config ACENIC_OMIT_TIGON_I | ||
135 | bool "Omit support for old Tigon I based AceNICs" | ||
136 | depends on ACENIC | ||
137 | ---help--- | ||
138 | Say Y here if you only have Tigon II based AceNICs and want to leave | ||
139 | out support for the older Tigon I based cards which are no longer | ||
140 | being sold (ie. the original Alteon AceNIC and 3Com 3C985 (non B | ||
141 | version)). This will reduce the size of the driver object by | ||
	  approx. 100 KB. If you are not sure whether your card is a Tigon I or a
143 | Tigon II, say N here. | ||
144 | |||
145 | The safe and default value for this is N. | ||
146 | |||
147 | endif # NET_VENDOR_3COM | ||
diff --git a/drivers/net/ethernet/3com/Makefile b/drivers/net/ethernet/3com/Makefile new file mode 100644 index 000000000000..96d1d60d67b6 --- /dev/null +++ b/drivers/net/ethernet/3com/Makefile | |||
@@ -0,0 +1,12 @@ | |||
#
# Makefile for the 3Com Ethernet device drivers
#
# Each line builds one driver object when its Kconfig symbol is set
# (built into the kernel for =y, as a module for =m).  Note the
# *_cs objects are the PCMCIA card-services builds.

obj-$(CONFIG_EL1) += 3c501.o
obj-$(CONFIG_EL3) += 3c509.o
obj-$(CONFIG_3C515) += 3c515.o
obj-$(CONFIG_PCMCIA_3C589) += 3c589_cs.o
obj-$(CONFIG_PCMCIA_3C574) += 3c574_cs.o
obj-$(CONFIG_VORTEX) += 3c59x.o
obj-$(CONFIG_ACENIC) += acenic.o
obj-$(CONFIG_TYPHOON) += typhoon.o
diff --git a/drivers/net/ethernet/3com/acenic.c b/drivers/net/ethernet/3com/acenic.c new file mode 100644 index 000000000000..31798f5f5d06 --- /dev/null +++ b/drivers/net/ethernet/3com/acenic.c | |||
@@ -0,0 +1,3206 @@ | |||
1 | /* | ||
2 | * acenic.c: Linux driver for the Alteon AceNIC Gigabit Ethernet card | ||
3 | * and other Tigon based cards. | ||
4 | * | ||
5 | * Copyright 1998-2002 by Jes Sorensen, <jes@trained-monkey.org>. | ||
6 | * | ||
7 | * Thanks to Alteon and 3Com for providing hardware and documentation | ||
8 | * enabling me to write this driver. | ||
9 | * | ||
10 | * A mailing list for discussing the use of this driver has been | ||
11 | * setup, please subscribe to the lists if you have any questions | ||
12 | * about the driver. Send mail to linux-acenic-help@sunsite.auc.dk to | ||
13 | * see how to subscribe. | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or modify | ||
16 | * it under the terms of the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2 of the License, or | ||
18 | * (at your option) any later version. | ||
19 | * | ||
20 | * Additional credits: | ||
21 | * Pete Wyckoff <wyckoff@ca.sandia.gov>: Initial Linux/Alpha and trace | ||
22 | * dump support. The trace dump support has not been | ||
23 | * integrated yet however. | ||
24 | * Troy Benjegerdes: Big Endian (PPC) patches. | ||
25 | * Nate Stahl: Better out of memory handling and stats support. | ||
26 | * Aman Singla: Nasty race between interrupt handler and tx code dealing | ||
27 | * with 'testing the tx_ret_csm and setting tx_full' | ||
28 | * David S. Miller <davem@redhat.com>: conversion to new PCI dma mapping | ||
29 | * infrastructure and Sparc support | ||
30 | * Pierrick Pinasseau (CERN): For lending me an Ultra 5 to test the | ||
31 | * driver under Linux/Sparc64 | ||
32 | * Matt Domsch <Matt_Domsch@dell.com>: Detect Alteon 1000baseT cards | ||
33 | * ETHTOOL_GDRVINFO support | ||
34 | * Chip Salzenberg <chip@valinux.com>: Fix race condition between tx | ||
35 | * handler and close() cleanup. | ||
36 | * Ken Aaker <kdaaker@rchland.vnet.ibm.com>: Correct check for whether | ||
37 | * memory mapped IO is enabled to | ||
38 | * make the driver work on RS/6000. | ||
39 | * Takayoshi Kouchi <kouchi@hpc.bs1.fc.nec.co.jp>: Identifying problem | ||
40 | * where the driver would disable | ||
41 | * bus master mode if it had to disable | ||
42 | * write and invalidate. | ||
43 | * Stephen Hack <stephen_hack@hp.com>: Fixed ace_set_mac_addr for little | ||
44 | * endian systems. | ||
45 | * Val Henson <vhenson@esscom.com>: Reset Jumbo skb producer and | ||
46 | * rx producer index when | ||
47 | * flushing the Jumbo ring. | ||
48 | * Hans Grobler <grobh@sun.ac.za>: Memory leak fixes in the | ||
49 | * driver init path. | ||
50 | * Grant Grundler <grundler@cup.hp.com>: PCI write posting fixes. | ||
51 | */ | ||
52 | |||
53 | #include <linux/module.h> | ||
54 | #include <linux/moduleparam.h> | ||
55 | #include <linux/types.h> | ||
56 | #include <linux/errno.h> | ||
57 | #include <linux/ioport.h> | ||
58 | #include <linux/pci.h> | ||
59 | #include <linux/dma-mapping.h> | ||
60 | #include <linux/kernel.h> | ||
61 | #include <linux/netdevice.h> | ||
62 | #include <linux/etherdevice.h> | ||
63 | #include <linux/skbuff.h> | ||
64 | #include <linux/init.h> | ||
65 | #include <linux/delay.h> | ||
66 | #include <linux/mm.h> | ||
67 | #include <linux/highmem.h> | ||
68 | #include <linux/sockios.h> | ||
69 | #include <linux/firmware.h> | ||
70 | #include <linux/slab.h> | ||
71 | #include <linux/prefetch.h> | ||
72 | #include <linux/if_vlan.h> | ||
73 | |||
74 | #ifdef SIOCETHTOOL | ||
75 | #include <linux/ethtool.h> | ||
76 | #endif | ||
77 | |||
78 | #include <net/sock.h> | ||
79 | #include <net/ip.h> | ||
80 | |||
81 | #include <asm/system.h> | ||
82 | #include <asm/io.h> | ||
83 | #include <asm/irq.h> | ||
84 | #include <asm/byteorder.h> | ||
85 | #include <asm/uaccess.h> | ||
86 | |||
87 | |||
88 | #define DRV_NAME "acenic" | ||
89 | |||
90 | #undef INDEX_DEBUG | ||
91 | |||
92 | #ifdef CONFIG_ACENIC_OMIT_TIGON_I | ||
93 | #define ACE_IS_TIGON_I(ap) 0 | ||
94 | #define ACE_TX_RING_ENTRIES(ap) MAX_TX_RING_ENTRIES | ||
95 | #else | ||
96 | #define ACE_IS_TIGON_I(ap) (ap->version == 1) | ||
97 | #define ACE_TX_RING_ENTRIES(ap) ap->tx_ring_entries | ||
98 | #endif | ||
99 | |||
100 | #ifndef PCI_VENDOR_ID_ALTEON | ||
101 | #define PCI_VENDOR_ID_ALTEON 0x12ae | ||
102 | #endif | ||
103 | #ifndef PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE | ||
104 | #define PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE 0x0001 | ||
105 | #define PCI_DEVICE_ID_ALTEON_ACENIC_COPPER 0x0002 | ||
106 | #endif | ||
107 | #ifndef PCI_DEVICE_ID_3COM_3C985 | ||
108 | #define PCI_DEVICE_ID_3COM_3C985 0x0001 | ||
109 | #endif | ||
110 | #ifndef PCI_VENDOR_ID_NETGEAR | ||
111 | #define PCI_VENDOR_ID_NETGEAR 0x1385 | ||
112 | #define PCI_DEVICE_ID_NETGEAR_GA620 0x620a | ||
113 | #endif | ||
114 | #ifndef PCI_DEVICE_ID_NETGEAR_GA620T | ||
115 | #define PCI_DEVICE_ID_NETGEAR_GA620T 0x630a | ||
116 | #endif | ||
117 | |||
118 | |||
119 | /* | ||
120 | * Farallon used the DEC vendor ID by mistake and they seem not | ||
121 | * to care - stinky! | ||
122 | */ | ||
123 | #ifndef PCI_DEVICE_ID_FARALLON_PN9000SX | ||
124 | #define PCI_DEVICE_ID_FARALLON_PN9000SX 0x1a | ||
125 | #endif | ||
126 | #ifndef PCI_DEVICE_ID_FARALLON_PN9100T | ||
127 | #define PCI_DEVICE_ID_FARALLON_PN9100T 0xfa | ||
128 | #endif | ||
129 | #ifndef PCI_VENDOR_ID_SGI | ||
130 | #define PCI_VENDOR_ID_SGI 0x10a9 | ||
131 | #endif | ||
132 | #ifndef PCI_DEVICE_ID_SGI_ACENIC | ||
133 | #define PCI_DEVICE_ID_SGI_ACENIC 0x0009 | ||
134 | #endif | ||
135 | |||
/*
 * PCI IDs claimed by this driver.  Every entry restricts the match to
 * the Ethernet network class (class 0x0200xx, mask 0xffff00) because
 * some of these vendor/device IDs are shared with unrelated devices
 * (see the Farallon note above).  Entry order matters for matching.
 */
static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C985,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_NETGEAR, PCI_DEVICE_ID_NETGEAR_GA620T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	/*
	 * Farallon used the DEC vendor ID on their cards incorrectly,
	 * then later Alteon's ID.
	 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_FARALLON_PN9000SX,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_FARALLON_PN9100T,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_ACENIC,
	  PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
	{ }
};
MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
160 | |||
161 | #define ace_sync_irq(irq) synchronize_irq(irq) | ||
162 | |||
163 | #ifndef offset_in_page | ||
164 | #define offset_in_page(ptr) ((unsigned long)(ptr) & ~PAGE_MASK) | ||
165 | #endif | ||
166 | |||
167 | #define ACE_MAX_MOD_PARMS 8 | ||
168 | #define BOARD_IDX_STATIC 0 | ||
169 | #define BOARD_IDX_OVERFLOW -1 | ||
170 | |||
171 | #include "acenic.h" | ||
172 | |||
173 | /* | ||
174 | * These must be defined before the firmware is included. | ||
175 | */ | ||
176 | #define MAX_TEXT_LEN 96*1024 | ||
177 | #define MAX_RODATA_LEN 8*1024 | ||
178 | #define MAX_DATA_LEN 2*1024 | ||
179 | |||
180 | #ifndef tigon2FwReleaseLocal | ||
181 | #define tigon2FwReleaseLocal 0 | ||
182 | #endif | ||
183 | |||
184 | /* | ||
185 | * This driver currently supports Tigon I and Tigon II based cards | ||
186 | * including the Alteon AceNIC, the 3Com 3C985[B] and NetGear | ||
187 | * GA620. The driver should also work on the SGI, DEC and Farallon | ||
188 | * versions of the card, however I have not been able to test that | ||
189 | * myself. | ||
190 | * | ||
191 | * This card is really neat, it supports receive hardware checksumming | ||
192 | * and jumbo frames (up to 9000 bytes) and does a lot of work in the | ||
193 | * firmware. Also the programming interface is quite neat, except for | ||
194 | * the parts dealing with the i2c eeprom on the card ;-) | ||
195 | * | ||
196 | * Using jumbo frames: | ||
197 | * | ||
198 | * To enable jumbo frames, simply specify an mtu between 1500 and 9000 | ||
199 | * bytes to ifconfig. Jumbo frames can be enabled or disabled at any time | ||
200 | * by running `ifconfig eth<X> mtu <MTU>' with <X> being the Ethernet | ||
201 | * interface number and <MTU> being the MTU value. | ||
202 | * | ||
203 | * Module parameters: | ||
204 | * | ||
205 | * When compiled as a loadable module, the driver allows for a number | ||
206 | * of module parameters to be specified. The driver supports the | ||
207 | * following module parameters: | ||
208 | * | ||
209 | * trace=<val> - Firmware trace level. This requires special traced | ||
210 | * firmware to replace the firmware supplied with | ||
211 | * the driver - for debugging purposes only. | ||
212 | * | ||
213 | * link=<val> - Link state. Normally you want to use the default link | ||
214 | * parameters set by the driver. This can be used to | ||
215 | * override these in case your switch doesn't negotiate | ||
216 | * the link properly. Valid values are: | ||
217 | * 0x0001 - Force half duplex link. | ||
218 | * 0x0002 - Do not negotiate line speed with the other end. | ||
219 | * 0x0010 - 10Mbit/sec link. | ||
220 | * 0x0020 - 100Mbit/sec link. | ||
221 | * 0x0040 - 1000Mbit/sec link. | ||
222 | * 0x0100 - Do not negotiate flow control. | ||
223 | * 0x0200 - Enable RX flow control Y | ||
224 | * 0x0400 - Enable TX flow control Y (Tigon II NICs only). | ||
225 | * Default value is 0x0270, ie. enable link+flow | ||
226 | * control negotiation. Negotiating the highest | ||
227 | * possible link speed with RX flow control enabled. | ||
228 | * | ||
229 | * When disabling link speed negotiation, only one link | ||
230 | * speed is allowed to be specified! | ||
231 | * | ||
232 | * tx_coal_tick=<val> - number of coalescing clock ticks (us) allowed | ||
233 | * to wait for more packets to arrive before | ||
234 | * interrupting the host, from the time the first | ||
235 | * packet arrives. | ||
236 | * | ||
237 | * rx_coal_tick=<val> - number of coalescing clock ticks (us) allowed | ||
238 | * to wait for more packets to arrive in the receive ring, | ||
239 | * before interrupting the host, after receiving the | ||
240 | * first packet in the ring. | ||
241 | * | ||
242 | * max_tx_desc=<val> - maximum number of transmit descriptors | ||
243 | * (packets) transmitted before interrupting the host. | ||
244 | * | ||
245 | * max_rx_desc=<val> - maximum number of receive descriptors | ||
246 | * (packets) received before interrupting the host. | ||
247 | * | ||
248 | * tx_ratio=<val> - 7 bit value (0 - 63) specifying the split in 64th | ||
249 | * increments of the NIC's on board memory to be used for | ||
250 | * transmit and receive buffers. For the 1MB NIC app. 800KB | ||
251 | * is available, on the 1/2MB NIC app. 300KB is available. | ||
252 | * 68KB will always be available as a minimum for both | ||
253 | * directions. The default value is a 50/50 split. | ||
254 | * dis_pci_mem_inval=<val> - disable PCI memory write and invalidate | ||
255 | * operations, default (1) is to always disable this as | ||
256 | * that is what Alteon does on NT. I have not been able | ||
257 | * to measure any real performance differences with | ||
258 | * this on my systems. Set <val>=0 if you want to | ||
259 | * enable these operations. | ||
260 | * | ||
261 | * If you use more than one NIC, specify the parameters for the | ||
262 | * individual NICs with a comma, ie. trace=0,0x00001fff,0 you want to | ||
263 | * run tracing on NIC #2 but not on NIC #1 and #3. | ||
264 | * | ||
265 | * TODO: | ||
266 | * | ||
267 | * - Proper multicast support. | ||
268 | * - NIC dump support. | ||
269 | * - More tuning parameters. | ||
270 | * | ||
271 | * The mini ring is not used under Linux and I am not sure it makes sense | ||
272 | * to actually use it. | ||
273 | * | ||
274 | * New interrupt handler strategy: | ||
275 | * | ||
276 | * The old interrupt handler worked using the traditional method of | ||
277 | * replacing an skbuff with a new one when a packet arrives. However | ||
278 | * the rx rings do not need to contain a static number of buffer | ||
279 | * descriptors, thus it makes sense to move the memory allocation out | ||
280 | * of the main interrupt handler and do it in a bottom half handler | ||
281 | * and only allocate new buffers when the number of buffers in the | ||
282 | * ring is below a certain threshold. In order to avoid starving the | ||
283 | * NIC under heavy load it is however necessary to force allocation | ||
284 | * when hitting a minimum threshold. The strategy for allocation is as | ||
285 | * follows: | ||
286 | * | ||
287 | * RX_LOW_BUF_THRES - allocate buffers in the bottom half | ||
288 | * RX_PANIC_LOW_THRES - we are very low on buffers, allocate | ||
289 | * the buffers in the interrupt handler | ||
290 | * RX_RING_THRES - maximum number of buffers in the rx ring | ||
291 | * RX_MINI_THRES - maximum number of buffers in the mini ring | ||
292 | * RX_JUMBO_THRES - maximum number of buffers in the jumbo ring | ||
293 | * | ||
294 | * One advantageous side effect of this allocation approach is that the | ||
295 | * entire rx processing can be done without holding any spin lock | ||
296 | * since the rx rings and registers are totally independent of the tx | ||
297 | * ring and its registers. This of course includes the kmalloc's of | ||
298 | * new skb's. Thus start_xmit can run in parallel with rx processing | ||
299 | * and the memory allocation on SMP systems. | ||
300 | * | ||
301 | * Note that running the skb reallocation in a bottom half opens up | ||
302 | * another can of races which needs to be handled properly. In | ||
303 | * particular it can happen that the interrupt handler tries to run | ||
304 | * the reallocation while the bottom half is either running on another | ||
305 | * CPU or was interrupted on the same CPU. To get around this the | ||
306 | * driver uses bitops to prevent the reallocation routines from being | ||
307 | * reentered. | ||
308 | * | ||
309 | * TX handling can also be done without holding any spin lock, wheee | ||
310 | * this is fun! since tx_ret_csm is only written to by the interrupt | ||
311 | * handler. The case to be aware of is when shutting down the device | ||
312 | * and cleaning up where it is necessary to make sure that | ||
313 | * start_xmit() is not running while this is happening. Well DaveM | ||
314 | * informs me that this case is already protected against ... bye bye | ||
315 | * Mr. Spin Lock, it was nice to know you. | ||
316 | * | ||
317 | * TX interrupts are now partly disabled so the NIC will only generate | ||
318 | * TX interrupts for the number of coal ticks, not for the number of | ||
319 | * TX packets in the queue. This should reduce the number of TX only, | ||
320 | * ie. when no RX processing is done, interrupts seen. | ||
321 | */ | ||
322 | |||
323 | /* | ||
324 | * Threshold values for RX buffer allocation - the low water marks for | ||
325 | * when to start refilling the rings are set to 75% of the ring | ||
326 | * sizes. It seems to make sense to refill the rings entirely from the | ||
327 | * interrupt handler once it gets below the panic threshold, that way | ||
328 | * we don't risk that the refilling is moved to another CPU when the | ||
329 | * one running the interrupt handler just got the slab code hot in its | ||
330 | * cache. | ||
331 | */ | ||
332 | #define RX_RING_SIZE 72 | ||
333 | #define RX_MINI_SIZE 64 | ||
334 | #define RX_JUMBO_SIZE 48 | ||
335 | |||
336 | #define RX_PANIC_STD_THRES 16 | ||
337 | #define RX_PANIC_STD_REFILL (3*RX_PANIC_STD_THRES)/2 | ||
338 | #define RX_LOW_STD_THRES (3*RX_RING_SIZE)/4 | ||
339 | #define RX_PANIC_MINI_THRES 12 | ||
340 | #define RX_PANIC_MINI_REFILL (3*RX_PANIC_MINI_THRES)/2 | ||
341 | #define RX_LOW_MINI_THRES (3*RX_MINI_SIZE)/4 | ||
342 | #define RX_PANIC_JUMBO_THRES 6 | ||
343 | #define RX_PANIC_JUMBO_REFILL (3*RX_PANIC_JUMBO_THRES)/2 | ||
344 | #define RX_LOW_JUMBO_THRES (3*RX_JUMBO_SIZE)/4 | ||
345 | |||
346 | |||
347 | /* | ||
348 | * Size of the mini ring entries, basically these just should be big | ||
349 | * enough to take TCP ACKs | ||
350 | */ | ||
351 | #define ACE_MINI_SIZE 100 | ||
352 | |||
353 | #define ACE_MINI_BUFSIZE ACE_MINI_SIZE | ||
354 | #define ACE_STD_BUFSIZE (ACE_STD_MTU + ETH_HLEN + 4) | ||
355 | #define ACE_JUMBO_BUFSIZE (ACE_JUMBO_MTU + ETH_HLEN + 4) | ||
356 | |||
357 | /* | ||
358 | * There seems to be a magic difference in the effect between 995 and 996 | ||
359 | * but little difference between 900 and 995 ... no idea why. | ||
360 | * | ||
361 | * There is now a default set of tuning parameters which is set, depending | ||
362 | * on whether or not the user enables Jumbo frames. It's assumed that if | ||
363 | * Jumbo frames are enabled, the user wants optimal tuning for that case. | ||
364 | */ | ||
365 | #define DEF_TX_COAL 400 /* 996 */ | ||
366 | #define DEF_TX_MAX_DESC 60 /* was 40 */ | ||
367 | #define DEF_RX_COAL 120 /* 1000 */ | ||
368 | #define DEF_RX_MAX_DESC 25 | ||
369 | #define DEF_TX_RATIO 21 /* 24 */ | ||
370 | |||
371 | #define DEF_JUMBO_TX_COAL 20 | ||
372 | #define DEF_JUMBO_TX_MAX_DESC 60 | ||
373 | #define DEF_JUMBO_RX_COAL 30 | ||
374 | #define DEF_JUMBO_RX_MAX_DESC 6 | ||
375 | #define DEF_JUMBO_TX_RATIO 21 | ||
376 | |||
377 | #if tigon2FwReleaseLocal < 20001118 | ||
378 | /* | ||
379 | * Standard firmware and early modifications duplicate | ||
380 | * IRQ load without this flag (coal timer is never reset). | ||
381 | * Note that with this flag tx_coal should be less than | ||
382 | * time to xmit full tx ring. | ||
383 | * 400usec is not so bad for tx ring size of 128. | ||
384 | */ | ||
385 | #define TX_COAL_INTS_ONLY 1 /* worth it */ | ||
386 | #else | ||
387 | /* | ||
388 | * With modified firmware, this is not necessary, but still useful. | ||
389 | */ | ||
390 | #define TX_COAL_INTS_ONLY 1 | ||
391 | #endif | ||
392 | |||
393 | #define DEF_TRACE 0 | ||
394 | #define DEF_STAT (2 * TICKS_PER_SEC) | ||
395 | |||
396 | |||
/*
 * Per-board tuning knobs.  Each array holds one slot per board (up to
 * ACE_MAX_MOD_PARMS) and is filled from comma-separated module
 * parameters; see the big parameter description comment above.
 */
static int link_state[ACE_MAX_MOD_PARMS];
static int trace[ACE_MAX_MOD_PARMS];
static int tx_coal_tick[ACE_MAX_MOD_PARMS];
static int rx_coal_tick[ACE_MAX_MOD_PARMS];
static int max_tx_desc[ACE_MAX_MOD_PARMS];
static int max_rx_desc[ACE_MAX_MOD_PARMS];
static int tx_ratio[ACE_MAX_MOD_PARMS];
/*
 * Default 1 = always disable PCI memory-write-and-invalidate.
 * NOTE(review): unlike the arrays above, this one is never hooked up
 * via module_param_array() below — possibly intentional; confirm.
 */
static int dis_pci_mem_inval[ACE_MAX_MOD_PARMS] = {1, 1, 1, 1, 1, 1, 1, 1};

MODULE_AUTHOR("Jes Sorensen <jes@trained-monkey.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("AceNIC/3C985/GA620 Gigabit Ethernet driver");
/* Tigon I firmware is only needed when Tigon I support is compiled in. */
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
MODULE_FIRMWARE("acenic/tg1.bin");
#endif
MODULE_FIRMWARE("acenic/tg2.bin");

/* "link" is the user-visible parameter name for the link_state array. */
module_param_array_named(link, link_state, int, NULL, 0);
module_param_array(trace, int, NULL, 0);
module_param_array(tx_coal_tick, int, NULL, 0);
module_param_array(max_tx_desc, int, NULL, 0);
module_param_array(rx_coal_tick, int, NULL, 0);
module_param_array(max_rx_desc, int, NULL, 0);
module_param_array(tx_ratio, int, NULL, 0);
MODULE_PARM_DESC(link, "AceNIC/3C985/NetGear link state");
MODULE_PARM_DESC(trace, "AceNIC/3C985/NetGear firmware trace level");
MODULE_PARM_DESC(tx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first tx descriptor arrives");
MODULE_PARM_DESC(max_tx_desc, "AceNIC/3C985/GA620 max number of transmit descriptors to wait");
MODULE_PARM_DESC(rx_coal_tick, "AceNIC/3C985/GA620 max clock ticks to wait from first rx descriptor arrives");
MODULE_PARM_DESC(max_rx_desc, "AceNIC/3C985/GA620 max number of receive descriptors to wait");
MODULE_PARM_DESC(tx_ratio, "AceNIC/3C985/GA620 ratio of NIC memory used for TX/RX descriptors (range 0-63)");
428 | |||
429 | |||
/* Driver banner, printed once when the first board is probed. */
static const char version[] __devinitconst =
  "acenic.c: v0.92 08/05/2002 Jes Sorensen, linux-acenic@SunSITE.dk\n"
  "                            http://home.cern.ch/~jes/gige/acenic.html\n";

/* ethtool callbacks, defined later in this file. */
static int ace_get_settings(struct net_device *, struct ethtool_cmd *);
static int ace_set_settings(struct net_device *, struct ethtool_cmd *);
static void ace_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);

static const struct ethtool_ops ace_ethtool_ops = {
	.get_settings = ace_get_settings,
	.set_settings = ace_set_settings,
	.get_drvinfo = ace_get_drvinfo,
};

/* TX-timeout handler installed below as .ndo_tx_timeout. */
static void ace_watchdog(struct net_device *dev);

/*
 * net_device operations; the handlers (ace_open, ace_close, ...) are
 * defined elsewhere in this file.
 */
static const struct net_device_ops ace_netdev_ops = {
	.ndo_open		= ace_open,
	.ndo_stop		= ace_close,
	.ndo_tx_timeout		= ace_watchdog,
	.ndo_get_stats		= ace_get_stats,
	.ndo_start_xmit		= ace_start_xmit,
	.ndo_set_multicast_list	= ace_set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ace_set_mac_addr,
	.ndo_change_mtu		= ace_change_mtu,
};
457 | |||
/*
 * Probe one AceNIC board: allocate the net_device, enable and
 * configure the PCI device, map the register window, allocate the
 * descriptor rings, initialize the board via ace_init() and finally
 * register the netdev.
 *
 * Returns 0 on success, -ENOMEM if the net_device cannot be
 * allocated, -ENODEV on any other failure.
 */
static int __devinit acenic_probe_one(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	struct net_device *dev;
	struct ace_private *ap;
	static int boards_found;	/* also used as the module-parameter slot index */

	dev = alloc_etherdev(sizeof(struct ace_private));
	if (dev == NULL) {
		printk(KERN_ERR "acenic: Unable to allocate "
		       "net_device structure!\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ap = netdev_priv(dev);
	ap->pdev = pdev;
	/* Use the PCI slot name until register_netdev() assigns ethX. */
	ap->name = pci_name(pdev);

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	dev->watchdog_timeo = 5*HZ;

	dev->netdev_ops = &ace_netdev_ops;
	SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);

	/* we only display this string ONCE */
	if (!boards_found)
		printk(version);

	if (pci_enable_device(pdev))
		goto fail_free_netdev;

	/*
	 * Enable master mode before we start playing with the
	 * pci_command word since pci_set_master() will modify
	 * it.
	 */
	pci_set_master(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &ap->pci_command);

	/* OpenFirmware on Mac's does not set this - DOH.. */
	if (!(ap->pci_command & PCI_COMMAND_MEMORY)) {
		printk(KERN_INFO "%s: Enabling PCI Memory Mapped "
		       "access - was not enabled by BIOS/Firmware\n",
		       ap->name);
		ap->pci_command = ap->pci_command | PCI_COMMAND_MEMORY;
		pci_write_config_word(ap->pdev, PCI_COMMAND,
				      ap->pci_command);
		wmb();
	}

	/* Raise the PCI latency timer to at least 0x40. */
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &ap->pci_latency);
	if (ap->pci_latency <= 0x40) {
		ap->pci_latency = 0x40;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, ap->pci_latency);
	}

	/*
	 * Remap the regs into kernel space - this is abuse of
	 * dev->base_addr since it was means for I/O port
	 * addresses but who gives a damn.
	 */
	dev->base_addr = pci_resource_start(pdev, 0);
	ap->regs = ioremap(dev->base_addr, 0x4000);
	if (!ap->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "AceNIC %i will be disabled.\n",
		       ap->name, boards_found);
		goto fail_free_netdev;
	}

	/* Identify the board by PCI vendor/device ID for the banner. */
	switch(pdev->vendor) {
	case PCI_VENDOR_ID_ALTEON:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9100T) {
			printk(KERN_INFO "%s: Farallon PN9100-T ",
			       ap->name);
		} else {
			printk(KERN_INFO "%s: Alteon AceNIC ",
			       ap->name);
		}
		break;
	case PCI_VENDOR_ID_3COM:
		printk(KERN_INFO "%s: 3Com 3C985 ", ap->name);
		break;
	case PCI_VENDOR_ID_NETGEAR:
		printk(KERN_INFO "%s: NetGear GA620 ", ap->name);
		break;
	case PCI_VENDOR_ID_DEC:
		if (pdev->device == PCI_DEVICE_ID_FARALLON_PN9000SX) {
			printk(KERN_INFO "%s: Farallon PN9000-SX ",
			       ap->name);
			break;
		}
		/* fall through - NOTE(review): non-Farallon DEC IDs get the
		 * SGI banner below; looks intentional, but confirm. */
	case PCI_VENDOR_ID_SGI:
		printk(KERN_INFO "%s: SGI AceNIC ", ap->name);
		break;
	default:
		printk(KERN_INFO "%s: Unknown AceNIC ", ap->name);
		break;
	}

	printk("Gigabit Ethernet at 0x%08lx, ", dev->base_addr);
	printk("irq %d\n", pdev->irq);

#ifdef CONFIG_ACENIC_OMIT_TIGON_I
	/* Top nibble of HostCtrl == 4 identifies a Tigon I (per message). */
	if ((readl(&ap->regs->HostCtrl) >> 28) == 4) {
		printk(KERN_ERR "%s: Driver compiled without Tigon I"
		       " support - NIC disabled\n", dev->name);
		goto fail_uninit;
	}
#endif

	/*
	 * NOTE(review): failures from here jump to fail_free_netdev, which
	 * does not iounmap(ap->regs); only the fail_uninit path runs
	 * ace_init_cleanup().  The mapping looks leaked on these paths
	 * unless the callees clean up themselves - confirm.
	 */
	if (ace_allocate_descriptors(dev))
		goto fail_free_netdev;

#ifdef MODULE
	/* Each board gets its own slot in the module-parameter arrays;
	 * boards beyond ACE_MAX_MOD_PARMS are flagged as overflow. */
	if (boards_found >= ACE_MAX_MOD_PARMS)
		ap->board_idx = BOARD_IDX_OVERFLOW;
	else
		ap->board_idx = boards_found;
#else
	ap->board_idx = BOARD_IDX_STATIC;
#endif

	if (ace_init(dev))
		goto fail_free_netdev;

	if (register_netdev(dev)) {
		printk(KERN_ERR "acenic: device registration failed\n");
		goto fail_uninit;
	}
	/* From now on report messages under the assigned ethX name. */
	ap->name = dev->name;

	if (ap->pci_using_dac)
		dev->features |= NETIF_F_HIGHDMA;

	pci_set_drvdata(pdev, dev);

	boards_found++;
	return 0;

 fail_uninit:
	ace_init_cleanup(dev);
 fail_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}
609 | |||
/*
 * Tear down one board: unregister the netdev, halt the firmware
 * CPU(s), quiesce interrupts, release any skbs still posted to the
 * RX rings, then free the descriptor memory and the net_device.
 */
static void __devexit acenic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i;

	unregister_netdev(dev);

	/* Halt the firmware CPU; boards with version >= 2 (i.e. not
	 * Tigon I, see ACE_IS_TIGON_I) have a second CPU control reg. */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	if (ap->version >= 2)
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);

	/*
	 * This clears any pending interrupts
	 */
	writel(1, &regs->Mb0Lo);
	readl(&regs->CpuCtrl);	/* flush */

	/*
	 * Make sure no other CPUs are processing interrupts
	 * on the card before the buffers are being released.
	 * Otherwise one might experience some `interesting'
	 * effects.
	 *
	 * Then release the RX buffers - jumbo buffers were
	 * already released in ace_close().
	 */
	ace_sync_irq(dev->irq);

	/* Standard RX ring: unmap and free every still-posted skb. */
	for (i = 0; i < RX_STD_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_std_skbuff[i].skb;

		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_std_skbuff[i];
			mapping = dma_unmap_addr(ringp, mapping);
			pci_unmap_page(ap->pdev, mapping,
				       ACE_STD_BUFSIZE,
				       PCI_DMA_FROMDEVICE);

			ap->rx_std_ring[i].size = 0;
			ap->skb->rx_std_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	/* The mini ring is only used on version >= 2 boards. */
	if (ap->version >= 2) {
		for (i = 0; i < RX_MINI_RING_ENTRIES; i++) {
			struct sk_buff *skb = ap->skb->rx_mini_skbuff[i].skb;

			if (skb) {
				struct ring_info *ringp;
				dma_addr_t mapping;

				ringp = &ap->skb->rx_mini_skbuff[i];
				mapping = dma_unmap_addr(ringp,mapping);
				pci_unmap_page(ap->pdev, mapping,
					       ACE_MINI_BUFSIZE,
					       PCI_DMA_FROMDEVICE);

				ap->rx_mini_ring[i].size = 0;
				ap->skb->rx_mini_skbuff[i].skb = NULL;
				dev_kfree_skb(skb);
			}
		}
	}

	/* Jumbo ring: normally already emptied by ace_close(), but sweep
	 * it anyway in case anything is left. */
	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
		struct sk_buff *skb = ap->skb->rx_jumbo_skbuff[i].skb;
		if (skb) {
			struct ring_info *ringp;
			dma_addr_t mapping;

			ringp = &ap->skb->rx_jumbo_skbuff[i];
			mapping = dma_unmap_addr(ringp, mapping);
			pci_unmap_page(ap->pdev, mapping,
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);

			ap->rx_jumbo_ring[i].size = 0;
			ap->skb->rx_jumbo_skbuff[i].skb = NULL;
			dev_kfree_skb(skb);
		}
	}

	ace_init_cleanup(dev);
	free_netdev(dev);
}
701 | |||
/* PCI driver glue: probe/remove are wired to the functions above. */
static struct pci_driver acenic_pci_driver = {
	.name		= "acenic",
	.id_table	= acenic_pci_tbl,
	.probe		= acenic_probe_one,
	.remove		= __devexit_p(acenic_remove_one),
};
708 | |||
/* Module entry point: register the PCI driver; probing does the rest. */
static int __init acenic_init(void)
{
	return pci_register_driver(&acenic_pci_driver);
}
713 | |||
/* Module exit point: unregister the driver (removes all boards). */
static void __exit acenic_exit(void)
{
	pci_unregister_driver(&acenic_pci_driver);
}
718 | |||
719 | module_init(acenic_init); | ||
720 | module_exit(acenic_exit); | ||
721 | |||
722 | static void ace_free_descriptors(struct net_device *dev) | ||
723 | { | ||
724 | struct ace_private *ap = netdev_priv(dev); | ||
725 | int size; | ||
726 | |||
727 | if (ap->rx_std_ring != NULL) { | ||
728 | size = (sizeof(struct rx_desc) * | ||
729 | (RX_STD_RING_ENTRIES + | ||
730 | RX_JUMBO_RING_ENTRIES + | ||
731 | RX_MINI_RING_ENTRIES + | ||
732 | RX_RETURN_RING_ENTRIES)); | ||
733 | pci_free_consistent(ap->pdev, size, ap->rx_std_ring, | ||
734 | ap->rx_ring_base_dma); | ||
735 | ap->rx_std_ring = NULL; | ||
736 | ap->rx_jumbo_ring = NULL; | ||
737 | ap->rx_mini_ring = NULL; | ||
738 | ap->rx_return_ring = NULL; | ||
739 | } | ||
740 | if (ap->evt_ring != NULL) { | ||
741 | size = (sizeof(struct event) * EVT_RING_ENTRIES); | ||
742 | pci_free_consistent(ap->pdev, size, ap->evt_ring, | ||
743 | ap->evt_ring_dma); | ||
744 | ap->evt_ring = NULL; | ||
745 | } | ||
746 | if (ap->tx_ring != NULL && !ACE_IS_TIGON_I(ap)) { | ||
747 | size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); | ||
748 | pci_free_consistent(ap->pdev, size, ap->tx_ring, | ||
749 | ap->tx_ring_dma); | ||
750 | } | ||
751 | ap->tx_ring = NULL; | ||
752 | |||
753 | if (ap->evt_prd != NULL) { | ||
754 | pci_free_consistent(ap->pdev, sizeof(u32), | ||
755 | (void *)ap->evt_prd, ap->evt_prd_dma); | ||
756 | ap->evt_prd = NULL; | ||
757 | } | ||
758 | if (ap->rx_ret_prd != NULL) { | ||
759 | pci_free_consistent(ap->pdev, sizeof(u32), | ||
760 | (void *)ap->rx_ret_prd, | ||
761 | ap->rx_ret_prd_dma); | ||
762 | ap->rx_ret_prd = NULL; | ||
763 | } | ||
764 | if (ap->tx_csm != NULL) { | ||
765 | pci_free_consistent(ap->pdev, sizeof(u32), | ||
766 | (void *)ap->tx_csm, ap->tx_csm_dma); | ||
767 | ap->tx_csm = NULL; | ||
768 | } | ||
769 | } | ||
770 | |||
771 | |||
772 | static int ace_allocate_descriptors(struct net_device *dev) | ||
773 | { | ||
774 | struct ace_private *ap = netdev_priv(dev); | ||
775 | int size; | ||
776 | |||
777 | size = (sizeof(struct rx_desc) * | ||
778 | (RX_STD_RING_ENTRIES + | ||
779 | RX_JUMBO_RING_ENTRIES + | ||
780 | RX_MINI_RING_ENTRIES + | ||
781 | RX_RETURN_RING_ENTRIES)); | ||
782 | |||
783 | ap->rx_std_ring = pci_alloc_consistent(ap->pdev, size, | ||
784 | &ap->rx_ring_base_dma); | ||
785 | if (ap->rx_std_ring == NULL) | ||
786 | goto fail; | ||
787 | |||
788 | ap->rx_jumbo_ring = ap->rx_std_ring + RX_STD_RING_ENTRIES; | ||
789 | ap->rx_mini_ring = ap->rx_jumbo_ring + RX_JUMBO_RING_ENTRIES; | ||
790 | ap->rx_return_ring = ap->rx_mini_ring + RX_MINI_RING_ENTRIES; | ||
791 | |||
792 | size = (sizeof(struct event) * EVT_RING_ENTRIES); | ||
793 | |||
794 | ap->evt_ring = pci_alloc_consistent(ap->pdev, size, &ap->evt_ring_dma); | ||
795 | |||
796 | if (ap->evt_ring == NULL) | ||
797 | goto fail; | ||
798 | |||
799 | /* | ||
800 | * Only allocate a host TX ring for the Tigon II, the Tigon I | ||
801 | * has to use PCI registers for this ;-( | ||
802 | */ | ||
803 | if (!ACE_IS_TIGON_I(ap)) { | ||
804 | size = (sizeof(struct tx_desc) * MAX_TX_RING_ENTRIES); | ||
805 | |||
806 | ap->tx_ring = pci_alloc_consistent(ap->pdev, size, | ||
807 | &ap->tx_ring_dma); | ||
808 | |||
809 | if (ap->tx_ring == NULL) | ||
810 | goto fail; | ||
811 | } | ||
812 | |||
813 | ap->evt_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), | ||
814 | &ap->evt_prd_dma); | ||
815 | if (ap->evt_prd == NULL) | ||
816 | goto fail; | ||
817 | |||
818 | ap->rx_ret_prd = pci_alloc_consistent(ap->pdev, sizeof(u32), | ||
819 | &ap->rx_ret_prd_dma); | ||
820 | if (ap->rx_ret_prd == NULL) | ||
821 | goto fail; | ||
822 | |||
823 | ap->tx_csm = pci_alloc_consistent(ap->pdev, sizeof(u32), | ||
824 | &ap->tx_csm_dma); | ||
825 | if (ap->tx_csm == NULL) | ||
826 | goto fail; | ||
827 | |||
828 | return 0; | ||
829 | |||
830 | fail: | ||
831 | /* Clean up. */ | ||
832 | ace_init_cleanup(dev); | ||
833 | return 1; | ||
834 | } | ||
835 | |||
836 | |||
837 | /* | ||
838 | * Generic cleanup handling data allocated during init. Used when the | ||
839 | * module is unloaded or if an error occurs during initialization | ||
840 | */ | ||
841 | static void ace_init_cleanup(struct net_device *dev) | ||
842 | { | ||
843 | struct ace_private *ap; | ||
844 | |||
845 | ap = netdev_priv(dev); | ||
846 | |||
847 | ace_free_descriptors(dev); | ||
848 | |||
849 | if (ap->info) | ||
850 | pci_free_consistent(ap->pdev, sizeof(struct ace_info), | ||
851 | ap->info, ap->info_dma); | ||
852 | kfree(ap->skb); | ||
853 | kfree(ap->trace_buf); | ||
854 | |||
855 | if (dev->irq) | ||
856 | free_irq(dev->irq, dev); | ||
857 | |||
858 | iounmap(ap->regs); | ||
859 | } | ||
860 | |||
861 | |||
862 | /* | ||
863 | * Commands are considered to be slow. | ||
864 | */ | ||
865 | static inline void ace_issue_cmd(struct ace_regs __iomem *regs, struct cmd *cmd) | ||
866 | { | ||
867 | u32 idx; | ||
868 | |||
869 | idx = readl(®s->CmdPrd); | ||
870 | |||
871 | writel(*(u32 *)(cmd), ®s->CmdRng[idx]); | ||
872 | idx = (idx + 1) % CMD_RING_ENTRIES; | ||
873 | |||
874 | writel(idx, ®s->CmdPrd); | ||
875 | } | ||
876 | |||
877 | |||
/*
 * Bring the NIC from post-reset state to running firmware: reset the
 * chip, detect the Tigon revision, read the MAC address from EEPROM,
 * tune PCI/DMA behaviour, allocate and wire up all rings and mailbox
 * words, load the firmware and start the on-board CPU.
 *
 * Returns 0 on success or a negative errno; on any failure everything
 * acquired so far is torn down via ace_init_cleanup().
 *
 * NOTE(review): the register-access ordering below (including the
 * write-posting readl()s) is deliberate — do not reorder.
 */
static int __devinit ace_init(struct net_device *dev)
{
	struct ace_private *ap;
	struct ace_regs __iomem *regs;
	struct ace_info *info = NULL;
	struct pci_dev *pdev;
	unsigned long myjif;
	u64 tmp_ptr;
	u32 tig_ver, mac1, mac2, tmp, pci_state;
	int board_idx, ecode = 0;
	short i;
	unsigned char cache_size;

	ap = netdev_priv(dev);
	regs = ap->regs;

	board_idx = ap->board_idx;

	/*
	 * aman@sgi.com - its useful to do a NIC reset here to
	 * address the `Firmware not running' problem subsequent
	 * to any crashes involving the NIC
	 */
	writel(HW_RESET | (HW_RESET << 24), &regs->HostCtrl);
	readl(&regs->HostCtrl);		/* PCI write posting */
	udelay(5);

	/*
	 * Don't access any other registers before this point!
	 */
#ifdef __BIG_ENDIAN
	/*
	 * This will most likely need BYTE_SWAP once we switch
	 * to using __raw_writel()
	 */
	writel((WORD_SWAP | CLR_INT | ((WORD_SWAP | CLR_INT) << 24)),
	       &regs->HostCtrl);
#else
	writel((CLR_INT | WORD_SWAP | ((CLR_INT | WORD_SWAP) << 24)),
	       &regs->HostCtrl);
#endif
	readl(&regs->HostCtrl);		/* PCI write posting */

	/*
	 * Stop the NIC CPU and clear pending interrupts
	 */
	writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
	readl(&regs->CpuCtrl);		/* PCI write posting */
	writel(0, &regs->Mb0Lo);	/* clear mailbox 0 */

	/* The chip revision lives in the top nibble of HostCtrl. */
	tig_ver = readl(&regs->HostCtrl) >> 28;

	switch(tig_ver){
#ifndef CONFIG_ACENIC_OMIT_TIGON_I
	case 4:
	case 5:
		printk(KERN_INFO "  Tigon I  (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(0, &regs->LocalCtrl);
		ap->version = 1;
		ap->tx_ring_entries = TIGON_I_TX_RING_ENTRIES;
		break;
#endif
	case 6:
		printk(KERN_INFO "  Tigon II (Rev. %i), Firmware: %i.%i.%i, ",
		       tig_ver, ap->firmware_major, ap->firmware_minor,
		       ap->firmware_fix);
		writel(readl(&regs->CpuBCtrl) | CPU_HALT, &regs->CpuBCtrl);
		readl(&regs->CpuBCtrl);		/* PCI write posting */
		/*
		 * The SRAM bank size does _not_ indicate the amount
		 * of memory on the card, it controls the _bank_ size!
		 * Ie. a 1MB AceNIC will have two banks of 512KB.
		 */
		writel(SRAM_BANK_512K, &regs->LocalCtrl);
		writel(SYNC_SRAM_TIMING, &regs->MiscCfg);
		ap->version = 2;
		ap->tx_ring_entries = MAX_TX_RING_ENTRIES;
		break;
	default:
		printk(KERN_WARNING "  Unsupported Tigon version detected "
		       "(%i)\n", tig_ver);
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * ModeStat _must_ be set after the SRAM settings as this change
	 * seems to corrupt the ModeStat and possible other registers.
	 * The SRAM settings survive resets and setting it to the same
	 * value a second time works as well. This is what caused the
	 * `Firmware not running' problem on the Tigon II.
	 */
#ifdef __BIG_ENDIAN
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL | ACE_BYTE_SWAP_BD |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#else
	writel(ACE_BYTE_SWAP_DMA | ACE_WARN | ACE_FATAL |
	       ACE_WORD_SWAP_BD | ACE_NO_JUMBO_FRAG, &regs->ModeStat);
#endif
	readl(&regs->ModeStat);		/* PCI write posting */

	/*
	 * EEPROM bytes 0x8c-0x93; the last six of them form the
	 * station address (see the dev_addr extraction below).
	 */
	mac1 = 0;
	for(i = 0; i < 4; i++) {
		int t;

		mac1 = mac1 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac1 |= (t & 0xff);
	}
	mac2 = 0;
	for(i = 4; i < 8; i++) {
		int t;

		mac2 = mac2 << 8;
		t = read_eeprom_byte(dev, 0x8c+i);
		if (t < 0) {
			ecode = -EIO;
			goto init_error;
		} else
			mac2 |= (t & 0xff);
	}

	writel(mac1, &regs->MacAddrHi);
	writel(mac2, &regs->MacAddrLo);

	dev->dev_addr[0] = (mac1 >> 8) & 0xff;
	dev->dev_addr[1] = mac1 & 0xff;
	dev->dev_addr[2] = (mac2 >> 24) & 0xff;
	dev->dev_addr[3] = (mac2 >> 16) & 0xff;
	dev->dev_addr[4] = (mac2 >> 8) & 0xff;
	dev->dev_addr[5] = mac2 & 0xff;

	printk("MAC: %pM\n", dev->dev_addr);

	/*
	 * Looks like this is necessary to deal with on all architectures,
	 * even this %$#%$# N440BX Intel based thing doesn't get it right.
	 * Ie. having two NICs in the machine, one will have the cache
	 * line set at boot time, the other will not.
	 */
	pdev = ap->pdev;
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_size);
	cache_size <<= 2;	/* config register is in units of dwords */
	if (cache_size != SMP_CACHE_BYTES) {
		printk(KERN_INFO "  PCI cache line size set incorrectly "
		       "(%i bytes) by BIOS/FW, ", cache_size);
		if (cache_size > SMP_CACHE_BYTES)
			printk("expecting %i\n", SMP_CACHE_BYTES);
		else {
			printk("correcting to %i\n", SMP_CACHE_BYTES);
			pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
					      SMP_CACHE_BYTES >> 2);
		}
	}

	pci_state = readl(&regs->PciState);
	printk(KERN_INFO "  PCI bus width: %i bits, speed: %iMHz, "
	       "latency: %i clks\n",
	       	(pci_state & PCI_32BIT) ? 32 : 64,
		(pci_state & PCI_66MHZ) ? 66 : 33,
		ap->pci_latency);

	/*
	 * Set the max DMA transfer size. Seems that for most systems
	 * the performance is better when no MAX parameter is
	 * set. However for systems enabling PCI write and invalidate,
	 * DMA writes must be set to the L1 cache line size to get
	 * optimal performance.
	 *
	 * The default is now to turn the PCI write and invalidate off
	 * - that is what Alteon does for NT.
	 */
	tmp = READ_CMD_MEM | WRITE_CMD_MEM;
	if (ap->version >= 2) {
		tmp |= (MEM_READ_MULTIPLE | (pci_state & PCI_66MHZ));
		/*
		 * Tuning parameters only supported for 8 cards
		 */
		if (board_idx == BOARD_IDX_OVERFLOW ||
		    dis_pci_mem_inval[board_idx]) {
			if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
				printk(KERN_INFO "  Disabling PCI memory "
				       "write and invalidate\n");
			}
		} else if (ap->pci_command & PCI_COMMAND_INVALIDATE) {
			printk(KERN_INFO "  PCI memory write & invalidate "
			       "enabled by BIOS, enabling counter measures\n");

			switch(SMP_CACHE_BYTES) {
			case 16:
				tmp |= DMA_WRITE_MAX_16;
				break;
			case 32:
				tmp |= DMA_WRITE_MAX_32;
				break;
			case 64:
				tmp |= DMA_WRITE_MAX_64;
				break;
			case 128:
				tmp |= DMA_WRITE_MAX_128;
				break;
			default:
				printk(KERN_INFO "  Cache line size %i not "
				       "supported, PCI write and invalidate "
				       "disabled\n", SMP_CACHE_BYTES);
				ap->pci_command &= ~PCI_COMMAND_INVALIDATE;
				pci_write_config_word(pdev, PCI_COMMAND,
						      ap->pci_command);
			}
		}
	}

#ifdef __sparc__
	/*
	 * On this platform, we know what the best dma settings
	 * are.  We use 64-byte maximum bursts, because if we
	 * burst larger than the cache line size (or even cross
	 * a 64byte boundary in a single burst) the UltraSparc
	 * PCI controller will disconnect at 64-byte multiples.
	 *
	 * Read-multiple will be properly enabled above, and when
	 * set will give the PCI controller proper hints about
	 * prefetching.
	 */
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_64;
	tmp |= DMA_WRITE_MAX_64;
#endif
#ifdef __alpha__
	tmp &= ~DMA_READ_WRITE_MASK;
	tmp |= DMA_READ_MAX_128;
	/*
	 * All the docs say MUST NOT. Well, I did.
	 * Nothing terrible happens, if we load wrong size.
	 * Bit w&i still works better!
	 */
	tmp |= DMA_WRITE_MAX_128;
#endif
	writel(tmp, &regs->PciState);

#if 0
	/*
	 * The Host PCI bus controller driver has to set FBB.
	 * If all devices on that PCI bus support FBB, then the controller
	 * can enable FBB support in the Host PCI Bus controller (or on
	 * the PCI-PCI bridge if that applies).
	 * -ggg
	 */
	/*
	 * I have received reports from people having problems when this
	 * bit is enabled.
	 */
	if (!(ap->pci_command & PCI_COMMAND_FAST_BACK)) {
		printk(KERN_INFO "  Enabling PCI Fast Back to Back\n");
		ap->pci_command |= PCI_COMMAND_FAST_BACK;
		pci_write_config_word(pdev, PCI_COMMAND, ap->pci_command);
	}
#endif

	/*
	 * Configure DMA attributes.
	 */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		ap->pci_using_dac = 1;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ap->pci_using_dac = 0;
	} else {
		ecode = -ENODEV;
		goto init_error;
	}

	/*
	 * Initialize the generic info block and the command+event rings
	 * and the control blocks for the transmit and receive rings
	 * as they need to be setup once and for all.
	 */
	if (!(info = pci_alloc_consistent(ap->pdev, sizeof(struct ace_info),
					  &ap->info_dma))) {
		ecode = -EAGAIN;
		goto init_error;
	}
	ap->info = info;

	/*
	 * Get the memory for the skb rings.
	 */
	if (!(ap->skb = kmalloc(sizeof(struct ace_skb), GFP_KERNEL))) {
		ecode = -EAGAIN;
		goto init_error;
	}

	ecode = request_irq(pdev->irq, ace_interrupt, IRQF_SHARED,
			    DRV_NAME, dev);
	if (ecode) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       DRV_NAME, pdev->irq);
		goto init_error;
	} else
		dev->irq = pdev->irq;

#ifdef INDEX_DEBUG
	spin_lock_init(&ap->debug_lock);
	ap->last_tx = ACE_TX_RING_ENTRIES(ap) - 1;
	ap->last_std_rx = 0;
	ap->last_mini_rx = 0;
#endif

	memset(ap->info, 0, sizeof(struct ace_info));
	memset(ap->skb, 0, sizeof(struct ace_skb));

	ecode = ace_load_firmware(dev);
	if (ecode)
		goto init_error;

	/* NOTE(review): cleared here; set again once the firmware
	 * signals it is up — see the wait loop below. */
	ap->fw_running = 0;

	/* Tell the NIC where the shared info block lives. */
	tmp_ptr = ap->info_dma;
	writel(tmp_ptr >> 32, &regs->InfoPtrHi);
	writel(tmp_ptr & 0xffffffff, &regs->InfoPtrLo);

	/* Event ring. */
	memset(ap->evt_ring, 0, EVT_RING_ENTRIES * sizeof(struct event));

	set_aceaddr(&info->evt_ctrl.rngptr, ap->evt_ring_dma);
	info->evt_ctrl.flags = 0;

	*(ap->evt_prd) = 0;
	wmb();
	set_aceaddr(&info->evt_prd_ptr, ap->evt_prd_dma);
	writel(0, &regs->EvtCsm);

	/* Command ring (lives on the NIC, at fixed offset 0x100). */
	set_aceaddr(&info->cmd_ctrl.rngptr, 0x100);
	info->cmd_ctrl.flags = 0;
	info->cmd_ctrl.max_len = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRng[i]);

	writel(0, &regs->CmdPrd);
	writel(0, &regs->CmdCsm);

	tmp_ptr = ap->info_dma;
	tmp_ptr += (unsigned long) &(((struct ace_info *)0)->s.stats);
	set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);

	/* Standard RX ring. */
	set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
	info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
	info->rx_std_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_std_ring, 0,
	       RX_STD_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_STD_RING_ENTRIES; i++)
		ap->rx_std_ring[i].flags = BD_FLG_TCP_UDP_SUM;

	ap->rx_std_skbprd = 0;
	atomic_set(&ap->cur_rx_bufs, 0);

	/* Jumbo RX ring, directly after the std ring in the allocation. */
	set_aceaddr(&info->rx_jumbo_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) * RX_STD_RING_ENTRIES)));
	info->rx_jumbo_ctrl.max_len = 0;
	info->rx_jumbo_ctrl.flags =
	  RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	memset(ap->rx_jumbo_ring, 0,
	       RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc));

	for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++)
		ap->rx_jumbo_ring[i].flags = BD_FLG_TCP_UDP_SUM | BD_FLG_JUMBO;

	ap->rx_jumbo_skbprd = 0;
	atomic_set(&ap->cur_jumbo_bufs, 0);

	/* Mini RX ring: only a Tigon II (version >= 2) supports it. */
	memset(ap->rx_mini_ring, 0,
	       RX_MINI_RING_ENTRIES * sizeof(struct rx_desc));

	if (ap->version >= 2) {
		set_aceaddr(&info->rx_mini_ctrl.rngptr,
			    (ap->rx_ring_base_dma +
			     (sizeof(struct rx_desc) *
			      (RX_STD_RING_ENTRIES +
			       RX_JUMBO_RING_ENTRIES))));
		info->rx_mini_ctrl.max_len = ACE_MINI_SIZE;
		info->rx_mini_ctrl.flags =
		  RCB_FLG_TCP_UDP_SUM|RCB_FLG_NO_PSEUDO_HDR|RCB_FLG_VLAN_ASSIST;

		for (i = 0; i < RX_MINI_RING_ENTRIES; i++)
			ap->rx_mini_ring[i].flags =
				BD_FLG_TCP_UDP_SUM | BD_FLG_MINI;
	} else {
		set_aceaddr(&info->rx_mini_ctrl.rngptr, 0);
		info->rx_mini_ctrl.flags = RCB_FLG_RNG_DISABLE;
		info->rx_mini_ctrl.max_len = 0;
	}

	ap->rx_mini_skbprd = 0;
	atomic_set(&ap->cur_mini_bufs, 0);

	/* RX return ring, last region of the combined allocation. */
	set_aceaddr(&info->rx_return_ctrl.rngptr,
		    (ap->rx_ring_base_dma +
		     (sizeof(struct rx_desc) *
		      (RX_STD_RING_ENTRIES +
		       RX_JUMBO_RING_ENTRIES +
		       RX_MINI_RING_ENTRIES))));
	info->rx_return_ctrl.flags = 0;
	info->rx_return_ctrl.max_len = RX_RETURN_RING_ENTRIES;

	memset(ap->rx_return_ring, 0,
	       RX_RETURN_RING_ENTRIES * sizeof(struct rx_desc));

	set_aceaddr(&info->rx_ret_prd_ptr, ap->rx_ret_prd_dma);
	*(ap->rx_ret_prd) = 0;

	writel(TX_RING_BASE, &regs->WinBase);

	if (ACE_IS_TIGON_I(ap)) {
		/* Tigon I: TX ring lives in NIC memory, accessed via
		 * the register window; zero it with writel(). */
		ap->tx_ring = (__force struct tx_desc *) regs->Window;
		for (i = 0; i < (TIGON_I_TX_RING_ENTRIES
				 * sizeof(struct tx_desc)) / sizeof(u32); i++)
			writel(0, (__force void __iomem *)ap->tx_ring  + i * 4);

		set_aceaddr(&info->tx_ctrl.rngptr, TX_RING_BASE);
	} else {
		memset(ap->tx_ring, 0,
		       MAX_TX_RING_ENTRIES * sizeof(struct tx_desc));

		set_aceaddr(&info->tx_ctrl.rngptr, ap->tx_ring_dma);
	}

	info->tx_ctrl.max_len = ACE_TX_RING_ENTRIES(ap);
	tmp = RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | RCB_FLG_VLAN_ASSIST;

	/*
	 * The Tigon I does not like having the TX ring in host memory ;-(
	 */
	if (!ACE_IS_TIGON_I(ap))
		tmp |= RCB_FLG_TX_HOST_RING;
#if TX_COAL_INTS_ONLY
	tmp |= RCB_FLG_COAL_INT_ONLY;
#endif
	info->tx_ctrl.flags = tmp;

	set_aceaddr(&info->tx_csm_ptr, ap->tx_csm_dma);

	/*
	 * Potential item for tuning parameter
	 */
#if 0 /* NO */
	writel(DMA_THRESH_16W, &regs->DmaReadCfg);
	writel(DMA_THRESH_16W, &regs->DmaWriteCfg);
#else
	writel(DMA_THRESH_8W, &regs->DmaReadCfg);
	writel(DMA_THRESH_8W, &regs->DmaWriteCfg);
#endif

	writel(0, &regs->MaskInt);
	writel(1, &regs->IfIdx);
#if 0
	/*
	 * McKinley boxes do not like us fiddling with AssistState
	 * this early
	 */
	writel(1, &regs->AssistState);
#endif

	writel(DEF_STAT, &regs->TuneStatTicks);
	writel(DEF_TRACE, &regs->TuneTrace);

	ace_set_rxtx_parms(dev, 0);

	/* Apply per-board module-parameter overrides, if any. */
	if (board_idx == BOARD_IDX_OVERFLOW) {
		printk(KERN_WARNING "%s: more than %i NICs detected, "
		       "ignoring module parameters!\n",
		       ap->name, ACE_MAX_MOD_PARMS);
	} else if (board_idx >= 0) {
		if (tx_coal_tick[board_idx])
			writel(tx_coal_tick[board_idx],
			       &regs->TuneTxCoalTicks);
		if (max_tx_desc[board_idx])
			writel(max_tx_desc[board_idx], &regs->TuneMaxTxDesc);

		if (rx_coal_tick[board_idx])
			writel(rx_coal_tick[board_idx],
			       &regs->TuneRxCoalTicks);
		if (max_rx_desc[board_idx])
			writel(max_rx_desc[board_idx], &regs->TuneMaxRxDesc);

		if (trace[board_idx])
			writel(trace[board_idx], &regs->TuneTrace);

		if ((tx_ratio[board_idx] > 0) && (tx_ratio[board_idx] < 64))
			writel(tx_ratio[board_idx], &regs->TxBufRat);
	}

	/*
	 * Default link parameters
	 */
	tmp = LNK_ENABLE | LNK_FULL_DUPLEX | LNK_1000MB | LNK_100MB |
		LNK_10MB | LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL | LNK_NEGOTIATE;
	if(ap->version >= 2)
		tmp |= LNK_TX_FLOW_CTL_Y;

	/*
	 * Override link default parameters
	 */
	if ((board_idx >= 0) && link_state[board_idx]) {
		int option = link_state[board_idx];

		tmp = LNK_ENABLE;

		if (option & 0x01) {
			printk(KERN_INFO "%s: Setting half duplex link\n",
			       ap->name);
			tmp &= ~LNK_FULL_DUPLEX;
		}
		if (option & 0x02)
			tmp &= ~LNK_NEGOTIATE;
		if (option & 0x10)
			tmp |= LNK_10MB;
		if (option & 0x20)
			tmp |= LNK_100MB;
		if (option & 0x40)
			tmp |= LNK_1000MB;
		if ((option & 0x70) == 0) {
			printk(KERN_WARNING "%s: No media speed specified, "
			       "forcing auto negotiation\n", ap->name);
			tmp |= LNK_NEGOTIATE | LNK_1000MB |
				LNK_100MB | LNK_10MB;
		}
		if ((option & 0x100) == 0)
			tmp |= LNK_NEG_FCTL;
		else
			printk(KERN_INFO "%s: Disabling flow control "
			       "negotiation\n", ap->name);
		if (option & 0x200)
			tmp |= LNK_RX_FLOW_CTL_Y;
		if ((option & 0x400) && (ap->version >= 2)) {
			printk(KERN_INFO "%s: Enabling TX flow control\n",
			       ap->name);
			tmp |= LNK_TX_FLOW_CTL_Y;
		}
	}

	ap->link = tmp;
	writel(tmp, &regs->TuneLink);
	if (ap->version >= 2)
		writel(tmp, &regs->TuneFastLink);

	/* Point the NIC CPU's program counter at the firmware entry. */
	writel(ap->firmware_start, &regs->Pc);

	writel(0, &regs->Mb0Lo);

	/*
	 * Set tx_csm before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	ap->cur_rx = 0;
	ap->tx_prd = *(ap->tx_csm) = ap->tx_ret_csm = 0;

	wmb();
	ace_set_txprd(regs, ap, 0);
	writel(0, &regs->RxRetCsm);

	/*
	 * Enable DMA engine now.
	 * If we do this sooner, Mckinley box pukes.
	 * I assume it's because Tigon II DMA engine wants to check
	 * *something* even before the CPU is started.
	 */
	writel(1, &regs->AssistState);  /* enable DMA */

	/*
	 * Start the NIC CPU
	 */
	writel(readl(&regs->CpuCtrl) & ~(CPU_HALT|CPU_TRACE), &regs->CpuCtrl);
	readl(&regs->CpuCtrl);

	/*
	 * Wait for the firmware to spin up - max 3 seconds.
	 */
	myjif = jiffies + 3 * HZ;
	while (time_before(jiffies, myjif) && !ap->fw_running)
		cpu_relax();

	if (!ap->fw_running) {
		printk(KERN_ERR "%s: Firmware NOT running!\n", ap->name);

		ace_dump_trace(ap);
		writel(readl(&regs->CpuCtrl) | CPU_HALT, &regs->CpuCtrl);
		readl(&regs->CpuCtrl);

		/* aman@sgi.com - account for badly behaving firmware/NIC:
		 * - have observed that the NIC may continue to generate
		 *   interrupts for some reason; attempt to stop it - halt
		 *   second CPU for Tigon II cards, and also clear Mb0
		 * - if we're a module, we'll fail to load if this was
		 *   the only GbE card in the system => if the kernel does
		 *   see an interrupt from the NIC, code to handle it is
		 *   gone and OOps! - so free_irq also
		 */
		if (ap->version >= 2)
			writel(readl(&regs->CpuBCtrl) | CPU_HALT,
			       &regs->CpuBCtrl);
		writel(0, &regs->Mb0Lo);
		readl(&regs->Mb0Lo);

		ecode = -EBUSY;
		goto init_error;
	}

	/*
	 * We load the ring here as there seem to be no way to tell the
	 * firmware to wipe the ring without re-initializing it.
	 */
	if (!test_and_set_bit(0, &ap->std_refill_busy))
		ace_load_std_rx_ring(dev, RX_RING_SIZE);
	else
		printk(KERN_ERR "%s: Someone is busy refilling the RX ring\n",
		       ap->name);
	if (ap->version >= 2) {
		if (!test_and_set_bit(0, &ap->mini_refill_busy))
			ace_load_mini_rx_ring(dev, RX_MINI_SIZE);
		else
			printk(KERN_ERR "%s: Someone is busy refilling "
			       "the RX mini ring\n", ap->name);
	}
	return 0;

 init_error:
	ace_init_cleanup(dev);
	return ecode;
}
1522 | |||
1523 | |||
1524 | static void ace_set_rxtx_parms(struct net_device *dev, int jumbo) | ||
1525 | { | ||
1526 | struct ace_private *ap = netdev_priv(dev); | ||
1527 | struct ace_regs __iomem *regs = ap->regs; | ||
1528 | int board_idx = ap->board_idx; | ||
1529 | |||
1530 | if (board_idx >= 0) { | ||
1531 | if (!jumbo) { | ||
1532 | if (!tx_coal_tick[board_idx]) | ||
1533 | writel(DEF_TX_COAL, ®s->TuneTxCoalTicks); | ||
1534 | if (!max_tx_desc[board_idx]) | ||
1535 | writel(DEF_TX_MAX_DESC, ®s->TuneMaxTxDesc); | ||
1536 | if (!rx_coal_tick[board_idx]) | ||
1537 | writel(DEF_RX_COAL, ®s->TuneRxCoalTicks); | ||
1538 | if (!max_rx_desc[board_idx]) | ||
1539 | writel(DEF_RX_MAX_DESC, ®s->TuneMaxRxDesc); | ||
1540 | if (!tx_ratio[board_idx]) | ||
1541 | writel(DEF_TX_RATIO, ®s->TxBufRat); | ||
1542 | } else { | ||
1543 | if (!tx_coal_tick[board_idx]) | ||
1544 | writel(DEF_JUMBO_TX_COAL, | ||
1545 | ®s->TuneTxCoalTicks); | ||
1546 | if (!max_tx_desc[board_idx]) | ||
1547 | writel(DEF_JUMBO_TX_MAX_DESC, | ||
1548 | ®s->TuneMaxTxDesc); | ||
1549 | if (!rx_coal_tick[board_idx]) | ||
1550 | writel(DEF_JUMBO_RX_COAL, | ||
1551 | ®s->TuneRxCoalTicks); | ||
1552 | if (!max_rx_desc[board_idx]) | ||
1553 | writel(DEF_JUMBO_RX_MAX_DESC, | ||
1554 | ®s->TuneMaxRxDesc); | ||
1555 | if (!tx_ratio[board_idx]) | ||
1556 | writel(DEF_JUMBO_TX_RATIO, ®s->TxBufRat); | ||
1557 | } | ||
1558 | } | ||
1559 | } | ||
1560 | |||
1561 | |||
1562 | static void ace_watchdog(struct net_device *data) | ||
1563 | { | ||
1564 | struct net_device *dev = data; | ||
1565 | struct ace_private *ap = netdev_priv(dev); | ||
1566 | struct ace_regs __iomem *regs = ap->regs; | ||
1567 | |||
1568 | /* | ||
1569 | * We haven't received a stats update event for more than 2.5 | ||
1570 | * seconds and there is data in the transmit queue, thus we | ||
1571 | * assume the card is stuck. | ||
1572 | */ | ||
1573 | if (*ap->tx_csm != ap->tx_ret_csm) { | ||
1574 | printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n", | ||
1575 | dev->name, (unsigned int)readl(®s->HostCtrl)); | ||
1576 | /* This can happen due to ieee flow control. */ | ||
1577 | } else { | ||
1578 | printk(KERN_DEBUG "%s: BUG... transmitter died. Kicking it.\n", | ||
1579 | dev->name); | ||
1580 | #if 0 | ||
1581 | netif_wake_queue(dev); | ||
1582 | #endif | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | |||
1587 | static void ace_tasklet(unsigned long arg) | ||
1588 | { | ||
1589 | struct net_device *dev = (struct net_device *) arg; | ||
1590 | struct ace_private *ap = netdev_priv(dev); | ||
1591 | int cur_size; | ||
1592 | |||
1593 | cur_size = atomic_read(&ap->cur_rx_bufs); | ||
1594 | if ((cur_size < RX_LOW_STD_THRES) && | ||
1595 | !test_and_set_bit(0, &ap->std_refill_busy)) { | ||
1596 | #ifdef DEBUG | ||
1597 | printk("refilling buffers (current %i)\n", cur_size); | ||
1598 | #endif | ||
1599 | ace_load_std_rx_ring(dev, RX_RING_SIZE - cur_size); | ||
1600 | } | ||
1601 | |||
1602 | if (ap->version >= 2) { | ||
1603 | cur_size = atomic_read(&ap->cur_mini_bufs); | ||
1604 | if ((cur_size < RX_LOW_MINI_THRES) && | ||
1605 | !test_and_set_bit(0, &ap->mini_refill_busy)) { | ||
1606 | #ifdef DEBUG | ||
1607 | printk("refilling mini buffers (current %i)\n", | ||
1608 | cur_size); | ||
1609 | #endif | ||
1610 | ace_load_mini_rx_ring(dev, RX_MINI_SIZE - cur_size); | ||
1611 | } | ||
1612 | } | ||
1613 | |||
1614 | cur_size = atomic_read(&ap->cur_jumbo_bufs); | ||
1615 | if (ap->jumbo && (cur_size < RX_LOW_JUMBO_THRES) && | ||
1616 | !test_and_set_bit(0, &ap->jumbo_refill_busy)) { | ||
1617 | #ifdef DEBUG | ||
1618 | printk("refilling jumbo buffers (current %i)\n", cur_size); | ||
1619 | #endif | ||
1620 | ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); | ||
1621 | } | ||
1622 | ap->tasklet_pending = 0; | ||
1623 | } | ||
1624 | |||
1625 | |||
1626 | /* | ||
1627 | * Copy the contents of the NIC's trace buffer to kernel memory. | ||
1628 | */ | ||
1629 | static void ace_dump_trace(struct ace_private *ap) | ||
1630 | { | ||
1631 | #if 0 | ||
1632 | if (!ap->trace_buf) | ||
1633 | if (!(ap->trace_buf = kmalloc(ACE_TRACE_SIZE, GFP_KERNEL))) | ||
1634 | return; | ||
1635 | #endif | ||
1636 | } | ||
1637 | |||
1638 | |||
1639 | /* | ||
1640 | * Load the standard rx ring. | ||
1641 | * | ||
1642 | * Loading rings is safe without holding the spin lock since this is | ||
1643 | * done only before the device is enabled, thus no interrupts are | ||
1644 | * generated and by the interrupt handler/tasklet handler. | ||
1645 | */ | ||
1646 | static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) | ||
1647 | { | ||
1648 | struct ace_private *ap = netdev_priv(dev); | ||
1649 | struct ace_regs __iomem *regs = ap->regs; | ||
1650 | short i, idx; | ||
1651 | |||
1652 | |||
1653 | prefetchw(&ap->cur_rx_bufs); | ||
1654 | |||
1655 | idx = ap->rx_std_skbprd; | ||
1656 | |||
1657 | for (i = 0; i < nr_bufs; i++) { | ||
1658 | struct sk_buff *skb; | ||
1659 | struct rx_desc *rd; | ||
1660 | dma_addr_t mapping; | ||
1661 | |||
1662 | skb = netdev_alloc_skb_ip_align(dev, ACE_STD_BUFSIZE); | ||
1663 | if (!skb) | ||
1664 | break; | ||
1665 | |||
1666 | mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), | ||
1667 | offset_in_page(skb->data), | ||
1668 | ACE_STD_BUFSIZE, | ||
1669 | PCI_DMA_FROMDEVICE); | ||
1670 | ap->skb->rx_std_skbuff[idx].skb = skb; | ||
1671 | dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], | ||
1672 | mapping, mapping); | ||
1673 | |||
1674 | rd = &ap->rx_std_ring[idx]; | ||
1675 | set_aceaddr(&rd->addr, mapping); | ||
1676 | rd->size = ACE_STD_BUFSIZE; | ||
1677 | rd->idx = idx; | ||
1678 | idx = (idx + 1) % RX_STD_RING_ENTRIES; | ||
1679 | } | ||
1680 | |||
1681 | if (!i) | ||
1682 | goto error_out; | ||
1683 | |||
1684 | atomic_add(i, &ap->cur_rx_bufs); | ||
1685 | ap->rx_std_skbprd = idx; | ||
1686 | |||
1687 | if (ACE_IS_TIGON_I(ap)) { | ||
1688 | struct cmd cmd; | ||
1689 | cmd.evt = C_SET_RX_PRD_IDX; | ||
1690 | cmd.code = 0; | ||
1691 | cmd.idx = ap->rx_std_skbprd; | ||
1692 | ace_issue_cmd(regs, &cmd); | ||
1693 | } else { | ||
1694 | writel(idx, ®s->RxStdPrd); | ||
1695 | wmb(); | ||
1696 | } | ||
1697 | |||
1698 | out: | ||
1699 | clear_bit(0, &ap->std_refill_busy); | ||
1700 | return; | ||
1701 | |||
1702 | error_out: | ||
1703 | printk(KERN_INFO "Out of memory when allocating " | ||
1704 | "standard receive buffers\n"); | ||
1705 | goto out; | ||
1706 | } | ||
1707 | |||
1708 | |||
1709 | static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs) | ||
1710 | { | ||
1711 | struct ace_private *ap = netdev_priv(dev); | ||
1712 | struct ace_regs __iomem *regs = ap->regs; | ||
1713 | short i, idx; | ||
1714 | |||
1715 | prefetchw(&ap->cur_mini_bufs); | ||
1716 | |||
1717 | idx = ap->rx_mini_skbprd; | ||
1718 | for (i = 0; i < nr_bufs; i++) { | ||
1719 | struct sk_buff *skb; | ||
1720 | struct rx_desc *rd; | ||
1721 | dma_addr_t mapping; | ||
1722 | |||
1723 | skb = netdev_alloc_skb_ip_align(dev, ACE_MINI_BUFSIZE); | ||
1724 | if (!skb) | ||
1725 | break; | ||
1726 | |||
1727 | mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), | ||
1728 | offset_in_page(skb->data), | ||
1729 | ACE_MINI_BUFSIZE, | ||
1730 | PCI_DMA_FROMDEVICE); | ||
1731 | ap->skb->rx_mini_skbuff[idx].skb = skb; | ||
1732 | dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], | ||
1733 | mapping, mapping); | ||
1734 | |||
1735 | rd = &ap->rx_mini_ring[idx]; | ||
1736 | set_aceaddr(&rd->addr, mapping); | ||
1737 | rd->size = ACE_MINI_BUFSIZE; | ||
1738 | rd->idx = idx; | ||
1739 | idx = (idx + 1) % RX_MINI_RING_ENTRIES; | ||
1740 | } | ||
1741 | |||
1742 | if (!i) | ||
1743 | goto error_out; | ||
1744 | |||
1745 | atomic_add(i, &ap->cur_mini_bufs); | ||
1746 | |||
1747 | ap->rx_mini_skbprd = idx; | ||
1748 | |||
1749 | writel(idx, ®s->RxMiniPrd); | ||
1750 | wmb(); | ||
1751 | |||
1752 | out: | ||
1753 | clear_bit(0, &ap->mini_refill_busy); | ||
1754 | return; | ||
1755 | error_out: | ||
1756 | printk(KERN_INFO "Out of memory when allocating " | ||
1757 | "mini receive buffers\n"); | ||
1758 | goto out; | ||
1759 | } | ||
1760 | |||
1761 | |||
/*
 * Load the jumbo rx ring, this may happen at any time if the MTU
 * is changed to a value > 1500.
 *
 * Caller must own bit 0 of ap->jumbo_refill_busy; it is released here.
 */
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	short i, idx;

	idx = ap->rx_jumbo_skbprd;

	for (i = 0; i < nr_bufs; i++) {
		struct sk_buff *skb;
		struct rx_desc *rd;
		dma_addr_t mapping;

		skb = netdev_alloc_skb_ip_align(dev, ACE_JUMBO_BUFSIZE);
		if (!skb)
			break;

		/* Map the buffer for device DMA; the mapping is saved so
		 * the completion path can unmap it. */
		mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
				       offset_in_page(skb->data),
				       ACE_JUMBO_BUFSIZE,
				       PCI_DMA_FROMDEVICE);
		ap->skb->rx_jumbo_skbuff[idx].skb = skb;
		dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
				   mapping, mapping);

		rd = &ap->rx_jumbo_ring[idx];
		set_aceaddr(&rd->addr, mapping);
		rd->size = ACE_JUMBO_BUFSIZE;
		rd->idx = idx;
		idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
	}

	if (!i)
		goto error_out;

	atomic_add(i, &ap->cur_jumbo_bufs);
	ap->rx_jumbo_skbprd = idx;

	if (ACE_IS_TIGON_I(ap)) {
		/* Tigon I advances the producer index via a firmware
		 * command; Tigon II via a plain mailbox write. */
		struct cmd cmd;
		cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
		cmd.code = 0;
		cmd.idx = ap->rx_jumbo_skbprd;
		ace_issue_cmd(regs, &cmd);
	} else {
		writel(idx, &regs->RxJumboPrd);
		wmb();
	}

 out:
	clear_bit(0, &ap->jumbo_refill_busy);
	return;
 error_out:
	if (net_ratelimit())
		printk(KERN_INFO "Out of memory when allocating "
		       "jumbo receive buffers\n");
	goto out;
}
1824 | |||
1825 | |||
/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 *
 * Walks the event ring from consumer index @evtcsm up to producer
 * index @evtprd and returns the updated consumer index, which the
 * caller writes back to the NIC's EvtCsm register.
 */
static u32 ace_handle_event(struct net_device *dev, u32 evtcsm, u32 evtprd)
{
	struct ace_private *ap;

	ap = netdev_priv(dev);

	while (evtcsm != evtprd) {
		switch (ap->evt_ring[evtcsm].evt) {
		case E_FW_RUNNING:
			printk(KERN_INFO "%s: Firmware up and running\n",
			       ap->name);
			ap->fw_running = 1;
			/* make fw_running visible to other CPUs before
			 * anything that depends on it */
			wmb();
			break;
		case E_STATS_UPDATED:
			/* Statistics block updated by the NIC; nothing to
			 * do here. */
			break;
		case E_LNK_STATE:
		{
			u16 code = ap->evt_ring[evtcsm].code;
			switch (code) {
			case E_C_LINK_UP:
			{
				u32 state = readl(&ap->regs->GigLnkState);
				printk(KERN_WARNING "%s: Optical link UP "
				       "(%s Duplex, Flow Control: %s%s)\n",
				       ap->name,
				       state & LNK_FULL_DUPLEX ? "Full":"Half",
				       state & LNK_TX_FLOW_CTL_Y ? "TX " : "",
				       state & LNK_RX_FLOW_CTL_Y ? "RX" : "");
				break;
			}
			case E_C_LINK_DOWN:
				printk(KERN_WARNING "%s: Optical link DOWN\n",
				       ap->name);
				break;
			case E_C_LINK_10_100:
				printk(KERN_WARNING "%s: 10/100BaseT link "
				       "UP\n", ap->name);
				break;
			default:
				printk(KERN_ERR "%s: Unknown optical link "
				       "state %02x\n", ap->name, code);
			}
			break;
		}
		case E_ERROR:
			switch(ap->evt_ring[evtcsm].code) {
			case E_C_ERR_INVAL_CMD:
				printk(KERN_ERR "%s: invalid command error\n",
				       ap->name);
				break;
			case E_C_ERR_UNIMP_CMD:
				printk(KERN_ERR "%s: unimplemented command "
				       "error\n", ap->name);
				break;
			case E_C_ERR_BAD_CFG:
				printk(KERN_ERR "%s: bad config error\n",
				       ap->name);
				break;
			default:
				printk(KERN_ERR "%s: unknown error %02x\n",
				       ap->name, ap->evt_ring[evtcsm].code);
			}
			break;
		case E_RESET_JUMBO_RNG:
		{
			int i;
			/* Firmware finished flushing the jumbo ring:
			 * release every buffer we had posted to it.
			 * NOTE(review): the DMA mappings for these buffers
			 * do not appear to be unmapped here — verify
			 * against the RX completion/teardown paths. */
			for (i = 0; i < RX_JUMBO_RING_ENTRIES; i++) {
				if (ap->skb->rx_jumbo_skbuff[i].skb) {
					ap->rx_jumbo_ring[i].size = 0;
					set_aceaddr(&ap->rx_jumbo_ring[i].addr, 0);
					dev_kfree_skb(ap->skb->rx_jumbo_skbuff[i].skb);
					ap->skb->rx_jumbo_skbuff[i].skb = NULL;
				}
			}

			if (ACE_IS_TIGON_I(ap)) {
				struct cmd cmd;
				cmd.evt = C_SET_RX_JUMBO_PRD_IDX;
				cmd.code = 0;
				cmd.idx = 0;
				ace_issue_cmd(ap->regs, &cmd);
			} else {
				writel(0, &((ap->regs)->RxJumboPrd));
				wmb();
			}

			ap->jumbo = 0;
			ap->rx_jumbo_skbprd = 0;
			printk(KERN_INFO "%s: Jumbo ring flushed\n",
			       ap->name);
			/* Ring is empty again; allow future refills. */
			clear_bit(0, &ap->jumbo_refill_busy);
			break;
		}
		default:
			printk(KERN_ERR "%s: Unhandled event 0x%02x\n",
			       ap->name, ap->evt_ring[evtcsm].evt);
		}
		evtcsm = (evtcsm + 1) % EVT_RING_ENTRIES;
	}

	return evtcsm;
}
1934 | |||
1935 | |||
/*
 * RX completion handler: drain the return ring from consumer index
 * @rxretcsm up to producer index @rxretprd, pass each completed frame
 * to the network stack, and account the consumed ring buffers.
 * Runs in hard-irq context (called from ace_interrupt), lock-free.
 */
static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
{
	struct ace_private *ap = netdev_priv(dev);
	u32 idx;
	int mini_count = 0, std_count = 0;

	idx = rxretcsm;

	prefetchw(&ap->cur_rx_bufs);
	prefetchw(&ap->cur_mini_bufs);

	while (idx != rxretprd) {
		struct ring_info *rip;
		struct sk_buff *skb;
		struct rx_desc *rxdesc, *retdesc;
		u32 skbidx;
		int bd_flags, desc_type, mapsize;
		u16 csum;


		/* make sure the rx descriptor isn't read before rxretprd */
		if (idx == rxretcsm)
			rmb();

		retdesc = &ap->rx_return_ring[idx];
		skbidx = retdesc->idx;
		bd_flags = retdesc->flags;
		desc_type = bd_flags & (BD_FLG_JUMBO | BD_FLG_MINI);

		/* NOTE(review): rxdesc is assigned in each case below but
		 * never read afterwards — looks vestigial; confirm. */
		switch(desc_type) {
			/*
			 * Normal frames do not have any flags set
			 *
			 * Mini and normal frames arrive frequently,
			 * so use a local counter to avoid doing
			 * atomic operations for each packet arriving.
			 */
		case 0:
			rip = &ap->skb->rx_std_skbuff[skbidx];
			mapsize = ACE_STD_BUFSIZE;
			rxdesc = &ap->rx_std_ring[skbidx];
			std_count++;
			break;
		case BD_FLG_JUMBO:
			rip = &ap->skb->rx_jumbo_skbuff[skbidx];
			mapsize = ACE_JUMBO_BUFSIZE;
			rxdesc = &ap->rx_jumbo_ring[skbidx];
			atomic_dec(&ap->cur_jumbo_bufs);
			break;
		case BD_FLG_MINI:
			rip = &ap->skb->rx_mini_skbuff[skbidx];
			mapsize = ACE_MINI_BUFSIZE;
			rxdesc = &ap->rx_mini_ring[skbidx];
			mini_count++;
			break;
		default:
			printk(KERN_INFO "%s: unknown frame type (0x%02x) "
			       "returned by NIC\n", dev->name,
			       retdesc->flags);
			goto error;
		}

		/* Detach the skb from the ring and undo the DMA mapping
		 * before touching the payload. */
		skb = rip->skb;
		rip->skb = NULL;
		pci_unmap_page(ap->pdev,
			       dma_unmap_addr(rip, mapping),
			       mapsize,
			       PCI_DMA_FROMDEVICE);
		skb_put(skb, retdesc->size);

		/*
		 * Fly baby, fly!
		 */
		csum = retdesc->tcp_udp_csum;

		skb->protocol = eth_type_trans(skb, dev);

		/*
		 * Instead of forcing the poor tigon mips cpu to calculate
		 * pseudo hdr checksum, we do this ourselves.
		 */
		if (bd_flags & BD_FLG_TCP_UDP_SUM) {
			skb->csum = htons(csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else {
			skb_checksum_none_assert(skb);
		}

		/* send it up */
		if ((bd_flags & BD_FLG_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb, retdesc->vlan);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += retdesc->size;

		idx = (idx + 1) % RX_RETURN_RING_ENTRIES;
	}

	/* Fold the per-loop counters into the atomics in one shot. */
	atomic_sub(std_count, &ap->cur_rx_bufs);
	if (!ACE_IS_TIGON_I(ap))
		atomic_sub(mini_count, &ap->cur_mini_bufs);

 out:
	/*
	 * According to the documentation RxRetCsm is obsolete with
	 * the 12.3.x Firmware - my Tigon I NICs seem to disagree!
	 */
	if (ACE_IS_TIGON_I(ap)) {
		writel(idx, &ap->regs->RxRetCsm);
	}
	ap->cur_rx = idx;

	return;
 error:
	/* Unknown descriptor type: skip everything outstanding. */
	idx = rxretprd;
	goto out;
}
2054 | |||
2055 | |||
/*
 * TX completion: reclaim descriptors from @idx (our last consumer
 * position) up to @txcsm (the NIC's reported consumer index), unmap
 * DMA, free completed skbs and wake the queue.  Lock-free; see the
 * trailing comment for the ordering rationale.
 */
static inline void ace_tx_int(struct net_device *dev,
			      u32 txcsm, u32 idx)
{
	struct ace_private *ap = netdev_priv(dev);

	do {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + idx;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			/* maplen == 0 marks the slot as unmapped. */
			dma_unmap_len_set(info, maplen, 0);
		}

		/* Only the descriptor holding the last fragment carries
		 * the skb pointer (see ace_start_xmit). */
		if (skb) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
			dev_kfree_skb_irq(skb);
			info->skb = NULL;
		}

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);
	} while (idx != txcsm);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	wmb();
	ap->tx_ret_csm = txcsm;

	/* So... tx_ret_csm is advanced _after_ check for device wakeup.
	 *
	 * We could try to make it before. In this case we would get
	 * the following race condition: hard_start_xmit on other cpu
	 * enters after we advanced tx_ret_csm and fills space,
	 * which we have just freed, so that we make illegal device wakeup.
	 * There is no good way to workaround this (at entry
	 * to ace_start_xmit detects this condition and prevents
	 * ring corruption, but it is not a good workaround.)
	 *
	 * When tx_ret_csm is advanced after, we wake up device _only_
	 * if we really have some space in ring (though the core doing
	 * hard_start_xmit can see full ring for some period and has to
	 * synchronize.) Superb.
	 * BUT! We get another subtle race condition. hard_start_xmit
	 * may think that ring is full between wakeup and advancing
	 * tx_ret_csm and will stop device instantly! It is not so bad.
	 * We are guaranteed that there is something in ring, so that
	 * the next irq will resume transmission. To speedup this we could
	 * mark descriptor, which closes ring with BD_FLG_COAL_NOW
	 * (see ace_start_xmit).
	 *
	 * Well, this dilemma exists in all lock-free devices.
	 * We, following scheme used in drivers by Donald Becker,
	 * select the least dangerous.
	 * --ANK
	 */
}
2119 | |||
2120 | |||
/*
 * Main interrupt handler: acknowledges the IRQ, then services RX
 * completions, TX completions and slow-path events, and finally kicks
 * the refill tasklet if any RX ring has run low.  Returns IRQ_NONE for
 * shared/spurious interrupts that are not ours.
 */
static irqreturn_t ace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 idx;
	u32 txcsm, rxretcsm, rxretprd;
	u32 evtcsm, evtprd;

	/*
	 * In case of PCI shared interrupts or spurious interrupts,
	 * we want to make sure it is actually our interrupt before
	 * spending any time in here.
	 */
	if (!(readl(&regs->HostCtrl) & IN_INT))
		return IRQ_NONE;

	/*
	 * ACK intr now. Otherwise we will lose updates to rx_ret_prd,
	 * which happened _after_ rxretprd = *ap->rx_ret_prd; but before
	 * writel(0, &regs->Mb0Lo).
	 *
	 * "IRQ avoidance" recommended in docs applies to IRQs served
	 * threads and it is wrong even for that case.
	 */
	writel(0, &regs->Mb0Lo);
	/* flush the posted write so the ack reaches the NIC now */
	readl(&regs->Mb0Lo);

	/*
	 * There is no conflict between transmit handling in
	 * start_xmit and receive processing, thus there is no reason
	 * to take a spin lock for RX handling. Wait until we start
	 * working on the other stuff - hey we don't need a spin lock
	 * anymore.
	 */
	rxretprd = *ap->rx_ret_prd;
	rxretcsm = ap->cur_rx;

	if (rxretprd != rxretcsm)
		ace_rx_int(dev, rxretprd, rxretcsm);

	txcsm = *ap->tx_csm;
	idx = ap->tx_ret_csm;

	if (txcsm != idx) {
		/*
		 * If each skb takes only one descriptor this check degenerates
		 * to identity, because new space has just been opened.
		 * But if skbs are fragmented we must check that this index
		 * update releases enough of space, otherwise we just
		 * wait for device to make more work.
		 */
		if (!tx_ring_full(ap, txcsm, ap->tx_prd))
			ace_tx_int(dev, txcsm, idx);
	}

	evtcsm = readl(&regs->EvtCsm);
	evtprd = *ap->evt_prd;

	if (evtcsm != evtprd) {
		evtcsm = ace_handle_event(dev, evtcsm, evtprd);
		writel(evtcsm, &regs->EvtCsm);
	}

	/*
	 * This has to go last in the interrupt handler and run with
	 * the spin lock released ... what lock?
	 */
	if (netif_running(dev)) {
		int cur_size;
		int run_tasklet = 0;

		/* Refill in-irq only below the panic threshold; between
		 * the low and panic marks defer to the tasklet. */
		cur_size = atomic_read(&ap->cur_rx_bufs);
		if (cur_size < RX_LOW_STD_THRES) {
			if ((cur_size < RX_PANIC_STD_THRES) &&
			    !test_and_set_bit(0, &ap->std_refill_busy)) {
#ifdef DEBUG
				printk("low on std buffers %i\n", cur_size);
#endif
				ace_load_std_rx_ring(dev,
						     RX_RING_SIZE - cur_size);
			} else
				run_tasklet = 1;
		}

		if (!ACE_IS_TIGON_I(ap)) {
			cur_size = atomic_read(&ap->cur_mini_bufs);
			if (cur_size < RX_LOW_MINI_THRES) {
				if ((cur_size < RX_PANIC_MINI_THRES) &&
				    !test_and_set_bit(0,
						      &ap->mini_refill_busy)) {
#ifdef DEBUG
					printk("low on mini buffers %i\n",
					       cur_size);
#endif
					ace_load_mini_rx_ring(dev,
							      RX_MINI_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}

		if (ap->jumbo) {
			cur_size = atomic_read(&ap->cur_jumbo_bufs);
			if (cur_size < RX_LOW_JUMBO_THRES) {
				if ((cur_size < RX_PANIC_JUMBO_THRES) &&
				    !test_and_set_bit(0,
						      &ap->jumbo_refill_busy)){
#ifdef DEBUG
					printk("low on jumbo buffers %i\n",
					       cur_size);
#endif
					ace_load_jumbo_rx_ring(dev,
							       RX_JUMBO_SIZE - cur_size);
				} else
					run_tasklet = 1;
			}
		}
		if (run_tasklet && !ap->tasklet_pending) {
			ap->tasklet_pending = 1;
			tasklet_schedule(&ap->ace_tasklet);
		}
	}

	return IRQ_HANDLED;
}
2247 | |||
/*
 * Bring the interface up: program the MTU, clear the firmware stats,
 * raise the firmware stack, prime the jumbo ring if jumbo frames are
 * enabled, mirror IFF_PROMISC into the NIC, start the TX queue and set
 * up the refill tasklet.  Returns -EBUSY if the firmware never came up.
 */
static int ace_open(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;

	if (!(ap->fw_running)) {
		printk(KERN_WARNING "%s: Firmware not running!\n", dev->name);
		return -EBUSY;
	}

	/* + 4 presumably accounts for the VLAN tag — TODO confirm */
	writel(dev->mtu + ETH_HLEN + 4, &regs->IfMtu);

	cmd.evt = C_CLEAR_STATS;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_UP;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	/* Prime the jumbo ring unless another context already holds the
	 * refill lock. */
	if (ap->jumbo &&
	    !test_and_set_bit(0, &ap->jumbo_refill_busy))
		ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE);

	if (dev->flags & IFF_PROMISC) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_ENABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);

		ap->promisc = 1;
	}else
		ap->promisc = 0;
	ap->mcast_all = 0;

#if 0
	cmd.evt = C_LNK_NEGOTIATION;
	cmd.code = 0;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);
#endif

	netif_start_queue(dev);

	/*
	 * Setup the bottom half rx ring refill handler
	 */
	tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
	return 0;
}
2301 | |||
2302 | |||
/*
 * Bring the interface down: stop the queue, drop promiscuous mode,
 * lower the firmware stack, kill the refill tasklet, then (with the
 * NIC irq masked and local irqs off) unmap and free every outstanding
 * TX buffer and flush the jumbo ring.  Always returns 0.
 */
static int ace_close(struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct cmd cmd;
	unsigned long flags;
	short i;

	/*
	 * Without (or before) releasing irq and stopping hardware, this
	 * is an absolute non-sense, by the way. It will be reset instantly
	 * by the first irq.
	 */
	netif_stop_queue(dev);


	if (ap->promisc) {
		cmd.evt = C_SET_PROMISC_MODE;
		cmd.code = C_C_PROMISC_DISABLE;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
		ap->promisc = 0;
	}

	cmd.evt = C_HOST_STATE;
	cmd.code = C_C_STACK_DOWN;
	cmd.idx = 0;
	ace_issue_cmd(regs, &cmd);

	tasklet_kill(&ap->ace_tasklet);

	/*
	 * Make sure one CPU is not processing packets while
	 * buffers are being released by another.
	 */

	local_irq_save(flags);
	ace_mask_irq(dev);

	for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
		struct sk_buff *skb;
		struct tx_ring_info *info;

		info = ap->skb->tx_skbuff + i;
		skb = info->skb;

		if (dma_unmap_len(info, maplen)) {
			if (ACE_IS_TIGON_I(ap)) {
				/* NB: TIGON_1 is special, tx_ring is in io space */
				struct tx_desc __iomem *tx;
				tx = (__force struct tx_desc __iomem *) &ap->tx_ring[i];
				writel(0, &tx->addr.addrhi);
				writel(0, &tx->addr.addrlo);
				writel(0, &tx->flagsize);
			} else
				memset(ap->tx_ring + i, 0,
				       sizeof(struct tx_desc));
			pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
				       dma_unmap_len(info, maplen),
				       PCI_DMA_TODEVICE);
			/* maplen == 0 marks the slot as unmapped */
			dma_unmap_len_set(info, maplen, 0);
		}
		if (skb) {
			dev_kfree_skb(skb);
			info->skb = NULL;
		}
	}

	/* Ask the firmware to flush the jumbo ring; the resulting
	 * E_RESET_JUMBO_RNG event frees the posted buffers. */
	if (ap->jumbo) {
		cmd.evt = C_RESET_JUMBO_RNG;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}

	ace_unmask_irq(dev);
	local_irq_restore(flags);

	return 0;
}
2383 | |||
2384 | |||
2385 | static inline dma_addr_t | ||
2386 | ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb, | ||
2387 | struct sk_buff *tail, u32 idx) | ||
2388 | { | ||
2389 | dma_addr_t mapping; | ||
2390 | struct tx_ring_info *info; | ||
2391 | |||
2392 | mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), | ||
2393 | offset_in_page(skb->data), | ||
2394 | skb->len, PCI_DMA_TODEVICE); | ||
2395 | |||
2396 | info = ap->skb->tx_skbuff + idx; | ||
2397 | info->skb = tail; | ||
2398 | dma_unmap_addr_set(info, mapping, mapping); | ||
2399 | dma_unmap_len_set(info, maplen, skb->len); | ||
2400 | return mapping; | ||
2401 | } | ||
2402 | |||
2403 | |||
2404 | static inline void | ||
2405 | ace_load_tx_bd(struct ace_private *ap, struct tx_desc *desc, u64 addr, | ||
2406 | u32 flagsize, u32 vlan_tag) | ||
2407 | { | ||
2408 | #if !USE_TX_COAL_NOW | ||
2409 | flagsize &= ~BD_FLG_COAL_NOW; | ||
2410 | #endif | ||
2411 | |||
2412 | if (ACE_IS_TIGON_I(ap)) { | ||
2413 | struct tx_desc __iomem *io = (__force struct tx_desc __iomem *) desc; | ||
2414 | writel(addr >> 32, &io->addr.addrhi); | ||
2415 | writel(addr & 0xffffffff, &io->addr.addrlo); | ||
2416 | writel(flagsize, &io->flagsize); | ||
2417 | writel(vlan_tag, &io->vlanres); | ||
2418 | } else { | ||
2419 | desc->addr.addrhi = addr >> 32; | ||
2420 | desc->addr.addrlo = addr; | ||
2421 | desc->flagsize = flagsize; | ||
2422 | desc->vlanres = vlan_tag; | ||
2423 | } | ||
2424 | } | ||
2425 | |||
2426 | |||
/*
 * Lock-free transmit path.  Posts the skb's linear data (and any
 * fragments) as TX descriptors, advances the producer index, and stops
 * the queue when the ring closes.  On a full ring it busy-retries for
 * up to 3 seconds before reporting NETDEV_TX_BUSY; see the comment at
 * the overflow label for why.
 */
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	struct tx_desc *desc;
	u32 idx, flagsize;
	unsigned long maxjiff = jiffies + 3*HZ;

restart:
	idx = ap->tx_prd;

	if (tx_ring_full(ap, ap->tx_ret_csm, idx))
		goto overflow;

	if (!skb_shinfo(skb)->nr_frags) {
		/* Unfragmented skb: a single descriptor carries the
		 * whole packet and owns the skb pointer. */
		dma_addr_t mapping;
		u32 vlan_tag = 0;

		mapping = ace_map_tx_skb(ap, skb, skb, idx);
		flagsize = (skb->len << 16) | (BD_FLG_END);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flagsize |= BD_FLG_TCP_UDP_SUM;
		if (vlan_tx_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = vlan_tx_tag_get(skb);
		}
		desc = ap->tx_ring + idx;
		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		/* Look at ace_tx_int for explanations. */
		if (tx_ring_full(ap, ap->tx_ret_csm, idx))
			flagsize |= BD_FLG_COAL_NOW;

		ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
	} else {
		/* Scatter-gather skb: one descriptor for the head, one
		 * per page fragment; only the last one owns the skb. */
		dma_addr_t mapping;
		u32 vlan_tag = 0;
		int i, len = 0;

		mapping = ace_map_tx_skb(ap, skb, NULL, idx);
		flagsize = (skb_headlen(skb) << 16);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flagsize |= BD_FLG_TCP_UDP_SUM;
		if (vlan_tx_tag_present(skb)) {
			flagsize |= BD_FLG_VLAN_TAG;
			vlan_tag = vlan_tx_tag_get(skb);
		}

		ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag);

		idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct tx_ring_info *info;

			len += frag->size;
			info = ap->skb->tx_skbuff + idx;
			desc = ap->tx_ring + idx;

			mapping = pci_map_page(ap->pdev, frag->page,
					       frag->page_offset, frag->size,
					       PCI_DMA_TODEVICE);

			flagsize = (frag->size << 16);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				flagsize |= BD_FLG_TCP_UDP_SUM;
			idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap);

			if (i == skb_shinfo(skb)->nr_frags - 1) {
				flagsize |= BD_FLG_END;
				if (tx_ring_full(ap, ap->tx_ret_csm, idx))
					flagsize |= BD_FLG_COAL_NOW;

				/*
				 * Only the last fragment frees
				 * the skb!
				 */
				info->skb = skb;
			} else {
				info->skb = NULL;
			}
			dma_unmap_addr_set(info, mapping, mapping);
			dma_unmap_len_set(info, maplen, frag->size);
			ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
		}
	}

	/* Descriptors must be visible before the producer update. */
	wmb();
	ap->tx_prd = idx;
	ace_set_txprd(regs, ap, idx);

	if (flagsize & BD_FLG_COAL_NOW) {
		netif_stop_queue(dev);

		/*
		 * A TX-descriptor producer (an IRQ) might have gotten
		 * between, making the ring free again. Since xmit is
		 * serialized, this is the only situation we have to
		 * re-test.
		 */
		if (!tx_ring_full(ap, ap->tx_ret_csm, idx))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

overflow:
	/*
	 * This race condition is unavoidable with lock-free drivers.
	 * We wake up the queue _before_ tx_prd is advanced, so that we can
	 * enter hard_start_xmit too early, while tx ring still looks closed.
	 * This happens ~1-4 times per 100000 packets, so that we can allow
	 * to loop syncing to other CPU. Probably, we need an additional
	 * wmb() in ace_tx_intr as well.
	 *
	 * Note that this race is relieved by reserving one more entry
	 * in tx ring than it is necessary (see original non-SG driver).
	 * However, with SG we need to reserve 2*MAX_SKB_FRAGS+1, which
	 * is already overkill.
	 *
	 * Alternative is to return with 1 not throttling queue. In this
	 * case loop becomes longer, no more useful effects.
	 */
	if (time_before(jiffies, maxjiff)) {
		barrier();
		cpu_relax();
		goto restart;
	}

	/* The ring is stuck full. */
	printk(KERN_WARNING "%s: Transmit ring stuck full\n", dev->name);
	return NETDEV_TX_BUSY;
}
2562 | |||
2563 | |||
2564 | static int ace_change_mtu(struct net_device *dev, int new_mtu) | ||
2565 | { | ||
2566 | struct ace_private *ap = netdev_priv(dev); | ||
2567 | struct ace_regs __iomem *regs = ap->regs; | ||
2568 | |||
2569 | if (new_mtu > ACE_JUMBO_MTU) | ||
2570 | return -EINVAL; | ||
2571 | |||
2572 | writel(new_mtu + ETH_HLEN + 4, ®s->IfMtu); | ||
2573 | dev->mtu = new_mtu; | ||
2574 | |||
2575 | if (new_mtu > ACE_STD_MTU) { | ||
2576 | if (!(ap->jumbo)) { | ||
2577 | printk(KERN_INFO "%s: Enabling Jumbo frame " | ||
2578 | "support\n", dev->name); | ||
2579 | ap->jumbo = 1; | ||
2580 | if (!test_and_set_bit(0, &ap->jumbo_refill_busy)) | ||
2581 | ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE); | ||
2582 | ace_set_rxtx_parms(dev, 1); | ||
2583 | } | ||
2584 | } else { | ||
2585 | while (test_and_set_bit(0, &ap->jumbo_refill_busy)); | ||
2586 | ace_sync_irq(dev->irq); | ||
2587 | ace_set_rxtx_parms(dev, 0); | ||
2588 | if (ap->jumbo) { | ||
2589 | struct cmd cmd; | ||
2590 | |||
2591 | cmd.evt = C_RESET_JUMBO_RNG; | ||
2592 | cmd.code = 0; | ||
2593 | cmd.idx = 0; | ||
2594 | ace_issue_cmd(regs, &cmd); | ||
2595 | } | ||
2596 | } | ||
2597 | |||
2598 | return 0; | ||
2599 | } | ||
2600 | |||
2601 | static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
2602 | { | ||
2603 | struct ace_private *ap = netdev_priv(dev); | ||
2604 | struct ace_regs __iomem *regs = ap->regs; | ||
2605 | u32 link; | ||
2606 | |||
2607 | memset(ecmd, 0, sizeof(struct ethtool_cmd)); | ||
2608 | ecmd->supported = | ||
2609 | (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | | ||
2610 | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | | ||
2611 | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | | ||
2612 | SUPPORTED_Autoneg | SUPPORTED_FIBRE); | ||
2613 | |||
2614 | ecmd->port = PORT_FIBRE; | ||
2615 | ecmd->transceiver = XCVR_INTERNAL; | ||
2616 | |||
2617 | link = readl(®s->GigLnkState); | ||
2618 | if (link & LNK_1000MB) | ||
2619 | ethtool_cmd_speed_set(ecmd, SPEED_1000); | ||
2620 | else { | ||
2621 | link = readl(®s->FastLnkState); | ||
2622 | if (link & LNK_100MB) | ||
2623 | ethtool_cmd_speed_set(ecmd, SPEED_100); | ||
2624 | else if (link & LNK_10MB) | ||
2625 | ethtool_cmd_speed_set(ecmd, SPEED_10); | ||
2626 | else | ||
2627 | ethtool_cmd_speed_set(ecmd, 0); | ||
2628 | } | ||
2629 | if (link & LNK_FULL_DUPLEX) | ||
2630 | ecmd->duplex = DUPLEX_FULL; | ||
2631 | else | ||
2632 | ecmd->duplex = DUPLEX_HALF; | ||
2633 | |||
2634 | if (link & LNK_NEGOTIATE) | ||
2635 | ecmd->autoneg = AUTONEG_ENABLE; | ||
2636 | else | ||
2637 | ecmd->autoneg = AUTONEG_DISABLE; | ||
2638 | |||
2639 | #if 0 | ||
2640 | /* | ||
2641 | * Current struct ethtool_cmd is insufficient | ||
2642 | */ | ||
2643 | ecmd->trace = readl(®s->TuneTrace); | ||
2644 | |||
2645 | ecmd->txcoal = readl(®s->TuneTxCoalTicks); | ||
2646 | ecmd->rxcoal = readl(®s->TuneRxCoalTicks); | ||
2647 | #endif | ||
2648 | ecmd->maxtxpkt = readl(®s->TuneMaxTxDesc); | ||
2649 | ecmd->maxrxpkt = readl(®s->TuneMaxRxDesc); | ||
2650 | |||
2651 | return 0; | ||
2652 | } | ||
2653 | |||
/*
 * ethtool set_settings hook: program the Tigon link parameters.
 *
 * The currently negotiated speed is read first so the speed bits are
 * only narrowed when the caller actually requested a different speed;
 * the new link word is then written and the firmware is asked to
 * renegotiate.  Always returns 0.
 */
static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	u32 link, speed;

	/* Derive the currently active speed from the link state registers. */
	link = readl(&regs->GigLnkState);
	if (link & LNK_1000MB)
		speed = SPEED_1000;
	else {
		link = readl(&regs->FastLnkState);
		if (link & LNK_100MB)
			speed = SPEED_100;
		else if (link & LNK_10MB)
			speed = SPEED_10;
		else
			speed = SPEED_100;	/* fallback when no link bit is set */
	}

	/* Base link word: all speeds enabled, RX flow control on. */
	link = LNK_ENABLE | LNK_1000MB | LNK_100MB | LNK_10MB |
		LNK_RX_FLOW_CTL_Y | LNK_NEG_FCTL;
	if (!ACE_IS_TIGON_I(ap))
		link |= LNK_TX_FLOW_CTL_Y;	/* not set on Tigon I */
	if (ecmd->autoneg == AUTONEG_ENABLE)
		link |= LNK_NEGOTIATE;
	/* Only narrow the speed mask when a different speed was requested. */
	if (ethtool_cmd_speed(ecmd) != speed) {
		link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
		switch (ethtool_cmd_speed(ecmd)) {
		case SPEED_1000:
			link |= LNK_1000MB;
			break;
		case SPEED_100:
			link |= LNK_100MB;
			break;
		case SPEED_10:
			link |= LNK_10MB;
			break;
		}
	}

	if (ecmd->duplex == DUPLEX_FULL)
		link |= LNK_FULL_DUPLEX;

	/* Only touch the hardware if something actually changed. */
	if (link != ap->link) {
		struct cmd cmd;
		printk(KERN_INFO "%s: Renegotiating link state\n",
		       dev->name);

		ap->link = link;
		writel(link, &regs->TuneLink);
		if (!ACE_IS_TIGON_I(ap))
			writel(link, &regs->TuneFastLink);
		wmb();	/* link words must reach the NIC before the command */

		cmd.evt = C_LNK_NEGOTIATION;
		cmd.code = 0;
		cmd.idx = 0;
		ace_issue_cmd(regs, &cmd);
	}
	return 0;
}
2715 | |||
2716 | static void ace_get_drvinfo(struct net_device *dev, | ||
2717 | struct ethtool_drvinfo *info) | ||
2718 | { | ||
2719 | struct ace_private *ap = netdev_priv(dev); | ||
2720 | |||
2721 | strlcpy(info->driver, "acenic", sizeof(info->driver)); | ||
2722 | snprintf(info->version, sizeof(info->version), "%i.%i.%i", | ||
2723 | ap->firmware_major, ap->firmware_minor, | ||
2724 | ap->firmware_fix); | ||
2725 | |||
2726 | if (ap->pdev) | ||
2727 | strlcpy(info->bus_info, pci_name(ap->pdev), | ||
2728 | sizeof(info->bus_info)); | ||
2729 | |||
2730 | } | ||
2731 | |||
2732 | /* | ||
2733 | * Set the hardware MAC address. | ||
2734 | */ | ||
2735 | static int ace_set_mac_addr(struct net_device *dev, void *p) | ||
2736 | { | ||
2737 | struct ace_private *ap = netdev_priv(dev); | ||
2738 | struct ace_regs __iomem *regs = ap->regs; | ||
2739 | struct sockaddr *addr=p; | ||
2740 | u8 *da; | ||
2741 | struct cmd cmd; | ||
2742 | |||
2743 | if(netif_running(dev)) | ||
2744 | return -EBUSY; | ||
2745 | |||
2746 | memcpy(dev->dev_addr, addr->sa_data,dev->addr_len); | ||
2747 | |||
2748 | da = (u8 *)dev->dev_addr; | ||
2749 | |||
2750 | writel(da[0] << 8 | da[1], ®s->MacAddrHi); | ||
2751 | writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5], | ||
2752 | ®s->MacAddrLo); | ||
2753 | |||
2754 | cmd.evt = C_SET_MAC_ADDR; | ||
2755 | cmd.code = 0; | ||
2756 | cmd.idx = 0; | ||
2757 | ace_issue_cmd(regs, &cmd); | ||
2758 | |||
2759 | return 0; | ||
2760 | } | ||
2761 | |||
2762 | |||
2763 | static void ace_set_multicast_list(struct net_device *dev) | ||
2764 | { | ||
2765 | struct ace_private *ap = netdev_priv(dev); | ||
2766 | struct ace_regs __iomem *regs = ap->regs; | ||
2767 | struct cmd cmd; | ||
2768 | |||
2769 | if ((dev->flags & IFF_ALLMULTI) && !(ap->mcast_all)) { | ||
2770 | cmd.evt = C_SET_MULTICAST_MODE; | ||
2771 | cmd.code = C_C_MCAST_ENABLE; | ||
2772 | cmd.idx = 0; | ||
2773 | ace_issue_cmd(regs, &cmd); | ||
2774 | ap->mcast_all = 1; | ||
2775 | } else if (ap->mcast_all) { | ||
2776 | cmd.evt = C_SET_MULTICAST_MODE; | ||
2777 | cmd.code = C_C_MCAST_DISABLE; | ||
2778 | cmd.idx = 0; | ||
2779 | ace_issue_cmd(regs, &cmd); | ||
2780 | ap->mcast_all = 0; | ||
2781 | } | ||
2782 | |||
2783 | if ((dev->flags & IFF_PROMISC) && !(ap->promisc)) { | ||
2784 | cmd.evt = C_SET_PROMISC_MODE; | ||
2785 | cmd.code = C_C_PROMISC_ENABLE; | ||
2786 | cmd.idx = 0; | ||
2787 | ace_issue_cmd(regs, &cmd); | ||
2788 | ap->promisc = 1; | ||
2789 | }else if (!(dev->flags & IFF_PROMISC) && (ap->promisc)) { | ||
2790 | cmd.evt = C_SET_PROMISC_MODE; | ||
2791 | cmd.code = C_C_PROMISC_DISABLE; | ||
2792 | cmd.idx = 0; | ||
2793 | ace_issue_cmd(regs, &cmd); | ||
2794 | ap->promisc = 0; | ||
2795 | } | ||
2796 | |||
2797 | /* | ||
2798 | * For the time being multicast relies on the upper layers | ||
2799 | * filtering it properly. The Firmware does not allow one to | ||
2800 | * set the entire multicast list at a time and keeping track of | ||
2801 | * it here is going to be messy. | ||
2802 | */ | ||
2803 | if (!netdev_mc_empty(dev) && !ap->mcast_all) { | ||
2804 | cmd.evt = C_SET_MULTICAST_MODE; | ||
2805 | cmd.code = C_C_MCAST_ENABLE; | ||
2806 | cmd.idx = 0; | ||
2807 | ace_issue_cmd(regs, &cmd); | ||
2808 | }else if (!ap->mcast_all) { | ||
2809 | cmd.evt = C_SET_MULTICAST_MODE; | ||
2810 | cmd.code = C_C_MCAST_DISABLE; | ||
2811 | cmd.idx = 0; | ||
2812 | ace_issue_cmd(regs, &cmd); | ||
2813 | } | ||
2814 | } | ||
2815 | |||
2816 | |||
2817 | static struct net_device_stats *ace_get_stats(struct net_device *dev) | ||
2818 | { | ||
2819 | struct ace_private *ap = netdev_priv(dev); | ||
2820 | struct ace_mac_stats __iomem *mac_stats = | ||
2821 | (struct ace_mac_stats __iomem *)ap->regs->Stats; | ||
2822 | |||
2823 | dev->stats.rx_missed_errors = readl(&mac_stats->drop_space); | ||
2824 | dev->stats.multicast = readl(&mac_stats->kept_mc); | ||
2825 | dev->stats.collisions = readl(&mac_stats->coll); | ||
2826 | |||
2827 | return &dev->stats; | ||
2828 | } | ||
2829 | |||
2830 | |||
/*
 * Copy a firmware image into NIC-local SRAM through the shared-memory
 * window.  The window is ACE_WINDOW_SIZE bytes wide, so the copy runs
 * in chunks: each outer iteration re-banks WinBase and the inner loop
 * writes at most one window's worth of 32-bit words.
 *
 * @src points at big-endian firmware words; @dest/@size are in NIC
 * address space and are assumed to be 4-byte multiples.
 */
static void __devinit ace_copy(struct ace_regs __iomem *regs, const __be32 *src,
			       u32 dest, int size)
{
	void __iomem *tdest;
	short tsize, i;

	if (size <= 0)
		return;

	while (size > 0) {
		/* Bytes left in the current window bank, capped by size. */
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		/* Select the SRAM bank the window currently maps. */
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);
		for (i = 0; i < (tsize / 4); i++) {
			/* Firmware is big-endian */
			writel(be32_to_cpup(src), tdest);
			src++;
			tdest += 4;
			dest += 4;
			size -= 4;
		}
	}
}
2856 | |||
2857 | |||
/*
 * Zero @size bytes of NIC-local SRAM starting at @dest, using the same
 * banked shared-memory window scheme as ace_copy() above.
 */
static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int size)
{
	void __iomem *tdest;
	short tsize = 0, i;

	if (size <= 0)
		return;

	while (size > 0) {
		/* Bytes left in the current window bank, capped by size. */
		tsize = min_t(u32, ((~dest & (ACE_WINDOW_SIZE - 1)) + 1),
			      min_t(u32, size, ACE_WINDOW_SIZE));
		tdest = (void __iomem *) &regs->Window +
			(dest & (ACE_WINDOW_SIZE - 1));
		/* Select the SRAM bank the window currently maps. */
		writel(dest & ~(ACE_WINDOW_SIZE - 1), &regs->WinBase);

		for (i = 0; i < (tsize / 4); i++) {
			writel(0, tdest + i*4);
		}

		dest += tsize;
		size -= tsize;
	}
}
2881 | |||
2882 | |||
2883 | /* | ||
2884 | * Download the firmware into the SRAM on the NIC | ||
2885 | * | ||
2886 | * This operation requires the NIC to be halted and is performed with | ||
 * interrupts disabled and with the spinlock held.
2888 | */ | ||
2889 | static int __devinit ace_load_firmware(struct net_device *dev) | ||
2890 | { | ||
2891 | const struct firmware *fw; | ||
2892 | const char *fw_name = "acenic/tg2.bin"; | ||
2893 | struct ace_private *ap = netdev_priv(dev); | ||
2894 | struct ace_regs __iomem *regs = ap->regs; | ||
2895 | const __be32 *fw_data; | ||
2896 | u32 load_addr; | ||
2897 | int ret; | ||
2898 | |||
2899 | if (!(readl(®s->CpuCtrl) & CPU_HALTED)) { | ||
2900 | printk(KERN_ERR "%s: trying to download firmware while the " | ||
2901 | "CPU is running!\n", ap->name); | ||
2902 | return -EFAULT; | ||
2903 | } | ||
2904 | |||
2905 | if (ACE_IS_TIGON_I(ap)) | ||
2906 | fw_name = "acenic/tg1.bin"; | ||
2907 | |||
2908 | ret = request_firmware(&fw, fw_name, &ap->pdev->dev); | ||
2909 | if (ret) { | ||
2910 | printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n", | ||
2911 | ap->name, fw_name); | ||
2912 | return ret; | ||
2913 | } | ||
2914 | |||
2915 | fw_data = (void *)fw->data; | ||
2916 | |||
2917 | /* Firmware blob starts with version numbers, followed by | ||
2918 | load and start address. Remainder is the blob to be loaded | ||
2919 | contiguously from load address. We don't bother to represent | ||
2920 | the BSS/SBSS sections any more, since we were clearing the | ||
2921 | whole thing anyway. */ | ||
2922 | ap->firmware_major = fw->data[0]; | ||
2923 | ap->firmware_minor = fw->data[1]; | ||
2924 | ap->firmware_fix = fw->data[2]; | ||
2925 | |||
2926 | ap->firmware_start = be32_to_cpu(fw_data[1]); | ||
2927 | if (ap->firmware_start < 0x4000 || ap->firmware_start >= 0x80000) { | ||
2928 | printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", | ||
2929 | ap->name, ap->firmware_start, fw_name); | ||
2930 | ret = -EINVAL; | ||
2931 | goto out; | ||
2932 | } | ||
2933 | |||
2934 | load_addr = be32_to_cpu(fw_data[2]); | ||
2935 | if (load_addr < 0x4000 || load_addr >= 0x80000) { | ||
2936 | printk(KERN_ERR "%s: bogus load address %08x in \"%s\"\n", | ||
2937 | ap->name, load_addr, fw_name); | ||
2938 | ret = -EINVAL; | ||
2939 | goto out; | ||
2940 | } | ||
2941 | |||
2942 | /* | ||
2943 | * Do not try to clear more than 512KiB or we end up seeing | ||
2944 | * funny things on NICs with only 512KiB SRAM | ||
2945 | */ | ||
2946 | ace_clear(regs, 0x2000, 0x80000-0x2000); | ||
2947 | ace_copy(regs, &fw_data[3], load_addr, fw->size-12); | ||
2948 | out: | ||
2949 | release_firmware(fw); | ||
2950 | return ret; | ||
2951 | } | ||
2952 | |||
2953 | |||
2954 | /* | ||
2955 | * The eeprom on the AceNIC is an Atmel i2c EEPROM. | ||
2956 | * | ||
2957 | * Accessing the EEPROM is `interesting' to say the least - don't read | ||
2958 | * this code right after dinner. | ||
2959 | * | ||
2960 | * This is all about black magic and bit-banging the device .... I | ||
2961 | * wonder in what hospital they have put the guy who designed the i2c | ||
2962 | * specs. | ||
2963 | * | ||
2964 | * Oh yes, this is only the beginning! | ||
2965 | * | ||
 * Thanks to Stevarino Webinski for helping track down the bugs in the
 * i2c readout code by beta testing all my hacks.
2968 | */ | ||
/*
 * Generate an i2c START condition on the bit-banged EEPROM interface:
 * with the clock high, the data line is driven from high to low.
 * Every write is flushed with a readl() of LocalCtrl and paced with
 * udelay(); do not reorder or remove any of these accesses.
 */
static void __devinit eeprom_start(struct ace_regs __iomem *regs)
{
	u32 local;

	readl(&regs->LocalCtrl);	/* flush pending PCI writes */
	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	/* Drive data high with the output enabled. */
	local |= EEPROM_DATA_OUT | EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* Raise the clock. */
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* Data falls while the clock is high -> START condition. */
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* Bring the clock back low. */
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
}
2996 | |||
2997 | |||
/*
 * Clock one byte (@magic, MSB first) out to the EEPROM over the
 * bit-banged interface.  Each bit is presented on the data line and
 * then latched with a full clock pulse.
 */
static void __devinit eeprom_prep(struct ace_regs __iomem *regs, u8 magic)
{
	short i;
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	/* Data low, output enabled, clock low: idle write state. */
	local &= ~EEPROM_DATA_OUT;
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	for (i = 0; i < 8; i++, magic <<= 1) {
		udelay(ACE_SHORT_DELAY);
		/* Present the current MSB on the data line. */
		if (magic & 0x80)
			local |= EEPROM_DATA_OUT;
		else
			local &= ~EEPROM_DATA_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();

		udelay(ACE_SHORT_DELAY);
		/* Pulse the clock high to latch the bit ... */
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		/* ... then drop clock and data for the next bit. */
		local &= ~(EEPROM_CLK_OUT | EEPROM_DATA_OUT);
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
	}
}
3033 | |||
3034 | |||
/*
 * Release the data line and clock once to sample the EEPROM's ACK bit.
 *
 * Returns 0 when the device ACKed (pulled the data line low) and
 * non-zero on NAK; callers treat any non-zero value as an error.
 */
static int __devinit eeprom_check_ack(struct ace_regs __iomem *regs)
{
	int state;
	u32 local;

	local = readl(&regs->LocalCtrl);
	/* Tri-state the data line so the EEPROM can drive it. */
	local &= ~EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	/* Raise the clock for the ACK slot. */
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* sample data in middle of high clk */
	state = (readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0;
	udelay(ACE_SHORT_DELAY);
	mb();
	/* Drop the clock again to finish the ACK cycle. */
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();

	return state;
}
3061 | |||
3062 | |||
/*
 * Generate an i2c STOP condition: with the clock high, the data line
 * is driven from low to high, ending the transaction.
 */
static void __devinit eeprom_stop(struct ace_regs __iomem *regs)
{
	u32 local;

	udelay(ACE_SHORT_DELAY);
	local = readl(&regs->LocalCtrl);
	/* Re-enable driving the data line. */
	local |= EEPROM_WRITE_ENABLE;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* Data low before the clock rises. */
	local &= ~EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* Raise the clock ... */
	local |= EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* ... then data rises while the clock is high -> STOP. */
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_LONG_DELAY);
	local &= ~EEPROM_CLK_OUT;
	writel(local, &regs->LocalCtrl);
	mb();
}
3093 | |||
3094 | |||
3095 | /* | ||
3096 | * Read a whole byte from the EEPROM. | ||
3097 | */ | ||
/*
 * Read a whole byte from the EEPROM.
 *
 * Bit-bangs an i2c random-read sequence on LocalCtrl: START, device
 * write-select, two address bytes, repeated START, device read-select,
 * then eight clocked-in data bits followed by a STOP.
 *
 * Runs with local interrupts disabled for the whole transaction.
 * Returns the byte value (0-255) on success or -EIO if any ACK is
 * missing.
 */
static int __devinit read_eeprom_byte(struct net_device *dev,
				      unsigned long offset)
{
	struct ace_private *ap = netdev_priv(dev);
	struct ace_regs __iomem *regs = ap->regs;
	unsigned long flags;
	u32 local;
	int result = 0;
	short i;

	/*
	 * Don't take interrupts on this CPU while bit-banging
	 * the %#%#@$ I2C device
	 */
	local_irq_save(flags);

	eeprom_start(regs);

	/* Address phase: select the device for writing ... */
	eeprom_prep(regs, EEPROM_WRITE_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to sync eeprom\n", ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	/* ... then send the 16-bit cell address, high byte first. */
	eeprom_prep(regs, (offset >> 8) & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 0\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	eeprom_prep(regs, offset & 0xff);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set address byte 1\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	/* Repeated START, switch the device into read mode. */
	eeprom_start(regs);
	eeprom_prep(regs, EEPROM_READ_SELECT);
	if (eeprom_check_ack(regs)) {
		local_irq_restore(flags);
		printk(KERN_ERR "%s: Unable to set READ_SELECT\n",
		       ap->name);
		result = -EIO;
		goto eeprom_read_error;
	}

	/* Clock in the eight data bits, MSB first. */
	for (i = 0; i < 8; i++) {
		local = readl(&regs->LocalCtrl);
		/* Release the data line so the EEPROM can drive it. */
		local &= ~EEPROM_WRITE_ENABLE;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_LONG_DELAY);
		mb();
		local |= EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		mb();
		udelay(ACE_SHORT_DELAY);
		/* sample data mid high clk */
		result = (result << 1) |
			((readl(&regs->LocalCtrl) & EEPROM_DATA_IN) != 0);
		udelay(ACE_SHORT_DELAY);
		mb();
		local = readl(&regs->LocalCtrl);
		local &= ~EEPROM_CLK_OUT;
		writel(local, &regs->LocalCtrl);
		readl(&regs->LocalCtrl);
		udelay(ACE_SHORT_DELAY);
		mb();
		if (i == 7) {
			/* Reclaim the data line after the last bit. */
			local |= EEPROM_WRITE_ENABLE;
			writel(local, &regs->LocalCtrl);
			readl(&regs->LocalCtrl);
			mb();
			udelay(ACE_SHORT_DELAY);
		}
	}

	/* Send a NAK (data high during one clock pulse) ... */
	local |= EEPROM_DATA_OUT;
	writel(local, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	writel(readl(&regs->LocalCtrl) | EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	udelay(ACE_LONG_DELAY);
	writel(readl(&regs->LocalCtrl) & ~EEPROM_CLK_OUT, &regs->LocalCtrl);
	readl(&regs->LocalCtrl);
	mb();
	udelay(ACE_SHORT_DELAY);
	/* ... and terminate the transaction. */
	eeprom_stop(regs);

	local_irq_restore(flags);
out:
	return result;

eeprom_read_error:
	printk(KERN_ERR "%s: Unable to read eeprom byte 0x%02lx\n",
	       ap->name, offset);
	goto out;
}
diff --git a/drivers/net/ethernet/3com/acenic.h b/drivers/net/ethernet/3com/acenic.h new file mode 100644 index 000000000000..51c486cfbb8c --- /dev/null +++ b/drivers/net/ethernet/3com/acenic.h | |||
@@ -0,0 +1,790 @@ | |||
1 | #ifndef _ACENIC_H_ | ||
2 | #define _ACENIC_H_ | ||
3 | #include <linux/interrupt.h> | ||
4 | |||
5 | |||
6 | /* | ||
7 | * Generate TX index update each time, when TX ring is closed. | ||
8 | * Normally, this is not useful, because results in more dma (and irqs | ||
9 | * without TX_COAL_INTS_ONLY). | ||
10 | */ | ||
11 | #define USE_TX_COAL_NOW 0 | ||
12 | |||
13 | /* | ||
14 | * Addressing: | ||
15 | * | ||
16 | * The Tigon uses 64-bit host addresses, regardless of their actual | ||
17 | * length, and it expects a big-endian format. For 32 bit systems the | ||
18 | * upper 32 bits of the address are simply ignored (zero), however for | ||
19 | * little endian 64 bit systems (Alpha) this looks strange with the | ||
20 | * two parts of the address word being swapped. | ||
21 | * | ||
22 | * The addresses are split in two 32 bit words for all architectures | ||
23 | * as some of them are in PCI shared memory and it is necessary to use | ||
24 | * readl/writel to access them. | ||
25 | * | ||
26 | * The addressing code is derived from Pete Wyckoff's work, but | ||
27 | * modified to deal properly with readl/writel usage. | ||
28 | */ | ||
29 | |||
/*
 * Memory-mapped register layout of the Tigon NIC.  Offsets are noted
 * in the field comments; the pad* arrays exist only to keep subsequent
 * fields at those offsets, so fields must not be added, removed or
 * reordered.
 */
struct ace_regs {
	u32	pad0[16];	/* PCI control registers */

	u32	HostCtrl;	/* 0x40 */
	u32	LocalCtrl;

	u32	pad1[2];

	u32	MiscCfg;	/* 0x50 */

	u32	pad2[2];

	u32	PciState;

	u32	pad3[2];	/* 0x60 */

	u32	WinBase;
	u32	WinData;

	u32	pad4[12];	/* 0x70 */

	u32	DmaWriteState;	/* 0xa0 */
	u32	pad5[3];
	u32	DmaReadState;	/* 0xb0 */

	u32	pad6[26];

	u32	AssistState;

	u32	pad7[8];	/* 0x120 */

	u32	CpuCtrl;	/* 0x140 */
	u32	Pc;

	u32	pad8[3];

	u32	SramAddr;	/* 0x154 */
	u32	SramData;

	u32	pad9[49];

	u32	MacRxState;	/* 0x220 */

	u32	pad10[7];

	u32	CpuBCtrl;	/* 0x240 */
	u32	PcB;

	u32	pad11[3];

	u32	SramBAddr;	/* 0x254 */
	u32	SramBData;

	u32	pad12[105];

	u32	pad13[32];	/* 0x400 */
	u32	Stats[32];

	/* Mailbox registers, interleaved with the producer indices. */
	u32	Mb0Hi;		/* 0x500 */
	u32	Mb0Lo;
	u32	Mb1Hi;
	u32	CmdPrd;
	u32	Mb2Hi;
	u32	TxPrd;
	u32	Mb3Hi;
	u32	RxStdPrd;
	u32	Mb4Hi;
	u32	RxJumboPrd;
	u32	Mb5Hi;
	u32	RxMiniPrd;
	u32	Mb6Hi;
	u32	Mb6Lo;
	u32	Mb7Hi;
	u32	Mb7Lo;
	u32	Mb8Hi;
	u32	Mb8Lo;
	u32	Mb9Hi;
	u32	Mb9Lo;
	u32	MbAHi;
	u32	MbALo;
	u32	MbBHi;
	u32	MbBLo;
	u32	MbCHi;
	u32	MbCLo;
	u32	MbDHi;
	u32	MbDLo;
	u32	MbEHi;
	u32	MbELo;
	u32	MbFHi;
	u32	MbFLo;

	u32	pad14[32];

	u32	MacAddrHi;	/* 0x600 */
	u32	MacAddrLo;
	u32	InfoPtrHi;
	u32	InfoPtrLo;
	u32	MultiCastHi;	/* 0x610 */
	u32	MultiCastLo;
	u32	ModeStat;
	u32	DmaReadCfg;
	u32	DmaWriteCfg;	/* 0x620 */
	u32	TxBufRat;
	u32	EvtCsm;
	u32	CmdCsm;
	u32	TuneRxCoalTicks;/* 0x630 */
	u32	TuneTxCoalTicks;
	u32	TuneStatTicks;
	u32	TuneMaxTxDesc;
	u32	TuneMaxRxDesc;	/* 0x640 */
	u32	TuneTrace;
	u32	TuneLink;
	u32	TuneFastLink;
	u32	TracePtr;	/* 0x650 */
	u32	TraceStrt;
	u32	TraceLen;
	u32	IfIdx;
	u32	IfMtu;		/* 0x660 */
	u32	MaskInt;
	u32	GigLnkState;
	u32	FastLnkState;
	u32	pad16[4];	/* 0x670 */
	u32	RxRetCsm;	/* 0x680 */

	u32	pad17[31];

	u32	CmdRng[64];	/* 0x700 */
	u32	Window[0x200];	/* banked window into NIC SRAM */
};
159 | |||
160 | |||
/*
 * A 64-bit DMA address as the NIC sees it, split into two 32-bit words
 * so each half can be accessed with readl()/writel() (see the
 * addressing note above).
 */
typedef struct {
	u32 addrhi;	/* upper 32 bits of the host address */
	u32 addrlo;	/* lower 32 bits of the host address */
} aceaddr;
165 | |||
166 | |||
167 | #define ACE_WINDOW_SIZE 0x800 | ||
168 | |||
169 | #define ACE_JUMBO_MTU 9000 | ||
170 | #define ACE_STD_MTU 1500 | ||
171 | |||
172 | #define ACE_TRACE_SIZE 0x8000 | ||
173 | |||
174 | /* | ||
175 | * Host control register bits. | ||
176 | */ | ||
177 | |||
178 | #define IN_INT 0x01 | ||
179 | #define CLR_INT 0x02 | ||
180 | #define HW_RESET 0x08 | ||
181 | #define BYTE_SWAP 0x10 | ||
182 | #define WORD_SWAP 0x20 | ||
183 | #define MASK_INTS 0x40 | ||
184 | |||
185 | /* | ||
186 | * Local control register bits. | ||
187 | */ | ||
188 | |||
189 | #define EEPROM_DATA_IN 0x800000 | ||
190 | #define EEPROM_DATA_OUT 0x400000 | ||
191 | #define EEPROM_WRITE_ENABLE 0x200000 | ||
192 | #define EEPROM_CLK_OUT 0x100000 | ||
193 | |||
194 | #define EEPROM_BASE 0xa0000000 | ||
195 | |||
196 | #define EEPROM_WRITE_SELECT 0xa0 | ||
197 | #define EEPROM_READ_SELECT 0xa1 | ||
198 | |||
199 | #define SRAM_BANK_512K 0x200 | ||
200 | |||
201 | |||
202 | /* | ||
203 | * udelay() values for when clocking the eeprom | ||
204 | */ | ||
205 | #define ACE_SHORT_DELAY 2 | ||
206 | #define ACE_LONG_DELAY 4 | ||
207 | |||
208 | |||
209 | /* | ||
210 | * Misc Config bits | ||
211 | */ | ||
212 | |||
213 | #define SYNC_SRAM_TIMING 0x100000 | ||
214 | |||
215 | |||
216 | /* | ||
217 | * CPU state bits. | ||
218 | */ | ||
219 | |||
220 | #define CPU_RESET 0x01 | ||
221 | #define CPU_TRACE 0x02 | ||
222 | #define CPU_PROM_FAILED 0x10 | ||
223 | #define CPU_HALT 0x00010000 | ||
224 | #define CPU_HALTED 0xffff0000 | ||
225 | |||
226 | |||
227 | /* | ||
228 | * PCI State bits. | ||
229 | */ | ||
230 | |||
231 | #define DMA_READ_MAX_4 0x04 | ||
232 | #define DMA_READ_MAX_16 0x08 | ||
233 | #define DMA_READ_MAX_32 0x0c | ||
234 | #define DMA_READ_MAX_64 0x10 | ||
235 | #define DMA_READ_MAX_128 0x14 | ||
236 | #define DMA_READ_MAX_256 0x18 | ||
237 | #define DMA_READ_MAX_1K 0x1c | ||
238 | #define DMA_WRITE_MAX_4 0x20 | ||
239 | #define DMA_WRITE_MAX_16 0x40 | ||
240 | #define DMA_WRITE_MAX_32 0x60 | ||
241 | #define DMA_WRITE_MAX_64 0x80 | ||
242 | #define DMA_WRITE_MAX_128 0xa0 | ||
243 | #define DMA_WRITE_MAX_256 0xc0 | ||
244 | #define DMA_WRITE_MAX_1K 0xe0 | ||
245 | #define DMA_READ_WRITE_MASK 0xfc | ||
246 | #define MEM_READ_MULTIPLE 0x00020000 | ||
247 | #define PCI_66MHZ 0x00080000 | ||
248 | #define PCI_32BIT 0x00100000 | ||
249 | #define DMA_WRITE_ALL_ALIGN 0x00800000 | ||
250 | #define READ_CMD_MEM 0x06000000 | ||
251 | #define WRITE_CMD_MEM 0x70000000 | ||
252 | |||
253 | |||
254 | /* | ||
255 | * Mode status | ||
256 | */ | ||
257 | |||
258 | #define ACE_BYTE_SWAP_BD 0x02 | ||
259 | #define ACE_WORD_SWAP_BD 0x04 /* not actually used */ | ||
260 | #define ACE_WARN 0x08 | ||
261 | #define ACE_BYTE_SWAP_DMA 0x10 | ||
262 | #define ACE_NO_JUMBO_FRAG 0x200 | ||
263 | #define ACE_FATAL 0x40000000 | ||
264 | |||
265 | |||
266 | /* | ||
267 | * DMA config | ||
268 | */ | ||
269 | |||
270 | #define DMA_THRESH_1W 0x10 | ||
271 | #define DMA_THRESH_2W 0x20 | ||
272 | #define DMA_THRESH_4W 0x40 | ||
273 | #define DMA_THRESH_8W 0x80 | ||
274 | #define DMA_THRESH_16W 0x100 | ||
275 | #define DMA_THRESH_32W 0x0 /* not described in doc, but exists. */ | ||
276 | |||
277 | |||
278 | /* | ||
279 | * Tuning parameters | ||
280 | */ | ||
281 | |||
282 | #define TICKS_PER_SEC 1000000 | ||
283 | |||
284 | |||
285 | /* | ||
286 | * Link bits | ||
287 | */ | ||
288 | |||
289 | #define LNK_PREF 0x00008000 | ||
290 | #define LNK_10MB 0x00010000 | ||
291 | #define LNK_100MB 0x00020000 | ||
292 | #define LNK_1000MB 0x00040000 | ||
293 | #define LNK_FULL_DUPLEX 0x00080000 | ||
294 | #define LNK_HALF_DUPLEX 0x00100000 | ||
295 | #define LNK_TX_FLOW_CTL_Y 0x00200000 | ||
296 | #define LNK_NEG_ADVANCED 0x00400000 | ||
297 | #define LNK_RX_FLOW_CTL_Y 0x00800000 | ||
298 | #define LNK_NIC 0x01000000 | ||
299 | #define LNK_JAM 0x02000000 | ||
300 | #define LNK_JUMBO 0x04000000 | ||
301 | #define LNK_ALTEON 0x08000000 | ||
302 | #define LNK_NEG_FCTL 0x10000000 | ||
303 | #define LNK_NEGOTIATE 0x20000000 | ||
304 | #define LNK_ENABLE 0x40000000 | ||
305 | #define LNK_UP 0x80000000 | ||
306 | |||
307 | |||
308 | /* | ||
309 | * Event definitions | ||
310 | */ | ||
311 | |||
312 | #define EVT_RING_ENTRIES 256 | ||
313 | #define EVT_RING_SIZE (EVT_RING_ENTRIES * sizeof(struct event)) | ||
314 | |||
/*
 * One event-ring entry.  The bitfield order is flipped by host
 * endianness -- presumably so both layouts match the firmware's view
 * of the 32-bit word; do not reorder the fields.
 */
struct event {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;		/* ring index the event refers to */
	u32	code:12;	/* event-specific code (E_C_*) */
	u32	evt:8;		/* event type (E_*) */
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
	u32     pad;
};
327 | |||
328 | |||
329 | /* | ||
330 | * Events | ||
331 | */ | ||
332 | |||
333 | #define E_FW_RUNNING 0x01 | ||
334 | #define E_STATS_UPDATED 0x04 | ||
335 | |||
336 | #define E_STATS_UPDATE 0x04 | ||
337 | |||
338 | #define E_LNK_STATE 0x06 | ||
339 | #define E_C_LINK_UP 0x01 | ||
340 | #define E_C_LINK_DOWN 0x02 | ||
341 | #define E_C_LINK_10_100 0x03 | ||
342 | |||
343 | #define E_ERROR 0x07 | ||
344 | #define E_C_ERR_INVAL_CMD 0x01 | ||
345 | #define E_C_ERR_UNIMP_CMD 0x02 | ||
346 | #define E_C_ERR_BAD_CFG 0x03 | ||
347 | |||
348 | #define E_MCAST_LIST 0x08 | ||
349 | #define E_C_MCAST_ADDR_ADD 0x01 | ||
350 | #define E_C_MCAST_ADDR_DEL 0x02 | ||
351 | |||
352 | #define E_RESET_JUMBO_RNG 0x09 | ||
353 | |||
354 | |||
355 | /* | ||
356 | * Commands | ||
357 | */ | ||
358 | |||
359 | #define CMD_RING_ENTRIES 64 | ||
360 | |||
/*
 * One command-ring entry, same packed 32-bit layout as struct event:
 * the bitfield order is flipped by host endianness so the in-memory
 * word matches what the firmware expects.
 */
struct cmd {
#ifdef __LITTLE_ENDIAN_BITFIELD
	u32	idx:12;		/* ring index the command refers to */
	u32	code:12;	/* command-specific code (C_C_*) */
	u32	evt:8;		/* command type (C_*) */
#else
	u32	evt:8;
	u32	code:12;
	u32	idx:12;
#endif
};
372 | |||
373 | |||
374 | #define C_HOST_STATE 0x01 | ||
375 | #define C_C_STACK_UP 0x01 | ||
376 | #define C_C_STACK_DOWN 0x02 | ||
377 | |||
378 | #define C_FDR_FILTERING 0x02 | ||
379 | #define C_C_FDR_FILT_ENABLE 0x01 | ||
380 | #define C_C_FDR_FILT_DISABLE 0x02 | ||
381 | |||
382 | #define C_SET_RX_PRD_IDX 0x03 | ||
383 | #define C_UPDATE_STATS 0x04 | ||
384 | #define C_RESET_JUMBO_RNG 0x05 | ||
385 | #define C_ADD_MULTICAST_ADDR 0x08 | ||
386 | #define C_DEL_MULTICAST_ADDR 0x09 | ||
387 | |||
388 | #define C_SET_PROMISC_MODE 0x0a | ||
389 | #define C_C_PROMISC_ENABLE 0x01 | ||
390 | #define C_C_PROMISC_DISABLE 0x02 | ||
391 | |||
392 | #define C_LNK_NEGOTIATION 0x0b | ||
393 | #define C_C_NEGOTIATE_BOTH 0x00 | ||
394 | #define C_C_NEGOTIATE_GIG 0x01 | ||
395 | #define C_C_NEGOTIATE_10_100 0x02 | ||
396 | |||
397 | #define C_SET_MAC_ADDR 0x0c | ||
398 | #define C_CLEAR_PROFILE 0x0d | ||
399 | |||
400 | #define C_SET_MULTICAST_MODE 0x0e | ||
401 | #define C_C_MCAST_ENABLE 0x01 | ||
402 | #define C_C_MCAST_DISABLE 0x02 | ||
403 | |||
404 | #define C_CLEAR_STATS 0x0f | ||
405 | #define C_SET_RX_JUMBO_PRD_IDX 0x10 | ||
406 | #define C_REFRESH_STATS 0x11 | ||
407 | |||
408 | |||
409 | /* | ||
410 | * Descriptor flags | ||
411 | */ | ||
412 | #define BD_FLG_TCP_UDP_SUM 0x01 | ||
413 | #define BD_FLG_IP_SUM 0x02 | ||
414 | #define BD_FLG_END 0x04 | ||
415 | #define BD_FLG_MORE 0x08 | ||
416 | #define BD_FLG_JUMBO 0x10 | ||
417 | #define BD_FLG_UCAST 0x20 | ||
418 | #define BD_FLG_MCAST 0x40 | ||
419 | #define BD_FLG_BCAST 0x60 | ||
420 | #define BD_FLG_TYP_MASK 0x60 | ||
421 | #define BD_FLG_IP_FRAG 0x80 | ||
422 | #define BD_FLG_IP_FRAG_END 0x100 | ||
423 | #define BD_FLG_VLAN_TAG 0x200 | ||
424 | #define BD_FLG_FRAME_ERROR 0x400 | ||
425 | #define BD_FLG_COAL_NOW 0x800 | ||
426 | #define BD_FLG_MINI 0x1000 | ||
427 | |||
428 | |||
429 | /* | ||
430 | * Ring Control block flags | ||
431 | */ | ||
432 | #define RCB_FLG_TCP_UDP_SUM 0x01 | ||
433 | #define RCB_FLG_IP_SUM 0x02 | ||
434 | #define RCB_FLG_NO_PSEUDO_HDR 0x08 | ||
435 | #define RCB_FLG_VLAN_ASSIST 0x10 | ||
436 | #define RCB_FLG_COAL_INT_ONLY 0x20 | ||
437 | #define RCB_FLG_TX_HOST_RING 0x40 | ||
438 | #define RCB_FLG_IEEE_SNAP_SUM 0x80 | ||
439 | #define RCB_FLG_EXT_RX_BD 0x100 | ||
440 | #define RCB_FLG_RNG_DISABLE 0x200 | ||
441 | |||
442 | |||
443 | /* | ||
444 | * TX ring - maximum TX ring entries for Tigon I's is 128 | ||
445 | */ | ||
446 | #define MAX_TX_RING_ENTRIES 256 | ||
447 | #define TIGON_I_TX_RING_ENTRIES 128 | ||
448 | #define TX_RING_SIZE (MAX_TX_RING_ENTRIES * sizeof(struct tx_desc)) | ||
449 | #define TX_RING_BASE 0x3800 | ||
450 | |||
451 | struct tx_desc{ | ||
452 | aceaddr addr; | ||
453 | u32 flagsize; | ||
454 | #if 0 | ||
455 | /* | ||
456 | * This is in PCI shared mem and must be accessed with readl/writel | ||
457 | * real layout is: | ||
458 | */ | ||
459 | #if __LITTLE_ENDIAN | ||
460 | u16 flags; | ||
461 | u16 size; | ||
462 | u16 vlan; | ||
463 | u16 reserved; | ||
464 | #else | ||
465 | u16 size; | ||
466 | u16 flags; | ||
467 | u16 reserved; | ||
468 | u16 vlan; | ||
469 | #endif | ||
470 | #endif | ||
471 | u32 vlanres; | ||
472 | }; | ||
473 | |||
474 | |||
#define RX_STD_RING_ENTRIES	512
#define RX_STD_RING_SIZE	(RX_STD_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_JUMBO_RING_ENTRIES	256
#define RX_JUMBO_RING_SIZE	(RX_JUMBO_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_MINI_RING_ENTRIES	1024
#define RX_MINI_RING_SIZE	(RX_MINI_RING_ENTRIES * sizeof(struct rx_desc))

#define RX_RETURN_RING_ENTRIES	2048
/*
 * Fixed: this previously expanded RX_MAX_RETURN_RING_ENTRIES, a name
 * that is not defined in this header (TODO confirm nothing else in the
 * tree defines it), so any expansion of RX_RETURN_RING_SIZE would fail
 * to compile.  It now follows the *_ENTRIES/*_SIZE pattern of the
 * other RX rings above.
 */
#define RX_RETURN_RING_SIZE	(RX_RETURN_RING_ENTRIES * \
				 sizeof(struct rx_desc))
487 | |||
/*
 * Receive descriptor, shared with the NIC.  The 16-bit halves of each
 * 32-bit word are swapped on big-endian hosts so the word layout in
 * memory matches what the firmware expects.
 */
struct rx_desc{
	aceaddr addr;		/* DMA address of the receive buffer */
#ifdef __LITTLE_ENDIAN
	u16 size;
	u16 idx;
#else
	u16 idx;
	u16 size;
#endif
#ifdef __LITTLE_ENDIAN
	u16 flags;		/* BD_FLG_* */
	u16 type;
#else
	u16 type;
	u16 flags;
#endif
#ifdef __LITTLE_ENDIAN
	u16 tcp_udp_csum;
	u16 ip_csum;
#else
	u16 ip_csum;
	u16 tcp_udp_csum;
#endif
#ifdef __LITTLE_ENDIAN
	u16 vlan;
	u16 err_flags;
#else
	u16 err_flags;
	u16 vlan;
#endif
	u32 reserved;
	/* NOTE(review): "opague" is a long-standing typo for "opaque".
	 * Left unchanged: renaming the field would break code elsewhere
	 * that uses this header. */
	u32 opague;
};
521 | |||
522 | |||
/*
 * This struct is shared with the NIC firmware.
 */
struct ring_ctrl {
	aceaddr rngptr;		/* DMA address of the ring itself */
#ifdef __LITTLE_ENDIAN
	u16 flags;		/* RCB_FLG_* */
	u16 max_len;
#else
	u16 max_len;
	u16 flags;
#endif
	u32 pad;
};


/*
 * MAC statistics block; coll_N appear to count frames sent after
 * exactly N collisions (presumed from naming -- TODO confirm against
 * Tigon firmware docs).
 */
struct ace_mac_stats {
	u32 excess_colls;
	u32 coll_1;
	u32 coll_2;
	u32 coll_3;
	u32 coll_4;
	u32 coll_5;
	u32 coll_6;
	u32 coll_7;
	u32 coll_8;
	u32 coll_9;
	u32 coll_10;
	u32 coll_11;
	u32 coll_12;
	u32 coll_13;
	u32 coll_14;
	u32 coll_15;
	u32 late_coll;
	u32 defers;
	u32 crc_err;
	u32 underrun;
	u32 crs_err;
	u32 pad[3];
	u32 drop_ula;
	u32 drop_mc;
	u32 drop_fc;
	u32 drop_space;
	u32 coll;
	u32 kept_bc;
	u32 kept_mc;
	u32 kept_uc;
};


/*
 * General info block shared with the NIC: a statistics area followed
 * by the control block and producer/consumer pointer for every ring.
 */
struct ace_info {
	union {
		u32 stats[256];
	} s;
	struct ring_ctrl evt_ctrl;
	struct ring_ctrl cmd_ctrl;
	struct ring_ctrl tx_ctrl;
	struct ring_ctrl rx_std_ctrl;
	struct ring_ctrl rx_jumbo_ctrl;
	struct ring_ctrl rx_mini_ctrl;
	struct ring_ctrl rx_return_ctrl;
	aceaddr evt_prd_ptr;
	aceaddr rx_ret_prd_ptr;
	aceaddr tx_csm_ptr;
	aceaddr stats2_ptr;
};


/* Per-RX-slot host bookkeeping: the skb and its DMA mapping. */
struct ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
};


/*
 * Funny... As soon as we add maplen on alpha, it starts to work
 * much slower. Hmm... is it because struct does not fit to one cacheline?
 * So, split tx_ring_info.
 */
/* Per-TX-slot host bookkeeping: skb plus DMA address and length. */
struct tx_ring_info {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(maplen);
};


/*
 * struct ace_skb holding the rings of skb's. This is an awful lot of
 * pointers, but I don't see any other smart mode to do this in an
 * efficient manner ;-(
 */
struct ace_skb
{
	struct tx_ring_info tx_skbuff[MAX_TX_RING_ENTRIES];
	struct ring_info rx_std_skbuff[RX_STD_RING_ENTRIES];
	struct ring_info rx_mini_skbuff[RX_MINI_RING_ENTRIES];
	struct ring_info rx_jumbo_skbuff[RX_JUMBO_RING_ENTRIES];
};
621 | |||
622 | |||
623 | /* | ||
624 | * Struct private for the AceNIC. | ||
625 | * | ||
626 | * Elements are grouped so variables used by the tx handling goes | ||
627 | * together, and will go into the same cache lines etc. in order to | ||
628 | * avoid cache line contention between the rx and tx handling on SMP. | ||
629 | * | ||
630 | * Frequently accessed variables are put at the beginning of the | ||
631 | * struct to help the compiler generate better/shorter code. | ||
632 | */ | ||
/* Per-device private state for the AceNIC driver. */
struct ace_private
{
	struct ace_info *info;		/* shared info block (see above) */
	struct ace_regs __iomem *regs;	/* register base */
	struct ace_skb *skb;
	dma_addr_t info_dma;		/* 32/64 bit */

	int version, link;
	int promisc, mcast_all;

	/*
	 * TX elements
	 */
	struct tx_desc *tx_ring;
	u32 tx_prd;
	volatile u32 tx_ret_csm;	/* updated asynchronously, hence volatile */
	int tx_ring_entries;

	/*
	 * RX elements
	 */
	/* cache-line aligned so the refill flags don't share a line
	 * with the TX fields above (see struct-level comment) */
	unsigned long std_refill_busy
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	unsigned long mini_refill_busy, jumbo_refill_busy;
	atomic_t cur_rx_bufs;
	atomic_t cur_mini_bufs;
	atomic_t cur_jumbo_bufs;
	u32 rx_std_skbprd, rx_mini_skbprd, rx_jumbo_skbprd;
	u32 cur_rx;

	struct rx_desc *rx_std_ring;
	struct rx_desc *rx_jumbo_ring;
	struct rx_desc *rx_mini_ring;
	struct rx_desc *rx_return_ring;

	int tasklet_pending, jumbo;
	struct tasklet_struct ace_tasklet;

	struct event *evt_ring;

	/* written by the NIC via DMA, hence volatile */
	volatile u32 *evt_prd, *rx_ret_prd, *tx_csm;

	dma_addr_t tx_ring_dma;		/* 32/64 bit */
	dma_addr_t rx_ring_base_dma;
	dma_addr_t evt_ring_dma;
	dma_addr_t evt_prd_dma, rx_ret_prd_dma, tx_csm_dma;

	unsigned char *trace_buf;
	struct pci_dev *pdev;
	struct net_device *next;
	volatile int fw_running;
	int board_idx;
	u16 pci_command;
	u8 pci_latency;
	const char *name;
#ifdef INDEX_DEBUG
	spinlock_t debug_lock
		__attribute__ ((aligned (SMP_CACHE_BYTES)));
	u32 last_tx, last_std_rx, last_mini_rx;
#endif
	int pci_using_dac;
	u8 firmware_major;
	u8 firmware_minor;
	u8 firmware_fix;
	u32 firmware_start;
};
699 | |||
700 | |||
701 | #define TX_RESERVED MAX_SKB_FRAGS | ||
702 | |||
703 | static inline int tx_space (struct ace_private *ap, u32 csm, u32 prd) | ||
704 | { | ||
705 | return (csm - prd - 1) & (ACE_TX_RING_ENTRIES(ap) - 1); | ||
706 | } | ||
707 | |||
708 | #define tx_free(ap) tx_space((ap)->tx_ret_csm, (ap)->tx_prd, ap) | ||
709 | #define tx_ring_full(ap, csm, prd) (tx_space(ap, csm, prd) <= TX_RESERVED) | ||
710 | |||
711 | static inline void set_aceaddr(aceaddr *aa, dma_addr_t addr) | ||
712 | { | ||
713 | u64 baddr = (u64) addr; | ||
714 | aa->addrlo = baddr & 0xffffffff; | ||
715 | aa->addrhi = baddr >> 32; | ||
716 | wmb(); | ||
717 | } | ||
718 | |||
719 | |||
/*
 * Write a new TX producer index to the NIC.  When built with
 * INDEX_DEBUG, the write is serialized and sanity-checked: writing the
 * same value twice in a row indicates a race in the TX path.
 */
static inline void ace_set_txprd(struct ace_regs __iomem *regs,
				 struct ace_private *ap, u32 value)
{
#ifdef INDEX_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&ap->debug_lock, flags);
	writel(value, &regs->TxPrd);
	if (value == ap->last_tx)
		printk(KERN_ERR "AceNIC RACE ALERT! writing identical value "
		       "to tx producer (%i)\n", value);
	ap->last_tx = value;
	spin_unlock_irqrestore(&ap->debug_lock, flags);
#else
	writel(value, &regs->TxPrd);
#endif
	/* make sure the producer write is posted before continuing */
	wmb();
}
737 | |||
738 | |||
739 | static inline void ace_mask_irq(struct net_device *dev) | ||
740 | { | ||
741 | struct ace_private *ap = netdev_priv(dev); | ||
742 | struct ace_regs __iomem *regs = ap->regs; | ||
743 | |||
744 | if (ACE_IS_TIGON_I(ap)) | ||
745 | writel(1, ®s->MaskInt); | ||
746 | else | ||
747 | writel(readl(®s->HostCtrl) | MASK_INTS, ®s->HostCtrl); | ||
748 | |||
749 | ace_sync_irq(dev->irq); | ||
750 | } | ||
751 | |||
752 | |||
753 | static inline void ace_unmask_irq(struct net_device *dev) | ||
754 | { | ||
755 | struct ace_private *ap = netdev_priv(dev); | ||
756 | struct ace_regs __iomem *regs = ap->regs; | ||
757 | |||
758 | if (ACE_IS_TIGON_I(ap)) | ||
759 | writel(0, ®s->MaskInt); | ||
760 | else | ||
761 | writel(readl(®s->HostCtrl) & ~MASK_INTS, ®s->HostCtrl); | ||
762 | } | ||
763 | |||
764 | |||
/*
 * Prototypes
 */
static int ace_init(struct net_device *dev);
/* RX refill helpers, one per receive ring */
static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_mini_rx_ring(struct net_device *dev, int nr_bufs);
static void ace_load_jumbo_rx_ring(struct net_device *dev, int nr_bufs);
static irqreturn_t ace_interrupt(int irq, void *dev_id);
static int ace_load_firmware(struct net_device *dev);
/* net_device_ops entry points */
static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
				  struct net_device *dev);
static int ace_close(struct net_device *dev);
static void ace_tasklet(unsigned long dev);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
static int ace_set_mac_addr(struct net_device *dev, void *p);
static void ace_set_rxtx_parms(struct net_device *dev, int jumbo);
/* setup/teardown helpers */
static int ace_allocate_descriptors(struct net_device *dev);
static void ace_free_descriptors(struct net_device *dev);
static void ace_init_cleanup(struct net_device *dev);
static struct net_device_stats *ace_get_stats(struct net_device *dev);
static int read_eeprom_byte(struct net_device *dev, unsigned long offset);

#endif /* _ACENIC_H_ */
789 | |||
790 | #endif /* _ACENIC_H_ */ | ||
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c new file mode 100644 index 000000000000..1d5091a1e49a --- /dev/null +++ b/drivers/net/ethernet/3com/typhoon.c | |||
@@ -0,0 +1,2574 @@ | |||
1 | /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */ | ||
2 | /* | ||
3 | Written 2002-2004 by David Dillow <dave@thedillows.org> | ||
4 | Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and | ||
5 | Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>. | ||
6 | |||
7 | This software may be used and distributed according to the terms of | ||
8 | the GNU General Public License (GPL), incorporated herein by reference. | ||
9 | Drivers based on or derived from this code fall under the GPL and must | ||
10 | retain the authorship, copyright and license notice. This file is not | ||
11 | a complete program and may only be used when the entire operating | ||
12 | system is licensed under the GPL. | ||
13 | |||
14 | This software is available on a public web site. It may enable | ||
15 | cryptographic capabilities of the 3Com hardware, and may be | ||
16 | exported from the United States under License Exception "TSU" | ||
17 | pursuant to 15 C.F.R. Section 740.13(e). | ||
18 | |||
19 | This work was funded by the National Library of Medicine under | ||
20 | the Department of Energy project number 0274DD06D1 and NLM project | ||
21 | number Y1-LM-2015-01. | ||
22 | |||
23 | This driver is designed for the 3Com 3CR990 Family of cards with the | ||
24 | 3XP Processor. It has been tested on x86 and sparc64. | ||
25 | |||
26 | KNOWN ISSUES: | ||
27 | *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware | ||
28 | issue. Hopefully 3Com will fix it. | ||
29 | *) Waiting for a command response takes 8ms due to non-preemptable | ||
30 | polling. Only significant for getting stats and creating | ||
31 | SAs, but an ugly wart never the less. | ||
32 | |||
33 | TODO: | ||
34 | *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming. | ||
35 | *) Add more support for ethtool (especially for NIC stats) | ||
36 | *) Allow disabling of RX checksum offloading | ||
37 | *) Fix MAC changing to work while the interface is up | ||
38 | (Need to put commands on the TX ring, which changes | ||
39 | the locking) | ||
40 | *) Add in FCS to {rx,tx}_bytes, since the hardware doesn't. See | ||
41 | http://oss.sgi.com/cgi-bin/mesg.cgi?a=netdev&i=20031215152211.7003fe8e.rddunlap%40osdl.org | ||
42 | */ | ||
43 | |||
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 */
static int rx_copybreak = 200;

/* Should we use MMIO or Port IO?
 * 0: Port IO
 * 1: MMIO
 * 2: Try MMIO, fallback to Port IO
 */
static unsigned int use_mmio = 2;

/* end user-configurable values */

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
 */
static const int multicast_filter_limit = 32;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
 * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
 * Making the Tx ring too large decreases the effectiveness of channel
 * bonding and packet priority.
 * There are no ill effects from too-large receive rings.
 *
 * We don't currently use the Hi Tx ring so, don't make it very big.
 *
 * Beware that if we start using the Hi Tx ring, we will need to change
 * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
 */
#define TXHI_ENTRIES		2
#define TXLO_ENTRIES		128
#define RX_ENTRIES		32
#define COMMAND_ENTRIES		16
#define RESPONSE_ENTRIES	32

#define COMMAND_RING_SIZE	(COMMAND_ENTRIES * sizeof(struct cmd_desc))
#define RESPONSE_RING_SIZE	(RESPONSE_ENTRIES * sizeof(struct resp_desc))

/* The 3XP will preload and remove 64 entries from the free buffer
 * list, and we need one entry to keep the ring from wrapping, so
 * to keep this a power of two, we use 128 entries.
 */
#define RXFREE_ENTRIES		128
#define RXENT_ENTRIES		(RXFREE_ENTRIES - 1)

/* Operational parameters that usually are not changed. */

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ		1536
#define FIRMWARE_NAME		"3com/typhoon.bin"

/* prefix all log messages with the module name */
#define pr_fmt(fmt)		KBUILD_MODNAME " " fmt
100 | |||
101 | #include <linux/module.h> | ||
102 | #include <linux/kernel.h> | ||
103 | #include <linux/sched.h> | ||
104 | #include <linux/string.h> | ||
105 | #include <linux/timer.h> | ||
106 | #include <linux/errno.h> | ||
107 | #include <linux/ioport.h> | ||
108 | #include <linux/interrupt.h> | ||
109 | #include <linux/pci.h> | ||
110 | #include <linux/netdevice.h> | ||
111 | #include <linux/etherdevice.h> | ||
112 | #include <linux/skbuff.h> | ||
113 | #include <linux/mm.h> | ||
114 | #include <linux/init.h> | ||
115 | #include <linux/delay.h> | ||
116 | #include <linux/ethtool.h> | ||
117 | #include <linux/if_vlan.h> | ||
118 | #include <linux/crc32.h> | ||
119 | #include <linux/bitops.h> | ||
120 | #include <asm/processor.h> | ||
121 | #include <asm/io.h> | ||
122 | #include <asm/uaccess.h> | ||
123 | #include <linux/in6.h> | ||
124 | #include <linux/dma-mapping.h> | ||
125 | #include <linux/firmware.h> | ||
126 | |||
127 | #include "typhoon.h" | ||
128 | |||
129 | MODULE_AUTHOR("David Dillow <dave@thedillows.org>"); | ||
130 | MODULE_VERSION("1.0"); | ||
131 | MODULE_LICENSE("GPL"); | ||
132 | MODULE_FIRMWARE(FIRMWARE_NAME); | ||
133 | MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)"); | ||
134 | MODULE_PARM_DESC(rx_copybreak, "Packets smaller than this are copied and " | ||
135 | "the buffer given back to the NIC. Default " | ||
136 | "is 200."); | ||
137 | MODULE_PARM_DESC(use_mmio, "Use MMIO (1) or PIO(0) to access the NIC. " | ||
138 | "Default is to try MMIO and fallback to PIO."); | ||
139 | module_param(rx_copybreak, int, 0); | ||
140 | module_param(use_mmio, int, 0); | ||
141 | |||
142 | #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32 | ||
143 | #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO | ||
144 | #undef NETIF_F_TSO | ||
145 | #endif | ||
146 | |||
147 | #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS) | ||
148 | #error TX ring too small! | ||
149 | #endif | ||
150 | |||
/* Static per-model data: marketing name plus capability flags below. */
struct typhoon_card_info {
	const char *name;
	const int capabilities;
};

/* capability flags for typhoon_card_info.capabilities */
#define TYPHOON_CRYPTO_NONE		0x00
#define TYPHOON_CRYPTO_DES		0x01
#define TYPHOON_CRYPTO_3DES		0x02
#define TYPHOON_CRYPTO_VARIABLE		0x04
#define TYPHOON_FIBER			0x08
#define TYPHOON_WAKEUP_NEEDS_RESET	0x10

enum typhoon_cards {
	TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
	TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
	TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
	TYPHOON_FXM,
};

/* directly indexed by enum typhoon_cards, above */
static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
	{ "3Com Typhoon (3C990-TX)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990-TX-95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990-TX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon (3C990SVR)",
		TYPHOON_CRYPTO_NONE},
	{ "3Com Typhoon (3CR990SVR95)",
		TYPHOON_CRYPTO_DES},
	{ "3Com Typhoon (3CR990SVR97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
	{ "3Com Typhoon2 (3C990B-TX-M)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon2 (3C990BSVR)",
		TYPHOON_CRYPTO_VARIABLE},
	{ "3Com Typhoon (3CR990-FX-95)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-95 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
	{ "3Com Typhoon (3CR990-FX-97 Server)",
		TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
	{ "3Com Typhoon2 (3C990B-FX-97)",
		TYPHOON_CRYPTO_VARIABLE | TYPHOON_FIBER},
};
199 | |||
/* Notes on the new subsystem numbering scheme:
 * bits 0-1 indicate crypto capabilities: (0) variable, (1) DES, or (2) 3DES
 * bit 4 indicates if this card has secured firmware (we don't support it)
 * bit 8 indicates if this is a (0) copper or (1) fiber card
 * bits 12-16 indicate card type: (0) client and (1) server
 */
/* The driver_data column maps each PCI ID to its enum typhoon_cards
 * entry, which in turn indexes typhoon_card_info[]. */
static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FXM },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
	  PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
	  PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
236 | |||
/* Define the shared memory area
 * Align everything the 3XP will normally be using.
 * We'll need to move/align txHi if we start using that ring.
 */
#define __3xp_aligned	____cacheline_aligned
struct typhoon_shared {
	struct typhoon_interface	iface;
	struct typhoon_indexes		indexes			__3xp_aligned;
	struct tx_desc			txLo[TXLO_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxLo[RX_ENTRIES]	__3xp_aligned;
	struct rx_desc			rxHi[RX_ENTRIES]	__3xp_aligned;
	struct cmd_desc			cmd[COMMAND_ENTRIES]	__3xp_aligned;
	struct resp_desc		resp[RESPONSE_ENTRIES]	__3xp_aligned;
	struct rx_free			rxBuff[RXFREE_ENTRIES]	__3xp_aligned;
	u32				zeroWord;
	struct tx_desc			txHi[TXHI_ENTRIES];
} __packed;

/* Host-side bookkeeping for one RX buffer: skb and its DMA mapping. */
struct rxbuff_ent {
	struct sk_buff *skb;
	dma_addr_t	dma_addr;
};

/* Per-device private state, grouped by which hot path touches it so
 * TX, IRQ/RX, and slow-path fields land in separate cache lines. */
struct typhoon {
	/* Tx cache line section */
	struct transmit_ring 	txLoRing	____cacheline_aligned;
	struct pci_dev *	tx_pdev;
	void __iomem		*tx_ioaddr;
	u32			txlo_dma_addr;

	/* Irq/Rx cache line section */
	void __iomem		*ioaddr		____cacheline_aligned;
	struct typhoon_indexes *indexes;
	u8			awaiting_resp;
	u8			duplex;
	u8			speed;
	u8			card_state;
	struct basic_ring	rxLoRing;
	struct pci_dev *	pdev;
	struct net_device *	dev;
	struct napi_struct	napi;
	struct basic_ring	rxHiRing;
	struct basic_ring	rxBuffRing;
	struct rxbuff_ent	rxbuffers[RXENT_ENTRIES];

	/* general section */
	spinlock_t		command_lock	____cacheline_aligned;
	struct basic_ring	cmdRing;
	struct basic_ring	respRing;
	struct net_device_stats	stats;
	struct net_device_stats	stats_saved;
	struct typhoon_shared *	shared;
	dma_addr_t		shared_dma;
	__le16			xcvr_select;
	__le16			wol_events;
	__le32			offload;

	/* unused stuff (future use) */
	int			capabilities;
	struct transmit_ring 	txHiRing;
};

/* How to wait for the 3XP in typhoon_reset() / command handling. */
enum completion_wait_values {
	NoWait = 0, WaitNoSleep, WaitSleep,
};

/* These are the values for the typhoon.card_state variable.
 * These determine where the statistics will come from in get_stats().
 * The sleep image does not support the statistics we need.
 */
enum state_values {
	Sleeping = 0, Running,
};

/* PCI writes are not guaranteed to be posted in order, but outstanding writes
 * cannot pass a read, so this forces current writes to post.
 */
#define typhoon_post_pci_writes(x) \
	do { if(likely(use_mmio)) ioread32(x+TYPHOON_REG_HEARTBEAT); } while(0)

/* We'll wait up to six seconds for a reset, and half a second normally.
 */
#define TYPHOON_UDELAY			50
#define TYPHOON_RESET_TIMEOUT_SLEEP	(6 * HZ)
#define TYPHOON_RESET_TIMEOUT_NOSLEEP	((6 * 1000000) / TYPHOON_UDELAY)
#define TYPHOON_WAIT_TIMEOUT		((1000000 / 2) / TYPHOON_UDELAY)

/* Stub out the TSO helpers when the kernel has no NETIF_F_TSO. */
#if defined(NETIF_F_TSO)
#define skb_tso_size(x)		(skb_shinfo(x)->gso_size)
#define TSO_NUM_DESCRIPTORS	2
#define TSO_OFFLOAD_ON		TYPHOON_OFFLOAD_TCP_SEGMENT
#else
#define NETIF_F_TSO 		0
#define skb_tso_size(x) 	0
#define TSO_NUM_DESCRIPTORS	0
#define TSO_OFFLOAD_ON		0
#endif
334 | |||
335 | static inline void | ||
336 | typhoon_inc_index(u32 *index, const int count, const int num_entries) | ||
337 | { | ||
338 | /* Increment a ring index -- we can use this for all rings execept | ||
339 | * the Rx rings, as they use different size descriptors | ||
340 | * otherwise, everything is the same size as a cmd_desc | ||
341 | */ | ||
342 | *index += count * sizeof(struct cmd_desc); | ||
343 | *index %= num_entries * sizeof(struct cmd_desc); | ||
344 | } | ||
345 | |||
/* Advance a command-ring index. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, COMMAND_ENTRIES);
}

/* Advance a response-ring index. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}

/* Advance a free-buffer-ring index. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
	typhoon_inc_index(index, count, RXFREE_ENTRIES);
}

/* Advance a (low-priority) TX-ring index. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
	/* if we start using the Hi Tx ring, this needs updating */
	typhoon_inc_index(index, count, TXLO_ENTRIES);
}

/* Advance an RX-ring index; RX descriptors have their own size. */
static inline void
typhoon_inc_rx_index(u32 *index, const int count)
{
	/* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
	*index += count * sizeof(struct rx_desc);
	*index %= RX_ENTRIES * sizeof(struct rx_desc);
}
378 | |||
/*
 * Reset the 3XP and optionally wait for it to come back up.
 *
 * wait_type selects the polling mode: NoWait returns right after the
 * reset is issued, WaitNoSleep busy-waits with udelay(), and WaitSleep
 * may schedule between polls.  Returns 0 once the card reports
 * TYPHOON_STATUS_WAITING_FOR_HOST, or -ETIMEDOUT.
 */
static int
typhoon_reset(void __iomem *ioaddr, int wait_type)
{
	int i, err = 0;
	int timeout;

	if(wait_type == WaitNoSleep)
		timeout = TYPHOON_RESET_TIMEOUT_NOSLEEP;
	else
		timeout = TYPHOON_RESET_TIMEOUT_SLEEP;

	/* mask and ack everything before poking the reset register */
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* pulse the reset line: assert, post, brief delay, deassert */
	iowrite32(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
	typhoon_post_pci_writes(ioaddr);
	udelay(1);
	iowrite32(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

	if(wait_type != NoWait) {
		for(i = 0; i < timeout; i++) {
			if(ioread32(ioaddr + TYPHOON_REG_STATUS) ==
			   TYPHOON_STATUS_WAITING_FOR_HOST)
				goto out;

			if(wait_type == WaitSleep)
				schedule_timeout_uninterruptible(1);
			else
				udelay(TYPHOON_UDELAY);
		}

		err = -ETIMEDOUT;
	}

out:
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

	/* The 3XP seems to need a little extra time to complete the load
	 * of the sleep image before we can reliably boot it. Failure to
	 * do this occasionally results in a hung adapter after boot in
	 * typhoon_init_one() while trying to read the MAC address or
	 * putting the card to sleep. 3Com's driver waits 5ms, but
	 * that seems to be overkill. However, if we can sleep, we might
	 * as well give it that much time. Otherwise, we'll give it 500us,
	 * which should be enough (I've see it work well at 100us, but still
	 * saw occasional problems.)
	 */
	if(wait_type == WaitSleep)
		msleep(5);
	else
		udelay(500);
	return err;
}
433 | |||
434 | static int | ||
435 | typhoon_wait_status(void __iomem *ioaddr, u32 wait_value) | ||
436 | { | ||
437 | int i, err = 0; | ||
438 | |||
439 | for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { | ||
440 | if(ioread32(ioaddr + TYPHOON_REG_STATUS) == wait_value) | ||
441 | goto out; | ||
442 | udelay(TYPHOON_UDELAY); | ||
443 | } | ||
444 | |||
445 | err = -ETIMEDOUT; | ||
446 | |||
447 | out: | ||
448 | return err; | ||
449 | } | ||
450 | |||
451 | static inline void | ||
452 | typhoon_media_status(struct net_device *dev, struct resp_desc *resp) | ||
453 | { | ||
454 | if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK) | ||
455 | netif_carrier_off(dev); | ||
456 | else | ||
457 | netif_carrier_on(dev); | ||
458 | } | ||
459 | |||
/* Answer a HELLO request from the card by posting a HELLO_RESP command.
 * Called from response processing; must not sleep.
 */
static inline void
typhoon_hello(struct typhoon *tp)
{
	struct basic_ring *ring = &tp->cmdRing;
	struct cmd_desc *cmd;

	/* We only get a hello request if we've not sent anything to the
	 * card in a long while. If the lock is held, then we're in the
	 * process of issuing a command, so we don't need to respond.
	 */
	if(spin_trylock(&tp->command_lock)) {
		cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
		typhoon_inc_cmd_index(&ring->lastWrite, 1);

		INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
		/* descriptor must be visible in memory before the doorbell
		 * write tells the NIC about it
		 */
		wmb();
		iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
		spin_unlock(&tp->command_lock);
	}
}
480 | |||
/* Drain the response ring, handling each entry.
 *
 * Entries with a sequence number are command responses: up to @resp_size
 * descriptors are copied into @resp_save (handling ring wrap-around), and
 * @resp_save is NULLed to mark "got it". Unsolicited media-status and hello
 * responses are handled inline; anything else is logged and dropped.
 *
 * Returns nonzero once the awaited response (if any) has been captured,
 * i.e. resp_save == NULL. Pass resp_save == NULL if no response is awaited.
 */
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
		struct resp_desc *resp_save)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct resp_desc *resp;
	u8 *base = tp->respRing.ringBase;
	int count, len, wrap_len;
	u32 cleared;
	u32 ready;

	cleared = le32_to_cpu(indexes->respCleared);
	ready = le32_to_cpu(indexes->respReady);
	while(cleared != ready) {
		resp = (struct resp_desc *)(base + cleared);
		count = resp->numDesc + 1;
		if(resp_save && resp->seqNo) {
			if(count > resp_size) {
				/* response larger than the caller's buffer --
				 * report the error but still consume it
				 */
				resp_save->flags = TYPHOON_RESP_ERROR;
				goto cleanup;
			}

			/* the response may straddle the end of the ring;
			 * split the copy in two if so
			 */
			wrap_len = 0;
			len = count * sizeof(*resp);
			if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
				wrap_len = cleared + len - RESPONSE_RING_SIZE;
				len = RESPONSE_RING_SIZE - cleared;
			}

			memcpy(resp_save, resp, len);
			if(unlikely(wrap_len)) {
				resp_save += len / sizeof(*resp);
				memcpy(resp_save, base, wrap_len);
			}

			/* mark the awaited response as captured */
			resp_save = NULL;
		} else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
			typhoon_media_status(tp->dev, resp);
		} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
			typhoon_hello(tp);
		} else {
			netdev_err(tp->dev,
				   "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
				   le16_to_cpu(resp->cmd),
				   resp->numDesc, resp->flags,
				   le16_to_cpu(resp->parm1),
				   le32_to_cpu(resp->parm2),
				   le32_to_cpu(resp->parm3));
		}

cleanup:
		typhoon_inc_resp_index(&cleared, count);
	}

	/* publish the new cleared index before anything else proceeds */
	indexes->respCleared = cpu_to_le32(cleared);
	wmb();
	return resp_save == NULL;
}
539 | |||
540 | static inline int | ||
541 | typhoon_num_free(int lastWrite, int lastRead, int ringSize) | ||
542 | { | ||
543 | /* this works for all descriptors but rx_desc, as they are a | ||
544 | * different size than the cmd_desc -- everyone else is the same | ||
545 | */ | ||
546 | lastWrite /= sizeof(struct cmd_desc); | ||
547 | lastRead /= sizeof(struct cmd_desc); | ||
548 | return (ringSize + lastRead - lastWrite - 1) % ringSize; | ||
549 | } | ||
550 | |||
551 | static inline int | ||
552 | typhoon_num_free_cmd(struct typhoon *tp) | ||
553 | { | ||
554 | int lastWrite = tp->cmdRing.lastWrite; | ||
555 | int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared); | ||
556 | |||
557 | return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES); | ||
558 | } | ||
559 | |||
560 | static inline int | ||
561 | typhoon_num_free_resp(struct typhoon *tp) | ||
562 | { | ||
563 | int respReady = le32_to_cpu(tp->indexes->respReady); | ||
564 | int respCleared = le32_to_cpu(tp->indexes->respCleared); | ||
565 | |||
566 | return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES); | ||
567 | } | ||
568 | |||
569 | static inline int | ||
570 | typhoon_num_free_tx(struct transmit_ring *ring) | ||
571 | { | ||
572 | /* if we start using the Hi Tx ring, this needs updating */ | ||
573 | return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES); | ||
574 | } | ||
575 | |||
/* Post @num_cmd command descriptors to the card and, if the command asks
 * for a response (TYPHOON_CMD_RESPOND), busy-wait for up to @num_resp
 * response descriptors into @resp (a scratch buffer is supplied if the
 * caller passed NULL).
 *
 * Serialized by tp->command_lock. Busy-waits -- see the long comment
 * below -- so avoid responding commands on hot paths.
 *
 * Returns 0 on success, -ENOMEM if the rings are too full, -ETIMEDOUT if
 * no response arrived, -EIO if the card flagged the response as an error.
 */
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
		int num_resp, struct resp_desc *resp)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct basic_ring *ring = &tp->cmdRing;
	struct resp_desc local_resp;
	int i, err = 0;
	int got_resp;
	int freeCmd, freeResp;
	int len, wrap_len;

	spin_lock(&tp->command_lock);

	freeCmd = typhoon_num_free_cmd(tp);
	freeResp = typhoon_num_free_resp(tp);

	if(freeCmd < num_cmd || freeResp < num_resp) {
		netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
			   freeCmd, num_cmd, freeResp, num_resp);
		err = -ENOMEM;
		goto out;
	}

	if(cmd->flags & TYPHOON_CMD_RESPOND) {
		/* If we're expecting a response, but the caller hasn't given
		 * us a place to put it, we'll provide one.
		 */
		tp->awaiting_resp = 1;
		if(resp == NULL) {
			resp = &local_resp;
			num_resp = 1;
		}
	}

	/* copy the command(s) into the ring, splitting at the wrap point
	 * if necessary
	 */
	wrap_len = 0;
	len = num_cmd * sizeof(*cmd);
	if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
		wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
		len = COMMAND_RING_SIZE - ring->lastWrite;
	}

	memcpy(ring->ringBase + ring->lastWrite, cmd, len);
	if(unlikely(wrap_len)) {
		struct cmd_desc *wrap_ptr = cmd;
		wrap_ptr += len / sizeof(*cmd);
		memcpy(ring->ringBase, wrap_ptr, wrap_len);
	}

	typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

	/* "I feel a presence... another warrior is on the mesa."
	 */
	wmb();
	iowrite32(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
	typhoon_post_pci_writes(tp->ioaddr);

	if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
		goto out;

	/* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
	 * preempt or do anything other than take interrupts. So, don't
	 * wait for a response unless you have to.
	 *
	 * I've thought about trying to sleep here, but we're called
	 * from many contexts that don't allow that. Also, given the way
	 * 3Com has implemented irq coalescing, we would likely timeout --
	 * this has been observed in real life!
	 *
	 * The big killer is we have to wait to get stats from the card,
	 * though we could go to a periodic refresh of those if we don't
	 * mind them getting somewhat stale. The rest of the waiting
	 * commands occur during open/close/suspend/resume, so they aren't
	 * time critical. Creating SAs in the future will also have to
	 * wait here.
	 */
	got_resp = 0;
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
		if(indexes->respCleared != indexes->respReady)
			got_resp = typhoon_process_response(tp, num_resp,
								resp);
		udelay(TYPHOON_UDELAY);
	}

	if(!got_resp) {
		err = -ETIMEDOUT;
		goto out;
	}

	/* Collect the error response even if we don't care about the
	 * rest of the response
	 */
	if(resp->flags & TYPHOON_RESP_ERROR)
		err = -EIO;

out:
	if(tp->awaiting_resp) {
		tp->awaiting_resp = 0;
		smp_wmb();

		/* Ugh. If a response was added to the ring between
		 * the call to typhoon_process_response() and the clearing
		 * of tp->awaiting_resp, we could have missed the interrupt
		 * and it could hang in the ring an indeterminate amount of
		 * time. So, check for it, and interrupt ourselves if this
		 * is the case.
		 */
		if(indexes->respCleared != indexes->respReady)
			iowrite32(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
	}

	spin_unlock(&tp->command_lock);
	return err;
}
690 | |||
/* Append a TSO option descriptor for @skb to @txRing.
 * @ring_dma is the bus address of the ring base; respAddrLo is pointed at
 * this descriptor's own bytesTx field -- presumably so the NIC can write
 * completion status back in place (TODO confirm against firmware docs).
 * Consumes exactly one ring slot.
 */
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
		u32 ring_dma)
{
	struct tcpopt_desc *tcpd;
	u32 tcpd_offset = ring_dma;

	tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
	tcpd_offset += txRing->lastWrite;
	tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
	tcpd->numDesc = 1;
	tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
	/* whole skb is segmented by this one descriptor */
	tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
	tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
	tcpd->bytesTx = cpu_to_le32(skb->len);
	tcpd->status = 0;
}
711 | |||
/* ndo_start_xmit handler: build the descriptor chain for @skb on the low
 * priority Tx ring, kick the card, and stop the queue if a worst-case
 * packet no longer fits. The skb pointer is stashed in the first
 * descriptor's tx_addr -- presumably recovered at Tx-complete time to
 * free the skb (completion path not visible here). Always returns
 * NETDEV_TX_OK.
 */
static netdev_tx_t
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTRIES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if (skb_is_gso(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	/* stash the skb pointer as a cookie in the descriptor */
	first_txd->tx_addr = (u64)((unsigned long) skb);
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_PARTIAL) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if (skb_is_gso(skb)) {
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* linear skb: one fragment descriptor covers it all */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* head area first, then one descriptor per page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
		                         PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->frag.addr = cpu_to_le32(skb_dma);
		txd->frag.addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->frag.addr = cpu_to_le32(skb_dma);
			txd->frag.addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP
	 */
	wmb();
	iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten between, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
859 | |||
/* ndo_set_rx_mode handler: translate the netdev's flags and multicast
 * list into a 3XP Rx filter. For a bounded multicast list a 64-bit CRC
 * hash table is uploaded first, then the filter command is issued.
 * Command failures are ignored -- this callback cannot report errors.
 */
static void
typhoon_set_rx_mode(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;
	u32 mc_filter[2];
	__le16 filter;

	filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(dev->flags & IFF_PROMISC) {
		filter |= TYPHOON_RX_FILTER_PROMISCOUS;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		  (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		filter |= TYPHOON_RX_FILTER_ALL_MCAST;
	} else if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;

		/* build a 64-bit hash table from the mc list */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
			mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
		}

		INIT_COMMAND_NO_RESPONSE(&xp_cmd,
					 TYPHOON_CMD_SET_MULTICAST_HASH);
		xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
		xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
		xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
		typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

		filter |= TYPHOON_RX_FILTER_MCAST_HASH;
	}

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = filter;
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
}
898 | |||
/* Fetch the card's statistics via READ_STATS and fold them into
 * tp->stats, adding the totals saved across the last sleep/reset
 * (tp->stats_saved). Also refreshes tp->speed/tp->duplex from the
 * reported link status. Busy-waits inside typhoon_issue_command().
 * Returns 0 on success or a negative errno from the command.
 */
static int
typhoon_do_get_stats(struct typhoon *tp)
{
	struct net_device_stats *stats = &tp->stats;
	struct net_device_stats *saved = &tp->stats_saved;
	struct cmd_desc xp_cmd;
	struct resp_desc xp_resp[7];
	struct stats_resp *s = (struct stats_resp *) xp_resp;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
	if(err < 0)
		return err;

	/* 3Com's Linux driver uses txMultipleCollisions as it's
	 * collisions value, but there is some other collision info as well...
	 *
	 * The extra status reported would be a good candidate for
	 * ethtool_ops->get_{strings,stats}()
	 */
	stats->tx_packets = le32_to_cpu(s->txPackets) +
			saved->tx_packets;
	stats->tx_bytes = le64_to_cpu(s->txBytes) +
			saved->tx_bytes;
	stats->tx_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_errors;
	stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost) +
			saved->tx_carrier_errors;
	stats->collisions = le32_to_cpu(s->txMultipleCollisions) +
			saved->collisions;
	stats->rx_packets = le32_to_cpu(s->rxPacketsGood) +
			saved->rx_packets;
	stats->rx_bytes = le64_to_cpu(s->rxBytesGood) +
			saved->rx_bytes;
	stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns) +
			saved->rx_fifo_errors;
	stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
			le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors) +
			saved->rx_errors;
	stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors) +
			saved->rx_crc_errors;
	stats->rx_length_errors = le32_to_cpu(s->rxOversized) +
			saved->rx_length_errors;
	/* cache link speed/duplex for ethtool get_settings */
	tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
			SPEED_100 : SPEED_10;
	tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
950 | |||
951 | static struct net_device_stats * | ||
952 | typhoon_get_stats(struct net_device *dev) | ||
953 | { | ||
954 | struct typhoon *tp = netdev_priv(dev); | ||
955 | struct net_device_stats *stats = &tp->stats; | ||
956 | struct net_device_stats *saved = &tp->stats_saved; | ||
957 | |||
958 | smp_rmb(); | ||
959 | if(tp->card_state == Sleeping) | ||
960 | return saved; | ||
961 | |||
962 | if(typhoon_do_get_stats(tp) < 0) { | ||
963 | netdev_err(dev, "error getting stats\n"); | ||
964 | return saved; | ||
965 | } | ||
966 | |||
967 | return stats; | ||
968 | } | ||
969 | |||
970 | static int | ||
971 | typhoon_set_mac_address(struct net_device *dev, void *addr) | ||
972 | { | ||
973 | struct sockaddr *saddr = (struct sockaddr *) addr; | ||
974 | |||
975 | if(netif_running(dev)) | ||
976 | return -EBUSY; | ||
977 | |||
978 | memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len); | ||
979 | return 0; | ||
980 | } | ||
981 | |||
982 | static void | ||
983 | typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
984 | { | ||
985 | struct typhoon *tp = netdev_priv(dev); | ||
986 | struct pci_dev *pci_dev = tp->pdev; | ||
987 | struct cmd_desc xp_cmd; | ||
988 | struct resp_desc xp_resp[3]; | ||
989 | |||
990 | smp_rmb(); | ||
991 | if(tp->card_state == Sleeping) { | ||
992 | strcpy(info->fw_version, "Sleep image"); | ||
993 | } else { | ||
994 | INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); | ||
995 | if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { | ||
996 | strcpy(info->fw_version, "Unknown runtime"); | ||
997 | } else { | ||
998 | u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); | ||
999 | snprintf(info->fw_version, 32, "%02x.%03x.%03x", | ||
1000 | sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, | ||
1001 | sleep_ver & 0xfff); | ||
1002 | } | ||
1003 | } | ||
1004 | |||
1005 | strcpy(info->driver, KBUILD_MODNAME); | ||
1006 | strcpy(info->bus_info, pci_name(pci_dev)); | ||
1007 | } | ||
1008 | |||
/* ethtool get_settings handler: report supported/advertised modes from
 * the configured transceiver selection, and current speed/duplex (which
 * requires a stats query to refresh). Always returns 0.
 * NOTE(review): an unrecognized tp->xcvr_select leaves cmd->advertising
 * untouched (the ethtool core presumably zeroed it -- confirm).
 */
static int
typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct typhoon *tp = netdev_priv(dev);

	cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
				SUPPORTED_Autoneg;

	switch (tp->xcvr_select) {
	case TYPHOON_XCVR_10HALF:
		cmd->advertising = ADVERTISED_10baseT_Half;
		break;
	case TYPHOON_XCVR_10FULL:
		cmd->advertising = ADVERTISED_10baseT_Full;
		break;
	case TYPHOON_XCVR_100HALF:
		cmd->advertising = ADVERTISED_100baseT_Half;
		break;
	case TYPHOON_XCVR_100FULL:
		cmd->advertising = ADVERTISED_100baseT_Full;
		break;
	case TYPHOON_XCVR_AUTONEG:
		cmd->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
		break;
	}

	if(tp->capabilities & TYPHOON_FIBER) {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
		cmd->port = PORT_FIBRE;
	} else {
		cmd->supported |= SUPPORTED_10baseT_Half |
		    			SUPPORTED_10baseT_Full |
					SUPPORTED_TP;
		cmd->advertising |= ADVERTISED_TP;
		cmd->port = PORT_TP;
	}

	/* need to get stats to make these link speed/duplex valid */
	typhoon_do_get_stats(tp);
	ethtool_cmd_speed_set(cmd, tp->speed);
	cmd->duplex = tp->duplex;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 1;
	cmd->maxrxpkt = 1;

	return 0;
}
1066 | |||
1067 | static int | ||
1068 | typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1069 | { | ||
1070 | struct typhoon *tp = netdev_priv(dev); | ||
1071 | u32 speed = ethtool_cmd_speed(cmd); | ||
1072 | struct cmd_desc xp_cmd; | ||
1073 | __le16 xcvr; | ||
1074 | int err; | ||
1075 | |||
1076 | err = -EINVAL; | ||
1077 | if (cmd->autoneg == AUTONEG_ENABLE) { | ||
1078 | xcvr = TYPHOON_XCVR_AUTONEG; | ||
1079 | } else { | ||
1080 | if (cmd->duplex == DUPLEX_HALF) { | ||
1081 | if (speed == SPEED_10) | ||
1082 | xcvr = TYPHOON_XCVR_10HALF; | ||
1083 | else if (speed == SPEED_100) | ||
1084 | xcvr = TYPHOON_XCVR_100HALF; | ||
1085 | else | ||
1086 | goto out; | ||
1087 | } else if (cmd->duplex == DUPLEX_FULL) { | ||
1088 | if (speed == SPEED_10) | ||
1089 | xcvr = TYPHOON_XCVR_10FULL; | ||
1090 | else if (speed == SPEED_100) | ||
1091 | xcvr = TYPHOON_XCVR_100FULL; | ||
1092 | else | ||
1093 | goto out; | ||
1094 | } else | ||
1095 | goto out; | ||
1096 | } | ||
1097 | |||
1098 | INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT); | ||
1099 | xp_cmd.parm1 = xcvr; | ||
1100 | err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL); | ||
1101 | if(err < 0) | ||
1102 | goto out; | ||
1103 | |||
1104 | tp->xcvr_select = xcvr; | ||
1105 | if(cmd->autoneg == AUTONEG_ENABLE) { | ||
1106 | tp->speed = 0xff; /* invalid */ | ||
1107 | tp->duplex = 0xff; /* invalid */ | ||
1108 | } else { | ||
1109 | tp->speed = speed; | ||
1110 | tp->duplex = cmd->duplex; | ||
1111 | } | ||
1112 | |||
1113 | out: | ||
1114 | return err; | ||
1115 | } | ||
1116 | |||
1117 | static void | ||
1118 | typhoon_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
1119 | { | ||
1120 | struct typhoon *tp = netdev_priv(dev); | ||
1121 | |||
1122 | wol->supported = WAKE_PHY | WAKE_MAGIC; | ||
1123 | wol->wolopts = 0; | ||
1124 | if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT) | ||
1125 | wol->wolopts |= WAKE_PHY; | ||
1126 | if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) | ||
1127 | wol->wolopts |= WAKE_MAGIC; | ||
1128 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | ||
1129 | } | ||
1130 | |||
1131 | static int | ||
1132 | typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
1133 | { | ||
1134 | struct typhoon *tp = netdev_priv(dev); | ||
1135 | |||
1136 | if(wol->wolopts & ~(WAKE_PHY | WAKE_MAGIC)) | ||
1137 | return -EINVAL; | ||
1138 | |||
1139 | tp->wol_events = 0; | ||
1140 | if(wol->wolopts & WAKE_PHY) | ||
1141 | tp->wol_events |= TYPHOON_WAKE_LINK_EVENT; | ||
1142 | if(wol->wolopts & WAKE_MAGIC) | ||
1143 | tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT; | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
1147 | |||
1148 | static void | ||
1149 | typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | ||
1150 | { | ||
1151 | ering->rx_max_pending = RXENT_ENTRIES; | ||
1152 | ering->rx_mini_max_pending = 0; | ||
1153 | ering->rx_jumbo_max_pending = 0; | ||
1154 | ering->tx_max_pending = TXLO_ENTRIES - 1; | ||
1155 | |||
1156 | ering->rx_pending = RXENT_ENTRIES; | ||
1157 | ering->rx_mini_pending = 0; | ||
1158 | ering->rx_jumbo_pending = 0; | ||
1159 | ering->tx_pending = TXLO_ENTRIES - 1; | ||
1160 | } | ||
1161 | |||
/* ethtool entry points. Ring sizes are fixed, so there is no
 * set_ringparam; link state comes from the generic helper.
 */
static const struct ethtool_ops typhoon_ethtool_ops = {
	.get_settings		= typhoon_get_settings,
	.set_settings		= typhoon_set_settings,
	.get_drvinfo		= typhoon_get_drvinfo,
	.get_wol		= typhoon_get_wol,
	.set_wol		= typhoon_set_wol,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= typhoon_get_ringparam,
};
1171 | |||
1172 | static int | ||
1173 | typhoon_wait_interrupt(void __iomem *ioaddr) | ||
1174 | { | ||
1175 | int i, err = 0; | ||
1176 | |||
1177 | for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) { | ||
1178 | if(ioread32(ioaddr + TYPHOON_REG_INTR_STATUS) & | ||
1179 | TYPHOON_INTR_BOOTCMD) | ||
1180 | goto out; | ||
1181 | udelay(TYPHOON_UDELAY); | ||
1182 | } | ||
1183 | |||
1184 | err = -ETIMEDOUT; | ||
1185 | |||
1186 | out: | ||
1187 | iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS); | ||
1188 | return err; | ||
1189 | } | ||
1190 | |||
/* Byte offset of member @x within struct typhoon_shared. */
#define shared_offset(x)	offsetof(struct typhoon_shared, x)
1192 | |||
/* Initialize the host/card shared memory area: zero it, fill in the
 * interface block with the bus addresses and sizes of every ring, and
 * set up the host-side ring bookkeeping and default offload flags.
 * Must run before the card is booted.
 */
static void
typhoon_init_interface(struct typhoon *tp)
{
	struct typhoon_interface *iface = &tp->shared->iface;
	dma_addr_t shared_dma;

	memset(tp->shared, 0, sizeof(struct typhoon_shared));

	/* The *Hi members of iface are all init'd to zero by the memset().
	 */
	shared_dma = tp->shared_dma + shared_offset(indexes);
	iface->ringIndex = cpu_to_le32(shared_dma);

	shared_dma = tp->shared_dma + shared_offset(txLo);
	iface->txLoAddr = cpu_to_le32(shared_dma);
	iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(txHi);
	iface->txHiAddr = cpu_to_le32(shared_dma);
	iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxBuff);
	iface->rxBuffAddr = cpu_to_le32(shared_dma);
	iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
					sizeof(struct rx_free));

	shared_dma = tp->shared_dma + shared_offset(rxLo);
	iface->rxLoAddr = cpu_to_le32(shared_dma);
	iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(rxHi);
	iface->rxHiAddr = cpu_to_le32(shared_dma);
	iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));

	shared_dma = tp->shared_dma + shared_offset(cmd);
	iface->cmdAddr = cpu_to_le32(shared_dma);
	iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(resp);
	iface->respAddr = cpu_to_le32(shared_dma);
	iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);

	shared_dma = tp->shared_dma + shared_offset(zeroWord);
	iface->zeroAddr = cpu_to_le32(shared_dma);

	/* host-side (virtual address) views of the same rings */
	tp->indexes = &tp->shared->indexes;
	tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
	tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
	tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
	tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
	tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
	tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
	tp->respRing.ringBase = (u8 *) tp->shared->resp;

	tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
	tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;

	/* bus address of the Lo Tx ring, needed by the TSO fill code */
	tp->txlo_dma_addr = le32_to_cpu(iface->txLoAddr);
	tp->card_state = Sleeping;

	tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
	tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
	tp->offload |= TYPHOON_OFFLOAD_VLAN;

	spin_lock_init(&tp->command_lock);

	/* Force the writes to the shared memory area out before continuing. */
	wmb();
}
1262 | |||
1263 | static void | ||
1264 | typhoon_init_rings(struct typhoon *tp) | ||
1265 | { | ||
1266 | memset(tp->indexes, 0, sizeof(struct typhoon_indexes)); | ||
1267 | |||
1268 | tp->txLoRing.lastWrite = 0; | ||
1269 | tp->txHiRing.lastWrite = 0; | ||
1270 | tp->rxLoRing.lastWrite = 0; | ||
1271 | tp->rxHiRing.lastWrite = 0; | ||
1272 | tp->rxBuffRing.lastWrite = 0; | ||
1273 | tp->cmdRing.lastWrite = 0; | ||
1274 | tp->respRing.lastWrite = 0; | ||
1275 | |||
1276 | tp->txLoRing.lastRead = 0; | ||
1277 | tp->txHiRing.lastRead = 0; | ||
1278 | } | ||
1279 | |||
/* Firmware image shared by all typhoon devices; fetched on first use
 * by typhoon_request_firmware() and cached here.
 */
static const struct firmware *typhoon_fw;
1281 | |||
1282 | static int | ||
1283 | typhoon_request_firmware(struct typhoon *tp) | ||
1284 | { | ||
1285 | const struct typhoon_file_header *fHdr; | ||
1286 | const struct typhoon_section_header *sHdr; | ||
1287 | const u8 *image_data; | ||
1288 | u32 numSections; | ||
1289 | u32 section_len; | ||
1290 | u32 remaining; | ||
1291 | int err; | ||
1292 | |||
1293 | if (typhoon_fw) | ||
1294 | return 0; | ||
1295 | |||
1296 | err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev); | ||
1297 | if (err) { | ||
1298 | netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", | ||
1299 | FIRMWARE_NAME); | ||
1300 | return err; | ||
1301 | } | ||
1302 | |||
1303 | image_data = (u8 *) typhoon_fw->data; | ||
1304 | remaining = typhoon_fw->size; | ||
1305 | if (remaining < sizeof(struct typhoon_file_header)) | ||
1306 | goto invalid_fw; | ||
1307 | |||
1308 | fHdr = (struct typhoon_file_header *) image_data; | ||
1309 | if (memcmp(fHdr->tag, "TYPHOON", 8)) | ||
1310 | goto invalid_fw; | ||
1311 | |||
1312 | numSections = le32_to_cpu(fHdr->numSections); | ||
1313 | image_data += sizeof(struct typhoon_file_header); | ||
1314 | remaining -= sizeof(struct typhoon_file_header); | ||
1315 | |||
1316 | while (numSections--) { | ||
1317 | if (remaining < sizeof(struct typhoon_section_header)) | ||
1318 | goto invalid_fw; | ||
1319 | |||
1320 | sHdr = (struct typhoon_section_header *) image_data; | ||
1321 | image_data += sizeof(struct typhoon_section_header); | ||
1322 | section_len = le32_to_cpu(sHdr->len); | ||
1323 | |||
1324 | if (remaining < section_len) | ||
1325 | goto invalid_fw; | ||
1326 | |||
1327 | image_data += section_len; | ||
1328 | remaining -= section_len; | ||
1329 | } | ||
1330 | |||
1331 | return 0; | ||
1332 | |||
1333 | invalid_fw: | ||
1334 | netdev_err(tp->dev, "Invalid firmware image\n"); | ||
1335 | release_firmware(typhoon_fw); | ||
1336 | typhoon_fw = NULL; | ||
1337 | return -EINVAL; | ||
1338 | } | ||
1339 | |||
/* Push the validated firmware image down to the 3XP, one page at a time.
 *
 * Each section is copied (with checksum) into a single coherent DMA page
 * and handed to the card via the BOOT_* registers; the card raises the
 * BOOTCMD interrupt status when it is ready for the next segment.  The
 * BOOTCMD interrupt is masked for the duration so the handler never sees
 * it.  Assumes typhoon_request_firmware() already validated the image.
 *
 * Returns 0 on success, -ENOMEM if the DMA page cannot be allocated, or
 * -ETIMEDOUT if the card stops responding.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	void __iomem *ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	const struct typhoon_file_header *fHdr;
	const struct typhoon_section_header *sHdr;
	const u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	__sum16 csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	u32 hmac;
	int i;
	int err;

	image_data = (u8 *) typhoon_fw->data;
	fHdr = (struct typhoon_file_header *) image_data;

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is vmalloc()'d and may not be physically contiguous,
	 * so we allocate some consistent memory to copy the sections into.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		netdev_err(tp->dev, "no DMA mem for firmware\n");
		goto err_out;
	}

	/* Enable the BOOTCMD interrupt source but mask it from the host,
	 * so we can poll INTR_STATUS for it without taking an irq.
	 */
	irqEnabled = ioread32(ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = ioread32(ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(tp->dev, "card ready timeout\n");
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Hand the card the image start address and the five HMAC digest
	 * words from the file header, then kick off the runtime download.
	 */
	iowrite32(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	iowrite32(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	hmac = le32_to_cpu(fHdr->hmacDigest[0]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_0);
	hmac = le32_to_cpu(fHdr->hmacDigest[1]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_1);
	hmac = le32_to_cpu(fHdr->hmacDigest[2]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_2);
	hmac = le32_to_cpu(fHdr->hmacDigest[3]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_3);
	hmac = le32_to_cpu(fHdr->hmacDigest[4]);
	iowrite32(hmac, ioaddr + TYPHOON_REG_DOWNLOAD_HMAC_4);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The ioread32() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		/* Feed the section to the card in PAGE_SIZE chunks, waiting
		 * for the card to request each segment.
		 */
		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				netdev_err(tp->dev, "segment ready timeout\n");
				goto err_out_irq;
			}

			/* Do an pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 */
			csum = csum_fold(csum_partial_copy_nocheck(image_data,
								   dpage, len,
								   0));

			iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			iowrite32(le16_to_cpu((__force __le16)csum),
			       ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			iowrite32(load_addr,
			       ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			iowrite32(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	/* Wait for the card to ask for one more segment, then tell it the
	 * download is complete instead.
	 */
	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   ioread32(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		netdev_err(tp->dev, "final segment ready timeout\n");
		goto err_out_irq;
	}

	iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* Restore the interrupt mask/enable state we found on entry. */
	iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1480 | |||
/* Boot the image currently loaded on the 3XP.
 *
 * Waits for the card to report @initial_status, hands it the DMA address
 * of the shared boot record, waits for TYPHOON_STATUS_RUNNING, then
 * clears the Tx/command ready registers and issues the final BOOT
 * command.
 *
 * Returns 0 on success, -ETIMEDOUT if either status wait fails.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	void __iomem *ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		netdev_err(tp->dev, "boot ready timeout\n");
		goto out_timeout;
	}

	/* High half is always 0 -- no card does 64 bit DAC. */
	iowrite32(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	iowrite32(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_REG_BOOT_RECORD,
	       ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
			   ioread32(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	iowrite32(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_CMD_READY);
	iowrite32(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	iowrite32(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1516 | |||
/* Reclaim completed tx descriptors from @txRing, up to the card's
 * cleared position *@index.
 *
 * TYPHOON_TX_DESC entries carry the skb pointer stashed in tx_addr --
 * free the skb.  TYPHOON_FRAG_DESC entries carry a DMA mapping -- unmap
 * it.  Returns the advanced lastRead offset; the caller is responsible
 * for storing it back into the ring.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile __le32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet.
			 */
			unsigned long ptr = tx->tx_addr;
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* mark the slot free before advancing past it */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1552 | |||
1553 | static void | ||
1554 | typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing, | ||
1555 | volatile __le32 * index) | ||
1556 | { | ||
1557 | u32 lastRead; | ||
1558 | int numDesc = MAX_SKB_FRAGS + 1; | ||
1559 | |||
1560 | /* This will need changing if we start to use the Hi Tx ring. */ | ||
1561 | lastRead = typhoon_clean_tx(tp, txRing, index); | ||
1562 | if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite, | ||
1563 | lastRead, TXLO_ENTRIES) > (numDesc + 2)) | ||
1564 | netif_wake_queue(tp->dev); | ||
1565 | |||
1566 | txRing->lastRead = lastRead; | ||
1567 | smp_wmb(); | ||
1568 | } | ||
1569 | |||
/* Repost the still-mapped rx buffer at @idx onto the free-buffer ring.
 *
 * Used when the existing skb and DMA mapping remain usable (rx error,
 * or the packet was copied out via copybreak).  If the free ring is
 * full, the skb is dropped instead of reposted.
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* full when advancing lastWrite would land on the card's cleared
	 * index */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared)) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1596 | |||
/* Allocate and DMA-map a fresh skb for rx slot @idx and post it on the
 * free-buffer ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (rxb->skb is left NULL in that case).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* full when advancing lastWrite would land on the card's cleared
	 * index */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				le32_to_cpu(indexes->rxBuffCleared))
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->data,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1643 | |||
/* Process up to @budget received packets from @rxRing.
 *
 * Walks descriptors from *@cleared up to *@ready.  Errored frames just
 * recycle their buffer.  Small frames (< rx_copybreak) are copied into a
 * freshly allocated skb so the original mapped buffer can be recycled;
 * larger frames hand the original skb up the stack and a replacement
 * buffer is allocated.  Checksum-good and VLAN metadata from the
 * descriptor are propagated to the skb before netif_receive_skb().
 *
 * Writes the advanced position back to *@cleared and returns the number
 * of packets delivered.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * ready,
	   volatile __le32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	__le32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		typhoon_inc_rx_index(&rxaddr, 1);

		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copybreak: copy into a small skb (reserving 2
			 * bytes to align the IP header) and keep the big
			 * mapped buffer on the free ring */
			skb_reserve(new_skb, 2);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
						    PKT_BUF_SZ,
						    PCI_DMA_FROMDEVICE);
			skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr,
						       PKT_BUF_SZ,
						       PCI_DMA_FROMDEVICE);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the original skb up; replace it in the ring */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* only trust the hardware checksum when both the IP header
		 * and the TCP or UDP payload checked out good */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD) ||
		   csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			skb_checksum_none_assert(new_skb);

		if (rx->rxStatus & TYPHOON_RX_VLAN)
			__vlan_hwaccel_put_tag(new_skb,
					       ntohl(rx->vlanTag) & 0xffff);
		netif_receive_skb(new_skb);

		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1720 | |||
1721 | static void | ||
1722 | typhoon_fill_free_ring(struct typhoon *tp) | ||
1723 | { | ||
1724 | u32 i; | ||
1725 | |||
1726 | for(i = 0; i < RXENT_ENTRIES; i++) { | ||
1727 | struct rxbuff_ent *rxb = &tp->rxbuffers[i]; | ||
1728 | if(rxb->skb) | ||
1729 | continue; | ||
1730 | if(typhoon_alloc_rx_skb(tp, i) < 0) | ||
1731 | break; | ||
1732 | } | ||
1733 | } | ||
1734 | |||
/* NAPI poll handler.
 *
 * Drains any pending command responses, completes finished tx on the Lo
 * ring, then receives from the Hi and Lo rx rings (Hi first) within
 * @budget.  Refills the free-buffer ring if the card has emptied it.
 * When less than the full budget was used, polling is complete and the
 * interrupt mask is reopened.
 *
 * Returns the number of rx packets processed.
 */
static int
typhoon_poll(struct napi_struct *napi, int budget)
{
	struct typhoon *tp = container_of(napi, struct typhoon, napi);
	struct typhoon_indexes *indexes = tp->indexes;
	int work_done;

	/* make sure we see the card's latest index updates */
	rmb();
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
		typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	work_done = 0;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done += typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
			   		&indexes->rxHiCleared, budget);
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget - work_done);
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if (work_done < budget) {
		/* done polling -- unmask interrupts again */
		napi_complete(napi);
		iowrite32(TYPHOON_INTR_NONE,
				tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return work_done;
}
1775 | |||
/* Interrupt handler (shared line).
 *
 * Acks the interrupt, masks all card interrupts, and schedules the NAPI
 * poll, which does the real work and re-enables interrupts when done.
 * Returns IRQ_NONE if the card was not the source.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct typhoon *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->ioaddr;
	u32 intr_status;

	intr_status = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS);
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack everything we saw */
	iowrite32(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if (napi_schedule_prep(&tp->napi)) {
		/* mask further interrupts until the poll completes */
		iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__napi_schedule(&tp->napi);
	} else {
		netdev_err(dev, "Error, poll already scheduled\n");
	}
	return IRQ_HANDLED;
}
1799 | |||
1800 | static void | ||
1801 | typhoon_free_rx_rings(struct typhoon *tp) | ||
1802 | { | ||
1803 | u32 i; | ||
1804 | |||
1805 | for(i = 0; i < RXENT_ENTRIES; i++) { | ||
1806 | struct rxbuff_ent *rxb = &tp->rxbuffers[i]; | ||
1807 | if(rxb->skb) { | ||
1808 | pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ, | ||
1809 | PCI_DMA_FROMDEVICE); | ||
1810 | dev_kfree_skb(rxb->skb); | ||
1811 | rxb->skb = NULL; | ||
1812 | } | ||
1813 | } | ||
1814 | } | ||
1815 | |||
/* Put the card to sleep in PCI power state @state, armed to wake on
 * @events (little-endian wake-event mask).
 *
 * Issues ENABLE_WAKE_EVENTS and GOTO_SLEEP commands, waits for the card
 * to report SLEEPING, then drops carrier and powers the PCI device
 * down.  Returns 0 on success, a negative command error, -ETIMEDOUT if
 * the card never sleeps, or the pci_set_power_state() result.
 */
static int
typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
	xp_cmd.parm1 = events;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
			   err);
		return err;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0) {
		netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
		return err;
	}

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
		return -ETIMEDOUT;

	/* Since we cannot monitor the status of the link while sleeping,
	 * tell the world it went away.
	 */
	netif_carrier_off(tp->dev);

	pci_enable_wake(tp->pdev, state, 1);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, state);
}
1852 | |||
/* Wake the card from sleep and return it to the WAITING_FOR_HOST state.
 *
 * Restores PCI power/state, issues the WAKEUP boot command, and falls
 * back to a full reset (of @wait_type) when the card does not come up
 * or its sleep image is known to require a reset after wakeup.
 *
 * Returns 0 on success or the typhoon_reset() result.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	void __iomem *ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Post 2.x.x versions of the Sleep Image require a reset before
	 * we can download the Runtime Image. But let's not make users of
	 * the old firmware pay for the reset.
	 */
	iowrite32(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0 ||
	   (tp->capabilities & TYPHOON_WAKEUP_NEEDS_RESET))
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1873 | |||
/* Bring the card from reset/sleep to a fully running state.
 *
 * Initializes and fills the rings, downloads and boots the runtime
 * image, then issues the configuration command sequence: max packet
 * size, MAC address, irq coalescing off, transceiver, VLAN ethertype,
 * offload tasks, rx filter, and finally Tx/Rx enable.  On success the
 * card is marked Running and interrupts are enabled.
 *
 * On any failure the card is reset and the rings are torn down and
 * re-initialized; the negative error is returned.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		netdev_err(tp->dev, "cannot load runtime on 3XP\n");
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		netdev_err(tp->dev, "cannot boot 3XP\n");
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC goes down as two parms: first 2 bytes, then last 4 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* programs the rx filter from dev's flags/mc list */
	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	tp->card_state = Running;
	smp_wmb();

	iowrite32(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1965 | |||
/* Stop the running card and reset it (waiting per @wait_type).
 *
 * Disables interrupts and rx, drains outstanding tx for up to ~1/2 sec,
 * disables tx, snapshots the statistics so they survive the halt, then
 * halts and resets the 3XP.  Any tx packets still pending after the
 * reset are cleaned up on the host side.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset itself fails (halt/tx
 * timeouts are logged but not fatal).
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	void __iomem *ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		netdev_err(tp->dev, "unable to reset 3XP\n");
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2026 | |||
/* Tx watchdog: try to recover by resetting the card and restarting the
 * runtime.  If either step fails, reset once more and drop carrier so
 * the watchdog stops firing on a dead card.
 */
static void
typhoon_tx_timeout(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
		netdev_warn(dev, "could not reset in tx timeout\n");
		goto truly_dead;
	}

	/* If we ever start using the Hi ring, it will need cleaning too */
	typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
	typhoon_free_rx_rings(tp);

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "could not start runtime in tx timeout\n");
		goto truly_dead;
	}

	netif_wake_queue(dev);
	return;

truly_dead:
	/* Reset the hardware, and turn off carrier to avoid more timeouts */
	typhoon_reset(tp->ioaddr, NoWait);
	netif_carrier_off(dev);
}
2054 | |||
/* net_device open: load firmware, wake the card, grab the irq, and
 * start the runtime image.
 *
 * On failure past the wakeup, the error path tries to reboot the card
 * into its sleep image and put it back to sleep so a later open can
 * retry cleanly.  Returns 0 or a negative error.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);
	int err;

	err = typhoon_request_firmware(tp);
	if (err)
		goto out;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		netdev_err(dev, "unable to wakeup device\n");
		goto out_sleep;
	}

	err = request_irq(dev->irq, typhoon_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if(err < 0)
		goto out_sleep;

	napi_enable(&tp->napi);

	err = typhoon_start_runtime(tp);
	if(err < 0) {
		napi_disable(&tp->napi);
		goto out_irq;
	}

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* try to return the card to its sleeping state */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to reboot into sleep img\n");
		typhoon_reset(tp->ioaddr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to go back to sleep\n");

out:
	return err;
}
2103 | |||
/* net_device close: stop the runtime, release the irq and rx buffers,
 * then boot the sleep image and put the card into D3hot.  Failures are
 * logged but close always reports success.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&tp->napi);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		netdev_err(dev, "unable to stop runtime\n");

	/* Make sure there is no irq handler running on a different CPU. */
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		netdev_err(dev, "unable to boot sleep image\n");

	if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
		netdev_err(dev, "unable to put card to sleep\n");

	return 0;
}
2129 | |||
2130 | #ifdef CONFIG_PM | ||
/* PM resume: wake the card and restart the runtime for an interface
 * that was up at suspend time.  A downed interface resumes lazily when
 * it is next opened.  On failure the card is reset and -EBUSY returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "critical: could not wake up in resume\n");
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		netdev_err(dev, "critical: could not start runtime in resume\n");
		goto reset;
	}

	netif_device_attach(dev);
	return 0;

reset:
	typhoon_reset(tp->ioaddr, NoWait);
	return -EBUSY;
}
2159 | |||
/* Legacy PM suspend hook: stop the runtime image, reboot into the sleep
 * image with a minimal RX filter and our MAC address programmed for
 * wake-on-LAN, then enter the requested D-state.  Any failure resumes
 * the card and returns -EBUSY.  The exact command order below matters
 * to the firmware; do not reorder.
 */
static int
typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = netdev_priv(dev);
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* TYPHOON_OFFLOAD_VLAN is always on now, so this doesn't work */
	if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
		netdev_warn(dev, "cannot do WAKE_MAGIC with VLAN offloading\n");

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		netdev_err(dev, "unable to stop runtime\n");
		goto need_resume;
	}

	/* Drop our RX buffers and lay out fresh rings for the sleep image. */
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		netdev_err(dev, "unable to boot sleep image\n");
		goto need_resume;
	}

	/* Re-program our station address into the sleep image so wake
	 * filtering keys on the right MAC. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set mac address in suspend\n");
		goto need_resume;
	}

	/* While asleep, only directed and broadcast frames are of interest. */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		netdev_err(dev, "unable to set rx filter in suspend\n");
		goto need_resume;
	}

	if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
		netdev_err(dev, "unable to put card to sleep\n");
		goto need_resume;
	}

	return 0;

need_resume:
	/* A failure above leaves the card half-stopped; bring it all the
	 * way back up so the system stays usable. */
	typhoon_resume(pdev);
	return -EBUSY;
}
2217 | #endif | ||
2218 | |||
2219 | static int __devinit | ||
2220 | typhoon_test_mmio(struct pci_dev *pdev) | ||
2221 | { | ||
2222 | void __iomem *ioaddr = pci_iomap(pdev, 1, 128); | ||
2223 | int mode = 0; | ||
2224 | u32 val; | ||
2225 | |||
2226 | if(!ioaddr) | ||
2227 | goto out; | ||
2228 | |||
2229 | if(ioread32(ioaddr + TYPHOON_REG_STATUS) != | ||
2230 | TYPHOON_STATUS_WAITING_FOR_HOST) | ||
2231 | goto out_unmap; | ||
2232 | |||
2233 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); | ||
2234 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2235 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE); | ||
2236 | |||
2237 | /* Ok, see if we can change our interrupt status register by | ||
2238 | * sending ourselves an interrupt. If so, then MMIO works. | ||
2239 | * The 50usec delay is arbitrary -- it could probably be smaller. | ||
2240 | */ | ||
2241 | val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2242 | if((val & TYPHOON_INTR_SELF) == 0) { | ||
2243 | iowrite32(1, ioaddr + TYPHOON_REG_SELF_INTERRUPT); | ||
2244 | ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2245 | udelay(50); | ||
2246 | val = ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2247 | if(val & TYPHOON_INTR_SELF) | ||
2248 | mode = 1; | ||
2249 | } | ||
2250 | |||
2251 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK); | ||
2252 | iowrite32(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2253 | iowrite32(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE); | ||
2254 | ioread32(ioaddr + TYPHOON_REG_INTR_STATUS); | ||
2255 | |||
2256 | out_unmap: | ||
2257 | pci_iounmap(pdev, ioaddr); | ||
2258 | |||
2259 | out: | ||
2260 | if(!mode) | ||
2261 | pr_info("%s: falling back to port IO\n", pci_name(pdev)); | ||
2262 | return mode; | ||
2263 | } | ||
2264 | |||
/* net_device callbacks for this driver; wired up in typhoon_init_one(). */
static const struct net_device_ops typhoon_netdev_ops = {
	.ndo_open		= typhoon_open,
	.ndo_stop		= typhoon_close,
	.ndo_start_xmit		= typhoon_start_tx,
	.ndo_set_multicast_list	= typhoon_set_rx_mode,
	.ndo_tx_timeout		= typhoon_tx_timeout,
	.ndo_get_stats		= typhoon_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= typhoon_set_mac_address,
	.ndo_change_mtu		= eth_change_mtu,
};
2276 | |||
2277 | static int __devinit | ||
2278 | typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
2279 | { | ||
2280 | struct net_device *dev; | ||
2281 | struct typhoon *tp; | ||
2282 | int card_id = (int) ent->driver_data; | ||
2283 | void __iomem *ioaddr; | ||
2284 | void *shared; | ||
2285 | dma_addr_t shared_dma; | ||
2286 | struct cmd_desc xp_cmd; | ||
2287 | struct resp_desc xp_resp[3]; | ||
2288 | int err = 0; | ||
2289 | const char *err_msg; | ||
2290 | |||
2291 | dev = alloc_etherdev(sizeof(*tp)); | ||
2292 | if(dev == NULL) { | ||
2293 | err_msg = "unable to alloc new net device"; | ||
2294 | err = -ENOMEM; | ||
2295 | goto error_out; | ||
2296 | } | ||
2297 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2298 | |||
2299 | err = pci_enable_device(pdev); | ||
2300 | if(err < 0) { | ||
2301 | err_msg = "unable to enable device"; | ||
2302 | goto error_out_dev; | ||
2303 | } | ||
2304 | |||
2305 | err = pci_set_mwi(pdev); | ||
2306 | if(err < 0) { | ||
2307 | err_msg = "unable to set MWI"; | ||
2308 | goto error_out_disable; | ||
2309 | } | ||
2310 | |||
2311 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2312 | if(err < 0) { | ||
2313 | err_msg = "No usable DMA configuration"; | ||
2314 | goto error_out_mwi; | ||
2315 | } | ||
2316 | |||
2317 | /* sanity checks on IO and MMIO BARs | ||
2318 | */ | ||
2319 | if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) { | ||
2320 | err_msg = "region #1 not a PCI IO resource, aborting"; | ||
2321 | err = -ENODEV; | ||
2322 | goto error_out_mwi; | ||
2323 | } | ||
2324 | if(pci_resource_len(pdev, 0) < 128) { | ||
2325 | err_msg = "Invalid PCI IO region size, aborting"; | ||
2326 | err = -ENODEV; | ||
2327 | goto error_out_mwi; | ||
2328 | } | ||
2329 | if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | ||
2330 | err_msg = "region #1 not a PCI MMIO resource, aborting"; | ||
2331 | err = -ENODEV; | ||
2332 | goto error_out_mwi; | ||
2333 | } | ||
2334 | if(pci_resource_len(pdev, 1) < 128) { | ||
2335 | err_msg = "Invalid PCI MMIO region size, aborting"; | ||
2336 | err = -ENODEV; | ||
2337 | goto error_out_mwi; | ||
2338 | } | ||
2339 | |||
2340 | err = pci_request_regions(pdev, KBUILD_MODNAME); | ||
2341 | if(err < 0) { | ||
2342 | err_msg = "could not request regions"; | ||
2343 | goto error_out_mwi; | ||
2344 | } | ||
2345 | |||
2346 | /* map our registers | ||
2347 | */ | ||
2348 | if(use_mmio != 0 && use_mmio != 1) | ||
2349 | use_mmio = typhoon_test_mmio(pdev); | ||
2350 | |||
2351 | ioaddr = pci_iomap(pdev, use_mmio, 128); | ||
2352 | if (!ioaddr) { | ||
2353 | err_msg = "cannot remap registers, aborting"; | ||
2354 | err = -EIO; | ||
2355 | goto error_out_regions; | ||
2356 | } | ||
2357 | |||
2358 | /* allocate pci dma space for rx and tx descriptor rings | ||
2359 | */ | ||
2360 | shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared), | ||
2361 | &shared_dma); | ||
2362 | if(!shared) { | ||
2363 | err_msg = "could not allocate DMA memory"; | ||
2364 | err = -ENOMEM; | ||
2365 | goto error_out_remap; | ||
2366 | } | ||
2367 | |||
2368 | dev->irq = pdev->irq; | ||
2369 | tp = netdev_priv(dev); | ||
2370 | tp->shared = shared; | ||
2371 | tp->shared_dma = shared_dma; | ||
2372 | tp->pdev = pdev; | ||
2373 | tp->tx_pdev = pdev; | ||
2374 | tp->ioaddr = ioaddr; | ||
2375 | tp->tx_ioaddr = ioaddr; | ||
2376 | tp->dev = dev; | ||
2377 | |||
2378 | /* Init sequence: | ||
2379 | * 1) Reset the adapter to clear any bad juju | ||
2380 | * 2) Reload the sleep image | ||
2381 | * 3) Boot the sleep image | ||
2382 | * 4) Get the hardware address. | ||
2383 | * 5) Put the card to sleep. | ||
2384 | */ | ||
2385 | if (typhoon_reset(ioaddr, WaitSleep) < 0) { | ||
2386 | err_msg = "could not reset 3XP"; | ||
2387 | err = -EIO; | ||
2388 | goto error_out_dma; | ||
2389 | } | ||
2390 | |||
2391 | /* Now that we've reset the 3XP and are sure it's not going to | ||
2392 | * write all over memory, enable bus mastering, and save our | ||
2393 | * state for resuming after a suspend. | ||
2394 | */ | ||
2395 | pci_set_master(pdev); | ||
2396 | pci_save_state(pdev); | ||
2397 | |||
2398 | typhoon_init_interface(tp); | ||
2399 | typhoon_init_rings(tp); | ||
2400 | |||
2401 | if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) { | ||
2402 | err_msg = "cannot boot 3XP sleep image"; | ||
2403 | err = -EIO; | ||
2404 | goto error_out_reset; | ||
2405 | } | ||
2406 | |||
2407 | INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS); | ||
2408 | if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) { | ||
2409 | err_msg = "cannot read MAC address"; | ||
2410 | err = -EIO; | ||
2411 | goto error_out_reset; | ||
2412 | } | ||
2413 | |||
2414 | *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1)); | ||
2415 | *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2)); | ||
2416 | |||
2417 | if(!is_valid_ether_addr(dev->dev_addr)) { | ||
2418 | err_msg = "Could not obtain valid ethernet address, aborting"; | ||
2419 | goto error_out_reset; | ||
2420 | } | ||
2421 | |||
2422 | /* Read the Sleep Image version last, so the response is valid | ||
2423 | * later when we print out the version reported. | ||
2424 | */ | ||
2425 | INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS); | ||
2426 | if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) { | ||
2427 | err_msg = "Could not get Sleep Image version"; | ||
2428 | goto error_out_reset; | ||
2429 | } | ||
2430 | |||
2431 | tp->capabilities = typhoon_card_info[card_id].capabilities; | ||
2432 | tp->xcvr_select = TYPHOON_XCVR_AUTONEG; | ||
2433 | |||
2434 | /* Typhoon 1.0 Sleep Images return one response descriptor to the | ||
2435 | * READ_VERSIONS command. Those versions are OK after waking up | ||
2436 | * from sleep without needing a reset. Typhoon 1.1+ Sleep Images | ||
2437 | * seem to need a little extra help to get started. Since we don't | ||
2438 | * know how to nudge it along, just kick it. | ||
2439 | */ | ||
2440 | if(xp_resp[0].numDesc != 0) | ||
2441 | tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET; | ||
2442 | |||
2443 | if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) { | ||
2444 | err_msg = "cannot put adapter to sleep"; | ||
2445 | err = -EIO; | ||
2446 | goto error_out_reset; | ||
2447 | } | ||
2448 | |||
2449 | /* The chip-specific entries in the device structure. */ | ||
2450 | dev->netdev_ops = &typhoon_netdev_ops; | ||
2451 | netif_napi_add(dev, &tp->napi, typhoon_poll, 16); | ||
2452 | dev->watchdog_timeo = TX_TIMEOUT; | ||
2453 | |||
2454 | SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops); | ||
2455 | |||
2456 | /* We can handle scatter gather, up to 16 entries, and | ||
2457 | * we can do IP checksumming (only version 4, doh...) | ||
2458 | * | ||
2459 | * There's no way to turn off the RX VLAN offloading and stripping | ||
2460 | * on the current 3XP firmware -- it does not respect the offload | ||
2461 | * settings -- so we only allow the user to toggle the TX processing. | ||
2462 | */ | ||
2463 | dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | | ||
2464 | NETIF_F_HW_VLAN_TX; | ||
2465 | dev->features = dev->hw_features | | ||
2466 | NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM; | ||
2467 | |||
2468 | if(register_netdev(dev) < 0) { | ||
2469 | err_msg = "unable to register netdev"; | ||
2470 | goto error_out_reset; | ||
2471 | } | ||
2472 | |||
2473 | pci_set_drvdata(pdev, dev); | ||
2474 | |||
2475 | netdev_info(dev, "%s at %s 0x%llx, %pM\n", | ||
2476 | typhoon_card_info[card_id].name, | ||
2477 | use_mmio ? "MMIO" : "IO", | ||
2478 | (unsigned long long)pci_resource_start(pdev, use_mmio), | ||
2479 | dev->dev_addr); | ||
2480 | |||
2481 | /* xp_resp still contains the response to the READ_VERSIONS command. | ||
2482 | * For debugging, let the user know what version he has. | ||
2483 | */ | ||
2484 | if(xp_resp[0].numDesc == 0) { | ||
2485 | /* This is the Typhoon 1.0 type Sleep Image, last 16 bits | ||
2486 | * of version is Month/Day of build. | ||
2487 | */ | ||
2488 | u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff; | ||
2489 | netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n", | ||
2490 | monthday >> 8, monthday & 0xff); | ||
2491 | } else if(xp_resp[0].numDesc == 2) { | ||
2492 | /* This is the Typhoon 1.1+ type Sleep Image | ||
2493 | */ | ||
2494 | u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2); | ||
2495 | u8 *ver_string = (u8 *) &xp_resp[1]; | ||
2496 | ver_string[25] = 0; | ||
2497 | netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n", | ||
2498 | sleep_ver >> 24, (sleep_ver >> 12) & 0xfff, | ||
2499 | sleep_ver & 0xfff, ver_string); | ||
2500 | } else { | ||
2501 | netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n", | ||
2502 | xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2)); | ||
2503 | } | ||
2504 | |||
2505 | return 0; | ||
2506 | |||
2507 | error_out_reset: | ||
2508 | typhoon_reset(ioaddr, NoWait); | ||
2509 | |||
2510 | error_out_dma: | ||
2511 | pci_free_consistent(pdev, sizeof(struct typhoon_shared), | ||
2512 | shared, shared_dma); | ||
2513 | error_out_remap: | ||
2514 | pci_iounmap(pdev, ioaddr); | ||
2515 | error_out_regions: | ||
2516 | pci_release_regions(pdev); | ||
2517 | error_out_mwi: | ||
2518 | pci_clear_mwi(pdev); | ||
2519 | error_out_disable: | ||
2520 | pci_disable_device(pdev); | ||
2521 | error_out_dev: | ||
2522 | free_netdev(dev); | ||
2523 | error_out: | ||
2524 | pr_err("%s: %s\n", pci_name(pdev), err_msg); | ||
2525 | return err; | ||
2526 | } | ||
2527 | |||
2528 | static void __devexit | ||
2529 | typhoon_remove_one(struct pci_dev *pdev) | ||
2530 | { | ||
2531 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2532 | struct typhoon *tp = netdev_priv(dev); | ||
2533 | |||
2534 | unregister_netdev(dev); | ||
2535 | pci_set_power_state(pdev, PCI_D0); | ||
2536 | pci_restore_state(pdev); | ||
2537 | typhoon_reset(tp->ioaddr, NoWait); | ||
2538 | pci_iounmap(pdev, tp->ioaddr); | ||
2539 | pci_free_consistent(pdev, sizeof(struct typhoon_shared), | ||
2540 | tp->shared, tp->shared_dma); | ||
2541 | pci_release_regions(pdev); | ||
2542 | pci_clear_mwi(pdev); | ||
2543 | pci_disable_device(pdev); | ||
2544 | pci_set_drvdata(pdev, NULL); | ||
2545 | free_netdev(dev); | ||
2546 | } | ||
2547 | |||
/* PCI driver glue; the legacy suspend/resume hooks are only wired up
 * when power management is configured in. */
static struct pci_driver typhoon_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
#endif
};
2558 | |||
/* Module load: register the PCI driver; all per-adapter setup happens
 * in typhoon_init_one(). */
static int __init
typhoon_init(void)
{
	return pci_register_driver(&typhoon_driver);
}
2564 | |||
2565 | static void __exit | ||
2566 | typhoon_cleanup(void) | ||
2567 | { | ||
2568 | if (typhoon_fw) | ||
2569 | release_firmware(typhoon_fw); | ||
2570 | pci_unregister_driver(&typhoon_driver); | ||
2571 | } | ||
2572 | |||
/* Hook module load/unload to driver (un)registration. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);
diff --git a/drivers/net/ethernet/3com/typhoon.h b/drivers/net/ethernet/3com/typhoon.h new file mode 100644 index 000000000000..88187fc84aa3 --- /dev/null +++ b/drivers/net/ethernet/3com/typhoon.h | |||
@@ -0,0 +1,624 @@ | |||
1 | /* typhoon.h: chip info for the 3Com 3CR990 family of controllers */ | ||
2 | /* | ||
3 | Written 2002-2003 by David Dillow <dave@thedillows.org> | ||
4 | |||
5 | This software may be used and distributed according to the terms of | ||
6 | the GNU General Public License (GPL), incorporated herein by reference. | ||
7 | Drivers based on or derived from this code fall under the GPL and must | ||
8 | retain the authorship, copyright and license notice. This file is not | ||
9 | a complete program and may only be used when the entire operating | ||
10 | system is licensed under the GPL. | ||
11 | |||
12 | This software is available on a public web site. It may enable | ||
13 | cryptographic capabilities of the 3Com hardware, and may be | ||
14 | exported from the United States under License Exception "TSU" | ||
15 | pursuant to 15 C.F.R. Section 740.13(e). | ||
16 | |||
17 | This work was funded by the National Library of Medicine under | ||
18 | the Department of Energy project number 0274DD06D1 and NLM project | ||
19 | number Y1-LM-2015-01. | ||
20 | */ | ||
21 | |||
/* All Typhoon ring positions are specified in bytes, and point to the
23 | * first "clean" entry in the ring -- ie the next entry we use for whatever | ||
24 | * purpose. | ||
25 | */ | ||
26 | |||
/* The Typhoon basic ring
 * ringBase: where this ring lives (our virtual address)
 * lastWrite: the next entry we'll use (a byte offset into the ring,
 *            per the note above -- not an entry index)
 */
struct basic_ring {
	u8 *ringBase;
	u32 lastWrite;
};
35 | |||
/* The Typhoon transmit ring -- same as a basic ring, plus:
 * lastRead: where we're at in regard to cleaning up the ring
 * writeRegister: register to use for writing (different for Hi & Lo rings)
 */
struct transmit_ring {
	u8 *ringBase;
	u32 lastWrite;
	u32 lastRead;
	int writeRegister;
};
46 | |||
/* The host<->Typhoon ring index structure
 * This indicates the current positions in the rings
 *
 * All values must be in little endian format for the 3XP
 *
 * rxHiCleared: entry we've cleared to in the Hi receive ring
 * rxLoCleared: entry we've cleared to in the Lo receive ring
 * rxBuffReady: next entry we'll put a free buffer in
 * respCleared: entry we've cleared to in the response ring
 *
 * txLoCleared: entry the NIC has cleared to in the Lo transmit ring
 * txHiCleared: entry the NIC has cleared to in the Hi transmit ring
 * rxLoReady: entry the NIC has filled to in the Lo receive ring
 * rxBuffCleared: entry the NIC has cleared in the free buffer ring
 * cmdCleared: entry the NIC has cleared in the command ring
 * respReady: entry the NIC has filled to in the response ring
 * rxHiReady: entry the NIC has filled to in the Hi receive ring
 *
 * volatile because each half of the structure is updated asynchronously
 * by the other side (NIC or host); __packed because the layout is
 * shared with the firmware.
 */
struct typhoon_indexes {
	/* The first four are written by the host, and read by the NIC */
	volatile __le32 rxHiCleared;
	volatile __le32 rxLoCleared;
	volatile __le32 rxBuffReady;
	volatile __le32 respCleared;

	/* The remaining are written by the NIC, and read by the host */
	volatile __le32 txLoCleared;
	volatile __le32 txHiCleared;
	volatile __le32 rxLoReady;
	volatile __le32 rxBuffCleared;
	volatile __le32 cmdCleared;
	volatile __le32 respReady;
	volatile __le32 rxHiReady;
} __packed;
81 | |||
/* The host<->Typhoon interface
 * Our means of communicating where things are
 *
 * All values must be in little endian format for the 3XP
 *
 * ringIndex: 64 bit bus address of the index structure
 * txLoAddr: 64 bit bus address of the Lo transmit ring
 * txLoSize: size (in bytes) of the Lo transmit ring
 * txHi*: as above for the Hi priority transmit ring
 * rxLo*: as above for the Lo priority receive ring
 * rxBuff*: as above for the free buffer ring
 * cmd*: as above for the command ring
 * resp*: as above for the response ring
 * zeroAddr: 64 bit bus address of a zero word (for DMA)
 * rxHi*: as above for the Hi Priority receive ring
 *
 * While there is room for 64 bit addresses, current versions of the 3XP
 * only do 32 bit addresses, so the *Hi for each of the above will always
 * be zero.
 */
struct typhoon_interface {
	__le32 ringIndex;
	__le32 ringIndexHi;
	__le32 txLoAddr;
	__le32 txLoAddrHi;
	__le32 txLoSize;
	__le32 txHiAddr;
	__le32 txHiAddrHi;
	__le32 txHiSize;
	__le32 rxLoAddr;
	__le32 rxLoAddrHi;
	__le32 rxLoSize;
	__le32 rxBuffAddr;
	__le32 rxBuffAddrHi;
	__le32 rxBuffSize;
	__le32 cmdAddr;
	__le32 cmdAddrHi;
	__le32 cmdSize;
	__le32 respAddr;
	__le32 respAddrHi;
	__le32 respSize;
	__le32 zeroAddr;
	__le32 zeroAddrHi;
	__le32 rxHiAddr;
	__le32 rxHiAddrHi;
	__le32 rxHiSize;
} __packed;
129 | |||
/* The Typhoon transmit/fragment descriptor
 *
 * A packet is described by a packet descriptor, followed by option descriptors,
 * if any, then one or more fragment descriptors.
 *
 * Packet descriptor:
 * flags: Descriptor type
 * len: zero, or length of this packet
 * addr*: 8 bytes of opaque data to the firmware -- for skb pointer
 * processFlags: Determine offload tasks to perform on this packet.
 *
 * Fragment descriptor:
 * flags: Descriptor type
 * len: length of this fragment
 * addr: low bytes of DMA address for this part of the packet
 * addrHi: hi bytes of DMA address for this part of the packet
 * processFlags: must be zero
 *
 * TYPHOON_DESC_VALID is not mentioned in their docs, but their Linux
 * driver uses it.
 */
struct tx_desc {
	u8 flags;
/* low 3 bits: descriptor type */
#define TYPHOON_TYPE_MASK	0x07
#define TYPHOON_FRAG_DESC	0x00
#define TYPHOON_TX_DESC		0x01
#define TYPHOON_CMD_DESC	0x02
#define TYPHOON_OPT_DESC	0x03
#define TYPHOON_RX_DESC		0x04
#define TYPHOON_RESP_DESC	0x05
/* high nibble: option descriptor subtype */
#define TYPHOON_OPT_TYPE_MASK	0xf0
#define TYPHOON_OPT_IPSEC	0x00
#define TYPHOON_OPT_TCP_SEG	0x10
#define TYPHOON_CMD_RESPOND	0x40
#define TYPHOON_RESP_ERROR	0x40
#define TYPHOON_RX_ERROR	0x40
#define TYPHOON_DESC_VALID	0x80
	u8 numDesc;
	__le16 len;
	union {
		struct {
			__le32 addr;
			__le32 addrHi;
		} frag;
		u64 tx_addr;	/* opaque for hardware, for TX_DESC */
	};
	__le32 processFlags;
#define TYPHOON_TX_PF_NO_CRC		cpu_to_le32(0x00000001)
#define TYPHOON_TX_PF_IP_CHKSUM		cpu_to_le32(0x00000002)
#define TYPHOON_TX_PF_TCP_CHKSUM	cpu_to_le32(0x00000004)
#define TYPHOON_TX_PF_TCP_SEGMENT	cpu_to_le32(0x00000008)
#define TYPHOON_TX_PF_INSERT_VLAN	cpu_to_le32(0x00000010)
#define TYPHOON_TX_PF_IPSEC		cpu_to_le32(0x00000020)
#define TYPHOON_TX_PF_VLAN_PRIORITY	cpu_to_le32(0x00000040)
#define TYPHOON_TX_PF_UDP_CHKSUM	cpu_to_le32(0x00000080)
#define TYPHOON_TX_PF_PAD_FRAME		cpu_to_le32(0x00000100)
#define TYPHOON_TX_PF_RESERVED		cpu_to_le32(0x00000e00)
#define TYPHOON_TX_PF_VLAN_MASK		cpu_to_le32(0x0ffff000)
#define TYPHOON_TX_PF_INTERNAL		cpu_to_le32(0xf0000000)
#define TYPHOON_TX_PF_VLAN_TAG_SHIFT	12
} __packed;
191 | |||
/* The TCP Segmentation offload option descriptor
 *
 * flags: descriptor type
 * numDesc: must be 1
 * mss_flags: bits 0-11 (little endian) are MSS, 12 is first TSO descriptor
 *			13 is last TSO descriptor, set both if only one TSO
 * respAddrLo: low bytes of address of the bytesTx field of this descriptor
 * bytesTx: total number of bytes in this TSO request
 * status: 0 on completion
 */
struct tcpopt_desc {
	u8 flags;
	u8 numDesc;
	__le16 mss_flags;
#define TYPHOON_TSO_FIRST		cpu_to_le16(0x1000)
#define TYPHOON_TSO_LAST		cpu_to_le16(0x2000)
	__le32 respAddrLo;
	__le32 bytesTx;
	__le32 status;
} __packed;
212 | |||
/* The IPSEC Offload descriptor
 *
 * flags: descriptor type
 * numDesc: must be 1
 * ipsecFlags: bit 0: 0 -- generate IV, 1 -- use supplied IV
 * sa1, sa2: Security Association IDs for this packet
 * reserved: set to 0
 *
 * All multi-byte fields are little endian for the NIC.
 */
struct ipsec_desc {
	u8 flags;
	u8 numDesc;
	__le16 ipsecFlags;
#define TYPHOON_IPSEC_GEN_IV	cpu_to_le16(0x0000)
#define TYPHOON_IPSEC_USE_IV	cpu_to_le16(0x0001)
	__le32 sa1;
	__le32 sa2;
	__le32 reserved;
} __packed;
231 | |||
/* The Typhoon receive descriptor (Updated by NIC)
 *
 * flags: Descriptor type, error indication
 * numDesc: Always zero
 * frameLen: the size of the packet received
 * addr: low 32 bits of the virtual addr passed in for this buffer
 * addrHi: high 32 bits of the virtual addr passed in for this buffer
 * rxStatus: Error if set in flags, otherwise result of offload processing
 * filterResults: results of filtering on packet, not used
 * ipsecResults: Results of IPSEC processing
 * vlanTag: the 802.1q TCI from the packet
 */
struct rx_desc {
	u8 flags;
	u8 numDesc;
	__le16 frameLen;
	u32 addr;	/* opaque, comes from virtAddr */
	u32 addrHi;	/* opaque, comes from virtAddrHi */
	__le32 rxStatus;
/* error codes, valid only when TYPHOON_RX_ERROR is set in flags */
#define TYPHOON_RX_ERR_INTERNAL		cpu_to_le32(0x00000000)
#define TYPHOON_RX_ERR_FIFO_UNDERRUN	cpu_to_le32(0x00000001)
#define TYPHOON_RX_ERR_BAD_SSD		cpu_to_le32(0x00000002)
#define TYPHOON_RX_ERR_RUNT		cpu_to_le32(0x00000003)
#define TYPHOON_RX_ERR_CRC		cpu_to_le32(0x00000004)
#define TYPHOON_RX_ERR_OVERSIZE		cpu_to_le32(0x00000005)
#define TYPHOON_RX_ERR_ALIGN		cpu_to_le32(0x00000006)
#define TYPHOON_RX_ERR_DRIBBLE		cpu_to_le32(0x00000007)
/* offload processing results otherwise */
#define TYPHOON_RX_PROTO_MASK		cpu_to_le32(0x00000003)
#define TYPHOON_RX_PROTO_UNKNOWN	cpu_to_le32(0x00000000)
#define TYPHOON_RX_PROTO_IP		cpu_to_le32(0x00000001)
#define TYPHOON_RX_PROTO_IPX		cpu_to_le32(0x00000002)
#define TYPHOON_RX_VLAN			cpu_to_le32(0x00000004)
#define TYPHOON_RX_IP_FRAG		cpu_to_le32(0x00000008)
#define TYPHOON_RX_IPSEC		cpu_to_le32(0x00000010)
#define TYPHOON_RX_IP_CHK_FAIL		cpu_to_le32(0x00000020)
#define TYPHOON_RX_TCP_CHK_FAIL		cpu_to_le32(0x00000040)
#define TYPHOON_RX_UDP_CHK_FAIL		cpu_to_le32(0x00000080)
#define TYPHOON_RX_IP_CHK_GOOD		cpu_to_le32(0x00000100)
#define TYPHOON_RX_TCP_CHK_GOOD		cpu_to_le32(0x00000200)
#define TYPHOON_RX_UDP_CHK_GOOD		cpu_to_le32(0x00000400)
	__le16 filterResults;
#define TYPHOON_RX_FILTER_MASK		cpu_to_le16(0x7fff)
#define TYPHOON_RX_FILTERED		cpu_to_le16(0x8000)
	__le16 ipsecResults;
#define TYPHOON_RX_OUTER_AH_GOOD	cpu_to_le16(0x0001)
#define TYPHOON_RX_OUTER_ESP_GOOD	cpu_to_le16(0x0002)
#define TYPHOON_RX_INNER_AH_GOOD	cpu_to_le16(0x0004)
#define TYPHOON_RX_INNER_ESP_GOOD	cpu_to_le16(0x0008)
#define TYPHOON_RX_OUTER_AH_FAIL	cpu_to_le16(0x0010)
#define TYPHOON_RX_OUTER_ESP_FAIL	cpu_to_le16(0x0020)
#define TYPHOON_RX_INNER_AH_FAIL	cpu_to_le16(0x0040)
#define TYPHOON_RX_INNER_ESP_FAIL	cpu_to_le16(0x0080)
#define TYPHOON_RX_UNKNOWN_SA		cpu_to_le16(0x0100)
#define TYPHOON_RX_ESP_FORMAT_ERR	cpu_to_le16(0x0200)
	__be32 vlanTag;
} __packed;
288 | |||
/* The Typhoon free buffer descriptor, used to give a buffer to the NIC
 *
 * physAddr: low 32 bits of the bus address of the buffer
 * physAddrHi: high 32 bits of the bus address of the buffer, always zero
 * virtAddr: low 32 bits of the skb address
 * virtAddrHi: high 32 bits of the skb address, always zero
 *
 * the virt* address is basically two 32 bit cookies, just passed back
 * from the NIC (which is why they are plain u32, not __le32)
 */
struct rx_free {
	__le32 physAddr;
	__le32 physAddrHi;
	u32 virtAddr;
	u32 virtAddrHi;
} __packed;
305 | |||
/* The Typhoon command descriptor, used for commands and responses
 *
 * flags: descriptor type
 * numDesc: number of descriptors following in this command/response,
 *				ie, zero for a one descriptor command
 * cmd: the command
 * seqNo: sequence number (unused)
 * parm1: use varies by command
 * parm2: use varies by command
 * parm3: use varies by command
 */
struct cmd_desc {
	u8 flags;
	u8 numDesc;
	__le16 cmd;
#define TYPHOON_CMD_TX_ENABLE		cpu_to_le16(0x0001)
#define TYPHOON_CMD_TX_DISABLE		cpu_to_le16(0x0002)
#define TYPHOON_CMD_RX_ENABLE		cpu_to_le16(0x0003)
#define TYPHOON_CMD_RX_DISABLE		cpu_to_le16(0x0004)
#define TYPHOON_CMD_SET_RX_FILTER	cpu_to_le16(0x0005)
#define TYPHOON_CMD_READ_STATS		cpu_to_le16(0x0007)
#define TYPHOON_CMD_XCVR_SELECT		cpu_to_le16(0x0013)
#define TYPHOON_CMD_SET_MAX_PKT_SIZE	cpu_to_le16(0x001a)
#define TYPHOON_CMD_READ_MEDIA_STATUS	cpu_to_le16(0x001b)
#define TYPHOON_CMD_GOTO_SLEEP		cpu_to_le16(0x0023)
#define TYPHOON_CMD_SET_MULTICAST_HASH	cpu_to_le16(0x0025)
#define TYPHOON_CMD_SET_MAC_ADDRESS	cpu_to_le16(0x0026)
#define TYPHOON_CMD_READ_MAC_ADDRESS	cpu_to_le16(0x0027)
#define TYPHOON_CMD_VLAN_TYPE_WRITE	cpu_to_le16(0x002b)
#define TYPHOON_CMD_CREATE_SA		cpu_to_le16(0x0034)
#define TYPHOON_CMD_DELETE_SA		cpu_to_le16(0x0035)
#define TYPHOON_CMD_READ_VERSIONS	cpu_to_le16(0x0043)
#define TYPHOON_CMD_IRQ_COALESCE_CTRL	cpu_to_le16(0x0045)
#define TYPHOON_CMD_ENABLE_WAKE_EVENTS	cpu_to_le16(0x0049)
#define TYPHOON_CMD_SET_OFFLOAD_TASKS	cpu_to_le16(0x004f)
#define TYPHOON_CMD_HELLO_RESP		cpu_to_le16(0x0057)
#define TYPHOON_CMD_HALT		cpu_to_le16(0x005d)
#define TYPHOON_CMD_READ_IPSEC_INFO	cpu_to_le16(0x005e)
#define TYPHOON_CMD_GET_IPSEC_ENABLE	cpu_to_le16(0x0067)
#define TYPHOON_CMD_GET_CMD_LVL		cpu_to_le16(0x0069)
	/* NOTE(review): plain u16 (resp_desc uses __le16); the driver only
	 * ever writes the constant 1 here, so byte order is moot. */
	u16 seqNo;
	__le16 parm1;
	__le32 parm2;
	__le32 parm3;
} __packed;
351 | |||
/* The Typhoon response descriptor, see command descriptor for details
 */
struct resp_desc {
	u8 flags;
	u8 numDesc;
	__le16 cmd;
	__le16 seqNo;
	__le16 parm1;
	__le32 parm2;
	__le32 parm3;
} __packed;
363 | |||
/* Initialize a command descriptor for a command that will not generate a
 * response from the 3XP: zero the descriptor, mark it as a valid command
 * descriptor, and set the opcode. Multi-statement body is wrapped in
 * do { } while (0) so the macro behaves as a single statement.
 */
#define INIT_COMMAND_NO_RESPONSE(x, command) \
	do { struct cmd_desc *_ptr = (x); \
	memset(_ptr, 0, sizeof(struct cmd_desc)); \
	_ptr->flags = TYPHOON_CMD_DESC | TYPHOON_DESC_VALID; \
	_ptr->cmd = (command); \
	} while (0)
370 | |||
/* Initialize a command descriptor for a command that expects a response:
 * like INIT_COMMAND_NO_RESPONSE, but also sets TYPHOON_CMD_RESPOND and a
 * nonzero seqNo (always 1) so the reply can be recognized.
 */
#define INIT_COMMAND_WITH_RESPONSE(x, command) \
	do { struct cmd_desc *_ptr = (x); \
	memset(_ptr, 0, sizeof(struct cmd_desc)); \
	_ptr->flags = TYPHOON_CMD_RESPOND | TYPHOON_CMD_DESC; \
	_ptr->flags |= TYPHOON_DESC_VALID; \
	_ptr->cmd = (command); \
	_ptr->seqNo = 1; \
	} while (0)
380 | |||
/* TYPHOON_CMD_SET_RX_FILTER filter bits (cmd.parm1). These may be OR'd
 * together. (The "PROMISCOUS" misspelling is part of the established API
 * name and is kept as-is.)
 */
#define TYPHOON_RX_FILTER_DIRECTED	cpu_to_le16(0x0001)
#define TYPHOON_RX_FILTER_ALL_MCAST	cpu_to_le16(0x0002)
#define TYPHOON_RX_FILTER_BROADCAST	cpu_to_le16(0x0004)
#define TYPHOON_RX_FILTER_PROMISCOUS	cpu_to_le16(0x0008)
#define TYPHOON_RX_FILTER_MCAST_HASH	cpu_to_le16(0x0010)
388 | |||
389 | /* TYPHOON_CMD_READ_STATS response format | ||
390 | */ | ||
391 | struct stats_resp { | ||
392 | u8 flags; | ||
393 | u8 numDesc; | ||
394 | __le16 cmd; | ||
395 | __le16 seqNo; | ||
396 | __le16 unused; | ||
397 | __le32 txPackets; | ||
398 | __le64 txBytes; | ||
399 | __le32 txDeferred; | ||
400 | __le32 txLateCollisions; | ||
401 | __le32 txCollisions; | ||
402 | __le32 txCarrierLost; | ||
403 | __le32 txMultipleCollisions; | ||
404 | __le32 txExcessiveCollisions; | ||
405 | __le32 txFifoUnderruns; | ||
406 | __le32 txMulticastTxOverflows; | ||
407 | __le32 txFiltered; | ||
408 | __le32 rxPacketsGood; | ||
409 | __le64 rxBytesGood; | ||
410 | __le32 rxFifoOverruns; | ||
411 | __le32 BadSSD; | ||
412 | __le32 rxCrcErrors; | ||
413 | __le32 rxOversized; | ||
414 | __le32 rxBroadcast; | ||
415 | __le32 rxMulticast; | ||
416 | __le32 rxOverflow; | ||
417 | __le32 rxFiltered; | ||
418 | __le32 linkStatus; | ||
419 | #define TYPHOON_LINK_STAT_MASK cpu_to_le32(0x00000001) | ||
420 | #define TYPHOON_LINK_GOOD cpu_to_le32(0x00000001) | ||
421 | #define TYPHOON_LINK_BAD cpu_to_le32(0x00000000) | ||
422 | #define TYPHOON_LINK_SPEED_MASK cpu_to_le32(0x00000002) | ||
423 | #define TYPHOON_LINK_100MBPS cpu_to_le32(0x00000002) | ||
424 | #define TYPHOON_LINK_10MBPS cpu_to_le32(0x00000000) | ||
425 | #define TYPHOON_LINK_DUPLEX_MASK cpu_to_le32(0x00000004) | ||
426 | #define TYPHOON_LINK_FULL_DUPLEX cpu_to_le32(0x00000004) | ||
427 | #define TYPHOON_LINK_HALF_DUPLEX cpu_to_le32(0x00000000) | ||
428 | __le32 unused2; | ||
429 | __le32 unused3; | ||
430 | } __packed; | ||
431 | |||
/* TYPHOON_CMD_XCVR_SELECT transceiver settings (resp.parm1)
 */
#define TYPHOON_XCVR_10HALF	cpu_to_le16(0x0000)
#define TYPHOON_XCVR_10FULL	cpu_to_le16(0x0001)
#define TYPHOON_XCVR_100HALF	cpu_to_le16(0x0002)
#define TYPHOON_XCVR_100FULL	cpu_to_le16(0x0003)
#define TYPHOON_XCVR_AUTONEG	cpu_to_le16(0x0004)

/* TYPHOON_CMD_READ_MEDIA_STATUS bits (resp.parm1)
 */
#define TYPHOON_MEDIA_STAT_CRC_STRIP_DISABLE	cpu_to_le16(0x0004)
#define TYPHOON_MEDIA_STAT_COLLISION_DETECT	cpu_to_le16(0x0010)
#define TYPHOON_MEDIA_STAT_CARRIER_SENSE	cpu_to_le16(0x0020)
#define TYPHOON_MEDIA_STAT_POLARITY_REV		cpu_to_le16(0x0400)
#define TYPHOON_MEDIA_STAT_NO_LINK		cpu_to_le16(0x0800)

/* TYPHOON_CMD_SET_MULTICAST_HASH enable values (cmd.parm1)
 */
#define TYPHOON_MCAST_HASH_DISABLE	cpu_to_le16(0x0000)
#define TYPHOON_MCAST_HASH_ENABLE	cpu_to_le16(0x0001)
#define TYPHOON_MCAST_HASH_SET		cpu_to_le16(0x0002)
453 | |||
454 | /* TYPHOON_CMD_CREATE_SA descriptor and settings | ||
455 | */ | ||
456 | struct sa_descriptor { | ||
457 | u8 flags; | ||
458 | u8 numDesc; | ||
459 | u16 cmd; | ||
460 | u16 seqNo; | ||
461 | u16 mode; | ||
462 | #define TYPHOON_SA_MODE_NULL cpu_to_le16(0x0000) | ||
463 | #define TYPHOON_SA_MODE_AH cpu_to_le16(0x0001) | ||
464 | #define TYPHOON_SA_MODE_ESP cpu_to_le16(0x0002) | ||
465 | u8 hashFlags; | ||
466 | #define TYPHOON_SA_HASH_ENABLE 0x01 | ||
467 | #define TYPHOON_SA_HASH_SHA1 0x02 | ||
468 | #define TYPHOON_SA_HASH_MD5 0x04 | ||
469 | u8 direction; | ||
470 | #define TYPHOON_SA_DIR_RX 0x00 | ||
471 | #define TYPHOON_SA_DIR_TX 0x01 | ||
472 | u8 encryptionFlags; | ||
473 | #define TYPHOON_SA_ENCRYPT_ENABLE 0x01 | ||
474 | #define TYPHOON_SA_ENCRYPT_DES 0x02 | ||
475 | #define TYPHOON_SA_ENCRYPT_3DES 0x00 | ||
476 | #define TYPHOON_SA_ENCRYPT_3DES_2KEY 0x00 | ||
477 | #define TYPHOON_SA_ENCRYPT_3DES_3KEY 0x04 | ||
478 | #define TYPHOON_SA_ENCRYPT_CBC 0x08 | ||
479 | #define TYPHOON_SA_ENCRYPT_ECB 0x00 | ||
480 | u8 specifyIndex; | ||
481 | #define TYPHOON_SA_SPECIFY_INDEX 0x01 | ||
482 | #define TYPHOON_SA_GENERATE_INDEX 0x00 | ||
483 | u32 SPI; | ||
484 | u32 destAddr; | ||
485 | u32 destMask; | ||
486 | u8 integKey[20]; | ||
487 | u8 confKey[24]; | ||
488 | u32 index; | ||
489 | u32 unused; | ||
490 | u32 unused2; | ||
491 | } __packed; | ||
492 | |||
/* TYPHOON_CMD_SET_OFFLOAD_TASKS bits (cmd.parm2 selects Tx offloads,
 * cmd.parm3 selects Rx offloads). This is all for IPv4.
 */
#define TYPHOON_OFFLOAD_TCP_CHKSUM	cpu_to_le32(0x00000002)
#define TYPHOON_OFFLOAD_UDP_CHKSUM	cpu_to_le32(0x00000004)
#define TYPHOON_OFFLOAD_IP_CHKSUM	cpu_to_le32(0x00000008)
#define TYPHOON_OFFLOAD_IPSEC		cpu_to_le32(0x00000010)
#define TYPHOON_OFFLOAD_BCAST_THROTTLE	cpu_to_le32(0x00000020)
#define TYPHOON_OFFLOAD_DHCP_PREVENT	cpu_to_le32(0x00000040)
#define TYPHOON_OFFLOAD_VLAN		cpu_to_le32(0x00000080)
#define TYPHOON_OFFLOAD_FILTERING	cpu_to_le32(0x00000100)
#define TYPHOON_OFFLOAD_TCP_SEGMENT	cpu_to_le32(0x00000200)

/* TYPHOON_CMD_ENABLE_WAKE_EVENTS bits (cmd.parm1)
 */
#define TYPHOON_WAKE_MAGIC_PKT		cpu_to_le16(0x01)
#define TYPHOON_WAKE_LINK_EVENT		cpu_to_le16(0x02)
#define TYPHOON_WAKE_ICMP_ECHO		cpu_to_le16(0x04)
#define TYPHOON_WAKE_ARP		cpu_to_le16(0x08)
512 | |||
513 | /* These are used to load the firmware image on the NIC | ||
514 | */ | ||
515 | struct typhoon_file_header { | ||
516 | u8 tag[8]; | ||
517 | __le32 version; | ||
518 | __le32 numSections; | ||
519 | __le32 startAddr; | ||
520 | __le32 hmacDigest[5]; | ||
521 | } __packed; | ||
522 | |||
523 | struct typhoon_section_header { | ||
524 | __le32 len; | ||
525 | u16 checksum; | ||
526 | u16 reserved; | ||
527 | __le32 startAddr; | ||
528 | } __packed; | ||
529 | |||
/* The Typhoon register offsets, followed by the per-role aliases for the
 * general purpose Host2ARM / ARM2Host mailbox registers. The same mailbox
 * registers are reused with different meanings during boot, firmware
 * download, and normal operation.
 */
#define TYPHOON_REG_SOFT_RESET			0x00
#define TYPHOON_REG_INTR_STATUS			0x04
#define TYPHOON_REG_INTR_ENABLE			0x08
#define TYPHOON_REG_INTR_MASK			0x0c
#define TYPHOON_REG_SELF_INTERRUPT		0x10
#define TYPHOON_REG_HOST2ARM7			0x14
#define TYPHOON_REG_HOST2ARM6			0x18
#define TYPHOON_REG_HOST2ARM5			0x1c
#define TYPHOON_REG_HOST2ARM4			0x20
#define TYPHOON_REG_HOST2ARM3			0x24
#define TYPHOON_REG_HOST2ARM2			0x28
#define TYPHOON_REG_HOST2ARM1			0x2c
#define TYPHOON_REG_HOST2ARM0			0x30
#define TYPHOON_REG_ARM2HOST3			0x34
#define TYPHOON_REG_ARM2HOST2			0x38
#define TYPHOON_REG_ARM2HOST1			0x3c
#define TYPHOON_REG_ARM2HOST0			0x40

/* Mailbox roles while booting from host memory */
#define TYPHOON_REG_BOOT_DATA_LO		TYPHOON_REG_HOST2ARM5
#define TYPHOON_REG_BOOT_DATA_HI		TYPHOON_REG_HOST2ARM4
#define TYPHOON_REG_BOOT_DEST_ADDR		TYPHOON_REG_HOST2ARM3
#define TYPHOON_REG_BOOT_CHECKSUM		TYPHOON_REG_HOST2ARM2
#define TYPHOON_REG_BOOT_LENGTH			TYPHOON_REG_HOST2ARM1

/* Mailbox roles while downloading the runtime image */
#define TYPHOON_REG_DOWNLOAD_BOOT_ADDR		TYPHOON_REG_HOST2ARM1
#define TYPHOON_REG_DOWNLOAD_HMAC_0		TYPHOON_REG_HOST2ARM2
#define TYPHOON_REG_DOWNLOAD_HMAC_1		TYPHOON_REG_HOST2ARM3
#define TYPHOON_REG_DOWNLOAD_HMAC_2		TYPHOON_REG_HOST2ARM4
#define TYPHOON_REG_DOWNLOAD_HMAC_3		TYPHOON_REG_HOST2ARM5
#define TYPHOON_REG_DOWNLOAD_HMAC_4		TYPHOON_REG_HOST2ARM6

/* Mailbox roles while registering the boot record */
#define TYPHOON_REG_BOOT_RECORD_ADDR_HI		TYPHOON_REG_HOST2ARM2
#define TYPHOON_REG_BOOT_RECORD_ADDR_LO		TYPHOON_REG_HOST2ARM1

/* Mailbox roles during normal operation */
#define TYPHOON_REG_TX_LO_READY			TYPHOON_REG_HOST2ARM3
#define TYPHOON_REG_CMD_READY			TYPHOON_REG_HOST2ARM2
#define TYPHOON_REG_TX_HI_READY			TYPHOON_REG_HOST2ARM1

#define TYPHOON_REG_COMMAND			TYPHOON_REG_HOST2ARM0
#define TYPHOON_REG_HEARTBEAT			TYPHOON_REG_ARM2HOST3
#define TYPHOON_REG_STATUS			TYPHOON_REG_ARM2HOST0
573 | |||
/* 3XP Reset values (TYPHOON_REG_SOFT_RESET)
 */
#define TYPHOON_RESET_ALL	0x7f
#define TYPHOON_RESET_NONE	0x00

/* 3XP irq bits (TYPHOON_REG_INTR_{STATUS,ENABLE,MASK})
 *
 * Some of these came from OpenBSD, as the 3Com docs have it wrong
 * (INTR_SELF) or don't list it at all (INTR_*_ABORT)
 *
 * Enabling irqs on the Heartbeat reg (ArmToHost3) gets you an irq
 * about every 8ms, so don't do it.
 */
#define TYPHOON_INTR_HOST_INT		0x00000001
#define TYPHOON_INTR_ARM2HOST0		0x00000002
#define TYPHOON_INTR_ARM2HOST1		0x00000004
#define TYPHOON_INTR_ARM2HOST2		0x00000008
#define TYPHOON_INTR_ARM2HOST3		0x00000010
#define TYPHOON_INTR_DMA0		0x00000020
#define TYPHOON_INTR_DMA1		0x00000040
#define TYPHOON_INTR_DMA2		0x00000080
#define TYPHOON_INTR_DMA3		0x00000100
#define TYPHOON_INTR_MASTER_ABORT	0x00000200
#define TYPHOON_INTR_TARGET_ABORT	0x00000400
#define TYPHOON_INTR_SELF		0x00000800
#define TYPHOON_INTR_RESERVED		0xfffff000

#define TYPHOON_INTR_BOOTCMD		TYPHOON_INTR_ARM2HOST0

/* ENABLE_ALL leaves bit 4 (ARM2HOST3, the heartbeat) masked -- see above */
#define TYPHOON_INTR_ENABLE_ALL		0xffffffef
#define TYPHOON_INTR_ALL		0xffffffff
#define TYPHOON_INTR_NONE		0x00000000
606 | |||
/* The commands the host writes to the 3XP chip (TYPHOON_REG_COMMAND)
 */
#define TYPHOON_BOOTCMD_BOOT			0x00
#define TYPHOON_BOOTCMD_WAKEUP			0xfa
#define TYPHOON_BOOTCMD_DNLD_COMPLETE		0xfb
#define TYPHOON_BOOTCMD_SEG_AVAILABLE		0xfc
#define TYPHOON_BOOTCMD_RUNTIME_IMAGE		0xfd
#define TYPHOON_BOOTCMD_REG_BOOT_RECORD		0xff

/* 3XP Status values reported by the chip (TYPHOON_REG_STATUS)
 */
#define TYPHOON_STATUS_WAITING_FOR_BOOT		0x07
#define TYPHOON_STATUS_SECOND_INIT		0x08
#define TYPHOON_STATUS_RUNNING			0x09
#define TYPHOON_STATUS_WAITING_FOR_HOST		0x0d
#define TYPHOON_STATUS_WAITING_FOR_SEGMENT	0x10
#define TYPHOON_STATUS_SLEEPING			0x11
#define TYPHOON_STATUS_HALTED			0x14