path: root/drivers/net/sundance.c
author Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org> 2005-04-16 18:20:36 -0400
commit 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree 0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/sundance.c
Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/sundance.c')
-rw-r--r--  drivers/net/sundance.c  1785
1 file changed, 1785 insertions, 0 deletions
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
new file mode 100644
index 000000000000..08cb7177a175
--- /dev/null
+++ b/drivers/net/sundance.c
@@ -0,0 +1,1785 @@
1/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
2/*
3 Written 1999-2000 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/sundance.html
19
20
21 Version LK1.01a (jgarzik):
22 - Replace some MII-related magic numbers with constants
23
24 Version LK1.02 (D-Link):
25 - Add new board to PCI ID list
26 - Fix multicast bug
27
28 Version LK1.03 (D-Link):
29 - New Rx scheme, reduce Rx congestion
30 - Option to disable flow control
31
32 Version LK1.04 (D-Link):
33 - Tx timeout recovery
34 - More support for ethtool.
35
36 Version LK1.04a:
37 - Remove unused/constant members from struct pci_id_info
38 (which then allows removal of 'drv_flags' from private struct)
39 (jgarzik)
40 - If no phy is found, fail to load that board (jgarzik)
41 - Always start phy id scan at id 1 to avoid problems (Donald Becker)
42 - Autodetect where mii_preamble_required is needed,
43 default to not needed. (Donald Becker)
44
45 Version LK1.04b:
46 - Remove mii_preamble_required module parameter (Donald Becker)
47 - Add per-interface mii_preamble_required (setting is autodetected)
48 (Donald Becker)
49 - Remove unnecessary cast from void pointer (jgarzik)
50 - Re-align comments in private struct (jgarzik)
51
52 Version LK1.04c (jgarzik):
53 - Support bitmapped message levels (NETIF_MSG_xxx), and the
54 two ethtool ioctls that get/set them
55 - Don't hand-code MII ethtool support, use standard API/lib
56
57 Version LK1.04d:
58 - Merge from Donald Becker's sundance.c: (Jason Lunz)
59 * proper support for variably-sized MTUs
60 * default to PIO, to fix chip bugs
61 - Add missing unregister_netdev (Jason Lunz)
62 - Add CONFIG_SUNDANCE_MMIO config option (jgarzik)
63 - Better rx buf size calculation (Donald Becker)
64
65 Version LK1.05 (D-Link):
66 - Fix DFE-580TX packet drop issue (for DL10050C)
67 - Fix reset_tx logic
68
69 Version LK1.06 (D-Link):
70 - Fix crash while unloading driver
71
72 Version LK1.06b (D-Link):
73 - New tx scheme, adaptive tx_coalesce
74
75 Version LK1.07 (D-Link):
76 - Fix tx bugs on big-endian machines
77 - Remove unused max_interrupt_work module parameter, the new
78 NAPI-like rx scheme doesn't need it.
79 - Remove redundant get_stats() in intr_handler(); those
80 I/O accesses could affect performance on ARM-based systems
81 - Add Linux software VLAN support
82
83 Version LK1.08 (D-Link):
84 - Fix custom MAC address bug
85 (StationAddr register only accepts word writes)
86
87 Version LK1.09 (D-Link):
88 - Fix the flowctrl bug.
89 - Set Pause bit in MII ANAR if flow control enabled.
90
91 Version LK1.09a (ICPlus):
92 - Add a delay when reading the contents of the EEPROM
93
94*/
95
96#define DRV_NAME "sundance"
97#define DRV_VERSION "1.01+LK1.09a"
98#define DRV_RELDATE "10-Jul-2003"
99
100
101/* The user-configurable values.
102 These may be modified when a driver module is loaded.*/
103static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
104/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
105 Typical is a 64 element hash table based on the Ethernet CRC. */
106static int multicast_filter_limit = 32;
107
108/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
109 Setting to > 1518 effectively disables this feature.
110 This chip can receive into offset buffers, so the Alpha does not
111 need a copy-align. */
112static int rx_copybreak;
113static int flowctrl=1;
114
115/* media[] specifies the media type the NIC operates at.
116 autosense Autosensing active media.
117 10mbps_hd 10Mbps half duplex.
118 10mbps_fd 10Mbps full duplex.
119 100mbps_hd 100Mbps half duplex.
120 100mbps_fd 100Mbps full duplex.
121 0 Autosensing active media.
122 1 10Mbps half duplex.
123 2 10Mbps full duplex.
124 3 100Mbps half duplex.
125 4 100Mbps full duplex.
126*/
127#define MAX_UNITS 8
128static char *media[MAX_UNITS];
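/* A hypothetical usage sketch (values are illustrative, not defaults):
   "modprobe sundance media=100mbps_fd,autosense flowctrl=0 debug=3"
   would force the first board to 100Mbps full duplex, leave the second
   autosensing, disable flow control, and raise the message level. */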
129
130
131/* Operational parameters that are set at compile time. */
132
133/* Keep the ring sizes a power of two for compile efficiency.
134 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
135 Making the Tx ring too large decreases the effectiveness of channel
136 bonding and packet priority, and more than 128 requires modifying the
137 Tx error recovery.
138 Large receive rings merely waste memory. */
139#define TX_RING_SIZE 32
140#define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */
141#define RX_RING_SIZE 64
142#define RX_BUDGET 32
143#define TX_TOTAL_SIZE (TX_RING_SIZE*sizeof(struct netdev_desc))
144#define RX_TOTAL_SIZE (RX_RING_SIZE*sizeof(struct netdev_desc))
145
146/* Operational parameters that usually are not changed. */
147/* Time in jiffies before concluding the transmitter is hung. */
148#define TX_TIMEOUT (4*HZ)
149#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
150
151/* Include files, designed to support most kernel versions 2.0.0 and later. */
152#include <linux/module.h>
153#include <linux/kernel.h>
154#include <linux/string.h>
155#include <linux/timer.h>
156#include <linux/errno.h>
157#include <linux/ioport.h>
158#include <linux/slab.h>
159#include <linux/interrupt.h>
160#include <linux/pci.h>
161#include <linux/netdevice.h>
162#include <linux/etherdevice.h>
163#include <linux/skbuff.h>
164#include <linux/init.h>
165#include <linux/bitops.h>
166#include <asm/uaccess.h>
167#include <asm/processor.h> /* Processor type for cache alignment. */
168#include <asm/io.h>
169#include <linux/delay.h>
170#include <linux/spinlock.h>
171#ifndef _COMPAT_WITH_OLD_KERNEL
172#include <linux/crc32.h>
173#include <linux/ethtool.h>
174#include <linux/mii.h>
175#else
176#include "crc32.h"
177#include "ethtool.h"
178#include "mii.h"
179#include "compat.h"
180#endif
181
182/* These identify the driver base version and may not be removed. */
183static char version[] __devinitdata =
184KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
185KERN_INFO " http://www.scyld.com/network/sundance.html\n";
186
187MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
188MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
189MODULE_LICENSE("GPL");
190
191module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0);
193module_param_array(media, charp, NULL, 0);
194module_param(flowctrl, int, 0);
195MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)");
196MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames");
197MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]");
198
199/*
200 Theory of Operation
201
202I. Board Compatibility
203
204This driver is designed for the Sundance Technologies "Alta" ST201 chip.
205
206II. Board-specific settings
207
208III. Driver operation
209
210IIIa. Ring buffers
211
212This driver uses two statically allocated fixed-size descriptor lists
213formed into rings by a branch from the final descriptor to the beginning of
214the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
215Some chips explicitly use only 2^N sized rings, while others use a
216'next descriptor' pointer that the driver forms into rings.
217
218IIIb/c. Transmit/Receive Structure
219
220This driver uses a zero-copy receive and transmit scheme.
221The driver allocates full frame size skbuffs for the Rx ring buffers at
222open() time and passes the skb->data field to the chip as receive data
223buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
224a fresh skbuff is allocated and the frame is copied to the new skbuff.
225When the incoming frame is larger, the skbuff is passed directly up the
226protocol stack. Buffers consumed this way are replaced by newly allocated
227skbuffs in a later phase of receives.
228
229The RX_COPYBREAK value is chosen to trade-off the memory wasted by
230using a full-sized skbuff for small frames vs. the copying costs of larger
231frames. New boards are typically used in generously configured machines
232and the underfilled buffers have negligible impact compared to the benefit of
233a single allocation size, so the default value of zero results in never
234copying packets. When copying is done, the cost is usually mitigated by using
235a combined copy/checksum routine. Copying also preloads the cache, which is
236most useful with small frames.
237
238A subtle aspect of the operation is that the IP header at offset 14 in an
239ethernet frame isn't longword aligned for further processing.
240Unaligned buffers are permitted by the Sundance hardware, so
241frames are received into the skbuff at an offset of "+2", 16-byte aligning
242the IP header.
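(A quick worked check: with that 2-byte pad, the 14-byte Ethernet header
occupies buffer bytes 2-15, so the IP header begins at byte 16, a
longword-aligned offset; init_ring() implements the pad with
skb_reserve(skb, 2).)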
243
244IIId. Synchronization
245
246The driver runs as two independent, single-threaded flows of control. One
247is the send-packet routine, which enforces single-threaded use by the
248dev->tbusy flag. The other thread is the interrupt handler, which is single
249threaded by the hardware and interrupt handling software.
250
251The send packet thread has partial control over the Tx ring and 'dev->tbusy'
252flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
253 queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
254the 'lp->tx_full' flag.
255
256The interrupt handler has exclusive control over the Rx ring and records stats
257from the Tx ring. After reaping the stats, it marks the Tx queue entry as
258empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
259clears both the tx_full and tbusy flags.
260
261IV. Notes
262
263IVb. References
264
265The Sundance ST201 datasheet, preliminary version.
266http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
267http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
268
269IVc. Errata
270
271*/
272
273/* Work-around for Kendin chip bugs. */
274#ifndef CONFIG_SUNDANCE_MMIO
275#define USE_IO_OPS 1
276#endif
277
278static struct pci_device_id sundance_pci_tbl[] = {
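	/* Fields: vendor, device, subvendor, subdevice, class, class_mask,
	   driver_data (driver_data indexes pci_id_tbl[] below). */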
279 {0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0},
280 {0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1},
281 {0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2},
282 {0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3},
283 {0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
284 {0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
285 {0,}
286};
287MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);
288
289enum {
290 netdev_io_size = 128
291};
292
293struct pci_id_info {
294 const char *name;
295};
296static struct pci_id_info pci_id_tbl[] = {
297 {"D-Link DFE-550TX FAST Ethernet Adapter"},
298 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
299 {"D-Link DFE-580TX 4 port Server Adapter"},
300 {"D-Link DFE-530TXS FAST Ethernet Adapter"},
301 {"D-Link DL10050-based FAST Ethernet Adapter"},
302 {"Sundance Technology Alta"},
303 {NULL,}, /* 0 terminated list. */
304};
305
306/* This driver was written to use PCI memory space; however, x86-oriented
307 hardware often uses I/O space accesses. */
308
309/* Offsets to the device registers.
310 Unlike software-only systems, device drivers interact with complex hardware.
311 It's not useful to define symbolic names for every register bit in the
312 device. The name can only partially document the semantics and make
313 the driver longer and more difficult to read.
314 In general, only the important configuration values or bits changed
315 multiple times should be defined symbolically.
316*/
317enum alta_offsets {
318 DMACtrl = 0x00,
319 TxListPtr = 0x04,
320 TxDMABurstThresh = 0x08,
321 TxDMAUrgentThresh = 0x09,
322 TxDMAPollPeriod = 0x0a,
323 RxDMAStatus = 0x0c,
324 RxListPtr = 0x10,
325 DebugCtrl0 = 0x1a,
326 DebugCtrl1 = 0x1c,
327 RxDMABurstThresh = 0x14,
328 RxDMAUrgentThresh = 0x15,
329 RxDMAPollPeriod = 0x16,
330 LEDCtrl = 0x1a,
331 ASICCtrl = 0x30,
332 EEData = 0x34,
333 EECtrl = 0x36,
334 TxStartThresh = 0x3c,
335 RxEarlyThresh = 0x3e,
336 FlashAddr = 0x40,
337 FlashData = 0x44,
338 TxStatus = 0x46,
339 TxFrameId = 0x47,
340 DownCounter = 0x18,
341 IntrClear = 0x4a,
342 IntrEnable = 0x4c,
343 IntrStatus = 0x4e,
344 MACCtrl0 = 0x50,
345 MACCtrl1 = 0x52,
346 StationAddr = 0x54,
347 MaxFrameSize = 0x5A,
348 RxMode = 0x5c,
349 MIICtrl = 0x5e,
350 MulticastFilter0 = 0x60,
351 MulticastFilter1 = 0x64,
352 RxOctetsLow = 0x68,
353 RxOctetsHigh = 0x6a,
354 TxOctetsLow = 0x6c,
355 TxOctetsHigh = 0x6e,
356 TxFramesOK = 0x70,
357 RxFramesOK = 0x72,
358 StatsCarrierError = 0x74,
359 StatsLateColl = 0x75,
360 StatsMultiColl = 0x76,
361 StatsOneColl = 0x77,
362 StatsTxDefer = 0x78,
363 RxMissed = 0x79,
364 StatsTxXSDefer = 0x7a,
365 StatsTxAbort = 0x7b,
366 StatsBcastTx = 0x7c,
367 StatsBcastRx = 0x7d,
368 StatsMcastTx = 0x7e,
369 StatsMcastRx = 0x7f,
370 /* Aliased and bogus values! */
371 RxStatus = 0x0c,
372};
373enum ASICCtrl_HiWord_bit {
374 GlobalReset = 0x0001,
375 RxReset = 0x0002,
376 TxReset = 0x0004,
377 DMAReset = 0x0008,
378 FIFOReset = 0x0010,
379 NetworkReset = 0x0020,
380 HostReset = 0x0040,
381 ResetBusy = 0x0400,
382};
383
384/* Bits in the interrupt status/mask registers. */
385enum intr_status_bits {
386 IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
387 IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
388 IntrDrvRqst=0x0040,
389 StatsMax=0x0080, LinkChange=0x0100,
390 IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
391};
392
393/* Bits in the RxMode register. */
394enum rx_mode_bits {
395 AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
396 AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
397};
398/* Bits in MACCtrl. */
399enum mac_ctrl0_bits {
400 EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
401 EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
402};
403enum mac_ctrl1_bits {
404 StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
405 TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
406 RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
407};
408
409/* The Rx and Tx buffer descriptors. */
410/* Note that using only 32 bit fields simplifies conversion to big-endian
411 architectures. */
412struct netdev_desc {
413 u32 next_desc;
414 u32 status;
415 struct desc_frag { u32 addr, length; } frag[1];
416};
417
418/* Bits in netdev_desc.status */
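/* Some values alias because the Tx and Rx paths interpret the status word
   differently: rx_poll() tests 0x8000 as DescOwn, while tx_poll() sets the
   same bit as DescIntrOnTx. */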
419enum desc_status_bits {
420 DescOwn=0x8000,
421 DescEndPacket=0x4000,
422 DescEndRing=0x2000,
423 LastFrag=0x80000000,
424 DescIntrOnTx=0x8000,
425 DescIntrOnDMADone=0x80000000,
426 DisableAlign = 0x00000001,
427};
428
429#define PRIV_ALIGN 15 /* Required alignment mask */
430/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
431 within the structure. */
432#define MII_CNT 4
433struct netdev_private {
434 /* Descriptor rings first for alignment. */
435 struct netdev_desc *rx_ring;
436 struct netdev_desc *tx_ring;
437 struct sk_buff* rx_skbuff[RX_RING_SIZE];
438 struct sk_buff* tx_skbuff[TX_RING_SIZE];
439 dma_addr_t tx_ring_dma;
440 dma_addr_t rx_ring_dma;
441 struct net_device_stats stats;
442 struct timer_list timer; /* Media monitoring timer. */
443 /* Frequently used values: keep some adjacent for cache effect. */
444 spinlock_t lock;
445 spinlock_t rx_lock; /* Group with Tx control cache line. */
446 int msg_enable;
447 int chip_id;
448 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
449 unsigned int rx_buf_sz; /* Based on MTU+slack. */
450 struct netdev_desc *last_tx; /* Last Tx descriptor used. */
451 unsigned int cur_tx, dirty_tx;
452 /* These values keep track of the transceiver/media in use. */
453 unsigned int flowctrl:1;
454 unsigned int default_port:4; /* Last dev->if_port value. */
455 unsigned int an_enable:1;
456 unsigned int speed;
457 struct tasklet_struct rx_tasklet;
458 struct tasklet_struct tx_tasklet;
459 int budget;
460 int cur_task;
461 /* Multicast and receive mode. */
462 spinlock_t mcastlock; /* SMP lock multicast updates. */
463 u16 mcast_filter[4];
464 /* MII transceiver section. */
465 struct mii_if_info mii_if;
466 int mii_preamble_required;
467 unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */
468 struct pci_dev *pci_dev;
469 void __iomem *base;
470 unsigned char pci_rev_id;
471};
472
473/* The station address location in the EEPROM. */
474#define EEPROM_SA_OFFSET 0x10
475#define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \
476 IntrDrvRqst | IntrTxDone | StatsMax | \
477 LinkChange)
478
479static int change_mtu(struct net_device *dev, int new_mtu);
480static int eeprom_read(void __iomem *ioaddr, int location);
481static int mdio_read(struct net_device *dev, int phy_id, int location);
482static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
483static int netdev_open(struct net_device *dev);
484static void check_duplex(struct net_device *dev);
485static void netdev_timer(unsigned long data);
486static void tx_timeout(struct net_device *dev);
487static void init_ring(struct net_device *dev);
488static int start_tx(struct sk_buff *skb, struct net_device *dev);
489static int reset_tx (struct net_device *dev);
490static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
491static void rx_poll(unsigned long data);
492static void tx_poll(unsigned long data);
493static void refill_rx (struct net_device *dev);
494static void netdev_error(struct net_device *dev, int intr_status);
496static void set_rx_mode(struct net_device *dev);
497static int __set_mac_addr(struct net_device *dev);
498static struct net_device_stats *get_stats(struct net_device *dev);
499static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500static int netdev_close(struct net_device *dev);
501static struct ethtool_ops ethtool_ops;
502
503static int __devinit sundance_probe1 (struct pci_dev *pdev,
504 const struct pci_device_id *ent)
505{
506 struct net_device *dev;
507 struct netdev_private *np;
508 static int card_idx;
509 int chip_idx = ent->driver_data;
510 int irq;
511 int i;
512 void __iomem *ioaddr;
513 u16 mii_ctl;
514 void *ring_space;
515 dma_addr_t ring_dma;
516#ifdef USE_IO_OPS
517 int bar = 0;
518#else
519 int bar = 1;
520#endif
521
522
523/* when built into the kernel, we only print version if device is found */
524#ifndef MODULE
525 static int printed_version;
526 if (!printed_version++)
527 printk(version);
528#endif
529
530 if (pci_enable_device(pdev))
531 return -EIO;
532 pci_set_master(pdev);
533
534 irq = pdev->irq;
535
536 dev = alloc_etherdev(sizeof(*np));
537 if (!dev)
538 return -ENOMEM;
539 SET_MODULE_OWNER(dev);
540 SET_NETDEV_DEV(dev, &pdev->dev);
541
542 if (pci_request_regions(pdev, DRV_NAME))
543 goto err_out_netdev;
544
545 ioaddr = pci_iomap(pdev, bar, netdev_io_size);
546 if (!ioaddr)
547 goto err_out_res;
548
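	/* The station address occupies three 16-bit EEPROM words starting
	   at EEPROM_SA_OFFSET (0x10). */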
549 for (i = 0; i < 3; i++)
550 ((u16 *)dev->dev_addr)[i] =
551 le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
552
553 dev->base_addr = (unsigned long)ioaddr;
554 dev->irq = irq;
555
556 np = netdev_priv(dev);
557 np->base = ioaddr;
558 np->pci_dev = pdev;
559 np->chip_id = chip_idx;
560 np->msg_enable = (1 << debug) - 1;
561 spin_lock_init(&np->lock);
562 tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
563 tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
564
565 ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
566 if (!ring_space)
567 goto err_out_cleardev;
568 np->tx_ring = (struct netdev_desc *)ring_space;
569 np->tx_ring_dma = ring_dma;
570
571 ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
572 if (!ring_space)
573 goto err_out_unmap_tx;
574 np->rx_ring = (struct netdev_desc *)ring_space;
575 np->rx_ring_dma = ring_dma;
576
577 np->mii_if.dev = dev;
578 np->mii_if.mdio_read = mdio_read;
579 np->mii_if.mdio_write = mdio_write;
580 np->mii_if.phy_id_mask = 0x1f;
581 np->mii_if.reg_num_mask = 0x1f;
582
583 /* The chip-specific entries in the device structure. */
584 dev->open = &netdev_open;
585 dev->hard_start_xmit = &start_tx;
586 dev->stop = &netdev_close;
587 dev->get_stats = &get_stats;
588 dev->set_multicast_list = &set_rx_mode;
589 dev->do_ioctl = &netdev_ioctl;
590 SET_ETHTOOL_OPS(dev, &ethtool_ops);
591 dev->tx_timeout = &tx_timeout;
592 dev->watchdog_timeo = TX_TIMEOUT;
593 dev->change_mtu = &change_mtu;
594 pci_set_drvdata(pdev, dev);
595
596 pci_read_config_byte(pdev, PCI_REVISION_ID, &np->pci_rev_id);
597
598 i = register_netdev(dev);
599 if (i)
600 goto err_out_unmap_rx;
601
602 printk(KERN_INFO "%s: %s at %p, ",
603 dev->name, pci_id_tbl[chip_idx].name, ioaddr);
604 for (i = 0; i < 5; i++)
605 printk("%2.2x:", dev->dev_addr[i]);
606 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
607
608 if (1) {
609 int phy, phy_idx = 0;
610 np->phys[0] = 1; /* Default setting */
611 np->mii_preamble_required++;
612 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
613 int mii_status = mdio_read(dev, phy, MII_BMSR);
614 if (mii_status != 0xffff && mii_status != 0x0000) {
615 np->phys[phy_idx++] = phy;
616 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
617 if ((mii_status & 0x0040) == 0)
618 np->mii_preamble_required++;
619 printk(KERN_INFO "%s: MII PHY found at address %d, status "
620 "0x%4.4x advertising %4.4x.\n",
621 dev->name, phy, mii_status, np->mii_if.advertising);
622 }
623 }
624 np->mii_preamble_required--;
625
626 if (phy_idx == 0) {
627 printk(KERN_INFO "%s: No MII transceiver found, aborting. ASIC status %x\n",
628 dev->name, ioread32(ioaddr + ASICCtrl));
629 goto err_out_unregister;
630 }
631
632 np->mii_if.phy_id = np->phys[0];
633 }
634
635 /* Parse override configuration */
636 np->an_enable = 1;
637 if (card_idx < MAX_UNITS) {
638 if (media[card_idx] != NULL) {
639 np->an_enable = 0;
640 if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
641 strcmp (media[card_idx], "4") == 0) {
642 np->speed = 100;
643 np->mii_if.full_duplex = 1;
644 } else if (strcmp (media[card_idx], "100mbps_hd") == 0
645 || strcmp (media[card_idx], "3") == 0) {
646 np->speed = 100;
647 np->mii_if.full_duplex = 0;
648 } else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
649 strcmp (media[card_idx], "2") == 0) {
650 np->speed = 10;
651 np->mii_if.full_duplex = 1;
652 } else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
653 strcmp (media[card_idx], "1") == 0) {
654 np->speed = 10;
655 np->mii_if.full_duplex = 0;
656 } else {
657 np->an_enable = 1;
658 }
659 }
660 if (flowctrl == 1)
661 np->flowctrl = 1;
662 }
663
664 /* Fibre PHY? */
665 if (ioread32 (ioaddr + ASICCtrl) & 0x80) {
666 /* Default 100Mbps Full */
667 if (np->an_enable) {
668 np->speed = 100;
669 np->mii_if.full_duplex = 1;
670 np->an_enable = 0;
671 }
672 }
673 /* Reset PHY */
674 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET);
675 mdelay (300);
676 /* If flow control enabled, we need to advertise it.*/
677 if (np->flowctrl)
678 mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400);
679 mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
680 /* Force media type */
681 if (!np->an_enable) {
682 mii_ctl = 0;
683 mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0;
684 mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0;
685 mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl);
686 printk (KERN_INFO "Override speed=%d, %s duplex\n",
687 np->speed, np->mii_if.full_duplex ? "Full" : "Half");
688
689 }
690
691 /* Perhaps move the reset here? */
692 /* Reset the chip to erase previous misconfiguration. */
693 if (netif_msg_hw(np))
694 printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl));
695 iowrite16(0x007f, ioaddr + ASICCtrl + 2);
696 if (netif_msg_hw(np))
697 printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl));
698
699 card_idx++;
700 return 0;
701
702err_out_unregister:
703 unregister_netdev(dev);
704err_out_unmap_rx:
705 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
706err_out_unmap_tx:
707 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
708err_out_cleardev:
709 pci_set_drvdata(pdev, NULL);
710 pci_iounmap(pdev, ioaddr);
711err_out_res:
712 pci_release_regions(pdev);
713err_out_netdev:
714 free_netdev (dev);
715 return -ENODEV;
716}
717
718static int change_mtu(struct net_device *dev, int new_mtu)
719{
720 if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */
721 return -EINVAL;
722 if (netif_running(dev))
723 return -EBUSY;
724 dev->mtu = new_mtu;
725 return 0;
726}
727
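/* A dummy read of EECtrl flushes the posted PCI write and provides a
   short delay; the value read is discarded. */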
728#define eeprom_delay(ee_addr) ioread32(ee_addr)
729/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
730static int __devinit eeprom_read(void __iomem *ioaddr, int location)
731{
732 int boguscnt = 10000; /* Typical 1900 ticks. */
733 iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl);
734 do {
735 eeprom_delay(ioaddr + EECtrl);
736 if (! (ioread16(ioaddr + EECtrl) & 0x8000)) {
737 return ioread16(ioaddr + EEData);
738 }
739 } while (--boguscnt > 0);
740 return 0;
741}
742
743/* MII transceiver control section.
744 Read and write the MII registers using software-generated serial
745 MDIO protocol. See the MII specifications or DP83840A data sheet
746 for details.
747
748 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
749 met by back-to-back 33 MHz PCI cycles. */
750#define mdio_delay() ioread8(mdio_addr)
751
752enum mii_reg_bits {
753 MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
754};
755#define MDIO_EnbIn (0)
756#define MDIO_WRITE0 (MDIO_EnbOutput)
757#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
758
759/* Generate the preamble required for initial synchronization and
760 a few older transceivers. */
761static void mdio_sync(void __iomem *mdio_addr)
762{
763 int bits = 32;
764
765 /* Establish sync by sending at least 32 logic ones. */
766 while (--bits >= 0) {
767 iowrite8(MDIO_WRITE1, mdio_addr);
768 mdio_delay();
769 iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
770 mdio_delay();
771 }
772}
773
774static int mdio_read(struct net_device *dev, int phy_id, int location)
775{
776 struct netdev_private *np = netdev_priv(dev);
777 void __iomem *mdio_addr = np->base + MIICtrl;
778 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
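	/* Per the standard MDIO frame layout, (0xf6 << 10) supplies the
	   header bits shifted out first: two leading 1s, the 01 start
	   delimiter and the 10 read opcode, ahead of the 5-bit PHY and
	   register addresses (an interpretation of the constant, not
	   documented here). */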
779 int i, retval = 0;
780
781 if (np->mii_preamble_required)
782 mdio_sync(mdio_addr);
783
784 /* Shift the read command bits out. */
785 for (i = 15; i >= 0; i--) {
786 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
787
788 iowrite8(dataval, mdio_addr);
789 mdio_delay();
790 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
791 mdio_delay();
792 }
793 /* Read the two transition bits, the 16 data bits, and the wire-idle bit. */
794 for (i = 19; i > 0; i--) {
795 iowrite8(MDIO_EnbIn, mdio_addr);
796 mdio_delay();
797 retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0);
798 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
799 mdio_delay();
800 }
801 return (retval>>1) & 0xffff;
802}
803
804static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
805{
806 struct netdev_private *np = netdev_priv(dev);
807 void __iomem *mdio_addr = np->base + MIICtrl;
808 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
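	/* Likewise, (0x5002 << 16) encodes the 01 start delimiter, the 01
	   write opcode and the 10 turnaround bits around the PHY/register
	   address fields. */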
809 int i;
810
811 if (np->mii_preamble_required)
812 mdio_sync(mdio_addr);
813
814 /* Shift the command bits out. */
815 for (i = 31; i >= 0; i--) {
816 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
817
818 iowrite8(dataval, mdio_addr);
819 mdio_delay();
820 iowrite8(dataval | MDIO_ShiftClk, mdio_addr);
821 mdio_delay();
822 }
823 /* Clear out extra bits. */
824 for (i = 2; i > 0; i--) {
825 iowrite8(MDIO_EnbIn, mdio_addr);
826 mdio_delay();
827 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
828 mdio_delay();
829 }
830 return;
831}
832
833static int netdev_open(struct net_device *dev)
834{
835 struct netdev_private *np = netdev_priv(dev);
836 void __iomem *ioaddr = np->base;
837 int i;
838
839 /* Do we need to reset the chip??? */
840
841 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
842 if (i)
843 return i;
844
845 if (netif_msg_ifup(np))
846 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
847 dev->name, dev->irq);
848 init_ring(dev);
849
850 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
851 /* The Tx list pointer is written as packets are queued. */
852
853 /* Initialize other registers. */
854 __set_mac_addr(dev);
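	/* Frame limit = MTU + 14-byte Ethernet header, plus 4 bytes for a
	   VLAN tag when 8021q support is configured. */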
855#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
856 iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize);
857#else
858 iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize);
859#endif
860 if (dev->mtu > 2047)
861 iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
862
863 /* Configure the PCI bus bursts and FIFO thresholds. */
864
865 if (dev->if_port == 0)
866 dev->if_port = np->default_port;
867
868 spin_lock_init(&np->mcastlock);
869
870 set_rx_mode(dev);
871 iowrite16(0, ioaddr + IntrEnable);
872 iowrite16(0, ioaddr + DownCounter);
873 /* Set the chip to poll every N*320nsec. */
874 iowrite8(100, ioaddr + RxDMAPollPeriod);
875 iowrite8(127, ioaddr + TxDMAPollPeriod);
876 /* Fix DFE-580TX packet drop issue */
877 if (np->pci_rev_id >= 0x14)
878 iowrite8(0x01, ioaddr + DebugCtrl1);
879 netif_start_queue(dev);
880
881 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
882
883 if (netif_msg_ifup(np))
884 printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
885 "MAC Control %x, %4.4x %4.4x.\n",
886 dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus),
887 ioread32(ioaddr + MACCtrl0),
888 ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0));
889
890 /* Set the timer to check for link beat. */
891 init_timer(&np->timer);
892 np->timer.expires = jiffies + 3*HZ;
893 np->timer.data = (unsigned long)dev;
894 np->timer.function = &netdev_timer; /* timer handler */
895 add_timer(&np->timer);
896
897 /* Enable interrupts by setting the interrupt mask. */
898 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
899
900 return 0;
901}
902
903static void check_duplex(struct net_device *dev)
904{
905 struct netdev_private *np = netdev_priv(dev);
906 void __iomem *ioaddr = np->base;
907 int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
908 int negotiated = mii_lpa & np->mii_if.advertising;
909 int duplex;
910
911 /* Force media */
912 if (!np->an_enable || mii_lpa == 0xffff) {
913 if (np->mii_if.full_duplex)
914 iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex,
915 ioaddr + MACCtrl0);
916 return;
917 }
918
919 /* Autonegotiation */
920 duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
921 if (np->mii_if.full_duplex != duplex) {
922 np->mii_if.full_duplex = duplex;
923 if (netif_msg_link(np))
924 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
925 "negotiated capability %4.4x.\n", dev->name,
926 duplex ? "full" : "half", np->phys[0], negotiated);
927 iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 0x20 : 0), ioaddr + MACCtrl0);
928 }
929}
930
931static void netdev_timer(unsigned long data)
932{
933 struct net_device *dev = (struct net_device *)data;
934 struct netdev_private *np = netdev_priv(dev);
935 void __iomem *ioaddr = np->base;
936 int next_tick = 10*HZ;
937
938 if (netif_msg_timer(np)) {
939 printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
940 "Tx %x Rx %x.\n",
941 dev->name, ioread16(ioaddr + IntrEnable),
942 ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
943 }
944 check_duplex(dev);
945 np->timer.expires = jiffies + next_tick;
946 add_timer(&np->timer);
947}
948
949static void tx_timeout(struct net_device *dev)
950{
951 struct netdev_private *np = netdev_priv(dev);
952 void __iomem *ioaddr = np->base;
953 unsigned long flag;
954
955 netif_stop_queue(dev);
956 tasklet_disable(&np->tx_tasklet);
957 iowrite16(0, ioaddr + IntrEnable);
958 printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
959 "TxFrameId %2.2x,"
960 " resetting...\n", dev->name, ioread8(ioaddr + TxStatus),
961 ioread8(ioaddr + TxFrameId));
962
963 {
964 int i;
965 for (i=0; i<TX_RING_SIZE; i++) {
966 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
967 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
968 le32_to_cpu(np->tx_ring[i].next_desc),
969 le32_to_cpu(np->tx_ring[i].status),
970 (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff,
971 le32_to_cpu(np->tx_ring[i].frag[0].addr),
972 le32_to_cpu(np->tx_ring[i].frag[0].length));
973 }
974 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
975 ioread32(np->base + TxListPtr),
976 netif_queue_stopped(dev));
977 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
978 np->cur_tx, np->cur_tx % TX_RING_SIZE,
979 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
980 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
981 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
982 }
983 spin_lock_irqsave(&np->lock, flag);
984
985 /* Stop and restart the chip's Tx processes. */
986 reset_tx(dev);
987 spin_unlock_irqrestore(&np->lock, flag);
988
989 dev->if_port = 0;
990
991 dev->trans_start = jiffies;
992 np->stats.tx_errors++;
993 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
994 netif_wake_queue(dev);
995 }
996 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
997 tasklet_enable(&np->tx_tasklet);
998}
999
1000
1001/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1002static void init_ring(struct net_device *dev)
1003{
1004 struct netdev_private *np = netdev_priv(dev);
1005 int i;
1006
1007 np->cur_rx = np->cur_tx = 0;
1008 np->dirty_rx = np->dirty_tx = 0;
1009 np->cur_task = 0;
1010
1011 np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16);
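	/* PKT_BUF_SZ (1536) covers any MTU up to 1520 with slack; larger
	   MTUs get 16 bytes of slack for the alignment pad and headroom. */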
1012
1013 /* Initialize all Rx descriptors. */
1014 for (i = 0; i < RX_RING_SIZE; i++) {
1015 np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma +
1016 ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring));
1017 np->rx_ring[i].status = 0;
1018 np->rx_ring[i].frag[0].length = 0;
1019 np->rx_skbuff[i] = NULL;
1020 }
1021
1022 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1023 for (i = 0; i < RX_RING_SIZE; i++) {
1024 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
1025 np->rx_skbuff[i] = skb;
1026 if (skb == NULL)
1027 break;
1028 skb->dev = dev; /* Mark as being used by this device. */
1029 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1030 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1031 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
1032 PCI_DMA_FROMDEVICE));
1033 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1034 }
1035 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1036
1037 for (i = 0; i < TX_RING_SIZE; i++) {
1038 np->tx_skbuff[i] = NULL;
1039 np->tx_ring[i].status = 0;
1040 }
1041 return;
1042}
1043
1044static void tx_poll (unsigned long data)
1045{
1046 struct net_device *dev = (struct net_device *)data;
1047 struct netdev_private *np = netdev_priv(dev);
1048 unsigned head = np->cur_task % TX_RING_SIZE;
1049 struct netdev_desc *txdesc =
1050 &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
1051
1052 /* Chain the next pointer */
1053 for (; np->cur_tx - np->cur_task > 0; np->cur_task++) {
1054 int entry = np->cur_task % TX_RING_SIZE;
1055 txdesc = &np->tx_ring[entry];
1056 if (np->last_tx) {
1057 np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma +
1058 entry*sizeof(struct netdev_desc));
1059 }
1060 np->last_tx = txdesc;
1061 }
1062 /* Indicate the latest descriptor of tx ring */
1063 txdesc->status |= cpu_to_le32(DescIntrOnTx);
1064
1065 if (ioread32 (np->base + TxListPtr) == 0)
1066 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1067 np->base + TxListPtr);
1068 return;
1069}
1070
1071static int
1072start_tx (struct sk_buff *skb, struct net_device *dev)
1073{
1074 struct netdev_private *np = netdev_priv(dev);
1075 struct netdev_desc *txdesc;
1076 unsigned entry;
1077
1078 /* Calculate the next Tx descriptor entry. */
1079 entry = np->cur_tx % TX_RING_SIZE;
1080 np->tx_skbuff[entry] = skb;
1081 txdesc = &np->tx_ring[entry];
1082
1083 txdesc->next_desc = 0;
1084 txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign);
1085 txdesc->frag[0].addr = cpu_to_le32 (pci_map_single (np->pci_dev, skb->data,
1086 skb->len,
1087 PCI_DMA_TODEVICE));
1088 txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag);
1089
1090 /* Increment cur_tx before tasklet_schedule() */
1091 np->cur_tx++;
1092 mb();
1093 /* Schedule a tx_poll() task */
1094 tasklet_schedule(&np->tx_tasklet);
1095
1096 /* On some architectures: explicitly flush cache lines here. */
1097 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1
1098 && !netif_queue_stopped(dev)) {
1099 /* do nothing */
1100 } else {
1101 netif_stop_queue (dev);
1102 }
1103 dev->trans_start = jiffies;
1104 if (netif_msg_tx_queued(np)) {
1105 printk (KERN_DEBUG
1106 "%s: Transmit frame #%d queued in slot %d.\n",
1107 dev->name, np->cur_tx, entry);
1108 }
1109 return 0;
1110}
1111
1112/* Reset the hardware Tx path and free all Tx buffers */
1113static int
1114reset_tx (struct net_device *dev)
1115{
1116 struct netdev_private *np = netdev_priv(dev);
1117 void __iomem *ioaddr = np->base;
1118 struct sk_buff *skb;
1119 int i;
1120 int irq = in_interrupt();
1121
1122 /* Reset tx logic, TxListPtr will be cleared */
1123 iowrite16 (TxDisable, ioaddr + MACCtrl1);
1124 iowrite16 (TxReset | DMAReset | FIFOReset | NetworkReset,
1125 ioaddr + ASICCtrl + 2);
1126 for (i=50; i > 0; i--) {
1127 if ((ioread16(ioaddr + ASICCtrl + 2) & ResetBusy) == 0)
1128 break;
1129 mdelay(1);
1130 }
1131 /* free all tx skbuff */
1132 for (i = 0; i < TX_RING_SIZE; i++) {
1133 skb = np->tx_skbuff[i];
1134 if (skb) {
1135 pci_unmap_single(np->pci_dev,
1136 np->tx_ring[i].frag[0].addr, skb->len,
1137 PCI_DMA_TODEVICE);
1138 if (irq)
1139 dev_kfree_skb_irq (skb);
1140 else
1141 dev_kfree_skb (skb);
1142 np->tx_skbuff[i] = NULL;
1143 np->stats.tx_dropped++;
1144 }
1145 }
1146 np->cur_tx = np->dirty_tx = 0;
1147 np->cur_task = 0;
1148 iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
1149 return 0;
1150}
1151
1152/* The interrupt handler cleans up after the Tx thread,
1153 and schedules the Rx work via the rx tasklet */
1154static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
1155{
1156 struct net_device *dev = (struct net_device *)dev_instance;
1157 struct netdev_private *np = netdev_priv(dev);
1158 void __iomem *ioaddr = np->base;
1159 int hw_frame_id;
1160 int tx_cnt;
1161 int tx_status;
1162 int handled = 0;
1163
1164
1165 do {
1166 int intr_status = ioread16(ioaddr + IntrStatus);
1167 iowrite16(intr_status, ioaddr + IntrStatus);
1168
1169 if (netif_msg_intr(np))
1170 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
1171 dev->name, intr_status);
1172
1173 if (!(intr_status & DEFAULT_INTR))
1174 break;
1175
1176 handled = 1;
1177
1178 if (intr_status & (IntrRxDMADone)) {
1179 iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone),
1180 ioaddr + IntrEnable);
1181 if (np->budget < 0)
1182 np->budget = RX_BUDGET;
1183 tasklet_schedule(&np->rx_tasklet);
1184 }
1185 if (intr_status & (IntrTxDone | IntrDrvRqst)) {
1186 tx_status = ioread16 (ioaddr + TxStatus);
1187 for (tx_cnt=32; tx_status & 0x80; --tx_cnt) {
1188 if (netif_msg_tx_done(np))
1189 printk
1190 ("%s: Transmit status is %2.2x.\n",
1191 dev->name, tx_status);
1192 if (tx_status & 0x1e) {
1193 np->stats.tx_errors++;
1194 if (tx_status & 0x10)
1195 np->stats.tx_fifo_errors++;
1196 if (tx_status & 0x08)
1197 np->stats.collisions++;
1198 if (tx_status & 0x02)
1199 np->stats.tx_window_errors++;
1200 /* This reset has not been verified! */
1201 if (tx_status & 0x10) { /* Reset the Tx. */
1202 np->stats.tx_fifo_errors++;
1203 spin_lock(&np->lock);
1204 reset_tx(dev);
1205 spin_unlock(&np->lock);
1206 }
1207 if (tx_status & 0x1e) /* Restart the Tx. */
1208 iowrite16 (TxEnable,
1209 ioaddr + MACCtrl1);
1210 }
1211 /* Yup, this is a documentation bug. It cost me *hours*. */
1212 iowrite16 (0, ioaddr + TxStatus);
1213 if (tx_cnt < 0) {
1214 iowrite32(5000, ioaddr + DownCounter);
1215 break;
1216 }
1217 tx_status = ioread16 (ioaddr + TxStatus);
1218 }
1219 hw_frame_id = (tx_status >> 8) & 0xff;
1220 } else {
1221 hw_frame_id = ioread8(ioaddr + TxFrameId);
1222 }
1223
1224 if (np->pci_rev_id >= 0x14) {
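			/* DL10050C and later (rev >= 0x14): walk the ring comparing
			   each descriptor's stored frame id with the id the chip last
			   reported, rather than relying on the status bit alone. */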
1225 spin_lock(&np->lock);
1226 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1227 int entry = np->dirty_tx % TX_RING_SIZE;
1228 struct sk_buff *skb;
1229 int sw_frame_id;
1230 sw_frame_id = (le32_to_cpu(
1231 np->tx_ring[entry].status) >> 2) & 0xff;
1232 if (sw_frame_id == hw_frame_id &&
1233 !(le32_to_cpu(np->tx_ring[entry].status)
1234 & 0x00010000))
1235 break;
1236 if (sw_frame_id == (hw_frame_id + 1) %
1237 TX_RING_SIZE)
1238 break;
1239 skb = np->tx_skbuff[entry];
1240 /* Free the original skb. */
1241 pci_unmap_single(np->pci_dev,
1242 np->tx_ring[entry].frag[0].addr,
1243 skb->len, PCI_DMA_TODEVICE);
1244 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1245 np->tx_skbuff[entry] = NULL;
1246 np->tx_ring[entry].frag[0].addr = 0;
1247 np->tx_ring[entry].frag[0].length = 0;
1248 }
1249 spin_unlock(&np->lock);
1250 } else {
1251 spin_lock(&np->lock);
1252 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1253 int entry = np->dirty_tx % TX_RING_SIZE;
1254 struct sk_buff *skb;
1255 if (!(le32_to_cpu(np->tx_ring[entry].status)
1256 & 0x00010000))
1257 break;
1258 skb = np->tx_skbuff[entry];
1259 /* Free the original skb. */
1260 pci_unmap_single(np->pci_dev,
1261 np->tx_ring[entry].frag[0].addr,
1262 skb->len, PCI_DMA_TODEVICE);
1263 dev_kfree_skb_irq (np->tx_skbuff[entry]);
1264 np->tx_skbuff[entry] = NULL;
1265 np->tx_ring[entry].frag[0].addr = 0;
1266 np->tx_ring[entry].frag[0].length = 0;
1267 }
1268 spin_unlock(&np->lock);
1269 }
1270
1271 if (netif_queue_stopped(dev) &&
1272 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
1273 /* The ring is no longer full, clear busy flag. */
1274 netif_wake_queue (dev);
1275 }
1276 /* Abnormal error summary/uncommon events handlers. */
1277 if (intr_status & (IntrPCIErr | LinkChange | StatsMax))
1278 netdev_error(dev, intr_status);
1279 } while (0);
1280 if (netif_msg_intr(np))
1281 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1282 dev->name, ioread16(ioaddr + IntrStatus));
1283 return IRQ_RETVAL(handled);
1284}
1285
1286static void rx_poll(unsigned long data)
1287{
1288 struct net_device *dev = (struct net_device *)data;
1289 struct netdev_private *np = netdev_priv(dev);
1290 int entry = np->cur_rx % RX_RING_SIZE;
1291 int boguscnt = np->budget;
1292 void __iomem *ioaddr = np->base;
1293 int received = 0;
1294
1295 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1296 while (1) {
1297 struct netdev_desc *desc = &(np->rx_ring[entry]);
1298 u32 frame_status = le32_to_cpu(desc->status);
1299 int pkt_len;
1300
1301 if (--boguscnt < 0) {
1302 goto not_done;
1303 }
1304 if (!(frame_status & DescOwn))
1305 break;
1306 pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
1307 if (netif_msg_rx_status(np))
1308 printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
1309 frame_status);
1310 if (frame_status & 0x001f4000) {
1311 /* There was an error. */
1312 if (netif_msg_rx_err(np))
1313 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
1314 frame_status);
1315 np->stats.rx_errors++;
1316 if (frame_status & 0x00100000) np->stats.rx_length_errors++;
1317 if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
1318 if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
1319 if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
1320 if (frame_status & 0x00100000) {
1321 printk(KERN_WARNING "%s: Oversized Ethernet frame,"
1322 " status %8.8x.\n",
1323 dev->name, frame_status);
1324 }
1325 } else {
1326 struct sk_buff *skb;
1327#ifndef final_version
1328 if (netif_msg_rx_status(np))
1329 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
1330 ", bogus_cnt %d.\n",
1331 pkt_len, boguscnt);
1332#endif
1333 /* Check if the packet is long enough to accept without copying
1334 to a minimally-sized skbuff. */
1335 if (pkt_len < rx_copybreak
1336 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1337 skb->dev = dev;
1338 skb_reserve(skb, 2); /* 16 byte align the IP header */
1339 pci_dma_sync_single_for_cpu(np->pci_dev,
1340 desc->frag[0].addr,
1341 np->rx_buf_sz,
1342 PCI_DMA_FROMDEVICE);
1343
1344 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1345 pci_dma_sync_single_for_device(np->pci_dev,
1346 desc->frag[0].addr,
1347 np->rx_buf_sz,
1348 PCI_DMA_FROMDEVICE);
1349 skb_put(skb, pkt_len);
1350 } else {
1351 pci_unmap_single(np->pci_dev,
1352 desc->frag[0].addr,
1353 np->rx_buf_sz,
1354 PCI_DMA_FROMDEVICE);
1355 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1356 np->rx_skbuff[entry] = NULL;
1357 }
1358 skb->protocol = eth_type_trans(skb, dev);
1359 /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
1360 netif_rx(skb);
1361 dev->last_rx = jiffies;
1362 }
1363 entry = (entry + 1) % RX_RING_SIZE;
1364 received++;
1365 }
1366 np->cur_rx = entry;
1367 refill_rx (dev);
1368 np->budget -= received;
1369 iowrite16(DEFAULT_INTR, ioaddr + IntrEnable);
1370 return;
1371
1372not_done:
1373 np->cur_rx = entry;
1374 refill_rx (dev);
1375 if (!received)
1376 received = 1;
1377 np->budget -= received;
1378 if (np->budget <= 0)
1379 np->budget = RX_BUDGET;
1380 tasklet_schedule(&np->rx_tasklet);
1381 return;
1382}
1383
1384static void refill_rx (struct net_device *dev)
1385{
1386 struct netdev_private *np = netdev_priv(dev);
1387 int entry;
1388 int cnt = 0;
1389
1390 /* Refill the Rx ring buffers. */
1391 for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0;
1392 np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) {
1393 struct sk_buff *skb;
1394 entry = np->dirty_rx % RX_RING_SIZE;
1395 if (np->rx_skbuff[entry] == NULL) {
1396 skb = dev_alloc_skb(np->rx_buf_sz);
1397 np->rx_skbuff[entry] = skb;
1398 if (skb == NULL)
1399 break; /* Better luck next round. */
1400 skb->dev = dev; /* Mark as being used by this device. */
1401 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1402 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1403 pci_map_single(np->pci_dev, skb->tail,
1404 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1405 }
1406 /* Perhaps we need not reset this field. */
1407 np->rx_ring[entry].frag[0].length =
1408 cpu_to_le32(np->rx_buf_sz | LastFrag);
1409 np->rx_ring[entry].status = 0;
1410 cnt++;
1411 }
1412 return;
1413}

1414static void netdev_error(struct net_device *dev, int intr_status)
1415{
1416 struct netdev_private *np = netdev_priv(dev);
1417 void __iomem *ioaddr = np->base;
1418 u16 mii_ctl, mii_advertise, mii_lpa;
1419 int speed;
1420
1421 if (intr_status & LinkChange) {
1422 if (np->an_enable) {
1423 mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE);
1424 mii_lpa= mdio_read (dev, np->phys[0], MII_LPA);
1425 mii_advertise &= mii_lpa;
1426 printk (KERN_INFO "%s: Link changed: ", dev->name);
1427 if (mii_advertise & ADVERTISE_100FULL) {
1428 np->speed = 100;
1429 printk ("100Mbps, full duplex\n");
1430 } else if (mii_advertise & ADVERTISE_100HALF) {
1431 np->speed = 100;
1432 printk ("100Mbps, half duplex\n");
1433 } else if (mii_advertise & ADVERTISE_10FULL) {
1434 np->speed = 10;
1435 printk ("10Mbps, full duplex\n");
1436 } else if (mii_advertise & ADVERTISE_10HALF) {
1437 np->speed = 10;
1438 printk ("10Mbps, half duplex\n");
1439 } else
1440 printk ("\n");
1441
1442 } else {
1443 mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR);
1444 speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10;
1445 np->speed = speed;
1446 printk (KERN_INFO "%s: Link changed: %dMbps, ",
1447 dev->name, speed);
1448 printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ?
1449 "full" : "half");
1450 }
1451 check_duplex (dev);
1452 if (np->flowctrl && np->mii_if.full_duplex) {
1453 iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200,
1454 ioaddr + MulticastFilter1+2);
1455 iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl,
1456 ioaddr + MACCtrl0);
1457 }
1458 }
1459 if (intr_status & StatsMax) {
1460 get_stats(dev);
1461 }
1462 if (intr_status & IntrPCIErr) {
1463 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1464 dev->name, intr_status);
1465 /* We must do a global reset of DMA to continue. */
1466 }
1467}
1468
1469static struct net_device_stats *get_stats(struct net_device *dev)
1470{
1471 struct netdev_private *np = netdev_priv(dev);
1472 void __iomem *ioaddr = np->base;
1473 int i;
1474
1475 /* We should lock this segment of code for SMP eventually, although
1476 the vulnerability window is very small and statistics are
1477 non-critical. */
1478 /* The chip only needs to report frames it silently dropped. */
1479 np->stats.rx_missed_errors += ioread8(ioaddr + RxMissed);
1480 np->stats.tx_packets += ioread16(ioaddr + TxFramesOK);
1481 np->stats.rx_packets += ioread16(ioaddr + RxFramesOK);
1482 np->stats.collisions += ioread8(ioaddr + StatsLateColl);
1483 np->stats.collisions += ioread8(ioaddr + StatsMultiColl);
1484 np->stats.collisions += ioread8(ioaddr + StatsOneColl);
1485 np->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError);
1486 ioread8(ioaddr + StatsTxDefer);
1487 for (i = StatsTxDefer; i <= StatsMcastRx; i++)
1488 ioread8(ioaddr + i);
1489 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow);
1490 np->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16;
1491 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow);
1492 np->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16;
1493
1494 return &np->stats;
1495}
1496
1497static void set_rx_mode(struct net_device *dev)
1498{
1499 struct netdev_private *np = netdev_priv(dev);
1500 void __iomem *ioaddr = np->base;
1501 u16 mc_filter[4]; /* Multicast hash filter */
1502 u32 rx_mode;
1503 int i;
1504
1505 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1506 /* Unconditionally log net taps. */
1507 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
1508 memset(mc_filter, 0xff, sizeof(mc_filter));
1509 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
1510 } else if ((dev->mc_count > multicast_filter_limit)
1511 || (dev->flags & IFF_ALLMULTI)) {
1512 /* Too many to match, or accept all multicasts. */
1513 memset(mc_filter, 0xff, sizeof(mc_filter));
1514 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1515 } else if (dev->mc_count) {
1516 struct dev_mc_list *mclist;
1517 int bit;
1518 int index;
1519 int crc;
1520 memset (mc_filter, 0, sizeof (mc_filter));
1521 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1522 i++, mclist = mclist->next) {
1523 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
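			/* Use the 6 most significant CRC bits, bit-reversed, as the
			   index into the 64-entry multicast hash filter. */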
1524 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1525 if (crc & 0x80000000) index |= 1 << bit;
1526 mc_filter[index/16] |= (1 << (index % 16));
1527 }
1528 rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
1529 } else {
1530 iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
1531 return;
1532 }
1533 if (np->mii_if.full_duplex && np->flowctrl)
1534 mc_filter[3] |= 0x0200;
1535
1536 for (i = 0; i < 4; i++)
1537 iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
1538 iowrite8(rx_mode, ioaddr + RxMode);
1539}
1540
1541static int __set_mac_addr(struct net_device *dev)
1542{
1543 struct netdev_private *np = netdev_priv(dev);
1544 u16 addr16;
1545
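	/* Per the LK1.08 note above: StationAddr only accepts word writes,
	   so emit the 6-byte address as three 16-bit words. */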
1546 addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8));
1547 iowrite16(addr16, np->base + StationAddr);
1548 addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8));
1549 iowrite16(addr16, np->base + StationAddr+2);
1550 addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8));
1551 iowrite16(addr16, np->base + StationAddr+4);
1552 return 0;
1553}
1554
1555static int check_if_running(struct net_device *dev)
1556{
1557 if (!netif_running(dev))
1558 return -EINVAL;
1559 return 0;
1560}
1561
1562static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1563{
1564 struct netdev_private *np = netdev_priv(dev);
1565 strcpy(info->driver, DRV_NAME);
1566 strcpy(info->version, DRV_VERSION);
1567 strcpy(info->bus_info, pci_name(np->pci_dev));
1568}
1569
1570static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1571{
1572 struct netdev_private *np = netdev_priv(dev);
1573 spin_lock_irq(&np->lock);
1574 mii_ethtool_gset(&np->mii_if, ecmd);
1575 spin_unlock_irq(&np->lock);
1576 return 0;
1577}
1578
1579static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1580{
1581 struct netdev_private *np = netdev_priv(dev);
1582 int res;
1583 spin_lock_irq(&np->lock);
1584 res = mii_ethtool_sset(&np->mii_if, ecmd);
1585 spin_unlock_irq(&np->lock);
1586 return res;
1587}
1588
1589static int nway_reset(struct net_device *dev)
1590{
1591 struct netdev_private *np = netdev_priv(dev);
1592 return mii_nway_restart(&np->mii_if);
1593}
1594
1595static u32 get_link(struct net_device *dev)
1596{
1597 struct netdev_private *np = netdev_priv(dev);
1598 return mii_link_ok(&np->mii_if);
1599}
1600
1601static u32 get_msglevel(struct net_device *dev)
1602{
1603 struct netdev_private *np = netdev_priv(dev);
1604 return np->msg_enable;
1605}
1606
1607static void set_msglevel(struct net_device *dev, u32 val)
1608{
1609 struct netdev_private *np = netdev_priv(dev);
1610 np->msg_enable = val;
1611}
1612
1613static struct ethtool_ops ethtool_ops = {
1614 .begin = check_if_running,
1615 .get_drvinfo = get_drvinfo,
1616 .get_settings = get_settings,
1617 .set_settings = set_settings,
1618 .nway_reset = nway_reset,
1619 .get_link = get_link,
1620 .get_msglevel = get_msglevel,
1621 .set_msglevel = set_msglevel,
1622};
1623
1624static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1625{
1626 struct netdev_private *np = netdev_priv(dev);
1627 void __iomem *ioaddr = np->base;
1628 int rc;
1629 int i;
1630
1631 if (!netif_running(dev))
1632 return -EINVAL;
1633
1634 spin_lock_irq(&np->lock);
1635 rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL);
1636 spin_unlock_irq(&np->lock);
1637 switch (cmd) {
1638 case SIOCDEVPRIVATE:
1639 for (i=0; i<TX_RING_SIZE; i++) {
1640 printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i,
1641 (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)),
1642 le32_to_cpu(np->tx_ring[i].next_desc),
1643 le32_to_cpu(np->tx_ring[i].status),
1644 (le32_to_cpu(np->tx_ring[i].status) >> 2)
1645 & 0xff,
1646 le32_to_cpu(np->tx_ring[i].frag[0].addr),
1647 le32_to_cpu(np->tx_ring[i].frag[0].length));
1648 }
1649 printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n",
1650 ioread32(np->base + TxListPtr),
1651 netif_queue_stopped(dev));
1652 printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n",
1653 np->cur_tx, np->cur_tx % TX_RING_SIZE,
1654 np->dirty_tx, np->dirty_tx % TX_RING_SIZE);
1655 printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx);
1656 printk(KERN_DEBUG "cur_task=%d\n", np->cur_task);
1657 printk(KERN_DEBUG "TxStatus=%04x\n", ioread16(ioaddr + TxStatus));
1658 return 0;
1659 }
1660
1661
1662 return rc;
1663}
1664
1665static int netdev_close(struct net_device *dev)
1666{
1667 struct netdev_private *np = netdev_priv(dev);
1668 void __iomem *ioaddr = np->base;
1669 struct sk_buff *skb;
1670 int i;
1671
1672 netif_stop_queue(dev);
1673
1674 if (netif_msg_ifdown(np)) {
1675 printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
1676 "Rx %4.4x Int %2.2x.\n",
1677 dev->name, ioread8(ioaddr + TxStatus),
1678 ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus));
1679 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
1680 dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
1681 }
1682
1683 /* Disable interrupts by clearing the interrupt mask. */
1684 iowrite16(0x0000, ioaddr + IntrEnable);
1685
1686 /* Stop the chip's Tx and Rx processes. */
1687 iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
1688
1689 /* Wait and kill tasklet */
1690 tasklet_kill(&np->rx_tasklet);
1691 tasklet_kill(&np->tx_tasklet);
1692
1693#ifdef __i386__
1694 if (netif_msg_hw(np)) {
1695 printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
1696 (int)(np->tx_ring_dma));
1697 for (i = 0; i < TX_RING_SIZE; i++)
1698 printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
1699 i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
1700 np->tx_ring[i].frag[0].length);
1701 printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
1702 (int)(np->rx_ring_dma));
1703 for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
1704 printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
1705 i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
1706 np->rx_ring[i].frag[0].length);
1707 }
1708 }
1709#endif /* __i386__ debugging only */
1710
1711 free_irq(dev->irq, dev);
1712
1713 del_timer_sync(&np->timer);
1714
1715 /* Free all the skbuffs in the Rx queue. */
1716 for (i = 0; i < RX_RING_SIZE; i++) {
1717 np->rx_ring[i].status = 0;
1718 np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
1719 skb = np->rx_skbuff[i];
1720 if (skb) {
1721 pci_unmap_single(np->pci_dev,
1722 np->rx_ring[i].frag[0].addr, np->rx_buf_sz,
1723 PCI_DMA_FROMDEVICE);
1724 dev_kfree_skb(skb);
1725 np->rx_skbuff[i] = NULL;
1726 }
1727 }
1728 for (i = 0; i < TX_RING_SIZE; i++) {
1729 skb = np->tx_skbuff[i];
1730 if (skb) {
1731 pci_unmap_single(np->pci_dev,
1732 np->tx_ring[i].frag[0].addr, skb->len,
1733 PCI_DMA_TODEVICE);
1734 dev_kfree_skb(skb);
1735 np->tx_skbuff[i] = NULL;
1736 }
1737 }
1738
1739 return 0;
1740}
1741
1742static void __devexit sundance_remove1 (struct pci_dev *pdev)
1743{
1744 struct net_device *dev = pci_get_drvdata(pdev);
1745
1746 if (dev) {
1747 struct netdev_private *np = netdev_priv(dev);
1748
1749 unregister_netdev(dev);
1750 pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring,
1751 np->rx_ring_dma);
1752 pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring,
1753 np->tx_ring_dma);
1754 pci_iounmap(pdev, np->base);
1755 pci_release_regions(pdev);
1756 free_netdev(dev);
1757 pci_set_drvdata(pdev, NULL);
1758 }
1759}
1760
1761static struct pci_driver sundance_driver = {
1762 .name = DRV_NAME,
1763 .id_table = sundance_pci_tbl,
1764 .probe = sundance_probe1,
1765 .remove = __devexit_p(sundance_remove1),
1766};
1767
1768static int __init sundance_init(void)
1769{
1770/* when a module, this is printed whether or not devices are found in probe */
1771#ifdef MODULE
1772 printk(version);
1773#endif
1774 return pci_module_init(&sundance_driver);
1775}
1776
1777static void __exit sundance_exit(void)
1778{
1779 pci_unregister_driver(&sundance_driver);
1780}
1781
1782module_init(sundance_init);
1783module_exit(sundance_exit);
1784
1785