path: root/drivers/net/starfire.c
author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/starfire.c
tag	Linux-2.6.12-rc2 (v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/starfire.c')
-rw-r--r--	drivers/net/starfire.c	2218
1 file changed, 2218 insertions(+), 0 deletions(-)
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
new file mode 100644
index 000000000000..236bdd3f6ba0
--- /dev/null
+++ b/drivers/net/starfire.c
@@ -0,0 +1,2218 @@
/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
/*
	Written 1998-2000 by Donald Becker.

	Current maintainer is Ion Badulescu <ionut@cs.columbia.edu>. Please
	send all bug reports to me, and not to Donald Becker, as this code
	has been heavily modified from Donald's original version.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	The information below comes from Donald Becker's original driver:

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support and updates available at
	http://www.scyld.com/network/starfire.html

	-----------------------------------------------------------

	Linux kernel-specific changes:

	LK1.1.1 (jgarzik):
	- Use PCI driver interface
	- Fix MOD_xxx races
	- softnet fixups

	LK1.1.2 (jgarzik):
	- Merge Becker version 0.15

	LK1.1.3 (Andrew Morton)
	- Timer cleanups

	LK1.1.4 (jgarzik):
	- Merge Becker version 1.03

	LK1.2.1 (Ion Badulescu <ionut@cs.columbia.edu>)
	- Support hardware Rx/Tx checksumming
	- Use the GFP firmware taken from Adaptec's Netware driver

	LK1.2.2 (Ion Badulescu)
	- Backported to 2.2.x

	LK1.2.3 (Ion Badulescu)
	- Fix the flaky mdio interface
	- More compat clean-ups

	LK1.2.4 (Ion Badulescu)
	- More 2.2.x initialization fixes

	LK1.2.5 (Ion Badulescu)
	- Several fixes from Manfred Spraul

	LK1.2.6 (Ion Badulescu)
	- Fixed ifup/ifdown/ifup problem in 2.4.x

	LK1.2.7 (Ion Badulescu)
	- Removed unused code
	- Made more functions static and __init

	LK1.2.8 (Ion Badulescu)
	- Quell bogus error messages, inform about the Tx threshold
	- Removed #ifdef CONFIG_PCI, this driver is PCI only

	LK1.2.9 (Ion Badulescu)
	- Merged Jeff Garzik's changes from 2.4.4-pre5
	- Added 2.2.x compatibility stuff required by the above changes

	LK1.2.9a (Ion Badulescu)
	- More updates from Jeff Garzik

	LK1.3.0 (Ion Badulescu)
	- Merged zerocopy support

	LK1.3.1 (Ion Badulescu)
	- Added ethtool support
	- Added GPIO (media change) interrupt support

	LK1.3.2 (Ion Badulescu)
	- Fixed 2.2.x compatibility issues introduced in 1.3.1
	- Fixed ethtool ioctl returning uninitialized memory

	LK1.3.3 (Ion Badulescu)
	- Initialize the TxMode register properly
	- Don't dereference dev->priv after freeing it

	LK1.3.4 (Ion Badulescu)
	- Fixed initialization timing problems
	- Fixed interrupt mask definitions

	LK1.3.5 (jgarzik)
	- ethtool NWAY_RST, GLINK, [GS]MSGLVL support

	LK1.3.6:
	- Sparc64 support and fixes (Ion Badulescu)
	- Better stats and error handling (Ion Badulescu)
	- Use new pci_set_mwi() PCI API function (jgarzik)

	LK1.3.7 (Ion Badulescu)
	- minimal implementation of tx_timeout()
	- correctly shutdown the Rx/Tx engines in netdev_close()
	- added calls to netif_carrier_on/off
	  (patch from Stefan Rompf <srompf@isg.de>)
	- VLAN support

	LK1.3.8 (Ion Badulescu)
	- adjust DMA burst size on sparc64
	- 64-bit support
	- reworked zerocopy support for 64-bit buffers
	- working and usable interrupt mitigation/latency
	- reduced Tx interrupt frequency for lower interrupt overhead

	LK1.3.9 (Ion Badulescu)
	- bugfix for mcast filter
	- enable the right kind of Tx interrupts (TxDMADone, not TxDone)

	LK1.4.0 (Ion Badulescu)
	- NAPI support

	LK1.4.1 (Ion Badulescu)
	- flush PCI posting buffers after disabling Rx interrupts
	- put the chip to a D3 slumber on driver unload
	- added config option to enable/disable NAPI

TODO: bugfixes (no bugs known as of right now)
*/

#define DRV_NAME	"starfire"
#define DRV_VERSION	"1.03+LK1.4.1"
#define DRV_RELDATE	"February 10, 2002"

#include <linux/config.h>
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <asm/processor.h>		/* Processor type for cache alignment. */
#include <asm/uaccess.h>
#include <asm/io.h>

/*
 * Adaptec's license for their drivers (which is where I got the
 * firmware files) does not allow one to redistribute them. Thus, we can't
 * include the firmware with this driver.
 *
 * However, should a legal-to-distribute firmware become available,
 * the driver developer would need only to obtain the firmware in the
 * form of a C header file.
 * Once that's done, the #undef below must be changed into a #define
 * for this driver to really use the firmware. Note that Rx/Tx
 * hardware TCP checksumming is not possible without the firmware.
 *
 * WANTED: legal firmware to include with this GPL'd driver.
 */
#undef HAS_FIRMWARE
/*
 * The current frame processor firmware fails to checksum a fragment
 * of length 1. If and when this is fixed, the #define below can be removed.
 */
#define HAS_BROKEN_FIRMWARE
/*
 * Define this if using the driver with the zero-copy patch
 */
#if defined(HAS_FIRMWARE) && defined(MAX_SKB_FRAGS)
#define ZEROCOPY
#endif

#ifdef HAS_FIRMWARE
#include "starfire_firmware.h"
#endif /* HAS_FIRMWARE */

#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define VLAN_SUPPORT
#endif

#ifndef CONFIG_ADAPTEC_STARFIRE_NAPI
#undef HAVE_NETDEV_POLL
#endif

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

/* Used for tuning interrupt latency vs. overhead. */
static int intr_latency;
static int small_frames;

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
static int mtu;
/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   The Starfire has a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 512;
/* Whether to do TCP/UDP checksums in hardware */
#ifdef HAS_FIRMWARE
static int enable_hw_cksum = 1;
#else
static int enable_hw_cksum = 0;
#endif

#define PKT_BUF_SZ	1536		/* Size of each temporary Rx buffer.*/
/*
 * Set the copy breakpoint for the copy-only-tiny-frames scheme.
 * Setting to > 1518 effectively disables this feature.
 *
 * NOTE:
 * The ia64 doesn't allow for unaligned loads even of integers being
 * misaligned on a 2 byte boundary. Thus always force copying of
 * packets as the starfire doesn't allow for misaligned DMAs ;-(
 * 23/10/2000 - Jes
 *
 * The Alpha and the Sparc don't like unaligned loads, either. On Sparc64,
 * at least, having unaligned frames leads to a rather serious performance
 * penalty. -Ion
 */
#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__)
static int rx_copybreak = PKT_BUF_SZ;
#else
static int rx_copybreak /* = 0 */;
#endif
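/*
 * Editor's worked example (not in the original source): frames shorter than
 * rx_copybreak are copied into a fresh, IP-aligned skbuff in __netdev_rx()
 * and the original ring buffer is reused; longer frames are passed up in
 * place. Loading the module with, e.g., "modprobe starfire rx_copybreak=1519"
 * (a hypothetical invocation) would therefore copy every legal-length
 * Ethernet frame -- the same behavior the alignment-sensitive architectures
 * above hardcode via rx_copybreak = PKT_BUF_SZ.
 */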

/* PCI DMA burst size -- on sparc64 we want to force it to 64 bytes, on the others the default of 128 is fine. */
#ifdef __sparc__
#define DMA_BURST_SIZE 64
#else
#define DMA_BURST_SIZE 128
#endif

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
   The media type is usually passed in 'options[]'.
   These variables are deprecated, use ethtool instead. -Ion
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {0, };
static int full_duplex[MAX_UNITS] = {0, };

/* Operational parameters that are set at compile time. */

/* The "native" ring sizes are either 256 or 2048.
   However in some modes a descriptor may be marked to wrap the ring earlier.
*/
#define RX_RING_SIZE	256
#define TX_RING_SIZE	32
/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
#define DONE_Q_SIZE	1024
/* All queues must be aligned on a 256-byte boundary */
#define QUEUE_ALIGN	256

#if RX_RING_SIZE > 256
#define RX_Q_ENTRIES Rx2048QEntries
#else
#define RX_Q_ENTRIES Rx256QEntries
#endif

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2 * HZ)

/*
 * This SUCKS.
 * We need a much better method to determine if dma_addr_t is 64-bit.
 */
#if (defined(__i386__) && defined(CONFIG_HIGHMEM) && (LINUX_VERSION_CODE > 0x20500 || defined(CONFIG_HIGHMEM64G))) || defined(__x86_64__) || defined (__ia64__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR))
/* 64-bit dma_addr_t */
#define ADDR_64BITS	/* This chip uses 64 bit addresses. */
#define cpu_to_dma(x) cpu_to_le64(x)
#define dma_to_cpu(x) le64_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr64bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr64bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr64bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr64bit
#define RX_DESC_ADDR_SIZE RxDescAddr64bit
#else  /* 32-bit dma_addr_t */
#define cpu_to_dma(x) cpu_to_le32(x)
#define dma_to_cpu(x) le32_to_cpu(x)
#define RX_DESC_Q_ADDR_SIZE RxDescQAddr32bit
#define TX_DESC_Q_ADDR_SIZE TxDescQAddr32bit
#define RX_COMPL_Q_ADDR_SIZE RxComplQAddr32bit
#define TX_COMPL_Q_ADDR_SIZE TxComplQAddr32bit
#define RX_DESC_ADDR_SIZE RxDescAddr32bit
#endif
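/*
 * Editor's note (not in the original source): the macros above are what the
 * descriptor-filling code uses, e.g.
 *	np->rx_ring[i].rxaddr = cpu_to_dma(mapping | RxDescValid);
 * so on a 64-bit dma_addr_t build the ring entries become little-endian
 * 64-bit values, and the matching *Addr64bit flags are programmed into the
 * queue-control registers in netdev_open().
 */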

#ifdef MAX_SKB_FRAGS
#define skb_first_frag_len(skb)	skb_headlen(skb)
#define skb_num_frags(skb) (skb_shinfo(skb)->nr_frags + 1)
#else  /* not MAX_SKB_FRAGS */
#define skb_first_frag_len(skb)	(skb->len)
#define skb_num_frags(skb) 1
#endif /* not MAX_SKB_FRAGS */

/* 2.2.x compatibility code */
#if LINUX_VERSION_CODE < 0x20300

#include "starfire-kcomp22.h"

#else  /* LINUX_VERSION_CODE > 0x20300 */

#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>

#include <linux/if_vlan.h>

/* Wrapped in do/while(0) so the multi-statement macro expands safely
   as a single statement, e.g. in an unbraced if/else body. */
#define init_tx_timer(dev, func, timeout) \
	do { \
		dev->tx_timeout = func; \
		dev->watchdog_timeo = timeout; \
	} while (0)
#define kick_tx_timer(dev, func, timeout)

#define netif_start_if(dev)
#define netif_stop_if(dev)

#define PCI_SLOT_NAME(pci_dev)	pci_name(pci_dev)

#endif /* LINUX_VERSION_CODE > 0x20300 */

#ifdef HAVE_NETDEV_POLL
/* Also wrapped in do/while(0) for safe expansion as a single statement. */
#define init_poll(dev) \
	do { \
		dev->poll = &netdev_poll; \
		dev->weight = max_interrupt_work; \
	} while (0)
#define netdev_rx(dev, ioaddr) \
do { \
	u32 intr_enable; \
	if (netif_rx_schedule_prep(dev)) { \
		__netif_rx_schedule(dev); \
		intr_enable = readl(ioaddr + IntrEnable); \
		intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
		writel(intr_enable, ioaddr + IntrEnable); \
		readl(ioaddr + IntrEnable); /* flush PCI posting buffers */ \
	} else { \
		/* Paranoia check */ \
		intr_enable = readl(ioaddr + IntrEnable); \
		if (intr_enable & (IntrRxDone | IntrRxEmpty)) { \
			printk("%s: interrupt while in polling mode!\n", dev->name); \
			intr_enable &= ~(IntrRxDone | IntrRxEmpty); \
			writel(intr_enable, ioaddr + IntrEnable); \
		} \
	} \
} while (0)
#define netdev_receive_skb(skb) netif_receive_skb(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_receive_skb(skb, vlgrp, vlid)
static int netdev_poll(struct net_device *dev, int *budget);
#else  /* not HAVE_NETDEV_POLL */
#define init_poll(dev)
#define netdev_receive_skb(skb) netif_rx(skb)
#define vlan_netdev_receive_skb(skb, vlgrp, vlid) vlan_hwaccel_rx(skb, vlgrp, vlid)
#define netdev_rx(dev, ioaddr) \
do { \
	int quota = np->dirty_rx + RX_RING_SIZE - np->cur_rx; \
	__netdev_rx(dev, &quota); \
} while (0)
#endif /* not HAVE_NETDEV_POLL */
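/*
 * Editor's summary (not in the original source): with NAPI enabled,
 * netdev_rx() masks IntrRxDone/IntrRxEmpty and schedules netdev_poll(),
 * which drains the Rx completion queue under a budget before re-enabling
 * those interrupts; without NAPI, netdev_rx() simply calls __netdev_rx()
 * from the interrupt handler with a quota equal to the number of ring
 * entries the driver can currently refill.
 */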
/* end of compatibility code */


/* These identify the driver base version and may not be removed. */
static char version[] __devinitdata =
KERN_INFO "starfire.c:v1.03 7/26/2000 Written by Donald Becker <becker@scyld.com>\n"
KERN_INFO " (unofficial 2.2/2.4 kernel port, version " DRV_VERSION ", " DRV_RELDATE ")\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
MODULE_LICENSE("GPL");

module_param(max_interrupt_work, int, 0);
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(intr_latency, int, 0);
module_param(small_frames, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
module_param(enable_hw_cksum, int, 0);
MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
MODULE_PARM_DESC(mtu, "MTU (all boards)");
MODULE_PARM_DESC(debug, "Debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");

/*
			Theory of Operation

I. Board Compatibility

This driver is for the Adaptec 6915 "Starfire" 64 bit PCI Ethernet adapter.

II. Board-specific settings

III. Driver operation

IIIa. Ring buffers

The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
ring sizes are set fixed by the hardware, but may optionally be wrapped
earlier by the END bit in the descriptor.
This driver uses that hardware queue size for the Rx ring, where a large
number of entries has no ill effect beyond increasing the potential backlog.
The Tx ring is wrapped with the END bit, since a large hardware Tx queue
disables the queue layer priority ordering and we have no mechanism to
utilize the hardware two-level priority queue. When modifying the
RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
levels.

IIIb/c. Transmit/Receive Structure

See the Adaptec manual for the many possible structures, and options for
each structure. There are far too many to document all of them here.

For transmit this driver uses type 0/1 transmit descriptors (depending
on the 32/64 bitness of the architecture), and relies on automatic
minimum-length padding. It does not use the completion queue
consumer index, but instead checks for non-zero status entries.

For receive this driver uses type 0/1/2/3 receive descriptors. The driver
allocates full frame size skbuffs for the Rx ring buffers, so all frames
should fit in a single descriptor. The driver does not use the completion
queue consumer index, but instead checks for non-zero status entries.

When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
is allocated and the frame is copied to the new skbuff. When the incoming
frame is larger, the skbuff is passed directly up the protocol stack.
Buffers consumed this way are replaced by newly allocated skbuffs in a later
phase of receive.

A notable aspect of operation is that unaligned buffers are not permitted by
the Starfire hardware. Thus the IP header at offset 14 in an ethernet frame
isn't longword aligned, which may cause problems on some machines,
e.g. Alphas and IA64. For these architectures, the driver is forced to copy
the frame into a new skbuff unconditionally. Copied frames are put into the
skbuff at an offset of "+2", thus 16-byte aligning the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and the netif_queue
status. If the number of free Tx slots in the ring falls below a certain number
(currently hardcoded to 4), it signals the upper layer to stop the queue.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the netif_queue is stopped and the
number of free Tx slots is above the threshold, it signals the upper layer to
restart the queue.

IV. Notes

IVb. References

The Adaptec Starfire manuals, available only from Adaptec.
http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html

IVc. Errata

- StopOnPerr is broken, don't enable
- Hardware ethernet padding exposes random data, perform software padding
  instead (unverified -- works correctly for all the hardware I have)

*/



enum chip_capability_flags {CanHaveMII=1, };

enum chipset {
	CH_6915 = 0,
};

static struct pci_device_id starfire_pci_tbl[] = {
	{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, starfire_pci_tbl);

/* A chip capabilities table, matching the CH_xxx entries in xxx_pci_tbl[] above. */
static struct chip_info {
	const char *name;
	int drv_flags;
} netdrv_tbl[] __devinitdata = {
	{ "Adaptec Starfire 6915", CanHaveMII },
};


/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically.
*/
enum register_offsets {
	PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
	IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
	MIICtrl=0x52000, TxStationAddr=0x50120, EEPROMCtrl=0x51000,
	GPIOCtrl=0x5008C, TxDescCtrl=0x50090,
	TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
	TxRingHiAddr=0x5009C, /* 64 bit address extension. */
	TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
	TxThreshold=0x500B0,
	CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
	RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
	CompletionQConsumerIdx=0x500C4, RxDMACtrl=0x500D0,
	RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
	RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
	TxMode=0x55000, VlanType=0x55064,
	PerfFilterTable=0x56000, HashTable=0x56100,
	TxGfpMem=0x58000, RxGfpMem=0x5a000,
};

/*
 * Bits in the interrupt status/mask registers.
 * Warning: setting Intr[Ab]NormalSummary in the IntrEnable register
 * enables all the interrupt sources that are or'ed into those status bits.
 */
enum intr_status_bits {
	IntrLinkChange=0xf0000000, IntrStatsMax=0x08000000,
	IntrAbnormalSummary=0x02000000, IntrGeneralTimer=0x01000000,
	IntrSoftware=0x800000, IntrRxComplQ1Low=0x400000,
	IntrTxComplQLow=0x200000, IntrPCI=0x100000,
	IntrDMAErr=0x080000, IntrTxDataLow=0x040000,
	IntrRxComplQ2Low=0x020000, IntrRxDescQ1Low=0x010000,
	IntrNormalSummary=0x8000, IntrTxDone=0x4000,
	IntrTxDMADone=0x2000, IntrTxEmpty=0x1000,
	IntrEarlyRxQ2=0x0800, IntrEarlyRxQ1=0x0400,
	IntrRxQ2Done=0x0200, IntrRxQ1Done=0x0100,
	IntrRxGFPDead=0x80, IntrRxDescQ2Low=0x40,
	IntrNoTxCsum=0x20, IntrTxBadID=0x10,
	IntrHiPriTxBadID=0x08, IntrRxGfp=0x04,
	IntrTxGfp=0x02, IntrPCIPad=0x01,
	/* not quite bits */
	IntrRxDone=IntrRxQ2Done | IntrRxQ1Done,
	IntrRxEmpty=IntrRxDescQ1Low | IntrRxDescQ2Low,
	IntrNormalMask=0xff00, IntrAbnormalMask=0x3ff00fe,
};
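/*
 * Editor's note (not in the original source): the "not quite bits" entries
 * are composite masks, e.g. IntrRxDone = IntrRxQ2Done | IntrRxQ1Done =
 * 0x0300, so a single test against intr_status catches completions on
 * either Rx queue; per the warning above, unmasking a summary bit also
 * implicitly unmasks every source or'ed into it.
 */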

/* Bits in the RxFilterMode register. */
enum rx_mode_bits {
	AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
	AcceptMulticast=0x10, PerfectFilter=0x40, HashFilter=0x30,
	PerfectFilterVlan=0x80, MinVLANPrio=0xE000, VlanMode=0x0200,
	WakeupOnGFP=0x0800,
};

/* Bits in the TxMode register */
enum tx_mode_bits {
	MiiSoftReset=0x8000, MIILoopback=0x4000,
	TxFlowEnable=0x0800, RxFlowEnable=0x0400,
	PadEnable=0x04, FullDuplex=0x02, HugeFrame=0x01,
};

/* Bits in the TxDescCtrl register. */
enum tx_ctrl_bits {
	TxDescSpaceUnlim=0x00, TxDescSpace32=0x10, TxDescSpace64=0x20,
	TxDescSpace128=0x30, TxDescSpace256=0x40,
	TxDescType0=0x00, TxDescType1=0x01, TxDescType2=0x02,
	TxDescType3=0x03, TxDescType4=0x04,
	TxNoDMACompletion=0x08,
	TxDescQAddr64bit=0x80, TxDescQAddr32bit=0,
	TxHiPriFIFOThreshShift=24, TxPadLenShift=16,
	TxDMABurstSizeShift=8,
};

/* Bits in the RxDescQCtrl register. */
enum rx_ctrl_bits {
	RxBufferLenShift=16, RxMinDescrThreshShift=0,
	RxPrefetchMode=0x8000, RxVariableQ=0x2000,
	Rx2048QEntries=0x4000, Rx256QEntries=0,
	RxDescAddr64bit=0x1000, RxDescAddr32bit=0,
	RxDescQAddr64bit=0x0100, RxDescQAddr32bit=0,
	RxDescSpace4=0x000, RxDescSpace8=0x100,
	RxDescSpace16=0x200, RxDescSpace32=0x300,
	RxDescSpace64=0x400, RxDescSpace128=0x500,
	RxConsumerWrEn=0x80,
};

/* Bits in the RxDMACtrl register. */
enum rx_dmactrl_bits {
	RxReportBadFrames=0x80000000, RxDMAShortFrames=0x40000000,
	RxDMABadFrames=0x20000000, RxDMACrcErrorFrames=0x10000000,
	RxDMAControlFrame=0x08000000, RxDMAPauseFrame=0x04000000,
	RxChecksumIgnore=0, RxChecksumRejectTCPUDP=0x02000000,
	RxChecksumRejectTCPOnly=0x01000000,
	RxCompletionQ2Enable=0x800000,
	RxDMAQ2Disable=0, RxDMAQ2FPOnly=0x100000,
	RxDMAQ2SmallPkt=0x200000, RxDMAQ2HighPrio=0x300000,
	RxDMAQ2NonIP=0x400000,
	RxUseBackupQueue=0x080000, RxDMACRC=0x040000,
	RxEarlyIntThreshShift=12, RxHighPrioThreshShift=8,
	RxBurstSizeShift=0,
};

/* Bits in the RxCompletionAddr register */
enum rx_compl_bits {
	RxComplQAddr64bit=0x80, RxComplQAddr32bit=0,
	RxComplProducerWrEn=0x40,
	RxComplType0=0x00, RxComplType1=0x10,
	RxComplType2=0x20, RxComplType3=0x30,
	RxComplThreshShift=0,
};

/* Bits in the TxCompletionAddr register */
enum tx_compl_bits {
	TxComplQAddr64bit=0x80, TxComplQAddr32bit=0,
	TxComplProducerWrEn=0x40,
	TxComplIntrStatus=0x20,
	CommonQueueMode=0x10,
	TxComplThreshShift=0,
};

/* Bits in the GenCtrl register */
enum gen_ctrl_bits {
	RxEnable=0x05, TxEnable=0x0a,
	RxGFPEnable=0x10, TxGFPEnable=0x20,
};

/* Bits in the IntrTimerCtrl register */
enum intr_ctrl_bits {
	Timer10X=0x800, EnableIntrMasking=0x60, SmallFrameBypass=0x100,
	SmallFrame64=0, SmallFrame128=0x200, SmallFrame256=0x400, SmallFrame512=0x600,
	IntrLatencyMask=0x1f,
};

/* The Rx and Tx buffer descriptors. */
struct starfire_rx_desc {
	dma_addr_t rxaddr;
};
enum rx_desc_bits {
	RxDescValid=1, RxDescEndRing=2,
};

/* Completion queue entry. */
struct short_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
};
struct basic_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 vlanid;
	u16 status2;
};
struct csum_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 csum;			/* Partial checksum */
	u16 status2;
};
struct full_rx_done_desc {
	u32 status;			/* Low 16 bits is length. */
	u16 status3;
	u16 status2;
	u16 vlanid;
	u16 csum;			/* partial checksum */
	u32 timestamp;
};
/* XXX: this is ugly and I'm not sure it's worth the trouble -Ion */
#ifdef HAS_FIRMWARE
#ifdef VLAN_SUPPORT
typedef struct full_rx_done_desc rx_done_desc;
#define RxComplType RxComplType3
#else  /* not VLAN_SUPPORT */
typedef struct csum_rx_done_desc rx_done_desc;
#define RxComplType RxComplType2
#endif /* not VLAN_SUPPORT */
#else  /* not HAS_FIRMWARE */
#ifdef VLAN_SUPPORT
typedef struct basic_rx_done_desc rx_done_desc;
#define RxComplType RxComplType1
#else  /* not VLAN_SUPPORT */
typedef struct short_rx_done_desc rx_done_desc;
#define RxComplType RxComplType0
#endif /* not VLAN_SUPPORT */
#endif /* not HAS_FIRMWARE */

enum rx_done_bits {
	RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
};

/* Type 1 Tx descriptor. */
struct starfire_tx_desc_1 {
	u32 status;			/* Upper bits are status, lower 16 length. */
	u32 addr;
};

/* Type 2 Tx descriptor. */
struct starfire_tx_desc_2 {
	u32 status;			/* Upper bits are status, lower 16 length. */
	u32 reserved;
	u64 addr;
};

#ifdef ADDR_64BITS
typedef struct starfire_tx_desc_2 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType2
#else  /* not ADDR_64BITS */
typedef struct starfire_tx_desc_1 starfire_tx_desc;
#define TX_DESC_TYPE TxDescType1
#endif /* not ADDR_64BITS */
#define TX_DESC_SPACING	TxDescSpaceUnlim
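/*
 * Editor's note (not in the original source): the two layouts differ only
 * in address width -- type 1 is an 8-byte descriptor carrying a 32-bit
 * buffer address, type 2 a 16-byte descriptor carrying a 64-bit one -- so
 * TX_DESC_TYPE simply tracks the ADDR_64BITS decision made above.
 */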

enum tx_desc_bits {
	TxDescID=0xB0000000,
	TxCRCEn=0x01000000, TxDescIntr=0x08000000,
	TxRingWrap=0x04000000, TxCalTCP=0x02000000,
};
struct tx_done_desc {
	u32 status;			/* timestamp, index. */
#if 0
	u32 intrstatus;			/* interrupt status */
#endif
};

struct rx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
};
struct tx_ring_info {
	struct sk_buff *skb;
	dma_addr_t mapping;
	unsigned int used_slots;
};

#define PHY_CNT		2
struct netdev_private {
	/* Descriptor rings first for alignment. */
	struct starfire_rx_desc *rx_ring;
	starfire_tx_desc *tx_ring;
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	/* The addresses of rx/tx-in-place skbuffs. */
	struct rx_ring_info rx_info[RX_RING_SIZE];
	struct tx_ring_info tx_info[TX_RING_SIZE];
	/* Pointers to completion queues (full pages). */
	rx_done_desc *rx_done_q;
	dma_addr_t rx_done_q_dma;
	unsigned int rx_done;
	struct tx_done_desc *tx_done_q;
	dma_addr_t tx_done_q_dma;
	unsigned int tx_done;
	struct net_device_stats stats;
	struct pci_dev *pci_dev;
#ifdef VLAN_SUPPORT
	struct vlan_group *vlgrp;
#endif
	void *queue_mem;
	dma_addr_t queue_mem_dma;
	size_t queue_mem_size;

	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int cur_tx, dirty_tx, reap_tx;
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	/* These values keep track of the transceiver/media in use. */
	int speed100;			/* Set if speed == 100MBit. */
	u32 tx_mode;
	u32 intr_timer_ctrl;
	u8 tx_threshold;
	/* MII transceiver section. */
	struct mii_if_info mii_if;	/* MII lib hooks/info */
	int phy_cnt;			/* MII device addresses. */
	unsigned char phys[PHY_CNT];	/* MII device addresses. */
	void __iomem *base;
};


static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int __netdev_rx(struct net_device *dev, int *quota);
static void refill_rx_ring(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
static void netdev_media_change(struct net_device *dev);
static struct ethtool_ops ethtool_ops;


#ifdef VLAN_SUPPORT
static void netdev_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 2)
		printk("%s: Setting vlgrp to %p\n", dev->name, grp);
	np->vlgrp = grp;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: Adding vlanid %d to vlan filter\n", dev->name, vid);
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}

static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock(&np->lock);
	if (debug > 1)
		printk("%s: removing vlanid %d from vlan filter\n", dev->name, vid);
	if (np->vlgrp)
		np->vlgrp->vlan_devices[vid] = NULL;
	set_rx_mode(dev);
	spin_unlock(&np->lock);
}
#endif /* VLAN_SUPPORT */


static int __devinit starfire_init_one(struct pci_dev *pdev,
				       const struct pci_device_id *ent)
{
	struct netdev_private *np;
	int i, irq, option, chip_idx = ent->driver_data;
	struct net_device *dev;
	static int card_idx = -1;
	long ioaddr;
	void __iomem *base;
	int drv_flags, io_size;
	int boguscnt;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	card_idx++;

	if (pci_enable_device (pdev))
		return -EIO;

	ioaddr = pci_resource_start(pdev, 0);
	io_size = pci_resource_len(pdev, 0);
	if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
		printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(*np));
	if (!dev) {
		printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", card_idx);
		return -ENOMEM;
	}
	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	irq = pdev->irq;

	if (pci_request_regions (pdev, DRV_NAME)) {
		printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx);
		goto err_out_free_netdev;
	}

	/* ioremap is broken in Linux-2.2.x/sparc64 */
	base = ioremap(ioaddr, io_size);
	if (!base) {
		printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n",
		       card_idx, io_size, ioaddr);
		goto err_out_free_res;
	}

	pci_set_master(pdev);

	/* enable MWI -- it vastly improves Rx performance on sparc64 */
	pci_set_mwi(pdev);

#ifdef MAX_SKB_FRAGS
	dev->features |= NETIF_F_SG;
#endif /* MAX_SKB_FRAGS */
#ifdef ZEROCOPY
	/* Starfire can do TCP/UDP checksumming */
	if (enable_hw_cksum)
		dev->features |= NETIF_F_IP_CSUM;
#endif /* ZEROCOPY */
#ifdef VLAN_SUPPORT
	dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	dev->vlan_rx_register = netdev_vlan_rx_register;
	dev->vlan_rx_add_vid = netdev_vlan_rx_add_vid;
	dev->vlan_rx_kill_vid = netdev_vlan_rx_kill_vid;
#endif /* VLAN_SUPPORT */
#ifdef ADDR_64BITS
	dev->features |= NETIF_F_HIGHDMA;
#endif /* ADDR_64BITS */

	/* Serial EEPROM reads are hidden by the hardware. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);

#if ! defined(final_version) /* Dump the EEPROM contents during development. */
	if (debug > 4)
		for (i = 0; i < 0x20; i++)
			printk("%2.2x%s",
			       (unsigned int)readb(base + EEPROMCtrl + i),
			       i % 16 != 15 ? " " : "\n");
#endif

	/* Issue soft reset */
	writel(MiiSoftReset, base + TxMode);
	udelay(1000);
	writel(0, base + TxMode);

	/* Reset the chip to erase previous misconfiguration. */
	writel(1, base + PCIDeviceConfig);
	boguscnt = 1000;
	while (--boguscnt > 0) {
		udelay(10);
		if ((readl(base + PCIDeviceConfig) & 1) == 0)
			break;
	}
	if (boguscnt == 0)
		printk("%s: chipset reset never completed!\n", dev->name);
	/* wait a little longer */
	udelay(1000);

	dev->base_addr = (unsigned long)base;
	dev->irq = irq;

	np = netdev_priv(dev);
	np->base = base;
	spin_lock_init(&np->lock);
	pci_set_drvdata(pdev, dev);

	np->pci_dev = pdev;

	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->mii_if.phy_id_mask = 0x1f;
	np->mii_if.reg_num_mask = 0x1f;

	drv_flags = netdrv_tbl[chip_idx].drv_flags;

	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option & 0x200)
		np->mii_if.full_duplex = 1;

	if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;
	else
		np->mii_if.force_media = 0;
	np->speed100 = 1;

	/* timer resolution is 128 * 0.8us */
	np->intr_timer_ctrl = (((intr_latency * 10) / 1024) & IntrLatencyMask) |
		Timer10X | EnableIntrMasking;
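	/*
	 * Editor's worked example (not in the original source): one timer
	 * tick is 128 * 0.8us = 102.4us, so (intr_latency * 10) / 1024 is
	 * just intr_latency / 102.4 in integer math; e.g. intr_latency=500
	 * (microseconds) yields 5000 / 1024 = 4 ticks, i.e. ~410us of
	 * actual mitigation latency.
	 */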

	if (small_frames > 0) {
		np->intr_timer_ctrl |= SmallFrameBypass;
		switch (small_frames) {
		case 1 ... 64:
			np->intr_timer_ctrl |= SmallFrame64;
			break;
		case 65 ... 128:
			np->intr_timer_ctrl |= SmallFrame128;
			break;
		case 129 ... 256:
			np->intr_timer_ctrl |= SmallFrame256;
			break;
		default:
			np->intr_timer_ctrl |= SmallFrame512;
			if (small_frames > 512)
				printk("Adjusting small_frames down to 512\n");
			break;
		}
	}

	/* The chip-specific entries in the device structure. */
	dev->open = &netdev_open;
	dev->hard_start_xmit = &start_tx;
	init_tx_timer(dev, tx_timeout, TX_TIMEOUT);
	init_poll(dev);
	dev->stop = &netdev_close;
	dev->get_stats = &get_stats;
	dev->set_multicast_list = &set_rx_mode;
	dev->do_ioctl = &netdev_ioctl;
	SET_ETHTOOL_OPS(dev, &ethtool_ops);

	if (mtu)
		dev->mtu = mtu;

	if (register_netdev(dev))
		goto err_out_cleardev;

	printk(KERN_INFO "%s: %s at %p, ",
	       dev->name, netdrv_tbl[chip_idx].name, base);
	for (i = 0; i < 5; i++)
		printk("%2.2x:", dev->dev_addr[i]);
	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

	if (drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;
		int mii_status;
		for (phy = 0; phy < 32 && phy_idx < PHY_CNT; phy++) {
			mdio_write(dev, phy, MII_BMCR, BMCR_RESET);
			mdelay(100);
			boguscnt = 1000;
			while (--boguscnt > 0)
				if ((mdio_read(dev, phy, MII_BMCR) & BMCR_RESET) == 0)
					break;
			if (boguscnt == 0) {
				printk("%s: PHY reset never completed!\n", dev->name);
				continue;
			}
			mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				printk(KERN_INFO "%s: MII PHY found at address %d, status "
				       "%#4.4x advertising %#4.4x.\n",
				       dev->name, phy, mii_status, np->mii_if.advertising);
				/* there can be only one PHY on-board */
				break;
			}
		}
		np->phy_cnt = phy_idx;
		if (np->phy_cnt > 0)
			np->mii_if.phy_id = np->phys[0];
		else
			memset(&np->mii_if, 0, sizeof(np->mii_if));
	}

	printk(KERN_INFO "%s: scatter-gather and hardware TCP cksumming %s.\n",
	       dev->name, enable_hw_cksum ? "enabled" : "disabled");
	return 0;

err_out_cleardev:
	pci_set_drvdata(pdev, NULL);
	iounmap(base);
err_out_free_res:
	pci_release_regions (pdev);
err_out_free_netdev:
	free_netdev(dev);
	return -ENODEV;
}


/* Read the MII Management Data I/O (MDIO) interfaces. */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	int result, boguscnt=1000;
	/* ??? Should we add a busy-wait here? */
	do
		result = readl(mdio_addr);
	while ((result & 0xC0000000) != 0x80000000 && --boguscnt > 0);
	if (boguscnt == 0)
		return 0;
	if ((result & 0xffff) == 0xffff)
		return 0;
	return result & 0xffff;
}


static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base + MIICtrl + (phy_id<<7) + (location<<2);
	writel(value, mdio_addr);
	/* The busy-wait will occur before a read. */
}


static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int i, retval;
	size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;

	/* Do we ever need to reset the chip??? */
	retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (retval)
		return retval;

	/* Disable the Rx and Tx, and reset the chip. */
	writel(0, ioaddr + GenCtrl);
	writel(1, ioaddr + PCIDeviceConfig);
	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
		       dev->name, dev->irq);

	/* Allocate the various queues. */
	if (np->queue_mem == 0) {
		tx_done_q_size = ((sizeof(struct tx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_done_q_size = ((sizeof(rx_done_desc) * DONE_Q_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
		rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
		np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
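		/*
		 * Editor's worked example (not in the original source): the
		 * ((x + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN idiom
		 * rounds each queue up to the 256-byte alignment the chip
		 * requires, so the sub-queues carved out of the single
		 * allocation below all start on valid boundaries. E.g. the
		 * 4-byte tx_done_desc gives 4 * 1024 = 4096 bytes, already
		 * a multiple of 256, and 32 type-2 Tx descriptors (512
		 * bytes) likewise round to themselves.
		 */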
		np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
		if (np->queue_mem == 0)
			return -ENOMEM;

		np->tx_done_q = np->queue_mem;
		np->tx_done_q_dma = np->queue_mem_dma;
		np->rx_done_q = (void *) np->tx_done_q + tx_done_q_size;
		np->rx_done_q_dma = np->tx_done_q_dma + tx_done_q_size;
		np->tx_ring = (void *) np->rx_done_q + rx_done_q_size;
		np->tx_ring_dma = np->rx_done_q_dma + rx_done_q_size;
		np->rx_ring = (void *) np->tx_ring + tx_ring_size;
		np->rx_ring_dma = np->tx_ring_dma + tx_ring_size;
	}

	/* Start with no carrier, it gets adjusted later */
	netif_carrier_off(dev);
	init_ring(dev);
	/* Set the size of the Rx buffers. */
	writel((np->rx_buf_sz << RxBufferLenShift) |
	       (0 << RxMinDescrThreshShift) |
	       RxPrefetchMode | RxVariableQ |
	       RX_Q_ENTRIES |
	       RX_DESC_Q_ADDR_SIZE | RX_DESC_ADDR_SIZE |
	       RxDescSpace4,
	       ioaddr + RxDescQCtrl);

	/* Set up the Rx DMA controller. */
	writel(RxChecksumIgnore |
	       (0 << RxEarlyIntThreshShift) |
	       (6 << RxHighPrioThreshShift) |
	       ((DMA_BURST_SIZE / 32) << RxBurstSizeShift),
	       ioaddr + RxDMACtrl);

	/* Set Tx descriptor */
	writel((2 << TxHiPriFIFOThreshShift) |
	       (0 << TxPadLenShift) |
	       ((DMA_BURST_SIZE / 32) << TxDMABurstSizeShift) |
	       TX_DESC_Q_ADDR_SIZE |
	       TX_DESC_SPACING | TX_DESC_TYPE,
	       ioaddr + TxDescCtrl);

	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + RxDescQHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + TxRingHiAddr);
	writel( (np->queue_mem_dma >> 16) >> 16, ioaddr + CompletionHiAddr);
	writel(np->rx_ring_dma, ioaddr + RxDescQAddr);
	writel(np->tx_ring_dma, ioaddr + TxRingPtr);

	writel(np->tx_done_q_dma, ioaddr + TxCompletionAddr);
	writel(np->rx_done_q_dma |
	       RxComplType |
	       (0 << RxComplThreshShift),
	       ioaddr + RxCompletionAddr);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);

	/* Fill both the Tx SA register and the Rx perfect filter. */
	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + TxStationAddr + 5 - i);
	/* The first entry is special because it bypasses the VLAN filter.
	   Don't use it. */
	writew(0, ioaddr + PerfFilterTable);
	writew(0, ioaddr + PerfFilterTable + 4);
	writew(0, ioaddr + PerfFilterTable + 8);
	for (i = 1; i < 16; i++) {
		u16 *eaddrs = (u16 *)dev->dev_addr;
		void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
		writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
		writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
	}
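	/*
	 * Editor's note (not in the original source): each of the 16 perfect
	 * filter slots is 16 bytes wide and holds the MAC as three 16-bit
	 * words, written last-word-first in big-endian form into the low
	 * half of consecutive 32-bit registers; the loop points every
	 * remaining slot at the station address, presumably so unused slots
	 * never match a foreign address.
	 */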

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */
	np->tx_mode = TxFlowEnable|RxFlowEnable|PadEnable;	/* modified when link is up. */
	writel(MiiSoftReset | np->tx_mode, ioaddr + TxMode);
	udelay(1000);
	writel(np->tx_mode, ioaddr + TxMode);
	np->tx_threshold = 4;
	writel(np->tx_threshold, ioaddr + TxThreshold);

	writel(np->intr_timer_ctrl, ioaddr + IntrTimerCtrl);

	netif_start_if(dev);
	netif_start_queue(dev);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
	set_rx_mode(dev);

	np->mii_if.advertising = mdio_read(dev, np->phys[0], MII_ADVERTISE);
	check_duplex(dev);

	/* Enable GPIO interrupts on link change */
	writel(0x0f00ff00, ioaddr + GPIOCtrl);

	/* Set the interrupt mask */
	writel(IntrRxDone | IntrRxEmpty | IntrDMAErr |
	       IntrTxDMADone | IntrStatsMax | IntrLinkChange |
	       IntrRxGFPDead | IntrNoTxCsum | IntrTxBadID,
	       ioaddr + IntrEnable);
	/* Enable PCI interrupts. */
	writel(0x00800000 | readl(ioaddr + PCIDeviceConfig),
	       ioaddr + PCIDeviceConfig);

#ifdef VLAN_SUPPORT
	/* Set VLAN type to 802.1q */
	writel(ETH_P_8021Q, ioaddr + VlanType);
#endif /* VLAN_SUPPORT */

#ifdef HAS_FIRMWARE
	/* Load Rx/Tx firmware into the frame processors */
	for (i = 0; i < FIRMWARE_RX_SIZE * 2; i++)
		writel(firmware_rx[i], ioaddr + RxGfpMem + i * 4);
	for (i = 0; i < FIRMWARE_TX_SIZE * 2; i++)
		writel(firmware_tx[i], ioaddr + TxGfpMem + i * 4);
#endif /* HAS_FIRMWARE */
	if (enable_hw_cksum)
		/* Enable the Rx and Tx units, and the Rx/Tx frame processors. */
		writel(TxEnable|TxGFPEnable|RxEnable|RxGFPEnable, ioaddr + GenCtrl);
	else
		/* Enable the Rx and Tx units only. */
		writel(TxEnable|RxEnable, ioaddr + GenCtrl);

	if (debug > 1)
		printk(KERN_DEBUG "%s: Done netdev_open().\n",
		       dev->name);

	return 0;
}


static void check_duplex(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u16 reg0;
	int silly_count = 1000;

	mdio_write(dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising);
	mdio_write(dev, np->phys[0], MII_BMCR, BMCR_RESET);
	udelay(500);
	while (--silly_count && mdio_read(dev, np->phys[0], MII_BMCR) & BMCR_RESET)
		/* do nothing */;
	if (!silly_count) {
		printk("%s: MII reset failed!\n", dev->name);
		return;
	}

	reg0 = mdio_read(dev, np->phys[0], MII_BMCR);

	if (!np->mii_if.force_media) {
		reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
	} else {
		reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
		if (np->speed100)
			reg0 |= BMCR_SPEED100;
		if (np->mii_if.full_duplex)
			reg0 |= BMCR_FULLDPLX;
		printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
		       dev->name,
		       np->speed100 ? "100" : "10",
		       np->mii_if.full_duplex ? "full" : "half");
	}
	mdio_write(dev, np->phys[0], MII_BMCR, reg0);
}


static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int old_debug;

	printk(KERN_WARNING "%s: Transmit timed out, status %#8.8x, "
	       "resetting...\n", dev->name, (int) readl(ioaddr + IntrStatus));

	/* Perhaps we should reinitialize the hardware here. */

	/*
	 * Stop and restart the interface.
	 * Cheat and increase the debug level temporarily.
	 */
	old_debug = debug;
	debug = 2;
	netdev_close(dev);
	netdev_open(dev);
	debug = old_debug;

	/* Trigger an immediate transmit demand. */

	dev->trans_start = jiffies;
	np->stats.tx_errors++;
	netif_wake_queue(dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->cur_rx = np->cur_tx = np->reap_tx = 0;
	np->dirty_rx = np->dirty_tx = np->rx_done = np->tx_done = 0;

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Fill in the Rx buffers. Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
		np->rx_info[i].skb = skb;
		if (skb == NULL)
			break;
		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
		skb->dev = dev;			/* Mark as being used by this device. */
		/* Grrr, we cannot offset to correctly align the IP header. */
		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
	}
	writew(i - 1, np->base + RxDescQIdx);
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Clear the remainder of the Rx buffer ring. */
	for ( ; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].rxaddr = 0;
		np->rx_info[i].skb = NULL;
		np->rx_info[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[RX_RING_SIZE - 1].rxaddr |= cpu_to_dma(RxDescEndRing);

	/* Clear the completion rings. */
	for (i = 0; i < DONE_Q_SIZE; i++) {
		np->rx_done_q[i].status = 0;
		np->tx_done_q[i].status = 0;
	}

	for (i = 0; i < TX_RING_SIZE; i++)
		memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));

	return;
}


static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned int entry;
	u32 status;
	int i;

	kick_tx_timer(dev, tx_timeout, TX_TIMEOUT);

	/*
	 * be cautious here, wrapping the queue has weird semantics
	 * and we may not have enough slots even when it seems we do.
	 */
	if ((np->cur_tx - np->dirty_tx) + skb_num_frags(skb) * 2 > TX_RING_SIZE) {
		netif_stop_queue(dev);
		return 1;
	}
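	/*
	 * Editor's note (not in the original source): the factor of 2 above
	 * appears to be headroom for the "weird" wrap case -- when the last
	 * fragment would cross the end of the ring, the wrapping entry
	 * consumes every slot up to TX_RING_SIZE (see used_slots below), so
	 * a frame can occupy more slots than it has fragments.
	 */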

#if defined(ZEROCOPY) && defined(HAS_BROKEN_FIRMWARE)
	{
		int has_bad_length = 0;

		if (skb_first_frag_len(skb) == 1)
			has_bad_length = 1;
		else {
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				if (skb_shinfo(skb)->frags[i].size == 1) {
					has_bad_length = 1;
					break;
				}
		}

		if (has_bad_length)
			skb_checksum_help(skb);
	}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */

	entry = np->cur_tx % TX_RING_SIZE;
	for (i = 0; i < skb_num_frags(skb); i++) {
		int wrap_ring = 0;
		status = TxDescID;

		if (i == 0) {
			np->tx_info[entry].skb = skb;
			status |= TxCRCEn;
			if (entry >= TX_RING_SIZE - skb_num_frags(skb)) {
				status |= TxRingWrap;
				wrap_ring = 1;
			}
			if (np->reap_tx) {
				status |= TxDescIntr;
				np->reap_tx = 0;
			}
			if (skb->ip_summed == CHECKSUM_HW) {
				status |= TxCalTCP;
				np->stats.tx_compressed++;
			}
			status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);

			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
		} else {
#ifdef MAX_SKB_FRAGS
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
			status |= this_frag->size;
			np->tx_info[entry].mapping =
				pci_map_single(np->pci_dev, page_address(this_frag->page) + this_frag->page_offset, this_frag->size, PCI_DMA_TODEVICE);
#endif /* MAX_SKB_FRAGS */
		}

		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
		np->tx_ring[entry].status = cpu_to_le32(status);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx #%d/#%d slot %d status %#8.8x.\n",
			       dev->name, np->cur_tx, np->dirty_tx,
			       entry, status);
		if (wrap_ring) {
			np->tx_info[entry].used_slots = TX_RING_SIZE - entry;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry = 0;
		} else {
			np->tx_info[entry].used_slots = 1;
			np->cur_tx += np->tx_info[entry].used_slots;
			entry++;
		}
		/* scavenge the tx descriptors twice per TX_RING_SIZE */
		if (np->cur_tx % (TX_RING_SIZE / 2) == 0)
			np->reap_tx = 1;
	}

	/* Non-x86: explicitly flush descriptor cache lines here. */
	/* Ensure all descriptors are written back before the transmit is
	   initiated. - Jes */
	wmb();

	/* Update the producer index. */
	writel(entry * (sizeof(starfire_tx_desc) / 8), np->base + TxProducerIdx);
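	/*
	 * Editor's note (not in the original source): TxProducerIdx counts
	 * 8-byte units, so the entry number is scaled by
	 * sizeof(starfire_tx_desc) / 8 -- by 1 for the 8-byte type-1
	 * descriptor and by 2 for the 16-byte type-2 descriptor.
	 */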

	/* 4 is arbitrary, but should be ok */
	if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
		netif_stop_queue(dev);

	dev->trans_start = jiffies;

	return 0;
}


/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base;
	int boguscnt = max_interrupt_work;
	int consumer;
	int tx_status;
	int handled = 0;

	do {
		u32 intr_status = readl(ioaddr + IntrClear);

		if (debug > 4)
			printk(KERN_DEBUG "%s: Interrupt status %#8.8x.\n",
			       dev->name, intr_status);

		if (intr_status == 0 || intr_status == (u32) -1)
			break;

		handled = 1;

		if (intr_status & (IntrRxDone | IntrRxEmpty))
			netdev_rx(dev, ioaddr);

		/* Scavenge the skbuff list based on the Tx-done queue.
		   There are redundant checks here that may be cleaned up
		   after the driver has proven to be reliable. */
		consumer = readl(ioaddr + TxConsumerIdx);
		if (debug > 3)
			printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
			       dev->name, consumer);

		while ((tx_status = le32_to_cpu(np->tx_done_q[np->tx_done].status)) != 0) {
			if (debug > 3)
				printk(KERN_DEBUG "%s: Tx completion #%d entry %d is %#8.8x.\n",
				       dev->name, np->dirty_tx, np->tx_done, tx_status);
			if ((tx_status & 0xe0000000) == 0xa0000000) {
				np->stats.tx_packets++;
			} else if ((tx_status & 0xe0000000) == 0x80000000) {
				u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
				struct sk_buff *skb = np->tx_info[entry].skb;
				np->tx_info[entry].skb = NULL;
				pci_unmap_single(np->pci_dev,
						 np->tx_info[entry].mapping,
						 skb_first_frag_len(skb),
						 PCI_DMA_TODEVICE);
				np->tx_info[entry].mapping = 0;
				np->dirty_tx += np->tx_info[entry].used_slots;
				entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
#ifdef MAX_SKB_FRAGS
				{
					int i;
					for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
						pci_unmap_single(np->pci_dev,
								 np->tx_info[entry].mapping,
								 skb_shinfo(skb)->frags[i].size,
								 PCI_DMA_TODEVICE);
						np->dirty_tx++;
						entry++;
					}
				}
#endif /* MAX_SKB_FRAGS */
				dev_kfree_skb_irq(skb);
			}
			np->tx_done_q[np->tx_done].status = 0;
			np->tx_done = (np->tx_done + 1) % DONE_Q_SIZE;
		}
		writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
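		/*
		 * Editor's note (not in the original source): the Rx and Tx
		 * completion consumer indices share the 32-bit
		 * CompletionQConsumerIdx register; __netdev_rx() writes the
		 * Rx index to the low half (+0), while the Tx index goes to
		 * the high half via the +2 byte offset here.
		 */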

		if (netif_queue_stopped(dev) &&
		    (np->cur_tx - np->dirty_tx + 4 < TX_RING_SIZE)) {
			/* The ring is no longer full, wake the queue. */
			netif_wake_queue(dev);
		}

		/* Stats overflow */
		if (intr_status & IntrStatsMax)
			get_stats(dev);

		/* Media change interrupt. */
		if (intr_status & IntrLinkChange)
			netdev_media_change(dev);

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & IntrAbnormalSummary)
			netdev_error(dev, intr_status);

		if (--boguscnt < 0) {
			if (debug > 1)
				printk(KERN_WARNING "%s: Too much work at interrupt, "
				       "status=%#8.8x.\n",
				       dev->name, intr_status);
			break;
		}
	} while (1);

	if (debug > 4)
		printk(KERN_DEBUG "%s: exiting interrupt, status=%#8.8x.\n",
		       dev->name, (int) readl(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}


/* This routine is logically part of the interrupt/poll handler, but separated
   for clarity, code sharing between NAPI/non-NAPI, and better register allocation. */
static int __netdev_rx(struct net_device *dev, int *quota)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 desc_status;
	int retcode = 0;

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
		struct sk_buff *skb;
		u16 pkt_len;
		int entry;
		rx_done_desc *desc = &np->rx_done_q[np->rx_done];

		if (debug > 4)
			printk(KERN_DEBUG "  netdev_rx() status of %d was %#8.8x.\n", np->rx_done, desc_status);
		if (!(desc_status & RxOK)) {
			/* There was an error. */
1607 if (debug > 2)
1608 printk(KERN_DEBUG " netdev_rx() Rx error was %#8.8x.\n", desc_status);
1609 np->stats.rx_errors++;
1610 if (desc_status & RxFIFOErr)
1611 np->stats.rx_fifo_errors++;
1612 goto next_rx;
1613 }
1614
1615 if (*quota <= 0) { /* out of rx quota */
1616 retcode = 1;
1617 goto out;
1618 }
1619 (*quota)--;
1620
1621 pkt_len = desc_status; /* Implicitly Truncate */
1622 entry = (desc_status >> 16) & 0x7ff;
1623
1624 if (debug > 4)
1625 printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d, quota %d.\n", pkt_len, *quota);
1626 /* Check if the packet is long enough to accept without copying
1627 to a minimally-sized skbuff. */
1628 if (pkt_len < rx_copybreak
1629 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1630 skb->dev = dev;
1631 skb_reserve(skb, 2); /* 16 byte align the IP header */
1632 pci_dma_sync_single_for_cpu(np->pci_dev,
1633 np->rx_info[entry].mapping,
1634 pkt_len, PCI_DMA_FROMDEVICE);
1635 eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
1636 pci_dma_sync_single_for_device(np->pci_dev,
1637 np->rx_info[entry].mapping,
1638 pkt_len, PCI_DMA_FROMDEVICE);
1639 skb_put(skb, pkt_len);
1640 } else {
1641 pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1642 skb = np->rx_info[entry].skb;
1643 skb_put(skb, pkt_len);
1644 np->rx_info[entry].skb = NULL;
1645 np->rx_info[entry].mapping = 0;
1646 }
1647#ifndef final_version /* Remove after testing. */
1648 /* You will want this info for the initial debug. */
1649 if (debug > 5)
1650 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
1651 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x.\n",
1652 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
1653 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
1654 skb->data[8], skb->data[9], skb->data[10],
1655 skb->data[11], skb->data[12], skb->data[13]);
1656#endif
1657
1658 skb->protocol = eth_type_trans(skb, dev);
1659#if defined(HAS_FIRMWARE) || defined(VLAN_SUPPORT)
1660 if (debug > 4)
1661 printk(KERN_DEBUG " netdev_rx() status2 of %d was %#4.4x.\n", np->rx_done, le16_to_cpu(desc->status2));
1662#endif
1663#ifdef HAS_FIRMWARE
1664 if (le16_to_cpu(desc->status2) & 0x0100) {
1665 skb->ip_summed = CHECKSUM_UNNECESSARY;
1666 np->stats.rx_compressed++;
1667 }
1668 /*
1669 * This feature doesn't seem to be working, at least
1670 * with the two firmware versions I have. If the GFP sees
1671 * an IP fragment, it either ignores it completely, or reports
1672 * "bad checksum" on it.
1673 *
1674 * Maybe I missed something -- corrections are welcome.
1675 * Until then, the printk stays. :-) -Ion
1676 */
1677 else if (le16_to_cpu(desc->status2) & 0x0040) {
1678 skb->ip_summed = CHECKSUM_HW;
1679 skb->csum = le16_to_cpu(desc->csum);
1680 printk(KERN_DEBUG "%s: checksum_hw, status2 = %#x\n", dev->name, le16_to_cpu(desc->status2));
1681 }
1682#endif /* HAS_FIRMWARE */
1683#ifdef VLAN_SUPPORT
1684 if (np->vlgrp && le16_to_cpu(desc->status2) & 0x0200) {
1685 if (debug > 4)
1686 printk(KERN_DEBUG " netdev_rx() vlanid = %d\n", le16_to_cpu(desc->vlanid));
1687 /* vlan_netdev_receive_skb() expects a packet with the VLAN tag stripped out */
1688 vlan_netdev_receive_skb(skb, np->vlgrp, le16_to_cpu(desc->vlanid) & VLAN_VID_MASK);
1689 } else
1690#endif /* VLAN_SUPPORT */
1691 netdev_receive_skb(skb);
1692 dev->last_rx = jiffies;
1693 np->stats.rx_packets++;
1694
1695 next_rx:
1696 np->cur_rx++;
1697 desc->status = 0;
1698 np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
1699 }
1700 writew(np->rx_done, np->base + CompletionQConsumerIdx);
1701
1702 out:
1703 refill_rx_ring(dev);
1704 if (debug > 5)
1705 printk(KERN_DEBUG " exiting netdev_rx(): %d, status of %d was %#8.8x.\n",
1706 retcode, np->rx_done, desc_status);
1707 return retcode;
1708}
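
/*
 * Standalone model (invented names, userspace only) of the rx_copybreak
 * policy in __netdev_rx() above: short packets are copied into a
 * right-sized buffer so the large DMA buffer can stay on the ring,
 * while long packets are handed up whole and their ring slot is
 * refilled later.  Guarded with #if 0; build it separately to
 * experiment.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_COPYBREAK 256	/* stands in for rx_copybreak */

/* Return a buffer the caller owns; *recycled says whether the original
 * DMA buffer may be reposted to the ring immediately. */
static unsigned char *demo_rx_one(unsigned char *dma_buf, size_t pkt_len,
				  int *recycled)
{
	if (pkt_len < DEMO_COPYBREAK) {
		unsigned char *copy = malloc(pkt_len);
		if (copy) {
			memcpy(copy, dma_buf, pkt_len);
			*recycled = 1;	/* dma_buf stays mapped and posted */
			return copy;
		}
	}
	*recycled = 0;			/* hand up the big buffer, refill later */
	return dma_buf;
}

int main(void)
{
	unsigned char buf[1536] = "hello";
	int recycled;
	unsigned char *pkt = demo_rx_one(buf, 6, &recycled);

	printf("recycled=%d data=%s\n", recycled, pkt);
	if (pkt != buf)
		free(pkt);
	return 0;
}
#endif
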
1709
1710
1711#ifdef HAVE_NETDEV_POLL
1712static int netdev_poll(struct net_device *dev, int *budget)
1713{
1714 u32 intr_status;
1715 struct netdev_private *np = netdev_priv(dev);
1716 void __iomem *ioaddr = np->base;
1717 int retcode = 0, quota = dev->quota;
1718
1719 do {
1720 writel(IntrRxDone | IntrRxEmpty, ioaddr + IntrClear);
1721
1722 retcode = __netdev_rx(dev, &quota);
1723 *budget -= (dev->quota - quota);
1724 dev->quota = quota;
1725 if (retcode)
1726 goto out;
1727
1728 intr_status = readl(ioaddr + IntrStatus);
1729 } while (intr_status & (IntrRxDone | IntrRxEmpty));
1730
1731 netif_rx_complete(dev);
1732 intr_status = readl(ioaddr + IntrEnable);
1733 intr_status |= IntrRxDone | IntrRxEmpty;
1734 writel(intr_status, ioaddr + IntrEnable);
1735
1736 out:
1737 if (debug > 5)
1738 printk(KERN_DEBUG " exiting netdev_poll(): %d.\n", retcode);
1739
1740	/* A nonzero return value keeps us on the poll list. */
1741	return retcode;
1742}
1743#endif /* HAVE_NETDEV_POLL */
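
/*
 * Toy model of the budget bookkeeping in netdev_poll() above (names
 * invented, userspace only): the poll routine may consume at most
 * "quota" packets, charges what it used against the global budget, and
 * only when it drains the ring (retcode == 0) would interrupts be
 * re-enabled.  Guarded with #if 0; compile separately.
 */
#if 0
#include <stdio.h>

static int pending_pkts = 10;		/* pretend hardware backlog */

/* Consume up to *quota packets; return 1 if work remains. */
static int demo_rx_poll(int *quota)
{
	while (pending_pkts > 0) {
		if (*quota <= 0)
			return 1;	/* quota exhausted, poll again */
		(*quota)--;
		pending_pkts--;
	}
	return 0;			/* ring drained */
}

int main(void)
{
	int budget = 16, dev_quota = 4;

	while (budget > 0) {
		int quota = dev_quota;
		int again = demo_rx_poll(&quota);

		budget -= dev_quota - quota;	/* charge what was used */
		printf("used %d, budget left %d\n", dev_quota - quota, budget);
		if (!again)
			break;		/* here the driver re-enables Rx IRQs */
	}
	return 0;
}
#endif
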
1744
1745
1746static void refill_rx_ring(struct net_device *dev)
1747{
1748 struct netdev_private *np = netdev_priv(dev);
1749 struct sk_buff *skb;
1750 int entry = -1;
1751
1752 /* Refill the Rx ring buffers. */
1753 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1754 entry = np->dirty_rx % RX_RING_SIZE;
1755 if (np->rx_info[entry].skb == NULL) {
1756 skb = dev_alloc_skb(np->rx_buf_sz);
1757 np->rx_info[entry].skb = skb;
1758 if (skb == NULL)
1759 break; /* Better luck next round. */
1760 np->rx_info[entry].mapping =
1761 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1762 skb->dev = dev; /* Mark as being used by this device. */
1763 np->rx_ring[entry].rxaddr =
1764 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
1765 }
1766 if (entry == RX_RING_SIZE - 1)
1767 np->rx_ring[entry].rxaddr |= cpu_to_dma(RxDescEndRing);
1768 }
1769 if (entry >= 0)
1770 writew(entry, np->base + RxDescQIdx);
1771}
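
/*
 * Standalone illustration (invented names) of the address encoding
 * refill_rx_ring() uses: receive buffers are aligned, so the low bits
 * of the DMA address are free to carry flags, which is how RxDescValid
 * and RxDescEndRing are OR'd into rxaddr above.  Guarded with #if 0;
 * build separately as a userspace program.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define DEMO_DESC_VALID	0x1ULL		/* stands in for RxDescValid */
#define DEMO_DESC_END	0x2ULL		/* stands in for RxDescEndRing */
#define DEMO_RING_SIZE	8

int main(void)
{
	uint64_t ring[DEMO_RING_SIZE];
	int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		uint64_t dma_addr = 0x10000ULL + i * 0x1000ULL;	/* aligned */

		ring[i] = dma_addr | DEMO_DESC_VALID;
		if (i == DEMO_RING_SIZE - 1)
			ring[i] |= DEMO_DESC_END;	/* tell the NIC to wrap */
	}
	for (i = 0; i < DEMO_RING_SIZE; i++)
		printf("desc %d: addr %#llx flags %#llx\n", i,
		       (unsigned long long)(ring[i] & ~3ULL),
		       (unsigned long long)(ring[i] & 3ULL));
	return 0;
}
#endif
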
1772
1773
1774static void netdev_media_change(struct net_device *dev)
1775{
1776 struct netdev_private *np = netdev_priv(dev);
1777 void __iomem *ioaddr = np->base;
1778 u16 reg0, reg1, reg4, reg5;
1779 u32 new_tx_mode;
1780 u32 new_intr_timer_ctrl;
1781
1782 /* reset status first */
1783 mdio_read(dev, np->phys[0], MII_BMCR);
1784 mdio_read(dev, np->phys[0], MII_BMSR);
1785
1786 reg0 = mdio_read(dev, np->phys[0], MII_BMCR);
1787 reg1 = mdio_read(dev, np->phys[0], MII_BMSR);
1788
1789 if (reg1 & BMSR_LSTATUS) {
1790 /* link is up */
1791 if (reg0 & BMCR_ANENABLE) {
1792 /* autonegotiation is enabled */
1793 reg4 = mdio_read(dev, np->phys[0], MII_ADVERTISE);
1794 reg5 = mdio_read(dev, np->phys[0], MII_LPA);
1795 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
1796 np->speed100 = 1;
1797 np->mii_if.full_duplex = 1;
1798 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
1799 np->speed100 = 1;
1800 np->mii_if.full_duplex = 0;
1801 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
1802 np->speed100 = 0;
1803 np->mii_if.full_duplex = 1;
1804 } else {
1805 np->speed100 = 0;
1806 np->mii_if.full_duplex = 0;
1807 }
1808 } else {
1809 /* autonegotiation is disabled */
1810 if (reg0 & BMCR_SPEED100)
1811 np->speed100 = 1;
1812 else
1813 np->speed100 = 0;
1814 if (reg0 & BMCR_FULLDPLX)
1815 np->mii_if.full_duplex = 1;
1816 else
1817 np->mii_if.full_duplex = 0;
1818 }
1819 netif_carrier_on(dev);
1820 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
1821 dev->name,
1822 np->speed100 ? "100" : "10",
1823 np->mii_if.full_duplex ? "full" : "half");
1824
1825 new_tx_mode = np->tx_mode & ~FullDuplex; /* duplex setting */
1826 if (np->mii_if.full_duplex)
1827 new_tx_mode |= FullDuplex;
1828 if (np->tx_mode != new_tx_mode) {
1829 np->tx_mode = new_tx_mode;
1830 writel(np->tx_mode | MiiSoftReset, ioaddr + TxMode);
1831 udelay(1000);
1832 writel(np->tx_mode, ioaddr + TxMode);
1833 }
1834
1835 new_intr_timer_ctrl = np->intr_timer_ctrl & ~Timer10X;
1836 if (np->speed100)
1837 new_intr_timer_ctrl |= Timer10X;
1838 if (np->intr_timer_ctrl != new_intr_timer_ctrl) {
1839 np->intr_timer_ctrl = new_intr_timer_ctrl;
1840 writel(new_intr_timer_ctrl, ioaddr + IntrTimerCtrl);
1841 }
1842 } else {
1843 netif_carrier_off(dev);
1844 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1845 }
1846}
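
/*
 * Userspace sketch of the autonegotiation resolution performed in
 * netdev_media_change() above: each side advertises capability bits,
 * and the best mode present in the AND of MII_ADVERTISE and MII_LPA
 * wins, tested best-first (100/full, 100/half, 10/full, 10/half).  The
 * bit values below match <linux/mii.h>; everything else is invented.
 * Guarded with #if 0; compile separately.
 */
#if 0
#include <stdio.h>

#define ADV_10HALF	0x0020
#define ADV_10FULL	0x0040
#define ADV_100HALF	0x0080
#define ADV_100FULL	0x0100

static void demo_resolve(unsigned int adv, unsigned int lpa,
			 int *speed100, int *full_duplex)
{
	unsigned int common = adv & lpa;

	*speed100 = !!(common & (ADV_100FULL | ADV_100HALF));
	if (common & ADV_100FULL)
		*full_duplex = 1;
	else if (common & ADV_100HALF)
		*full_duplex = 0;
	else
		*full_duplex = !!(common & ADV_10FULL);
}

int main(void)
{
	int speed100, duplex;

	/* We advertise everything; the link partner is 100/half only. */
	demo_resolve(ADV_10HALF | ADV_10FULL | ADV_100HALF | ADV_100FULL,
		     ADV_100HALF, &speed100, &duplex);
	printf("%d Mbit, %s duplex\n", speed100 ? 100 : 10,
	       duplex ? "full" : "half");
	return 0;
}
#endif
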
1847
1848
1849static void netdev_error(struct net_device *dev, int intr_status)
1850{
1851 struct netdev_private *np = netdev_priv(dev);
1852
1853 /* Came close to underrunning the Tx FIFO, increase threshold. */
1854 if (intr_status & IntrTxDataLow) {
1855 if (np->tx_threshold <= PKT_BUF_SZ / 16) {
1856 writel(++np->tx_threshold, np->base + TxThreshold);
1857 printk(KERN_NOTICE "%s: PCI bus congestion, increasing Tx FIFO threshold to %d bytes\n",
1858 dev->name, np->tx_threshold * 16);
1859 } else
1860 printk(KERN_WARNING "%s: PCI Tx underflow -- adapter is probably malfunctioning\n", dev->name);
1861 }
1862 if (intr_status & IntrRxGFPDead) {
1863 np->stats.rx_fifo_errors++;
1864 np->stats.rx_errors++;
1865 }
1866 if (intr_status & (IntrNoTxCsum | IntrDMAErr)) {
1867 np->stats.tx_fifo_errors++;
1868 np->stats.tx_errors++;
1869 }
1870 if ((intr_status & ~(IntrNormalMask | IntrAbnormalSummary | IntrLinkChange | IntrStatsMax | IntrTxDataLow | IntrRxGFPDead | IntrNoTxCsum | IntrPCIPad)) && debug)
1871 printk(KERN_ERR "%s: Something Wicked happened! %#8.8x.\n",
1872 dev->name, intr_status);
1873}
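
/*
 * Toy model of the adaptive Tx threshold logic in netdev_error() above
 * (the starting value is an assumption for the demo): the threshold
 * register counts 16-byte units, so each near-underrun raises the
 * store-and-forward point by 16 bytes until roughly a whole packet
 * buffer would be needed.  Guarded with #if 0; compile separately.
 */
#if 0
#include <stdio.h>

#define DEMO_PKT_BUF_SZ 1536		/* stands in for PKT_BUF_SZ */

int main(void)
{
	unsigned int tx_threshold = 4;	/* assumed initial value: 64 bytes */
	int underruns;

	for (underruns = 0; underruns < 200; underruns++) {
		if (tx_threshold <= DEMO_PKT_BUF_SZ / 16)
			tx_threshold++;	/* react to an IntrTxDataLow event */
		else
			break;		/* maxed out: adapter likely faulty */
	}
	printf("final threshold: %u bytes\n", tx_threshold * 16);
	return 0;
}
#endif
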
1874
1875
1876static struct net_device_stats *get_stats(struct net_device *dev)
1877{
1878 struct netdev_private *np = netdev_priv(dev);
1879 void __iomem *ioaddr = np->base;
1880
1881 /* This adapter architecture needs no SMP locks. */
1882 np->stats.tx_bytes = readl(ioaddr + 0x57010);
1883 np->stats.rx_bytes = readl(ioaddr + 0x57044);
1884 np->stats.tx_packets = readl(ioaddr + 0x57000);
1885 np->stats.tx_aborted_errors =
1886 readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
1887 np->stats.tx_window_errors = readl(ioaddr + 0x57018);
1888 np->stats.collisions =
1889 readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
1890
1891	/* The chip only needs to report frames it silently dropped. */
1892 np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
1893 writew(0, ioaddr + RxDMAStatus);
1894 np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
1895 np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
1896 np->stats.rx_length_errors = readl(ioaddr + 0x57058);
1897 np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
1898
1899 return &np->stats;
1900}
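
/*
 * Sketch of the two counter styles handled by get_stats() above (mock
 * register, invented names): most Starfire counters are free-running
 * and simply copied, but the dropped-frame count in RxDMAStatus is
 * read-and-clear, so it must be accumulated with "+=" and then zeroed,
 * as done above.  Guarded with #if 0; compile separately.
 */
#if 0
#include <stdio.h>

static unsigned short mock_rxdmastatus = 7;	/* pretend hardware register */

static unsigned short demo_readw(void) { return mock_rxdmastatus; }
static void demo_writew(unsigned short v) { mock_rxdmastatus = v; }

int main(void)
{
	unsigned long rx_dropped = 0;
	int i;

	for (i = 0; i < 2; i++) {
		rx_dropped += demo_readw();	/* accumulate... */
		demo_writew(0);			/* ...then clear in hardware */
		mock_rxdmastatus += 3;		/* more frames get dropped */
	}
	printf("rx_dropped = %lu\n", rx_dropped);	/* 7 + 3 = 10 */
	return 0;
}
#endif
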
1901
1902
1903/* Chips may use the upper or lower CRC bits, and may reverse and/or invert
1904   them. Select the endianness that results in minimal calculations.
1905*/
1906static void set_rx_mode(struct net_device *dev)
1907{
1908 struct netdev_private *np = netdev_priv(dev);
1909 void __iomem *ioaddr = np->base;
1910 u32 rx_mode = MinVLANPrio;
1911 struct dev_mc_list *mclist;
1912 int i;
1913#ifdef VLAN_SUPPORT
1914
1915 rx_mode |= VlanMode;
1916 if (np->vlgrp) {
1917 int vlan_count = 0;
1918 void __iomem *filter_addr = ioaddr + HashTable + 8;
1919 for (i = 0; i < VLAN_VID_MASK; i++) {
1920 if (np->vlgrp->vlan_devices[i]) {
1921 if (vlan_count >= 32)
1922 break;
1923 writew(cpu_to_be16(i), filter_addr);
1924 filter_addr += 16;
1925 vlan_count++;
1926 }
1927 }
1928 if (i == VLAN_VID_MASK) {
1929 rx_mode |= PerfectFilterVlan;
1930 while (vlan_count < 32) {
1931 writew(0, filter_addr);
1932 filter_addr += 16;
1933 vlan_count++;
1934 }
1935 }
1936 }
1937#endif /* VLAN_SUPPORT */
1938
1939 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1940 rx_mode |= AcceptAll;
1941 } else if ((dev->mc_count > multicast_filter_limit)
1942 || (dev->flags & IFF_ALLMULTI)) {
1943 /* Too many to match, or accept all multicasts. */
1944 rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
1945 } else if (dev->mc_count <= 14) {
1946 /* Use the 16 element perfect filter, skip first two entries. */
1947 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1948 u16 *eaddrs;
1949 for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
1950 i++, mclist = mclist->next) {
1951 eaddrs = (u16 *)mclist->dmi_addr;
1952 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
1953 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1954 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
1955 }
1956 eaddrs = (u16 *)dev->dev_addr;
1957 while (i++ < 16) {
1958 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
1959 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1960 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
1961 }
1962 rx_mode |= AcceptBroadcast|PerfectFilter;
1963 } else {
1964 /* Must use a multicast hash table. */
1965 void __iomem *filter_addr;
1966 u16 *eaddrs;
1967 u16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1968
1969 memset(mc_filter, 0, sizeof(mc_filter));
1970 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1971 i++, mclist = mclist->next) {
1972 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
1973 __u32 *fptr = (__u32 *) &mc_filter[(bit_nr >> 4) & ~1];
1974
1975 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
1976 }
1977 /* Clear the perfect filter list, skip first two entries. */
1978 filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1979 eaddrs = (u16 *)dev->dev_addr;
1980 for (i = 2; i < 16; i++) {
1981 writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 4;
1982 writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
1983 writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 8;
1984 }
1985		for (filter_addr = ioaddr + HashTable, i = 0; i < 32; filter_addr += 16, i++)
1986 writew(mc_filter[i], filter_addr);
1987 rx_mode |= AcceptBroadcast|PerfectFilter|HashFilter;
1988 }
1989 writel(rx_mode, ioaddr + RxFilterMode);
1990}
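
/*
 * Standalone model of the 512-bit multicast hash used in set_rx_mode()
 * above: the top 9 bits of the little-endian CRC-32 of the MAC address
 * select one bit among 32 x 16-bit filter words.  The CRC routine is a
 * reimplementation of ether_crc_le() so this builds in userspace, and
 * the word/bit split shown is the plain layout; the driver itself
 * packs pairs of words through 32-bit little-endian stores.  Guarded
 * with #if 0; compile separately.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_ether_crc_le(int length, const unsigned char *data)
{
	uint32_t crc = ~0u;

	while (--length >= 0) {
		unsigned char octet = *data++;
		int bit;

		for (bit = 8; --bit >= 0; octet >>= 1) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ 0xedb88320u;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	static const unsigned char mcast[6] =
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };	/* IPv4 224.0.0.1 */
	uint16_t mc_filter[32] = { 0 };
	unsigned int bit_nr = demo_ether_crc_le(6, mcast) >> 23;	/* 0..511 */

	mc_filter[bit_nr >> 4] |= 1u << (bit_nr & 15);
	printf("hash bit %u -> word %u, bit %u\n",
	       bit_nr, bit_nr >> 4, bit_nr & 15);
	return 0;
}
#endif
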
1991
1992static int check_if_running(struct net_device *dev)
1993{
1994 if (!netif_running(dev))
1995 return -EINVAL;
1996 return 0;
1997}
1998
1999static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2000{
2001 struct netdev_private *np = netdev_priv(dev);
2002 strcpy(info->driver, DRV_NAME);
2003 strcpy(info->version, DRV_VERSION);
2004 strcpy(info->bus_info, PCI_SLOT_NAME(np->pci_dev));
2005}
2006
2007static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2008{
2009 struct netdev_private *np = netdev_priv(dev);
2010 spin_lock_irq(&np->lock);
2011 mii_ethtool_gset(&np->mii_if, ecmd);
2012 spin_unlock_irq(&np->lock);
2013 return 0;
2014}
2015
2016static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
2017{
2018 struct netdev_private *np = netdev_priv(dev);
2019 int res;
2020 spin_lock_irq(&np->lock);
2021 res = mii_ethtool_sset(&np->mii_if, ecmd);
2022 spin_unlock_irq(&np->lock);
2023 check_duplex(dev);
2024 return res;
2025}
2026
2027static int nway_reset(struct net_device *dev)
2028{
2029 struct netdev_private *np = netdev_priv(dev);
2030 return mii_nway_restart(&np->mii_if);
2031}
2032
2033static u32 get_link(struct net_device *dev)
2034{
2035 struct netdev_private *np = netdev_priv(dev);
2036 return mii_link_ok(&np->mii_if);
2037}
2038
2039static u32 get_msglevel(struct net_device *dev)
2040{
2041 return debug;
2042}
2043
2044static void set_msglevel(struct net_device *dev, u32 val)
2045{
2046 debug = val;
2047}
2048
2049static struct ethtool_ops ethtool_ops = {
2050 .begin = check_if_running,
2051 .get_drvinfo = get_drvinfo,
2052 .get_settings = get_settings,
2053 .set_settings = set_settings,
2054 .nway_reset = nway_reset,
2055 .get_link = get_link,
2056 .get_msglevel = get_msglevel,
2057 .set_msglevel = set_msglevel,
2058};
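
/*
 * Sketch of the dispatch pattern behind the ethtool_ops table above
 * (toy types, invented names): the ethtool core invokes .begin first
 * and proceeds to the requested operation only if it returns 0, which
 * is how check_if_running() fences off a downed interface.  Guarded
 * with #if 0; compile separately.
 */
#if 0
#include <stdio.h>

struct demo_dev { int running; };

struct demo_ops {
	int (*begin)(struct demo_dev *);
	unsigned int (*get_link)(struct demo_dev *);
};

static int demo_begin(struct demo_dev *d)
{
	return d->running ? 0 : -1;
}

static unsigned int demo_get_link(struct demo_dev *d)
{
	(void)d;
	return 1;			/* pretend the link is up */
}

static const struct demo_ops ops = {
	.begin = demo_begin,
	.get_link = demo_get_link,
};

int main(void)
{
	struct demo_dev dev = { .running = 0 };

	if (ops.begin && ops.begin(&dev) < 0)
		printf("device down, refusing ethtool op\n");
	else
		printf("link: %u\n", ops.get_link(&dev));
	return 0;
}
#endif
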
2059
2060static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2061{
2062 struct netdev_private *np = netdev_priv(dev);
2063 struct mii_ioctl_data *data = if_mii(rq);
2064 int rc;
2065
2066 if (!netif_running(dev))
2067 return -EINVAL;
2068
2069 spin_lock_irq(&np->lock);
2070 rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL);
2071 spin_unlock_irq(&np->lock);
2072
2073 if ((cmd == SIOCSMIIREG) && (data->phy_id == np->phys[0]))
2074 check_duplex(dev);
2075
2076 return rc;
2077}
2078
2079static int netdev_close(struct net_device *dev)
2080{
2081 struct netdev_private *np = netdev_priv(dev);
2082 void __iomem *ioaddr = np->base;
2083 int i;
2084
2085 netif_stop_queue(dev);
2086 netif_stop_if(dev);
2087
2088 if (debug > 1) {
2089 printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %#8.8x.\n",
2090 dev->name, (int) readl(ioaddr + IntrStatus));
2091 printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
2092 dev->name, np->cur_tx, np->dirty_tx,
2093 np->cur_rx, np->dirty_rx);
2094 }
2095
2096 /* Disable interrupts by clearing the interrupt mask. */
2097 writel(0, ioaddr + IntrEnable);
2098
2099 /* Stop the chip's Tx and Rx processes. */
2100 writel(0, ioaddr + GenCtrl);
2101 readl(ioaddr + GenCtrl);
2102
2103 if (debug > 5) {
2104		printk(KERN_DEBUG " Tx ring at %#llx:\n",
2105 (long long) np->tx_ring_dma);
2106 for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
2107 printk(KERN_DEBUG " #%d desc. %#8.8x %#llx -> %#8.8x.\n",
2108 i, le32_to_cpu(np->tx_ring[i].status),
2109 (long long) dma_to_cpu(np->tx_ring[i].addr),
2110 le32_to_cpu(np->tx_done_q[i].status));
2111 printk(KERN_DEBUG " Rx ring at %#llx -> %p:\n",
2112 (long long) np->rx_ring_dma, np->rx_done_q);
2113 if (np->rx_done_q)
2114 for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
2115 printk(KERN_DEBUG " #%d desc. %#llx -> %#8.8x\n",
2116 i, (long long) dma_to_cpu(np->rx_ring[i].rxaddr), le32_to_cpu(np->rx_done_q[i].status));
2117 }
2118 }
2119
2120 free_irq(dev->irq, dev);
2121
2122 /* Free all the skbuffs in the Rx queue. */
2123 for (i = 0; i < RX_RING_SIZE; i++) {
2124 np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
2125 if (np->rx_info[i].skb != NULL) {
2126 pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
2127 dev_kfree_skb(np->rx_info[i].skb);
2128 }
2129 np->rx_info[i].skb = NULL;
2130 np->rx_info[i].mapping = 0;
2131 }
2132 for (i = 0; i < TX_RING_SIZE; i++) {
2133 struct sk_buff *skb = np->tx_info[i].skb;
2134 if (skb == NULL)
2135 continue;
2136 pci_unmap_single(np->pci_dev,
2137 np->tx_info[i].mapping,
2138 skb_first_frag_len(skb), PCI_DMA_TODEVICE);
2139 np->tx_info[i].mapping = 0;
2140 dev_kfree_skb(skb);
2141 np->tx_info[i].skb = NULL;
2142 }
2143
2144 return 0;
2145}
2146
2147
2148static void __devexit starfire_remove_one (struct pci_dev *pdev)
2149{
2150 struct net_device *dev = pci_get_drvdata(pdev);
2151	struct netdev_private *np;
2152
2153	BUG_ON(!dev);
2154	np = netdev_priv(dev);
2155
2156 unregister_netdev(dev);
2157
2158 if (np->queue_mem)
2159 pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
2160
2161
2162 /* XXX: add wakeup code -- requires firmware for MagicPacket */
2163 pci_set_power_state(pdev, PCI_D3hot); /* go to sleep in D3 mode */
2164 pci_disable_device(pdev);
2165
2166 iounmap(np->base);
2167 pci_release_regions(pdev);
2168
2169 pci_set_drvdata(pdev, NULL);
2170 free_netdev(dev); /* Will also free np!! */
2171}
2172
2173
2174static struct pci_driver starfire_driver = {
2175 .name = DRV_NAME,
2176 .probe = starfire_init_one,
2177 .remove = __devexit_p(starfire_remove_one),
2178 .id_table = starfire_pci_tbl,
2179};
2180
2181
2182static int __init starfire_init (void)
2183{
2184/* when a module, this is printed whether or not devices are found in probe */
2185#ifdef MODULE
2186 printk(version);
2187#endif
2188#ifndef ADDR_64BITS
2189 /* we can do this test only at run-time... sigh */
2190 if (sizeof(dma_addr_t) == sizeof(u64)) {
2191		printk(KERN_ERR "This driver has not been ported to this 64-bit architecture yet\n");
2192 return -ENODEV;
2193 }
2194#endif /* not ADDR_64BITS */
2195#ifndef HAS_FIRMWARE
2196 /* unconditionally disable hw cksums if firmware is not present */
2197 enable_hw_cksum = 0;
2198#endif /* not HAS_FIRMWARE */
2199 return pci_module_init (&starfire_driver);
2200}
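
/*
 * Illustration of the sizeof(dma_addr_t) guard in starfire_init()
 * above (stand-in typedef, userspace only): sizeof is a compile-time
 * constant, so the "run-time" check is really folded away by the
 * compiler even though it reads as an ordinary if.  Swap the typedef
 * to uint64_t to take the refusal path.  Guarded with #if 0; compile
 * separately.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

typedef uint32_t demo_dma_addr_t;	/* stands in for dma_addr_t */

int main(void)
{
	if (sizeof(demo_dma_addr_t) == sizeof(uint64_t)) {
		printf("64-bit DMA addresses: the driver would refuse to load\n");
		return 1;
	}
	printf("32-bit DMA addresses: ok\n");
	return 0;
}
#endif
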
2201
2202
2203static void __exit starfire_cleanup (void)
2204{
2205 pci_unregister_driver (&starfire_driver);
2206}
2207
2208
2209module_init(starfire_init);
2210module_exit(starfire_cleanup);
2211
2212
2213/*
2214 * Local variables:
2215 * c-basic-offset: 8
2216 * tab-width: 8
2217 * End:
2218 */