author      Adrian Bunk <bunk@kernel.org>       2008-10-29 17:22:15 -0400
committer   Jeff Garzik <jgarzik@redhat.com>    2008-10-31 00:52:23 -0400
commit      6b1abbaefa31b84cc02bf4006ba8a63393de1136 (patch)
tree        8f6260231b8cdc155f0f4531ba9621f3fd2f5ac1
parent      5177b3240a6608fc0c9c05cc32f4855c6540f8d5 (diff)
The overdue eepro100 removal.
Signed-off-by: Adrian Bunk <bunk@kernel.org>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
-rw-r--r--  Documentation/feature-removal-schedule.txt     7
-rw-r--r--  MAINTAINERS                                     5
-rw-r--r--  drivers/net/Kconfig                            13
-rw-r--r--  drivers/net/Makefile                            1
-rw-r--r--  drivers/net/eepro100.c                       2400
5 files changed, 0 insertions, 2426 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 05d71b4b9430..6ecd4f0a24f0 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -144,13 +144,6 @@ Who: Christoph Hellwig <hch@lst.de>
 
 ---------------------------
 
-What:	eepro100 network driver
-When:	January 2007
-Why:	replaced by the e100 driver
-Who:	Adrian Bunk <bunk@stusta.de>
-
----------------------------
-
 What:	Unused EXPORT_SYMBOL/EXPORT_SYMBOL_GPL exports
 	(temporary transition config option provided until then)
 	The transition config option will also be removed at the same time.
diff --git a/MAINTAINERS b/MAINTAINERS
index 16202c8ac68f..74e69ab8c132 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1599,11 +1599,6 @@ L: acpi4asus-user@lists.sourceforge.net
 W: http://sourceforge.net/projects/acpi4asus
 S: Maintained
 
-EEPRO100 NETWORK DRIVER
-P:	Andrey V. Savochkin
-M:	saw@saw.sw.com.sg
-S:	Maintained
-
 EFS FILESYSTEM
 W: http://aeschi.ch.eu.org/efs/
 S: Orphan
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f749b40f954e..0f3e6b2d2808 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1414,19 +1414,6 @@ config TC35815
 	depends on NET_PCI && PCI && MIPS
 	select PHYLIB
 
-config EEPRO100
-	tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
-	depends on NET_PCI && PCI
-	select MII
-	help
-	  If you have an Intel EtherExpress PRO/100 PCI network (Ethernet)
-	  card, say Y and read the Ethernet-HOWTO, available from
-	  <http://www.tldp.org/docs.html#howto>.
-
-	  To compile this driver as a module, choose M here. The module
-	  will be called eepro100.
-
-
 config E100
 	tristate "Intel(R) PRO/100+ support"
 	depends on NET_PCI && PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f19acf8b9220..657c47b1a6b6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -53,7 +53,6 @@ obj-$(CONFIG_VORTEX) += 3c59x.o
 obj-$(CONFIG_TYPHOON) += typhoon.o
 obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
 obj-$(CONFIG_PCNET32) += pcnet32.o
-obj-$(CONFIG_EEPRO100) += eepro100.o
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
deleted file mode 100644
index 81e84843c6a0..000000000000
--- a/drivers/net/eepro100.c
+++ /dev/null
@@ -1,2400 +0,0 @@
1/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2/*
3 Written 1996-1999 by Donald Becker.
4
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
9
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
12
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
15
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23 Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25 PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
28*/
29
30static const char * const version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34/* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb) skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE 64
72#define RX_RING_SIZE 64
73/* How many slots the multicast filter setup may take.
74 Do not decrease without changing the set_rx_mode() implementation. */
75#define TX_MULTICAST_SIZE 2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
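
An editor's illustration, not part of the original file: how the two macros above are meant to cooperate. The sketch assumes the cur_tx/dirty_tx counters and the tx_full flag from struct speedo_private, which this file defines further down.

/* Sketch: stop/wake hysteresis around the Tx ring limits. */
static void example_tx_flow_control(struct net_device *dev,
				    struct speedo_private *sp)
{
	/* cur_tx and dirty_tx are free-running counters, so their
	   difference counts in-flight entries even across wraparound. */
	int in_flight = (int)(sp->cur_tx - sp->dirty_tx);

	if (in_flight >= TX_QUEUE_LIMIT) {
		/* Producer side: stop before eating into the slots
		   reserved for multicast setup frames. */
		sp->tx_full = 1;
		netif_stop_queue(dev);
	} else if (sp->tx_full && in_flight < TX_QUEUE_UNFULL) {
		/* Scavenger side: wake only once usage drops a few
		   entries below the limit, avoiding stop/wake
		   ping-pong right at the boundary. */
		sp->tx_full = 0;
		netif_wake_queue(dev);
	}
}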
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
88#define PKT_BUF_SZ 1536
89
90#include <linux/module.h>
91
92#include <linux/kernel.h>
93#include <linux/string.h>
94#include <linux/errno.h>
95#include <linux/ioport.h>
96#include <linux/slab.h>
97#include <linux/interrupt.h>
98#include <linux/timer.h>
99#include <linux/pci.h>
100#include <linux/spinlock.h>
101#include <linux/init.h>
102#include <linux/mii.h>
103#include <linux/delay.h>
104#include <linux/bitops.h>
105
106#include <asm/io.h>
107#include <asm/uaccess.h>
108#include <asm/irq.h>
109
110#include <linux/netdevice.h>
111#include <linux/etherdevice.h>
112#include <linux/rtnetlink.h>
113#include <linux/skbuff.h>
114#include <linux/ethtool.h>
115
116static int use_io;
117static int debug = -1;
118#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
119 NETIF_MSG_HW | \
120 NETIF_MSG_RX_ERR | \
121 NETIF_MSG_TX_ERR)
122#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
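
A worked example of the mapping above (an editor's note; the NETIF_MSG_* values are the standard ones from <linux/netdevice.h>):

/* debug = 4  ->  (1 << 4) - 1 = 0x000f
 *            =   NETIF_MSG_DRV | NETIF_MSG_PROBE |
 *                NETIF_MSG_LINK | NETIF_MSG_TIMER
 * A negative debug (the default -1) selects DEBUG_DEFAULT instead. */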
123
124
125MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
126MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
127MODULE_LICENSE("GPL");
128module_param(use_io, int, 0);
129module_param(debug, int, 0);
130module_param_array(options, int, NULL, 0);
131module_param_array(full_duplex, int, NULL, 0);
132module_param(congenb, int, 0);
133module_param(txfifo, int, 0);
134module_param(rxfifo, int, 0);
135module_param(txdmacount, int, 0);
136module_param(rxdmacount, int, 0);
137module_param(rx_copybreak, int, 0);
138module_param(max_interrupt_work, int, 0);
139module_param(multicast_filter_limit, int, 0);
140MODULE_PARM_DESC(debug, "debug level (0-6)");
141MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
142MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
143MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
144MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
145MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
147MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
149MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
150MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
151
152#define RUN_AT(x) (jiffies + (x))
153
154#define netdevice_start(dev)
155#define netdevice_stop(dev)
156#define netif_set_tx_timeout(dev, tf, tm) \
157 do { \
158 (dev)->tx_timeout = (tf); \
159 (dev)->watchdog_timeo = (tm); \
160 } while(0)
161
162
163
164/*
165 Theory of Operation
166
167I. Board Compatibility
168
169This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
170single-chip fast Ethernet controller for PCI, as used on the Intel
171EtherExpress Pro 100 adapter.
172
173II. Board-specific settings
174
175PCI bus devices are configured by the system at boot time, so no jumpers
176need to be set on the board. The system BIOS should be set to assign the
177PCI INTA signal to an otherwise unused system IRQ line. While it's
178possible to share PCI interrupt lines, it negatively impacts performance and
179only recent kernels support it.
180
181III. Driver operation
182
183IIIA. General
184The Speedo3 is very similar to other Intel network chips, that is to say
185"apparently designed on a different planet". This chips retains the complex
186Rx and Tx descriptors and multiple buffers pointers as previous chips, but
187also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
188Tx mode, but in a simplified lower-overhead manner: it associates only a
189single buffer descriptor with each frame descriptor.
190
191Despite the extra space overhead in each receive skbuff, the driver must use
192the simplified Rx buffer mode to assure that only a single data buffer is
193associated with each RxFD. The driver implements this by reserving space
194for the Rx descriptor at the head of each Rx skbuff.
195
196The Speedo-3 has receive and command unit base addresses that are added to
197almost all descriptor pointers. The driver sets these to zero, so that all
198pointer fields are absolute addresses.
199
200The System Control Block (SCB) of some previous Intel chips exists on the
201chip in both PCI I/O and memory space. This driver uses the I/O space
202registers, but might switch to memory mapped mode to better support non-x86
203processors.
204
205IIIB. Transmit structure
206
207The driver must use the complex Tx command+descriptor mode in order to
208have an indirect pointer to the skbuff data section. Each Tx command block
209(TxCB) is associated with two immediately appended Tx Buffer Descriptors
210(TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the
211speedo_private data structure for each adapter instance.
212
213The newer i82558 explicitly supports this structure, and can read the two
214TxBDs in the same PCI burst as the TxCB.
215
216This ring structure is used for all normal transmit packets, but the
217transmit packet descriptors aren't long enough for most non-Tx commands such
218as CmdConfigure. This is complicated by the possibility that the chip has
219already loaded the link address in the previous descriptor. So for these
220commands we convert the next free descriptor on the ring to a NoOp, and point
221that descriptor's link to the complex command.
222
223An additional complexity of these non-transmit commands is that they may be
224added asynchronously to the normal transmit queue, so we disable interrupts
225whenever the Tx descriptor ring is manipulated.
226
227A notable aspect of these special configure commands is that they do
228work with the normal Tx ring entry scavenge method. The Tx ring scavenge
229is done at interrupt time using the 'dirty_tx' index, and checking for the
230command-complete bit. The setup frames may have their NoOp command on the
231Tx ring marked as complete before the setup command itself has finished; this
232is not a problem, and the tx_ring entry can still be safely reused, as the
233tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
234
235Commands may have bits set, e.g. CmdSuspend, in the command word to either
236suspend or stop the transmit/command unit. This driver always flags the last
237command with CmdSuspend, erases the CmdSuspend in the previous command, and
238then issues a CU_RESUME.
239Note: Watch out for the potential race condition here: imagine
240 erasing the previous suspend
241 the chip processes the previous command
242 the chip processes the final command, and suspends
243 doing the CU_RESUME
244 the chip processes the next, not-yet-valid, post-final command.
245So blindly sending a CU_RESUME is only safe if we do it immediately
246after erasing the previous CmdSuspend, without the possibility of an
247intervening delay. Thus the resume command is always within the
248interrupts-disabled region. This is a timing dependence, but handling this
249condition in a timing-independent way would considerably complicate the code.
250
251Note: In previous generation Intel chips, restarting the command unit was a
252notoriously slow process. This is presumably no longer true.
253
254IIIC. Receive structure
255
256Because of the bus-master support on the Speedo3 this driver uses the new
257SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
258This scheme allocates full-sized skbuffs as receive buffers. The value
259SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
260trade-off the memory wasted by passing the full-sized skbuff to the queue
261layer for all frames vs. the copying cost of copying a frame to a
262correctly-sized skbuff.
263
264For small frames the copying cost is negligible (esp. considering that we
265are pre-loading the cache with immediately useful header information), so we
266allocate a new, minimally-sized skbuff. For large frames the copying cost
267is non-trivial, and the larger copy might flush the cache of useful data, so
268we pass up the skbuff the packet was received into.
269
270IV. Notes
271
272Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
273that stated that I could disclose the information. But I still resent
274having to sign an Intel NDA when I'm helping Intel sell their own product!
275
276*/
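
The receive path that acts on section IIIC lies past the end of this excerpt; the following is an editor's sketch of the copybreak decision only, assuming pkt_len has already been extracted from the completed RxFD and using the skb helpers of this kernel generation. The real path additionally sets skb->protocol and hands the result to netif_rx().

/* Sketch: copy small frames, pass large ones up in place. */
static struct sk_buff *copybreak_rx_example(struct sk_buff *ring_skb,
					    int pkt_len)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak &&
	    (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		skb_reserve(skb, 2);	/* 16-byte align the IP header */
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		return skb;	/* ring_skb stays on the Rx ring */
	}
	return ring_skb;	/* hand the full-sized buffer up */
}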
277
278static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
279
280/* Offsets to the various registers.
281 Accesses need not be longword aligned. */
282enum speedo_offsets {
283 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
284 SCBIntmask = 3,
285 SCBPointer = 4, /* General purpose pointer. */
286 SCBPort = 8, /* Misc. commands and operands. */
287 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
288 SCBCtrlMDI = 16, /* MDI interface control. */
289 SCBEarlyRx = 20, /* Early receive byte count. */
290};
291/* Commands that can be put in a command list entry. */
292enum commands {
293 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
294 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
295 CmdDump = 0x60000, CmdDiagnose = 0x70000,
296 CmdSuspend = 0x40000000, /* Suspend after completion. */
297 CmdIntr = 0x20000000, /* Interrupt after completion. */
298 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
299};
300/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
301 status bits. Previous driver versions used separate 16 bit fields for
302 commands and statuses. --SAW
303 */
304#if defined(__alpha__)
305# define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
306#else
307# define clear_suspend(cmd) ((__le16 *)&(cmd)->cmd_status)[1] &= ~cpu_to_le16(1<<14)
308#endif
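
/* Editor's note: bit 30 of the 32-bit cmd_status word is bit 14 of its
   upper 16-bit half, hence the ~cpu_to_le16(1<<14) mask above.  The
   read-modify-write touches only the command halfword, so it cannot
   disturb the status bits the card updates asynchronously in the lower
   halfword; Alpha gets the same effect with an atomic clear_bit() on
   the whole longword. */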
309
310enum SCBCmdBits {
311 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
312 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
313 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
314 /* The rest are Rx and Tx commands. */
315 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
316 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
317 CUDumpStats=0x0070, /* Dump then reset stats counters. */
318 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
319 RxResumeNoResources=0x0007,
320};
321
322enum SCBPort_cmds {
323 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
324};
325
326/* The Speedo3 Rx and Tx frame/buffer descriptors. */
327struct descriptor { /* A generic descriptor. */
328 volatile __le32 cmd_status; /* All command and status fields. */
329 __le32 link; /* struct descriptor * */
330 unsigned char params[0];
331};
332
333/* The Speedo3 Rx and Tx buffer descriptors. */
334struct RxFD { /* Receive frame descriptor. */
335 volatile __le32 status;
336 __le32 link; /* struct RxFD * */
337 __le32 rx_buf_addr; /* void * */
338 __le32 count;
339} RxFD_ALIGNMENT;
340
341/* Selected elements of the Tx/RxFD.status word. */
342enum RxFD_bits {
343 RxComplete=0x8000, RxOK=0x2000,
344 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
345 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
346 TxUnderrun=0x1000, StatusComplete=0x8000,
347};
348
349#define CONFIG_DATA_SIZE 22
350struct TxFD { /* Transmit frame descriptor set. */
351 __le32 status;
352 __le32 link; /* void * */
353 __le32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
354 __le32 count; /* # of TBD (=1), Tx start thresh., etc. */
355 /* This constitutes two "TBD" entries -- we only use one. */
356#define TX_DESCR_BUF_OFFSET 16
357 __le32 tx_buf_addr0; /* void *, frame to be transmitted. */
358 __le32 tx_buf_size0; /* Length of Tx frame. */
359 __le32 tx_buf_addr1; /* void *, frame to be transmitted. */
360 __le32 tx_buf_size1; /* Length of Tx frame. */
361 /* the structure must have space for at least CONFIG_DATA_SIZE starting
362 * from tx_desc_addr field */
363};
364
365/* Multicast filter setting block. --SAW */
366struct speedo_mc_block {
367 struct speedo_mc_block *next;
368 unsigned int tx;
369 dma_addr_t frame_dma;
370 unsigned int len;
371 struct descriptor frame __attribute__ ((__aligned__(16)));
372};
373
374/* Elements of the dump_statistics block. This block must be lword aligned. */
375struct speedo_stats {
376 __le32 tx_good_frames;
377 __le32 tx_coll16_errs;
378 __le32 tx_late_colls;
379 __le32 tx_underruns;
380 __le32 tx_lost_carrier;
381 __le32 tx_deferred;
382 __le32 tx_one_colls;
383 __le32 tx_multi_colls;
384 __le32 tx_total_colls;
385 __le32 rx_good_frames;
386 __le32 rx_crc_errs;
387 __le32 rx_align_errs;
388 __le32 rx_resource_errs;
389 __le32 rx_overrun_errs;
390 __le32 rx_colls_errs;
391 __le32 rx_runt_errs;
392 __le32 done_marker;
393};
394
395enum Rx_ring_state_bits {
396 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
397};
398
399/* Do not change the position (alignment) of the first few elements!
400 The later elements are grouped for cache locality.
401
402 Unfortunately, all the positions have been shifted since then.
403 A new re-alignment is required. 2000/03/06 SAW */
404struct speedo_private {
405 void __iomem *regs;
406 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
407 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
408 /* The addresses of Tx/Rx-in-place packets/buffers. */
409 struct sk_buff *tx_skbuff[TX_RING_SIZE];
410 struct sk_buff *rx_skbuff[RX_RING_SIZE];
411 /* Mapped addresses of the rings. */
412 dma_addr_t tx_ring_dma;
413#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
414 dma_addr_t rx_ring_dma[RX_RING_SIZE];
415 struct descriptor *last_cmd; /* Last command sent. */
416 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
417 spinlock_t lock; /* Group with Tx control cache line. */
418 u32 tx_threshold; /* The value for txdesc.count. */
419 struct RxFD *last_rxf; /* Last filled RX buffer. */
420 dma_addr_t last_rxf_dma;
421 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
422 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
423 struct net_device_stats stats;
424 struct speedo_stats *lstats;
425 dma_addr_t lstats_dma;
426 int chip_id;
427 struct pci_dev *pdev;
428 struct timer_list timer; /* Media selection timer. */
429 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
430 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
431 long in_interrupt; /* Word-aligned dev->interrupt */
432 unsigned char acpi_pwr;
433 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
434 unsigned int tx_full:1; /* The Tx queue is full. */
435 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
436 unsigned int rx_bug:1; /* Work around receiver hang errata. */
437 unsigned char default_port:8; /* Last dev->if_port value. */
438 unsigned char rx_ring_state; /* RX ring status flags. */
439 unsigned short phy[2]; /* PHY media interfaces available. */
440 unsigned short partner; /* Link partner caps. */
441 struct mii_if_info mii_if; /* MII API hooks, info */
442 u32 msg_enable; /* debug message level */
443};
444
445/* The parameters for a CmdConfigure operation.
446 There are so many options that it would be difficult to document each bit.
447 We mostly use the default or recommended settings. */
448static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
449 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
450 0, 0x2E, 0, 0x60, 0,
451 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
452 0x3f, 0x05, };
453static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
454 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
455 0, 0x2E, 0, 0x60, 0x08, 0x88,
456 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
457 0x31, 0x05, };
458
459/* PHY media interface chips. */
460static const char * const phys[] = {
461 "None", "i82553-A/B", "i82553-C", "i82503",
462 "DP83840", "80c240", "80c24", "i82555",
463 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
464 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
465enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
466 S80C24, I82555, DP83840A=10, };
467static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
468#define EE_READ_CMD (6)
469
470static int eepro100_init_one(struct pci_dev *pdev,
471 const struct pci_device_id *ent);
472
473static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
474static int mdio_read(struct net_device *dev, int phy_id, int location);
475static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
476static int speedo_open(struct net_device *dev);
477static void speedo_resume(struct net_device *dev);
478static void speedo_timer(unsigned long data);
479static void speedo_init_rx_ring(struct net_device *dev);
480static void speedo_tx_timeout(struct net_device *dev);
481static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
482static void speedo_refill_rx_buffers(struct net_device *dev, int force);
483static int speedo_rx(struct net_device *dev);
484static void speedo_tx_buffer_gc(struct net_device *dev);
485static irqreturn_t speedo_interrupt(int irq, void *dev_instance);
486static int speedo_close(struct net_device *dev);
487static struct net_device_stats *speedo_get_stats(struct net_device *dev);
488static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
489static void set_rx_mode(struct net_device *dev);
490static void speedo_show_state(struct net_device *dev);
491static const struct ethtool_ops ethtool_ops;
492
493
494
495#ifdef honor_default_port
496/* Optional driver feature to allow forcing the transceiver setting.
497 Not recommended. */
498static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
499 0x2000, 0x2100, 0x0400, 0x3100};
500#endif
501
502/* How to wait for the command unit to accept a command.
503 Typically this takes 0 ticks. */
504static inline unsigned char wait_for_cmd_done(struct net_device *dev,
505 struct speedo_private *sp)
506{
507 int wait = 1000;
508 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
509 unsigned char r;
510
511 do {
512 udelay(1);
513 r = ioread8(cmd_ioaddr);
514 } while(r && --wait >= 0);
515
516 if (wait < 0)
517 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
518 return r;
519}
520
521static int __devinit eepro100_init_one (struct pci_dev *pdev,
522 const struct pci_device_id *ent)
523{
524 void __iomem *ioaddr;
525 int irq, pci_bar;
526 int acpi_idle_state = 0, pm;
527 static int cards_found /* = 0 */;
528 unsigned long pci_base;
529
530#ifndef MODULE
531 /* when built-in, we only print version if device is found */
532 static int did_version;
533 if (did_version++ == 0)
534 printk(version);
535#endif
536
537 /* save power state before pci_enable_device overwrites it */
538 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
539 if (pm) {
540 u16 pwr_command;
541 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
542 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
543 }
544
545 if (pci_enable_device(pdev))
546 goto err_out_free_mmio_region;
547
548 pci_set_master(pdev);
549
550 if (!request_region(pci_resource_start(pdev, 1),
551 pci_resource_len(pdev, 1), "eepro100")) {
552 dev_err(&pdev->dev, "eepro100: cannot reserve I/O ports\n");
553 goto err_out_none;
554 }
555 if (!request_mem_region(pci_resource_start(pdev, 0),
556 pci_resource_len(pdev, 0), "eepro100")) {
557 dev_err(&pdev->dev, "eepro100: cannot reserve MMIO region\n");
558 goto err_out_free_pio_region;
559 }
560
561 irq = pdev->irq;
562 pci_bar = use_io ? 1 : 0;
563 pci_base = pci_resource_start(pdev, pci_bar);
564 if (DEBUG & NETIF_MSG_PROBE)
565 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
566 pci_base, irq);
567
568 ioaddr = pci_iomap(pdev, pci_bar, 0);
569 if (!ioaddr) {
570 dev_err(&pdev->dev, "eepro100: cannot remap IO\n");
571 goto err_out_free_mmio_region;
572 }
573
574 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
575 cards_found++;
576 else
577 goto err_out_iounmap;
578
579 return 0;
580
581err_out_iounmap: ;
582 pci_iounmap(pdev, ioaddr);
583err_out_free_mmio_region:
584 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
585err_out_free_pio_region:
586 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
587err_out_none:
588 return -ENODEV;
589}
590
591#ifdef CONFIG_NET_POLL_CONTROLLER
592/*
593 * Polling 'interrupt' - used by things like netconsole to send skbs
594 * without having to re-enable interrupts. It's not called while
595 * the interrupt routine is executing.
596 */
597
598static void poll_speedo (struct net_device *dev)
599{
600 /* disable_irq is not very nice, but with the funny lockless design
601 we have no other choice. */
602 disable_irq(dev->irq);
603 speedo_interrupt (dev->irq, dev);
604 enable_irq(dev->irq);
605}
606#endif
607
608static int __devinit speedo_found1(struct pci_dev *pdev,
609 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
610{
611 struct net_device *dev;
612 struct speedo_private *sp;
613 const char *product;
614 int i, option;
615 u16 eeprom[0x100];
616 int size;
617 void *tx_ring_space;
618 dma_addr_t tx_ring_dma;
619
620 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
621 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
622 if (tx_ring_space == NULL)
623 return -1;
624
625 dev = alloc_etherdev(sizeof(struct speedo_private));
626 if (dev == NULL) {
627 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
628 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
629 return -1;
630 }
631
632 SET_NETDEV_DEV(dev, &pdev->dev);
633
634 if (dev->mem_start > 0)
635 option = dev->mem_start;
636 else if (card_idx >= 0 && options[card_idx] >= 0)
637 option = options[card_idx];
638 else
639 option = 0;
640
641 rtnl_lock();
642 if (dev_alloc_name(dev, dev->name) < 0)
643 goto err_free_unlock;
644
645 /* Read the station address EEPROM before doing the reset.
646 Nominally this should even be done before accepting the device, but
647 then we wouldn't have a device name with which to report the error.
648 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
649 */
650 {
651 void __iomem *iobase;
652 int read_cmd, ee_size;
653 u16 sum;
654 int j;
655
656 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
657 requirements. */
658 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
659 if (!iobase)
660 goto err_free_unlock;
661 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
662 == 0xffe0000) {
663 ee_size = 0x100;
664 read_cmd = EE_READ_CMD << 24;
665 } else {
666 ee_size = 0x40;
667 read_cmd = EE_READ_CMD << 22;
668 }
669
670 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
671 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
672 eeprom[i] = value;
673 sum += value;
674 if (i < 3) {
675 dev->dev_addr[j++] = value;
676 dev->dev_addr[j++] = value >> 8;
677 }
678 }
679 if (sum != 0xBABA)
680 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
681 "check settings before activating this device!\n",
682 dev->name, sum);
683 /* Don't unregister_netdev(dev); as the EEPro may actually be
684 usable, especially if the MAC address is set later.
685 On the other hand, it may be unusable if MDI data is corrupted. */
686
687 pci_iounmap(pdev, iobase);
688 }
689
690 /* Reset the chip: stop Tx and Rx processes and clear counters.
691 This takes less than 10usec and will easily finish before the next
692 action. */
693 iowrite32(PortReset, ioaddr + SCBPort);
694 ioread32(ioaddr + SCBPort);
695 udelay(10);
696
697 if (eeprom[3] & 0x0100)
698 product = "OEM i82557/i82558 10/100 Ethernet";
699 else
700 product = pci_name(pdev);
701
702 printk(KERN_INFO "%s: %s, %pM, IRQ %d.\n", dev->name, product,
703 dev->dev_addr, pdev->irq);
704
705 sp = netdev_priv(dev);
706
707 /* we must initialize this early, for mdio_{read,write} */
708 sp->regs = ioaddr;
709
710#if 1 || defined(kernel_bloat)
711 /* OK, this is pure kernel bloat. I don't like it when other drivers
712 waste non-pageable kernel space to emit similar messages, but I need
713 them for bug reports. */
714 {
715 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
716 /* The self-test results must be paragraph aligned. */
717 volatile s32 *self_test_results;
718 int boguscnt = 16000; /* Timeout for self-test. */
719 if ((eeprom[3] & 0x03) != 0x03)
720 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
721 " work-around.\n");
722 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
723 " connectors present:",
724 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
725 for (i = 0; i < 4; i++)
726 if (eeprom[5] & (1<<i))
727 printk(connectors[i]);
728 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
729 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
730 if (eeprom[7] & 0x0700)
731 printk(KERN_INFO " Secondary interface chip %s.\n",
732 phys[(eeprom[7]>>8)&7]);
733 if (((eeprom[6]>>8) & 0x3f) == DP83840
734 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
735 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
736 if (congenb)
737 mdi_reg23 |= 0x0100;
738 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
739 mdi_reg23);
740 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
741 }
742 if ((option >= 0) && (option & 0x70)) {
743 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
744 (option & 0x20 ? 100 : 10),
745 (option & 0x10 ? "full" : "half"));
746 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
747 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
748 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
749 }
750
751 /* Perform a system self-test. */
752 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
753 self_test_results[0] = 0;
754 self_test_results[1] = -1;
755 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
756 do {
757 udelay(10);
758 } while (self_test_results[1] == -1 && --boguscnt >= 0);
759
760 if (boguscnt < 0) { /* Test optimized out. */
761 printk(KERN_ERR "Self test failed, status %8.8x:\n"
762 KERN_ERR " Failure to initialize the i82557.\n"
763 KERN_ERR " Verify that the card is a bus-master"
764 " capable slot.\n",
765 self_test_results[1]);
766 } else
767 printk(KERN_INFO " General self-test: %s.\n"
768 KERN_INFO " Serial sub-system self-test: %s.\n"
769 KERN_INFO " Internal registers self-test: %s.\n"
770 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
771 self_test_results[1] & 0x1000 ? "failed" : "passed",
772 self_test_results[1] & 0x0020 ? "failed" : "passed",
773 self_test_results[1] & 0x0008 ? "failed" : "passed",
774 self_test_results[1] & 0x0004 ? "failed" : "passed",
775 self_test_results[0]);
776 }
777#endif /* kernel_bloat */
778
779 iowrite32(PortReset, ioaddr + SCBPort);
780 ioread32(ioaddr + SCBPort);
781 udelay(10);
782
783 /* Return the chip to its original power state. */
784 pci_set_power_state(pdev, acpi_idle_state);
785
786 pci_set_drvdata (pdev, dev);
787 SET_NETDEV_DEV(dev, &pdev->dev);
788
789 dev->irq = pdev->irq;
790
791 sp->pdev = pdev;
792 sp->msg_enable = DEBUG;
793 sp->acpi_pwr = acpi_idle_state;
794 sp->tx_ring = tx_ring_space;
795 sp->tx_ring_dma = tx_ring_dma;
796 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
797 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
798 init_timer(&sp->timer); /* used in ioctl() */
799 spin_lock_init(&sp->lock);
800
801 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
802 if (card_idx >= 0) {
803 if (full_duplex[card_idx] >= 0)
804 sp->mii_if.full_duplex = full_duplex[card_idx];
805 }
806 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
807
808 sp->phy[0] = eeprom[6];
809 sp->phy[1] = eeprom[7];
810
811 sp->mii_if.phy_id = eeprom[6] & 0x1f;
812 sp->mii_if.phy_id_mask = 0x1f;
813 sp->mii_if.reg_num_mask = 0x1f;
814 sp->mii_if.dev = dev;
815 sp->mii_if.mdio_read = mdio_read;
816 sp->mii_if.mdio_write = mdio_write;
817
818 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
819 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
820 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
821 || (pdev->device == 0x245D)) {
822 sp->chip_id = 1;
823 }
824
825 if (sp->rx_bug)
826 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
827
828 /* The Speedo-specific entries in the device structure. */
829 dev->open = &speedo_open;
830 dev->hard_start_xmit = &speedo_start_xmit;
831 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
832 dev->stop = &speedo_close;
833 dev->get_stats = &speedo_get_stats;
834 dev->set_multicast_list = &set_rx_mode;
835 dev->do_ioctl = &speedo_ioctl;
836 SET_ETHTOOL_OPS(dev, &ethtool_ops);
837#ifdef CONFIG_NET_POLL_CONTROLLER
838 dev->poll_controller = &poll_speedo;
839#endif
840
841 if (register_netdevice(dev))
842 goto err_free_unlock;
843 rtnl_unlock();
844
845 return 0;
846
847 err_free_unlock:
848 rtnl_unlock();
849 free_netdev(dev);
850 return -1;
851}
852
853static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
854{
855 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
856 int wait = 0;
857 do
858 if (ioread8(cmd_ioaddr) == 0) break;
859 while(++wait <= 200);
860 if (wait > 100)
861 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
862 ioread8(cmd_ioaddr), wait);
863
864 iowrite8(cmd, cmd_ioaddr);
865
866 for (wait = 0; wait <= 100; wait++)
867 if (ioread8(cmd_ioaddr) == 0) return;
868 for (; wait <= 20000; wait++)
869 if (ioread8(cmd_ioaddr) == 0) return;
870 else udelay(1);
871 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
872 " Current status %8.8x.\n",
873 cmd, wait, ioread32(sp->regs + SCBStatus));
874}
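
/* Editor's note: the acceptance wait above is two-phase -- roughly 100
   back-to-back reads for the common instant case, then up to ~20 ms of
   udelay(1) polling for the genuinely slow commands this helper exists
   for (RxAddrLoad, CUCmdBase, RxStart, CUDumpStats).  Note the quirk in
   the first loop: it polls up to 200 times but already warns past 100. */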
875
876/* Serial EEPROM section.
877 A "bit" grungy, but we work our way through bit-by-bit :->. */
878/* EEPROM_Ctrl bits. */
879#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
880#define EE_CS 0x02 /* EEPROM chip select. */
881#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
882#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
883#define EE_ENB (0x4800 | EE_CS)
884#define EE_WRITE_0 0x4802
885#define EE_WRITE_1 0x4806
886#define EE_OFFSET SCBeeprom
887
888/* The fixes for the code were kindly provided by Dragan Stancevic
889 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
890 access timing.
891 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
892 interval for serial EEPROM. However, it looks like there is an
893 additional requirement dictating larger udelay's in the code below.
894 2000/05/24 SAW */
895static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
896{
897 unsigned retval = 0;
898 void __iomem *ee_addr = ioaddr + SCBeeprom;
899
900 iowrite16(EE_ENB, ee_addr); udelay(2);
901 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
902
903 /* Shift the command bits out. */
904 do {
905 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
906 iowrite16(dataval, ee_addr); udelay(2);
907 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
908 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
909 } while (--cmd_len >= 0);
910 iowrite16(EE_ENB, ee_addr); udelay(2);
911
912 /* Terminate the EEPROM access. */
913 iowrite16(EE_ENB & ~EE_CS, ee_addr);
914 return retval;
915}
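
/* Editor's note on using do_eeprom_cmd(): the caller packs the opcode
   and address into the high bits of 'cmd' (EE_READ_CMD << 24 for 8-bit
   address parts, << 22 for 6-bit ones -- see the probe logic in
   speedo_found1() above), and cmd_len names the highest bit position
   shifted out.  Bits read back on EE_DATA_READ accumulate into retval
   on every clock, so for a read the low 16 bits of the return value
   hold the EEPROM word. */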
916
917static int mdio_read(struct net_device *dev, int phy_id, int location)
918{
919 struct speedo_private *sp = netdev_priv(dev);
920 void __iomem *ioaddr = sp->regs;
921 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
922 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
923 do {
924 val = ioread32(ioaddr + SCBCtrlMDI);
925 if (--boguscnt < 0) {
926 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
927 break;
928 }
929 } while (! (val & 0x10000000));
930 return val & 0xffff;
931}
932
933static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
934{
935 struct speedo_private *sp = netdev_priv(dev);
936 void __iomem *ioaddr = sp->regs;
937 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
938 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
939 ioaddr + SCBCtrlMDI);
940 do {
941 val = ioread32(ioaddr + SCBCtrlMDI);
942 if (--boguscnt < 0) {
943 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
944 break;
945 }
946 } while (! (val & 0x10000000));
947}
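
/* Editor's note on the SCBCtrlMDI word used by both helpers, inferred
   from the constants in the code: bits 0-15 data, bits 16-20 PHY
   register, bits 21-25 PHY address, bits 26-27 opcode (0x04000000 =
   write, 0x08000000 = read), and bit 28 (0x10000000) the ready flag
   that both polling loops wait for. */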
948
949static int
950speedo_open(struct net_device *dev)
951{
952 struct speedo_private *sp = netdev_priv(dev);
953 void __iomem *ioaddr = sp->regs;
954 int retval;
955
956 if (netif_msg_ifup(sp))
957 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
958
959 pci_set_power_state(sp->pdev, PCI_D0);
960
961 /* Set up the Tx queue early.. */
962 sp->cur_tx = 0;
963 sp->dirty_tx = 0;
964 sp->last_cmd = NULL;
965 sp->tx_full = 0;
966 sp->in_interrupt = 0;
967
968 /* .. we can safely take handler calls during init. */
969 retval = request_irq(dev->irq, &speedo_interrupt, IRQF_SHARED, dev->name, dev);
970 if (retval) {
971 return retval;
972 }
973
974 dev->if_port = sp->default_port;
975
976#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
977 /* Retrigger negotiation to reset previous errors. */
978 if ((sp->phy[0] & 0x8000) == 0) {
979 int phy_addr = sp->phy[0] & 0x1f ;
980 /* Use 0x3300 for restarting NWay, other values to force xcvr:
981 0x0000 10-HD
982 0x0100 10-FD
983 0x2000 100-HD
984 0x2100 100-FD
985 */
986#ifdef honor_default_port
987 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
988#else
989 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
990#endif
991 }
992#endif
993
994 speedo_init_rx_ring(dev);
995
996 /* Fire up the hardware. */
997 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
998 speedo_resume(dev);
999
1000 netdevice_start(dev);
1001 netif_start_queue(dev);
1002
1003 /* Setup the chip and configure the multicast list. */
1004 sp->mc_setup_head = NULL;
1005 sp->mc_setup_tail = NULL;
1006 sp->flow_ctrl = sp->partner = 0;
1007 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1008 set_rx_mode(dev);
1009 if ((sp->phy[0] & 0x8000) == 0)
1010 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1011
1012 mii_check_link(&sp->mii_if);
1013
1014 if (netif_msg_ifup(sp)) {
1015 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1016 dev->name, ioread16(ioaddr + SCBStatus));
1017 }
1018
1019 /* Set the timer. The timer serves a dual purpose:
1020 1) to monitor the media interface (e.g. link beat) and perhaps switch
1021 to an alternate media type
1022 2) to monitor Rx activity, and restart the Rx process if the receiver
1023 hangs. */
1024 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1025 sp->timer.data = (unsigned long)dev;
1026 sp->timer.function = &speedo_timer; /* timer handler */
1027 add_timer(&sp->timer);
1028
1029 /* No need to wait for the command unit to accept here. */
1030 if ((sp->phy[0] & 0x8000) == 0)
1031 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1032
1033 return 0;
1034}
1035
1036/* Start the chip hardware after a full reset. */
1037static void speedo_resume(struct net_device *dev)
1038{
1039 struct speedo_private *sp = netdev_priv(dev);
1040 void __iomem *ioaddr = sp->regs;
1041
1042 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1043 sp->tx_threshold = 0x01208000;
1044
1045 /* Set the segment registers to '0'. */
1046 if (wait_for_cmd_done(dev, sp) != 0) {
1047 iowrite32(PortPartialReset, ioaddr + SCBPort);
1048 udelay(10);
1049 }
1050
1051 iowrite32(0, ioaddr + SCBPointer);
1052 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1053 udelay(10); /* Bogus, but it avoids the bug. */
1054
1055 /* Note: these next two operations can take a while. */
1056 do_slow_command(dev, sp, RxAddrLoad);
1057 do_slow_command(dev, sp, CUCmdBase);
1058
1059 /* Load the statistics block and rx ring addresses. */
1060 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1061 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1062
1063 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1064 sp->lstats->done_marker = 0;
1065 wait_for_cmd_done(dev, sp);
1066
1067 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1068 if (netif_msg_rx_err(sp))
1069 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1070 dev->name);
1071 } else {
1072 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1073 ioaddr + SCBPointer);
1074 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1075 }
1076
1077 /* Note: RxStart should complete instantly. */
1078 do_slow_command(dev, sp, RxStart);
1079 do_slow_command(dev, sp, CUDumpStats);
1080
1081 /* Fill the first command with our physical address. */
1082 {
1083 struct descriptor *ias_cmd;
1084
1085 ias_cmd =
1086 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1087 /* Avoid a bug(?!) here by marking the command already completed. */
1088 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1089 ias_cmd->link =
1090 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1091 memcpy(ias_cmd->params, dev->dev_addr, 6);
1092 if (sp->last_cmd)
1093 clear_suspend(sp->last_cmd);
1094 sp->last_cmd = ias_cmd;
1095 }
1096
1097 /* Start the chip's Tx process and unmask interrupts. */
1098 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1099 ioaddr + SCBPointer);
1100 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1101 remain masked --Dragan */
1102 iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1103}
1104
1105/*
1106 * Sometimes the receiver stops making progress. This routine knows how to
1107 * get it going again, without losing packets or being otherwise nasty like
1108 * a chip reset would be. Previously the driver had a whole sequence
1109 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1110 * do another, etc. But those things don't really matter. Separate logic
1111 * in the ISR provides for allocating buffers--the other half of operation
1112 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1113 * The problem with the old, more involved algorithm shows up under
1114 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1115 */
1116static void
1117speedo_rx_soft_reset(struct net_device *dev)
1118{
1119 struct speedo_private *sp = netdev_priv(dev);
1120 struct RxFD *rfd;
1121 void __iomem *ioaddr;
1122
1123 ioaddr = sp->regs;
1124 if (wait_for_cmd_done(dev, sp) != 0) {
1125 printk("%s: previous command stalled\n", dev->name);
1126 return;
1127 }
1128 /*
1129 * Put the hardware into a known state.
1130 */
1131 iowrite8(RxAbort, ioaddr + SCBCmd);
1132
1133 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1134
1135 rfd->rx_buf_addr = cpu_to_le32(0xffffffff);
1136
1137 if (wait_for_cmd_done(dev, sp) != 0) {
1138 printk("%s: RxAbort command stalled\n", dev->name);
1139 return;
1140 }
1141 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1142 ioaddr + SCBPointer);
1143 iowrite8(RxStart, ioaddr + SCBCmd);
1144}
1145
1146
1147/* Media monitoring and control. */
1148static void speedo_timer(unsigned long data)
1149{
1150 struct net_device *dev = (struct net_device *)data;
1151 struct speedo_private *sp = netdev_priv(dev);
1152 void __iomem *ioaddr = sp->regs;
1153 int phy_num = sp->phy[0] & 0x1f;
1154
1155 /* We have MII and lost link beat. */
1156 if ((sp->phy[0] & 0x8000) == 0) {
1157 int partner = mdio_read(dev, phy_num, MII_LPA);
1158 if (partner != sp->partner) {
1159 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1160 if (netif_msg_link(sp)) {
1161 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1162 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1163 dev->name, sp->partner, partner, sp->mii_if.advertising);
1164 }
1165 sp->partner = partner;
1166 if (flow_ctrl != sp->flow_ctrl) {
1167 sp->flow_ctrl = flow_ctrl;
1168 sp->rx_mode = -1; /* Trigger a reload. */
1169 }
1170 }
1171 }
1172 mii_check_link(&sp->mii_if);
1173 if (netif_msg_timer(sp)) {
1174 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1175 dev->name, ioread16(ioaddr + SCBStatus));
1176 }
1177 if (sp->rx_mode < 0 ||
1178 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1179 /* We haven't received a packet in a Long Time. We might have been
1180 bitten by the receiver hang bug. This can be cleared by sending
1181 a set multicast list command. */
1182 if (netif_msg_timer(sp))
1183 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1184 " from a timer routine,"
1185 " m=%d, j=%ld, l=%ld.\n",
1186 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1187 set_rx_mode(dev);
1188 }
1189 /* We must continue to monitor the media. */
1190 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1191 add_timer(&sp->timer);
1192}
1193
1194static void speedo_show_state(struct net_device *dev)
1195{
1196 struct speedo_private *sp = netdev_priv(dev);
1197 int i;
1198
1199 if (netif_msg_pktdata(sp)) {
1200 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1201 dev->name, sp->cur_tx, sp->dirty_tx);
1202 for (i = 0; i < TX_RING_SIZE; i++)
1203 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1204 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1205 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1206 i, sp->tx_ring[i].status);
1207
1208 printk(KERN_DEBUG "%s: Printing Rx ring"
1209 " (next to receive into %u, dirty index %u).\n",
1210 dev->name, sp->cur_rx, sp->dirty_rx);
1211 for (i = 0; i < RX_RING_SIZE; i++)
1212 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1213 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1214 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1215 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1216 i, (sp->rx_ringp[i] != NULL) ?
1217 (unsigned)sp->rx_ringp[i]->status : 0);
1218 }
1219
1220#if 0
1221 {
1222 void __iomem *ioaddr = sp->regs;
1223 int phy_num = sp->phy[0] & 0x1f;
1224 for (i = 0; i < 16; i++) {
1225 /* FIXME: what does it mean? --SAW */
1226 if (i == 6) i = 21;
1227 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1228 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1229 }
1230 }
1231#endif
1232
1233}
1234
1235/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1236static void
1237speedo_init_rx_ring(struct net_device *dev)
1238{
1239 struct speedo_private *sp = netdev_priv(dev);
1240 struct RxFD *rxf, *last_rxf = NULL;
1241 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1242 int i;
1243
1244 sp->cur_rx = 0;
1245
1246 for (i = 0; i < RX_RING_SIZE; i++) {
1247 struct sk_buff *skb;
1248 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1249 if (skb)
1250 rx_align(skb); /* Align IP on 16 byte boundary */
1251 sp->rx_skbuff[i] = skb;
1252 if (skb == NULL)
1253 break; /* OK. Just initially short of Rx bufs. */
1254 skb->dev = dev; /* Mark as being used by this device. */
1255 rxf = (struct RxFD *)skb->data;
1256 sp->rx_ringp[i] = rxf;
1257 sp->rx_ring_dma[i] =
1258 pci_map_single(sp->pdev, rxf,
1259 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1260 skb_reserve(skb, sizeof(struct RxFD));
1261 if (last_rxf) {
1262 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1263 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1264 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1265 }
1266 last_rxf = rxf;
1267 last_rxf_dma = sp->rx_ring_dma[i];
1268 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1269 rxf->link = 0; /* None yet. */
1270 /* This field unused by i82557. */
1271 rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
1272 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1273 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1274 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1275 }
1276 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1277 /* Mark the last entry as end-of-list. */
1278 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1279 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1280 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1281 sp->last_rxf = last_rxf;
1282 sp->last_rxf_dma = last_rxf_dma;
1283}
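
/* Editor's sketch of the per-entry layout built above:

     +----------------------+ <- rxf == skb->data before the reserve;
     | struct RxFD          |    sp->rx_ring_dma[i] maps from here
     +----------------------+ <- skb->data after skb_reserve()
     | packet data          |    up to PKT_BUF_SZ bytes, DMA'd in
     +----------------------+

   The Rx descriptor lives at the head of the same skb that carries the
   packet, which is how the driver keeps exactly one data buffer per
   RxFD (section IIIA of the comment above). */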
1284
1285static void speedo_purge_tx(struct net_device *dev)
1286{
1287 struct speedo_private *sp = netdev_priv(dev);
1288 int entry;
1289
1290 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1291 entry = sp->dirty_tx % TX_RING_SIZE;
1292 if (sp->tx_skbuff[entry]) {
1293 sp->stats.tx_errors++;
1294 pci_unmap_single(sp->pdev,
1295 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1296 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1297 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1298 sp->tx_skbuff[entry] = NULL;
1299 }
1300 sp->dirty_tx++;
1301 }
1302 while (sp->mc_setup_head != NULL) {
1303 struct speedo_mc_block *t;
1304 if (netif_msg_tx_err(sp))
1305 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1306 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1307 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1308 t = sp->mc_setup_head->next;
1309 kfree(sp->mc_setup_head);
1310 sp->mc_setup_head = t;
1311 }
1312 sp->mc_setup_tail = NULL;
1313 sp->tx_full = 0;
1314 netif_wake_queue(dev);
1315}
1316
1317static void reset_mii(struct net_device *dev)
1318{
1319 struct speedo_private *sp = netdev_priv(dev);
1320
1321 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1322 if ((sp->phy[0] & 0x8000) == 0) {
1323 int phy_addr = sp->phy[0] & 0x1f;
1324 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1325 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1326 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1327 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1328 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1329 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1330#ifdef honor_default_port
1331 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1332#else
1333 mdio_read(dev, phy_addr, MII_BMCR);
1334 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1335 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1336#endif
1337 }
1338}
1339
1340static void speedo_tx_timeout(struct net_device *dev)
1341{
1342 struct speedo_private *sp = netdev_priv(dev);
1343 void __iomem *ioaddr = sp->regs;
1344 int status = ioread16(ioaddr + SCBStatus);
1345 unsigned long flags;
1346
1347 if (netif_msg_tx_err(sp)) {
1348 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1349 " %4.4x at %d/%d command %8.8x.\n",
1350 dev->name, status, ioread16(ioaddr + SCBCmd),
1351 sp->dirty_tx, sp->cur_tx,
1352 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1353
1354 }
1355 speedo_show_state(dev);
1356#if 0
1357 if ((status & 0x00C0) != 0x0080
1358 && (status & 0x003C) == 0x0010) {
1359 /* Only the command unit has stopped. */
1360 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1361 dev->name);
1362 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1363 ioaddr + SCBPointer);
1364 iowrite16(CUStart, ioaddr + SCBCmd);
1365 reset_mii(dev);
1366 } else {
1367#else
1368 {
1369#endif
1370 del_timer_sync(&sp->timer);
1371 /* Reset the Tx and Rx units. */
1372 iowrite32(PortReset, ioaddr + SCBPort);
1373 /* We may get spurious interrupts here. But I don't think that they
1374 may do much harm. 1999/12/09 SAW */
1375 udelay(10);
1376 /* Disable interrupts. */
1377 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1378 synchronize_irq(dev->irq);
1379 speedo_tx_buffer_gc(dev);
1380 /* Free as much as possible.
1381 It helps to recover from a hang because of out-of-memory.
1382 It also simplifies speedo_resume() in case TX ring is full or
1383 close-to-be full. */
1384 speedo_purge_tx(dev);
1385 speedo_refill_rx_buffers(dev, 1);
1386 spin_lock_irqsave(&sp->lock, flags);
1387 speedo_resume(dev);
1388 sp->rx_mode = -1;
1389 dev->trans_start = jiffies;
1390 spin_unlock_irqrestore(&sp->lock, flags);
1391 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1392 /* Reset MII transceiver. Do it before starting the timer to serialize
1393 mdio_xxx operations. Yes, it's paranoia :-) 2000/05/09 SAW */
1394 reset_mii(dev);
1395 sp->timer.expires = RUN_AT(2*HZ);
1396 add_timer(&sp->timer);
1397 }
1398 return;
1399}
1400
1401static int
1402speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1403{
1404 struct speedo_private *sp = netdev_priv(dev);
1405 void __iomem *ioaddr = sp->regs;
1406 int entry;
1407
1408 /* Prevent interrupts from changing the Tx ring from underneath us. */
1409 unsigned long flags;
1410
1411 spin_lock_irqsave(&sp->lock, flags);
1412
1413 /* Check if there is enough space. */
1414 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1415 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1416 netif_stop_queue(dev);
1417 sp->tx_full = 1;
1418 spin_unlock_irqrestore(&sp->lock, flags);
1419 return 1;
1420 }
1421
1422 /* Calculate the Tx descriptor entry. */
1423 entry = sp->cur_tx++ % TX_RING_SIZE;
1424
1425 sp->tx_skbuff[entry] = skb;
1426 sp->tx_ring[entry].status =
1427 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
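	/* Ask for a Tx-completion interrupt on only one descriptor in every
	   TX_RING_SIZE/4 to keep the interrupt rate down; the rest are
	   reaped in batches by speedo_tx_buffer_gc(). */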
1428 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1429 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1430 sp->tx_ring[entry].link =
1431 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1432 sp->tx_ring[entry].tx_desc_addr =
1433 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1434 /* The data region is always in one buffer descriptor. */
1435 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1436 sp->tx_ring[entry].tx_buf_addr0 =
1437 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1438 skb->len, PCI_DMA_TODEVICE));
1439 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1440
1441 /* workaround for hardware bug on 10 mbit half duplex */
1442
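	/* The workaround is a NOP issued to the SCB command register before
	   the resume; the 1 us delay presumably gives the command unit time
	   to settle. */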
1443 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1444 wait_for_cmd_done(dev, sp);
1445 iowrite8(0, ioaddr + SCBCmd);
1446 udelay(1);
1447 }
1448
1449 /* Trigger the command unit resume. */
1450 wait_for_cmd_done(dev, sp);
1451 clear_suspend(sp->last_cmd);
1452 /* We want the time window between clearing suspend flag on the previous
1453 command and resuming CU to be as small as possible.
1454 Interrupts in between are highly undesirable. --SAW */
1455 iowrite8(CUResume, ioaddr + SCBCmd);
1456 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1457
1458 /* Leave room for set_rx_mode(). If there is no more space than that
1459 reserved for the multicast filter, mark the ring as full. */
1460 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1461 netif_stop_queue(dev);
1462 sp->tx_full = 1;
1463 }
1464
1465 spin_unlock_irqrestore(&sp->lock, flags);
1466
1467 dev->trans_start = jiffies;
1468
1469 return 0;
1470}
1471
1472static void speedo_tx_buffer_gc(struct net_device *dev)
1473{
1474 unsigned int dirty_tx;
1475 struct speedo_private *sp = netdev_priv(dev);
1476
1477 dirty_tx = sp->dirty_tx;
1478 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1479 int entry = dirty_tx % TX_RING_SIZE;
1480 int status = le32_to_cpu(sp->tx_ring[entry].status);
1481
1482 if (netif_msg_tx_done(sp))
1483 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1484 entry, status);
1485 if ((status & StatusComplete) == 0)
1486 break; /* It still hasn't been processed. */
1487 if (status & TxUnderrun)
1488 if (sp->tx_threshold < 0x01e08000) {
1489 if (netif_msg_tx_err(sp))
1490 printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1491 dev->name);
1492 sp->tx_threshold += 0x00040000;
1493 }
1494 /* Free the original skb. */
1495 if (sp->tx_skbuff[entry]) {
1496 sp->stats.tx_packets++; /* Count only user packets. */
1497 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1498 pci_unmap_single(sp->pdev,
1499 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1500 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1501 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1502 sp->tx_skbuff[entry] = NULL;
1503 }
1504 dirty_tx++;
1505 }
1506
1507 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1508 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1509 " full=%d.\n",
1510 dirty_tx, sp->cur_tx, sp->tx_full);
1511 dirty_tx += TX_RING_SIZE;
1512 }
1513
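	/* Multicast setup frames live outside the Tx ring; free each block
	   once dirty_tx has advanced past the ring slot that chained to it. */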
1514 while (sp->mc_setup_head != NULL
1515 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1516 struct speedo_mc_block *t;
1517 if (netif_msg_tx_err(sp))
1518 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1519 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1520 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1521 t = sp->mc_setup_head->next;
1522 kfree(sp->mc_setup_head);
1523 sp->mc_setup_head = t;
1524 }
1525 if (sp->mc_setup_head == NULL)
1526 sp->mc_setup_tail = NULL;
1527
1528 sp->dirty_tx = dirty_tx;
1529}
1530
1531/* The interrupt handler does all of the Rx thread work and cleans up
1532 after the Tx thread. */
1533static irqreturn_t speedo_interrupt(int irq, void *dev_instance)
1534{
1535 struct net_device *dev = (struct net_device *)dev_instance;
1536 struct speedo_private *sp;
1537 void __iomem *ioaddr;
1538 long boguscnt = max_interrupt_work;
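	/* boguscnt bounds the service loop below so that a stuck interrupt
	   source cannot keep us in the handler indefinitely. */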
1539 unsigned short status;
1540 unsigned int handled = 0;
1541
1542 sp = netdev_priv(dev);
1543 ioaddr = sp->regs;
1544
1545#ifndef final_version
1546 /* A lock to prevent simultaneous entry on SMP machines. */
1547 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1548 printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
1549 dev->name);
1550 sp->in_interrupt = 0; /* Avoid halting machine. */
1551 return IRQ_NONE;
1552 }
1553#endif
1554
1555 do {
1556 status = ioread16(ioaddr + SCBStatus);
1557 /* Acknowledge all of the current interrupt sources ASAP. */
1558 /* Will change from 0xfc00 to 0xff00 when we start handling
1559 FCP and ER interrupts --Dragan */
1560 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1561
1562 if (netif_msg_intr(sp))
1563 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1564 dev->name, status);
1565
1566 if ((status & 0xfc00) == 0)
1567 break;
1568 handled = 1;
1569
1570
1571 if ((status & 0x5000) || /* Packet received, or Rx error. */
1572 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1573 /* Need to gather the postponed packet. */
1574 speedo_rx(dev);
1575
1576 /* Always check if all rx buffers are allocated. --SAW */
1577 speedo_refill_rx_buffers(dev, 0);
1578
1579 spin_lock(&sp->lock);
1580 /*
1581 * The chip may have suspended reception for various reasons.
1582 * Check for that, and re-prime it should this be the case.
1583 */
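		/* Bits 5:2 of SCBStatus encode the receive-unit state; any of
		   the suspended / out-of-resources states below means reception
		   has stalled and needs a soft restart. */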
1584 switch ((status >> 2) & 0xf) {
1585 case 0: /* Idle */
1586 break;
1587 case 1: /* Suspended */
1588 case 2: /* No resources (RxFDs) */
1589 case 9: /* Suspended with no more RBDs */
1590 case 10: /* No resources due to no RBDs */
1591 case 12: /* Ready with no RBDs */
1592 speedo_rx_soft_reset(dev);
1593 break;
1594 case 3: case 5: case 6: case 7: case 8:
1595 case 11: case 13: case 14: case 15:
1596 /* these are all reserved values */
1597 break;
1598 }
1599
1600
1601 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1602 if (status & 0xA400) {
1603 speedo_tx_buffer_gc(dev);
1604 if (sp->tx_full
1605 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1606 /* The ring is no longer full. */
1607 sp->tx_full = 0;
1608 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1609 }
1610 }
1611
1612 spin_unlock(&sp->lock);
1613
1614 if (--boguscnt < 0) {
1615 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1616 dev->name, status);
1617 /* Clear all interrupt sources. */
1618 /* Will change from 0xfc00 to 0xff00 when we start handling
1619 FCP and ER interrupts --Dragan */
1620 iowrite16(0xfc00, ioaddr + SCBStatus);
1621 break;
1622 }
1623 } while (1);
1624
1625 if (netif_msg_intr(sp))
1626 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1627 dev->name, ioread16(ioaddr + SCBStatus));
1628
1629 clear_bit(0, (void*)&sp->in_interrupt);
1630 return IRQ_RETVAL(handled);
1631}
1632
1633static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1634{
1635 struct speedo_private *sp = netdev_priv(dev);
1636 struct RxFD *rxf;
1637 struct sk_buff *skb;
1638 /* Get a fresh skbuff to replace the consumed one. */
1639 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1640 if (skb)
1641 rx_align(skb); /* Align IP on 16 byte boundary */
1642 sp->rx_skbuff[entry] = skb;
1643 if (skb == NULL) {
1644 sp->rx_ringp[entry] = NULL;
1645 return NULL;
1646 }
1647 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1648 sp->rx_ring_dma[entry] =
1649 pci_map_single(sp->pdev, rxf,
1650 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1651 skb->dev = dev;
1652 skb_reserve(skb, sizeof(struct RxFD));
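	/* An all-ones buffer address tells the chip that no separate RBD is
	   used: in the simplified memory model the data area simply follows
	   the RxFD that was just reserved above. */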
1653 rxf->rx_buf_addr = cpu_to_le32(0xffffffff);
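	/* The CPU has just written the RxFD header, so sync it out to the
	   device -- hence PCI_DMA_TODEVICE even though this is a receive
	   buffer. */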
1654 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1655 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1656 return rxf;
1657}
1658
1659static inline void speedo_rx_link(struct net_device *dev, int entry,
1660 struct RxFD *rxf, dma_addr_t rxf_dma)
1661{
1662 struct speedo_private *sp = netdev_priv(dev);
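	/* 0xC0000000 in the status word marks the list tail; the new frame
	   descriptor is created as the tail, and the same bits are then
	   cleared on the old tail so the chip can advance past it. */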
1663 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1664 rxf->link = 0; /* None yet. */
1665 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1666 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1667 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1668 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1669 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1670 sp->last_rxf = rxf;
1671 sp->last_rxf_dma = rxf_dma;
1672}
1673
1674static int speedo_refill_rx_buf(struct net_device *dev, int force)
1675{
1676 struct speedo_private *sp = netdev_priv(dev);
1677 int entry;
1678 struct RxFD *rxf;
1679
1680 entry = sp->dirty_rx % RX_RING_SIZE;
1681 if (sp->rx_skbuff[entry] == NULL) {
1682 rxf = speedo_rx_alloc(dev, entry);
1683 if (rxf == NULL) {
1684 unsigned int forw;
1685 int forw_entry;
1686 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1687 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1688 dev->name, force);
1689 sp->rx_ring_state |= RrOOMReported;
1690 }
1691 speedo_show_state(dev);
1692 if (!force)
1693 return -1; /* Better luck next time! */
1694 /* Borrow an skb from one of the next entries. */
1695 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1696 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1697 break;
1698 if (forw == sp->cur_rx)
1699 return -1;
1700 forw_entry = forw % RX_RING_SIZE;
1701 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1702 sp->rx_skbuff[forw_entry] = NULL;
1703 rxf = sp->rx_ringp[forw_entry];
1704 sp->rx_ringp[forw_entry] = NULL;
1705 sp->rx_ringp[entry] = rxf;
1706 }
1707 } else {
1708 rxf = sp->rx_ringp[entry];
1709 }
1710 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1711 sp->dirty_rx++;
1712 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1713 return 0;
1714}
1715
1716static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1717{
1718 struct speedo_private *sp = netdev_priv(dev);
1719
1720 /* Refill the RX ring. */
1721 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1722 speedo_refill_rx_buf(dev, force) != -1); /* empty body: the work is in the condition */
1723}
1724
1725static int
1726speedo_rx(struct net_device *dev)
1727{
1728 struct speedo_private *sp = netdev_priv(dev);
1729 int entry = sp->cur_rx % RX_RING_SIZE;
1730 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1731 int alloc_ok = 1;
1732 int npkts = 0;
1733
1734 if (netif_msg_intr(sp))
1735 printk(KERN_DEBUG " In speedo_rx().\n");
1736 /* If we own the next entry, it's a new packet. Send it up. */
1737 while (sp->rx_ringp[entry] != NULL) {
1738 int status;
1739 int pkt_len;
1740
1741 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1742 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1743 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1744 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1745
1746 if (!(status & RxComplete))
1747 break;
1748
1749 if (--rx_work_limit < 0)
1750 break;
1751
1752 /* Check for a rare out-of-memory case: the current buffer is
1753 the last buffer allocated in the RX ring. --SAW */
1754 if (sp->last_rxf == sp->rx_ringp[entry]) {
1755 /* Postpone the packet. It'll be reaped at an interrupt when this
1756 packet is no longer the last packet in the ring. */
1757 if (netif_msg_rx_err(sp))
1758 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1759 dev->name);
1760 sp->rx_ring_state |= RrPostponed;
1761 break;
1762 }
1763
1764 if (netif_msg_rx_status(sp))
1765 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1766 pkt_len);
1767 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1768 if (status & RxErrTooBig)
1769 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1770 "status %8.8x!\n", dev->name, status);
1771 else if (! (status & RxOK)) {
1772 /* There was a fatal error. This *should* be impossible. */
1773 sp->stats.rx_errors++;
1774 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1775 "status %8.8x.\n",
1776 dev->name, status);
1777 }
1778 } else {
1779 struct sk_buff *skb;
1780
1781 /* If the packet is short, copy it into a freshly allocated skbuff;
1782 otherwise hand up the existing full-sized buffer. */
1783 if (pkt_len < rx_copybreak
1784 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1785 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1786 /* 'skb_put()' points to the start of sk_buff data area. */
1787 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1788 sizeof(struct RxFD) + pkt_len,
1789 PCI_DMA_FROMDEVICE);
1790
1791#if 1 || USE_IP_CSUM
1792 /* Packet is in one chunk -- we can copy + cksum. */
1793 skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len);
1794 skb_put(skb, pkt_len);
1795#else
1796 skb_copy_from_linear_data(sp->rx_skbuff[entry],
1797 skb_put(skb, pkt_len),
1798 pkt_len);
1799#endif
1800 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1801 sizeof(struct RxFD) + pkt_len,
1802 PCI_DMA_FROMDEVICE);
1803 npkts++;
1804 } else {
1805 /* Pass up the already-filled skbuff. */
1806 skb = sp->rx_skbuff[entry];
1807 if (skb == NULL) {
1808 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1809 dev->name);
1810 break;
1811 }
1812 sp->rx_skbuff[entry] = NULL;
1813 skb_put(skb, pkt_len);
1814 npkts++;
1815 sp->rx_ringp[entry] = NULL;
1816 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1817 PKT_BUF_SZ + sizeof(struct RxFD),
1818 PCI_DMA_FROMDEVICE);
1819 }
1820 skb->protocol = eth_type_trans(skb, dev);
1821 netif_rx(skb);
1822 dev->last_rx = jiffies;
1823 sp->stats.rx_packets++;
1824 sp->stats.rx_bytes += pkt_len;
1825 }
1826 entry = (++sp->cur_rx) % RX_RING_SIZE;
1827 sp->rx_ring_state &= ~RrPostponed;
1828 /* Refill the recently taken buffers.
1829 Do it one-by-one to handle traffic bursts better. */
1830 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1831 alloc_ok = 0;
1832 }
1833
1834 /* Try hard to refill the recently taken buffers. */
1835 speedo_refill_rx_buffers(dev, 1);
1836
1837 if (npkts)
1838 sp->last_rx_time = jiffies;
1839
1840 return 0;
1841}
1842
1843static int
1844speedo_close(struct net_device *dev)
1845{
1846 struct speedo_private *sp = netdev_priv(dev);
1847 void __iomem *ioaddr = sp->regs;
1848 int i;
1849
1850 netdevice_stop(dev);
1851 netif_stop_queue(dev);
1852
1853 if (netif_msg_ifdown(sp))
1854 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1855 dev->name, ioread16(ioaddr + SCBStatus));
1856
1857 /* Shut off the media monitoring timer. */
1858 del_timer_sync(&sp->timer);
1859
1860 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1861
1862 /* Shutting down the chip nicely fails to disable flow control, so resort to a partial reset: */
1863 iowrite32(PortPartialReset, ioaddr + SCBPort);
1864 ioread32(ioaddr + SCBPort); /* flush posted write */
1865 /*
1866 * The chip requires a 10 microsecond quiet period. Wait here!
1867 */
1868 udelay(10);
1869
1870 free_irq(dev->irq, dev);
1871 speedo_show_state(dev);
1872
1873 /* Free all the skbuffs in the Rx and Tx queues. */
1874 for (i = 0; i < RX_RING_SIZE; i++) {
1875 struct sk_buff *skb = sp->rx_skbuff[i];
1876 sp->rx_skbuff[i] = NULL;
1877 /* Clear the Rx descriptors. */
1878 if (skb) {
1879 pci_unmap_single(sp->pdev,
1880 sp->rx_ring_dma[i],
1881 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1882 dev_kfree_skb(skb);
1883 }
1884 }
1885
1886 for (i = 0; i < TX_RING_SIZE; i++) {
1887 struct sk_buff *skb = sp->tx_skbuff[i];
1888 sp->tx_skbuff[i] = NULL;
1889 /* Clear the Tx descriptors. */
1890 if (skb) {
1891 pci_unmap_single(sp->pdev,
1892 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1893 skb->len, PCI_DMA_TODEVICE);
1894 dev_kfree_skb(skb);
1895 }
1896 }
1897
1898 /* Free multicast setting blocks. */
1899 for (i = 0; sp->mc_setup_head != NULL; i++) {
1900 struct speedo_mc_block *t;
1901 t = sp->mc_setup_head->next;
1902 kfree(sp->mc_setup_head);
1903 sp->mc_setup_head = t;
1904 }
1905 sp->mc_setup_tail = NULL;
1906 if (netif_msg_ifdown(sp))
1907 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1908
1909 pci_set_power_state(sp->pdev, PCI_D2);
1910
1911 return 0;
1912}
1913
1914/* The Speedo-3 has an especially awkward and unusable method of getting
1915 statistics out of the chip. It takes an unpredictable length of time
1916 for the dump-stats command to complete. To avoid a busy-wait loop we
1917 update the stats with the previous dump results, and then trigger a
1918 new dump.
1919
1920 Oh, and incoming frames are dropped while executing dump-stats!
1921 */
1922static struct net_device_stats *
1923speedo_get_stats(struct net_device *dev)
1924{
1925 struct speedo_private *sp = netdev_priv(dev);
1926 void __iomem *ioaddr = sp->regs;
1927
1928 /* Update only if the previous dump finished. */
1929 if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
1930 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1931 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1932 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1933 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1934 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1935 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1936 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1937 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1938 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1939 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1940 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
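		/* Clear the marker to re-arm the check above; the chip writes
		   0xA007 back when the dump-stats command triggered below
		   completes. */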
1941 sp->lstats->done_marker = 0x0000;
1942 if (netif_running(dev)) {
1943 unsigned long flags;
1944 /* Take a spinlock to make wait_for_cmd_done and sending the
1945 command atomic. --SAW */
1946 spin_lock_irqsave(&sp->lock, flags);
1947 wait_for_cmd_done(dev, sp);
1948 iowrite8(CUDumpStats, ioaddr + SCBCmd);
1949 spin_unlock_irqrestore(&sp->lock, flags);
1950 }
1951 }
1952 return &sp->stats;
1953}
1954
1955static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1956{
1957 struct speedo_private *sp = netdev_priv(dev);
1958 strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1959 strncpy(info->version, version, sizeof(info->version)-1);
1960 if (sp->pdev)
1961 strcpy(info->bus_info, pci_name(sp->pdev));
1962}
1963
1964static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1965{
1966 struct speedo_private *sp = netdev_priv(dev);
1967 spin_lock_irq(&sp->lock);
1968 mii_ethtool_gset(&sp->mii_if, ecmd);
1969 spin_unlock_irq(&sp->lock);
1970 return 0;
1971}
1972
1973static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1974{
1975 struct speedo_private *sp = netdev_priv(dev);
1976 int res;
1977 spin_lock_irq(&sp->lock);
1978 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1979 spin_unlock_irq(&sp->lock);
1980 return res;
1981}
1982
1983static int speedo_nway_reset(struct net_device *dev)
1984{
1985 struct speedo_private *sp = netdev_priv(dev);
1986 return mii_nway_restart(&sp->mii_if);
1987}
1988
1989static u32 speedo_get_link(struct net_device *dev)
1990{
1991 struct speedo_private *sp = netdev_priv(dev);
1992 return mii_link_ok(&sp->mii_if);
1993}
1994
1995static u32 speedo_get_msglevel(struct net_device *dev)
1996{
1997 struct speedo_private *sp = netdev_priv(dev);
1998 return sp->msg_enable;
1999}
2000
2001static void speedo_set_msglevel(struct net_device *dev, u32 v)
2002{
2003 struct speedo_private *sp = netdev_priv(dev);
2004 sp->msg_enable = v;
2005}
2006
2007static const struct ethtool_ops ethtool_ops = {
2008 .get_drvinfo = speedo_get_drvinfo,
2009 .get_settings = speedo_get_settings,
2010 .set_settings = speedo_set_settings,
2011 .nway_reset = speedo_nway_reset,
2012 .get_link = speedo_get_link,
2013 .get_msglevel = speedo_get_msglevel,
2014 .set_msglevel = speedo_set_msglevel,
2015};
2016
2017static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2018{
2019 struct speedo_private *sp = netdev_priv(dev);
2020 struct mii_ioctl_data *data = if_mii(rq);
2021 int phy = sp->phy[0] & 0x1f;
2022 int saved_acpi;
2023 int t;
2024
2025 switch(cmd) {
2026 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2027 data->phy_id = phy;
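		/* Fall through: a SIOCGMIIPHY request also performs the
		   register read below. */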
2028
2029 case SIOCGMIIREG: /* Read MII PHY register. */
2030 /* FIXME: these operations need to be serialized with MDIO
2031 access from the timeout handler.
2032 They are currently serialized only with MDIO access from the
2033 timer routine. 2000/05/09 SAW */
2034 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2035 t = del_timer_sync(&sp->timer);
2036 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2037 if (t)
2038 add_timer(&sp->timer); /* may be set to the past --SAW */
2039 pci_set_power_state(sp->pdev, saved_acpi);
2040 return 0;
2041
2042 case SIOCSMIIREG: /* Write MII PHY register. */
2043 if (!capable(CAP_NET_ADMIN))
2044 return -EPERM;
2045 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2046 t = del_timer_sync(&sp->timer);
2047 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2048 if (t)
2049 add_timer(&sp->timer); /* may be set to the past --SAW */
2050 pci_set_power_state(sp->pdev, saved_acpi);
2051 return 0;
2052 default:
2053 return -EOPNOTSUPP;
2054 }
2055}
2056
2057/* Set or clear the multicast filter for this adaptor.
2058 This is very ugly with Intel chips -- we usually have to execute an
2059 entire configuration command, plus process a multicast command.
2060 This is complicated. We must put a large configuration command and
2061 an arbitrarily-sized multicast command in the transmit list.
2062 To minimize the disruption -- the previous command might have already
2063 loaded the link -- we convert the current command block, normally a Tx
2064 command, into a no-op and link it to the new command.
2065*/
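/* A sketch of the two resulting command chains (small lists are built
   in-ring, large ones in an out-of-ring setup block):

	... -> [prev cmd, Suspend cleared] -> [CmdMulticastList, Suspend]
	... -> [CmdNOp in tx_ring] -> [setup block] -> [next ring slot]
*/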
2066static void set_rx_mode(struct net_device *dev)
2067{
2068 struct speedo_private *sp = netdev_priv(dev);
2069 void __iomem *ioaddr = sp->regs;
2070 struct descriptor *last_cmd;
2071 char new_rx_mode;
2072 unsigned long flags;
2073 int entry, i;
2074
2075 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2076 new_rx_mode = 3;
2077 } else if ((dev->flags & IFF_ALLMULTI) ||
2078 dev->mc_count > multicast_filter_limit) {
2079 new_rx_mode = 1;
2080 } else
2081 new_rx_mode = 0;
2082
2083 if (netif_msg_rx_status(sp))
2084 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2085 sp->rx_mode, new_rx_mode);
2086
2087 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2088 /* The Tx ring is full -- don't add anything! Hope the mode will be
2089 * set again later. */
2090 sp->rx_mode = -1;
2091 return;
2092 }
2093
2094 if (new_rx_mode != sp->rx_mode) {
2095 u8 *config_cmd_data;
2096
2097 spin_lock_irqsave(&sp->lock, flags);
2098 entry = sp->cur_tx++ % TX_RING_SIZE;
2099 last_cmd = sp->last_cmd;
2100 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2101
2102 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2103 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2104 sp->tx_ring[entry].link =
2105 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2106 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2107 /* Construct a full CmdConfig frame. */
2108 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2109 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2110 config_cmd_data[4] = rxdmacount;
2111 config_cmd_data[5] = txdmacount + 0x80;
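		/* Byte 15 bit 0 is the promiscuous bit (rx_mode 3) and byte 21
		   carries the multicast-all setting (rx_mode 1), matching the
		   i8255x configure-command byte layout. */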
2112 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2113 /* 0x80 doesn't disable FC; 0x84 does.
2114 Disable flow control since we are not ACK-ing any FC interrupts
2115 for now. --Dragan */
2116 config_cmd_data[19] = 0x84;
2117 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2118 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
2119 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2120 config_cmd_data[15] |= 0x80;
2121 config_cmd_data[8] = 0;
2122 }
2123 /* Trigger the command unit resume. */
2124 wait_for_cmd_done(dev, sp);
2125 clear_suspend(last_cmd);
2126 iowrite8(CUResume, ioaddr + SCBCmd);
2127 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2128 netif_stop_queue(dev);
2129 sp->tx_full = 1;
2130 }
2131 spin_unlock_irqrestore(&sp->lock, flags);
2132 }
2133
2134 if (new_rx_mode == 0 && dev->mc_count < 4) {
2135 /* The simple case of 0-3 multicast list entries occurs often, and
2136 fits within one tx_ring[] entry. */
2137 struct dev_mc_list *mclist;
2138 __le16 *setup_params, *eaddrs;
2139
2140 spin_lock_irqsave(&sp->lock, flags);
2141 entry = sp->cur_tx++ % TX_RING_SIZE;
2142 last_cmd = sp->last_cmd;
2143 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2144
2145 sp->tx_skbuff[entry] = NULL;
2146 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2147 sp->tx_ring[entry].link =
2148 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2149 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2150 setup_params = (__le16 *)&sp->tx_ring[entry].tx_desc_addr;
2151 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2152 /* Fill in the multicast addresses. */
2153 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2154 i++, mclist = mclist->next) {
2155 eaddrs = (__le16 *)mclist->dmi_addr;
2156 *setup_params++ = *eaddrs++;
2157 *setup_params++ = *eaddrs++;
2158 *setup_params++ = *eaddrs++;
2159 }
2160
2161 wait_for_cmd_done(dev, sp);
2162 clear_suspend(last_cmd);
2163 /* Immediately trigger the command unit resume. */
2164 iowrite8(CUResume, ioaddr + SCBCmd);
2165
2166 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2167 netif_stop_queue(dev);
2168 sp->tx_full = 1;
2169 }
2170 spin_unlock_irqrestore(&sp->lock, flags);
2171 } else if (new_rx_mode == 0) {
2172 struct dev_mc_list *mclist;
2173 __le16 *setup_params, *eaddrs;
2174 struct speedo_mc_block *mc_blk;
2175 struct descriptor *mc_setup_frm;
2176 int i;
2177
2178 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2179 GFP_ATOMIC);
2180 if (mc_blk == NULL) {
2181 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2182 dev->name);
2183 sp->rx_mode = -1; /* We failed, try again. */
2184 return;
2185 }
2186 mc_blk->next = NULL;
2187 mc_blk->len = 2 + multicast_filter_limit*6;
2188 mc_blk->frame_dma =
2189 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2190 PCI_DMA_TODEVICE);
2191 mc_setup_frm = &mc_blk->frame;
2192
2193 /* Fill the setup frame. */
2194 if (netif_msg_ifup(sp))
2195 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2196 dev->name, mc_setup_frm);
2197 mc_setup_frm->cmd_status =
2198 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2199 /* Link set below. */
2200 setup_params = (__le16 *)&mc_setup_frm->params;
2201 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2202 /* Fill in the multicast addresses. */
2203 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2204 i++, mclist = mclist->next) {
2205 eaddrs = (__le16 *)mclist->dmi_addr;
2206 *setup_params++ = *eaddrs++;
2207 *setup_params++ = *eaddrs++;
2208 *setup_params++ = *eaddrs++;
2209 }
2210
2211 /* Disable interrupts while playing with the Tx Cmd list. */
2212 spin_lock_irqsave(&sp->lock, flags);
2213
2214 if (sp->mc_setup_tail)
2215 sp->mc_setup_tail->next = mc_blk;
2216 else
2217 sp->mc_setup_head = mc_blk;
2218 sp->mc_setup_tail = mc_blk;
2219 mc_blk->tx = sp->cur_tx;
2220
2221 entry = sp->cur_tx++ % TX_RING_SIZE;
2222 last_cmd = sp->last_cmd;
2223 sp->last_cmd = mc_setup_frm;
2224
2225 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2226 sp->tx_skbuff[entry] = NULL;
2227 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2228 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2229
2230 /* Set the link in the setup frame. */
2231 mc_setup_frm->link =
2232 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2233
2234 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2235 mc_blk->len, PCI_DMA_TODEVICE);
2236
2237 wait_for_cmd_done(dev, sp);
2238 clear_suspend(last_cmd);
2239 /* Immediately trigger the command unit resume. */
2240 iowrite8(CUResume, ioaddr + SCBCmd);
2241
2242 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2243 netif_stop_queue(dev);
2244 sp->tx_full = 1;
2245 }
2246 spin_unlock_irqrestore(&sp->lock, flags);
2247
2248 if (netif_msg_rx_status(sp))
2249 printk(" CmdMCSetup frame length %d in entry %d.\n",
2250 dev->mc_count, entry);
2251 }
2252
2253 sp->rx_mode = new_rx_mode;
2254}
2255
2256#ifdef CONFIG_PM
2257static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2258{
2259 struct net_device *dev = pci_get_drvdata (pdev);
2260 struct speedo_private *sp = netdev_priv(dev);
2261 void __iomem *ioaddr = sp->regs;
2262
2263 pci_save_state(pdev);
2264
2265 if (!netif_running(dev))
2266 return 0;
2267
2268 del_timer_sync(&sp->timer);
2269
2270 netif_device_detach(dev);
2271 iowrite32(PortPartialReset, ioaddr + SCBPort);
2272
2273 /* XXX call pci_set_power_state()? */
2274 pci_disable_device(pdev);
2275 pci_set_power_state(pdev, PCI_D3hot);
2276 return 0;
2277}
2278
2279static int eepro100_resume(struct pci_dev *pdev)
2280{
2281 struct net_device *dev = pci_get_drvdata (pdev);
2282 struct speedo_private *sp = netdev_priv(dev);
2283 void __iomem *ioaddr = sp->regs;
2284 int rc;
2285
2286 pci_set_power_state(pdev, PCI_D0);
2287 pci_restore_state(pdev);
2288
2289 rc = pci_enable_device(pdev);
2290 if (rc)
2291 return rc;
2292
2293 pci_set_master(pdev);
2294
2295 if (!netif_running(dev))
2296 return 0;
2297
2298 /* I'm absolutely uncertain whether this part of the code works.
2299 The problems are:
2300 - correct hardware reinitialization;
2301 - correct driver behavior between different steps of the
2302 reinitialization;
2303 - serialization with other driver calls.
2304 2000/03/08 SAW */
2305 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2306 speedo_resume(dev);
2307 netif_device_attach(dev);
2308 sp->rx_mode = -1;
2309 sp->flow_ctrl = sp->partner = 0;
2310 set_rx_mode(dev);
2311 sp->timer.expires = RUN_AT(2*HZ);
2312 add_timer(&sp->timer);
2313 return 0;
2314}
2315#endif /* CONFIG_PM */
2316
2317static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2318{
2319 struct net_device *dev = pci_get_drvdata (pdev);
2320 struct speedo_private *sp = netdev_priv(dev);
2321
2322 unregister_netdev(dev);
2323
2324 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2325 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2326
2327 pci_iounmap(pdev, sp->regs);
2328 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2329 + sizeof(struct speedo_stats),
2330 sp->tx_ring, sp->tx_ring_dma);
2331 pci_disable_device(pdev);
2332 free_netdev(dev);
2333}
2334
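/* 0x1229 is the classic 82557/8/9 EtherExpress PRO/100; the remaining
   IDs cover later 8255x-based on-board and ICHx variants of the same
   MAC core. */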
2335static struct pci_device_id eepro100_pci_tbl[] = {
2336 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2337 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2338 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2339 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2340 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2341 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2342 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2343 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2344 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2345 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2346 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2347 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2348 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2362 { 0,}
2363};
2364MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2365
2366static struct pci_driver eepro100_driver = {
2367 .name = "eepro100",
2368 .id_table = eepro100_pci_tbl,
2369 .probe = eepro100_init_one,
2370 .remove = __devexit_p(eepro100_remove_one),
2371#ifdef CONFIG_PM
2372 .suspend = eepro100_suspend,
2373 .resume = eepro100_resume,
2374#endif /* CONFIG_PM */
2375};
2376
2377static int __init eepro100_init_module(void)
2378{
2379#ifdef MODULE
2380 printk(version);
2381#endif
2382 return pci_register_driver(&eepro100_driver);
2383}
2384
2385static void __exit eepro100_cleanup_module(void)
2386{
2387 pci_unregister_driver(&eepro100_driver);
2388}
2389
2390module_init(eepro100_init_module);
2391module_exit(eepro100_cleanup_module);
2392
2393/*
2394 * Local variables:
2395 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2396 * c-indent-level: 4
2397 * c-basic-offset: 4
2398 * tab-width: 4
2399 * End:
2400 */