author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/eepro100.c
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/eepro100.c')
-rw-r--r--	drivers/net/eepro100.c	2412
1 files changed, 2412 insertions, 0 deletions
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
new file mode 100644
index 00000000000..98b3a2fdce9
--- /dev/null
+++ b/drivers/net/eepro100.c
@@ -0,0 +1,2412 @@
1/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
2/*
3 Written 1996-1999 by Donald Becker.
4
5 The driver also contains updates by different kernel developers
6 (see incomplete list below).
7 Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
8 Please use this email address and linux-kernel mailing list for bug reports.
9
10 This software may be used and distributed according to the terms
11 of the GNU General Public License, incorporated herein by reference.
12
13 This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
14 It should work with all i82557/558/559 boards.
15
16 Version history:
17 1998 Apr - 2000 Feb Andrey V. Savochkin <saw@saw.sw.com.sg>
18 Serious fixes for multicast filter list setting, TX timeout routine;
19 RX ring refilling logic; other stuff
20 2000 Feb Jeff Garzik <jgarzik@pobox.com>
21 Convert to new PCI driver interface
22 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
23	Disabled FC and ER, to avoid lockups when we get FCP interrupts.
24 2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
25	PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
26 2000 Aug 31 David Mosberger <davidm@hpl.hp.com>
27 rx_align support: enables rx DMA without causing unaligned accesses.
28*/
29
30static const char *version =
31"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://www.scyld.com/network/eepro100.html\n"
32"eepro100.c: $Revision: 1.36 $ 2000/11/17 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n";
33
34/* A few user-configurable values that apply to all boards.
35 First set is undocumented and spelled per Intel recommendations. */
36
37static int congenb /* = 0 */; /* Enable congestion control in the DP83840. */
38static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
39static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
40/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
41static int txdmacount = 128;
42static int rxdmacount /* = 0 */;
43
44#if defined(__ia64__) || defined(__alpha__) || defined(__sparc__) || defined(__mips__) || \
45 defined(__arm__)
46 /* align rx buffers to 2 bytes so that IP header is aligned */
47# define rx_align(skb) skb_reserve((skb), 2)
48# define RxFD_ALIGNMENT __attribute__ ((aligned (2), packed))
49#else
50# define rx_align(skb)
51# define RxFD_ALIGNMENT
52#endif
53
54/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
55 Lower values use more memory, but are faster. */
56static int rx_copybreak = 200;
57
58/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
59static int max_interrupt_work = 20;
60
61/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
62static int multicast_filter_limit = 64;
63
64/* 'options' is used to pass a transceiver override or full-duplex flag
65 e.g. "options=16" for FD, "options=32" for 100mbps-only. */
66static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
67static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
68
69/* A few values that may be tweaked. */
70/* The ring sizes should be a power of two for efficiency. */
71#define TX_RING_SIZE 64
72#define RX_RING_SIZE 64
73/* How many slots the multicast filter setup may take.
74   Do not decrease without changing the set_rx_mode() implementation. */
75#define TX_MULTICAST_SIZE 2
76#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
77/* Actual number of TX packets queued, must be
78 <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
79#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
80/* Hysteresis marking queue as no longer full. */
81#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
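/* With the defaults above, TX_MULTICAST_RESERV sets aside 4 of the 64 Tx slots
   for multicast setup frames, so at most TX_QUEUE_LIMIT = 60 packets may be
   queued, and TX_QUEUE_UNFULL = 56 is the hysteresis point below which the
   queue is treated as no longer full. */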
82
83/* Operational parameters that usually are not changed. */
84
85/* Time in jiffies before concluding the transmitter is hung. */
86#define TX_TIMEOUT (2*HZ)
87/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
88#define PKT_BUF_SZ 1536
89
90#include <linux/config.h>
91#include <linux/module.h>
92
93#include <linux/kernel.h>
94#include <linux/string.h>
95#include <linux/errno.h>
96#include <linux/ioport.h>
97#include <linux/slab.h>
98#include <linux/interrupt.h>
99#include <linux/timer.h>
100#include <linux/pci.h>
101#include <linux/spinlock.h>
102#include <linux/init.h>
103#include <linux/mii.h>
104#include <linux/delay.h>
105#include <linux/bitops.h>
106
107#include <asm/io.h>
108#include <asm/uaccess.h>
109#include <asm/irq.h>
110
111#include <linux/netdevice.h>
112#include <linux/etherdevice.h>
113#include <linux/rtnetlink.h>
114#include <linux/skbuff.h>
115#include <linux/ethtool.h>
116
117static int use_io;
118static int debug = -1;
119#define DEBUG_DEFAULT (NETIF_MSG_DRV | \
120 NETIF_MSG_HW | \
121 NETIF_MSG_RX_ERR | \
122 NETIF_MSG_TX_ERR)
123#define DEBUG ((debug >= 0) ? (1<<debug)-1 : DEBUG_DEFAULT)
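/* For example, debug=3 gives (1<<3)-1 = 0x7, i.e. NETIF_MSG_DRV,
   NETIF_MSG_PROBE and NETIF_MSG_LINK; a negative value (the default)
   falls back to DEBUG_DEFAULT above. */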
124
125
126MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
127MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
128MODULE_LICENSE("GPL");
129module_param(use_io, int, 0);
130module_param(debug, int, 0);
131module_param_array(options, int, NULL, 0);
132module_param_array(full_duplex, int, NULL, 0);
133module_param(congenb, int, 0);
134module_param(txfifo, int, 0);
135module_param(rxfifo, int, 0);
136module_param(txdmacount, int, 0);
137module_param(rxdmacount, int, 0);
138module_param(rx_copybreak, int, 0);
139module_param(max_interrupt_work, int, 0);
140module_param(multicast_filter_limit, int, 0);
141MODULE_PARM_DESC(debug, "debug level (0-6)");
142MODULE_PARM_DESC(options, "Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
143MODULE_PARM_DESC(full_duplex, "full duplex setting(s) (1)");
144MODULE_PARM_DESC(congenb, "Enable congestion control (1)");
145MODULE_PARM_DESC(txfifo, "Tx FIFO threshold in 4 byte units, (0-15)");
146MODULE_PARM_DESC(rxfifo, "Rx FIFO threshold in 4 byte units, (0-15)");
147MODULE_PARM_DESC(txdmacount, "Tx DMA burst length; 128 - disable (0-128)");
148MODULE_PARM_DESC(rxdmacount, "Rx DMA burst length; 128 - disable (0-128)");
149MODULE_PARM_DESC(rx_copybreak, "copy breakpoint for copy-only-tiny-frames");
150MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
151MODULE_PARM_DESC(multicast_filter_limit, "maximum number of filtered multicast addresses");
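/* Usage sketch with hypothetical values: the per-board parameters take one
   comma-separated entry per adapter in probe order, e.g.
	modprobe eepro100 options=0x30,0 full_duplex=1,0 debug=3
   forces the first board to 100 Mbps full duplex (bit 4 = duplex,
   bit 5 = speed), leaves the second board autonegotiating, and raises
   the debug level. */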
152
153#define RUN_AT(x) (jiffies + (x))
154
155#define netdevice_start(dev)
156#define netdevice_stop(dev)
157#define netif_set_tx_timeout(dev, tf, tm) \
158 do { \
159 (dev)->tx_timeout = (tf); \
160 (dev)->watchdog_timeo = (tm); \
161 } while(0)
162
163
164
165/*
166 Theory of Operation
167
168I. Board Compatibility
169
170This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
171single-chip fast Ethernet controller for PCI, as used on the Intel
172EtherExpress Pro 100 adapter.
173
174II. Board-specific settings
175
176PCI bus devices are configured by the system at boot time, so no jumpers
177need to be set on the board. The system BIOS should be set to assign the
178PCI INTA signal to an otherwise unused system IRQ line. While it's
179possible to share PCI interrupt lines, it negatively impacts performance and
180only recent kernels support it.
181
182III. Driver operation
183
184IIIA. General
185The Speedo3 is very similar to other Intel network chips, that is to say
186"apparently designed on a different planet". This chip retains the complex
187Rx and Tx descriptors and multiple buffer pointers of previous chips, but
188also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
189Tx mode, but in a simplified lower-overhead manner: it associates only a
190single buffer descriptor with each frame descriptor.
191
192Despite the extra space overhead in each receive skbuff, the driver must use
193the simplified Rx buffer mode to assure that only a single data buffer is
194associated with each RxFD. The driver implements this by reserving space
195for the Rx descriptor at the head of each Rx skbuff.
196
197The Speedo-3 has receive and command unit base addresses that are added to
198almost all descriptor pointers. The driver sets these to zero, so that all
199pointer fields are absolute addresses.
200
201The System Control Block (SCB) of some previous Intel chips exists on the
202chip in both PCI I/O and memory space. This driver uses the I/O space
203registers, but might switch to memory mapped mode to better support non-x86
204processors.
205
206IIIB. Transmit structure
207
208The driver must use the complex Tx command+descriptor mode in order to
209have an indirect pointer to the skbuff data section. Each Tx command block
210(TxCB) is associated with two immediately appended Tx Buffer Descriptors
211(TxBDs). A fixed ring of these TxCB+TxBD pairs is kept as part of the
212speedo_private data structure for each adapter instance.
213
214The newer i82558 explicitly supports this structure, and can read the two
215TxBDs in the same PCI burst as the TxCB.
216
217This ring structure is used for all normal transmit packets, but the
218transmit packet descriptors aren't long enough for most non-Tx commands such
219as CmdConfigure. This is complicated by the possibility that the chip has
220already loaded the link address in the previous descriptor. So for these
221commands we convert the next free descriptor on the ring to a NoOp, and point
222that descriptor's link to the complex command.
223
224An additional complexity of these non-transmit commands is that they may be
225added asynchronously to the normal transmit queue, so we disable interrupts
226whenever the Tx descriptor ring is manipulated.
227
228A notable aspect of these special configure commands is that they do
229work with the normal Tx ring entry scavenge method. The Tx ring scavenge
230is done at interrupt time using the 'dirty_tx' index, and checking for the
231command-complete bit. The setup frames may have the NoOp command on the
232Tx ring marked as complete while the setup command itself has not yet
233completed, but this is not a problem. The tx_ring entry can still be
234safely reused, as the tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
235
236Commands may have bits set, e.g. CmdSuspend, in the command word to either
237suspend or stop the transmit/command unit. This driver always flags the last
238command with CmdSuspend, erases the CmdSuspend in the previous command, and
239then issues a CU_RESUME.
240Note: Watch out for the potential race condition here: imagine
241 erasing the previous suspend
242 the chip processes the previous command
243 the chip processes the final command, and suspends
244 doing the CU_RESUME
245	the chip processes the next, not-yet-valid post-final command.
246So blindly sending a CU_RESUME is only safe if we do it immediately
247after erasing the previous CmdSuspend, without the possibility of an
248intervening delay. Thus the resume command is always within the
249interrupts-disabled region. This is a timing dependence, but handling this
250condition in a timing-independent way would considerably complicate the code.
251
252Note: In previous generation Intel chips, restarting the command unit was a
253notoriously slow process. This is presumably no longer true.
254
255IIIC. Receive structure
256
257Because of the bus-master support on the Speedo3 this driver uses the new
258SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
259This scheme allocates full-sized skbuffs as receive buffers. The value
260SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
261trade off the memory wasted by passing a full-sized skbuff to the queue
262layer for all frames against the cost of copying a frame into a
263correctly-sized skbuff.
264
265For small frames the copying cost is negligible (esp. considering that we
266are pre-loading the cache with immediately useful header information), so we
267allocate a new, minimally-sized skbuff. For large frames the copying cost
268is non-trivial, and the larger copy might flush the cache of useful data, so
269we pass up the skbuff the packet was received into.
270
271IV. Notes
272
273Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
274that stated that I could disclose the information. But I still resent
275having to sign an Intel NDA when I'm helping Intel sell their own product!
276
277*/
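/* Purely illustrative sketch of the rx_copybreak decision described in
   section IIIC above; it is not used by the driver (the real logic lives in
   speedo_rx()), and the helper name and arguments are hypothetical.  Frames
   shorter than rx_copybreak are copied into a fresh, minimally sized skbuff
   so the large ring buffer can be reused at once; longer frames are handed
   up in the buffer they were received into. */
static inline struct sk_buff *
example_rx_copybreak(struct net_device *dev, struct sk_buff *rx_skb, int pkt_len)
{
	struct sk_buff *skb = rx_skb;

	if (pkt_len < rx_copybreak) {
		struct sk_buff *copy = dev_alloc_skb(pkt_len + 2);
		if (copy != NULL) {
			copy->dev = dev;
			skb_reserve(copy, 2);		/* align the IP header */
			memcpy(skb_put(copy, pkt_len), rx_skb->data, pkt_len);
			skb = copy;			/* rx_skb stays on the Rx ring */
		}
	} else {
		skb_put(skb, pkt_len);			/* caller must refill this ring slot */
	}
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}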
278
279static int speedo_found1(struct pci_dev *pdev, void __iomem *ioaddr, int fnd_cnt, int acpi_idle_state);
280
281enum pci_flags_bit {
282 PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
283 PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
284};
285
286/* Offsets to the various registers.
287 All accesses need not be longword aligned. */
288enum speedo_offsets {
289 SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
290 SCBIntmask = 3,
291 SCBPointer = 4, /* General purpose pointer. */
292 SCBPort = 8, /* Misc. commands and operands. */
293 SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
294 SCBCtrlMDI = 16, /* MDI interface control. */
295 SCBEarlyRx = 20, /* Early receive byte count. */
296};
297/* Commands that can be put in a command list entry. */
298enum commands {
299 CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
300 CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
301 CmdDump = 0x60000, CmdDiagnose = 0x70000,
302 CmdSuspend = 0x40000000, /* Suspend after completion. */
303 CmdIntr = 0x20000000, /* Interrupt after completion. */
304 CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
305};
306/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
307 status bits. Previous driver versions used separate 16 bit fields for
308 commands and statuses. --SAW
309 */
310#if defined(__alpha__)
311# define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status);
312#else
313# if defined(__LITTLE_ENDIAN)
314# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
315# elif defined(__BIG_ENDIAN)
316# define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
317# else
318# error Unsupported byteorder
319# endif
320#endif
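/* The cmd_status word is kept in little-endian (device) byte order, so bit 30
   lives in the most significant byte of the 32-bit value.  Viewed as the second
   16-bit halfword, that byte contributes 0x4000 on little-endian hosts and
   0x0040 on big-endian hosts, which is exactly what the two variants above
   clear. */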
321
322enum SCBCmdBits {
323 SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
324 SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
325 SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
326 /* The rest are Rx and Tx commands. */
327 CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
328 CUCmdBase=0x0060, /* CU Base address (set to zero) . */
329 CUDumpStats=0x0070, /* Dump then reset stats counters. */
330 RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
331 RxResumeNoResources=0x0007,
332};
333
334enum SCBPort_cmds {
335 PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
336};
337
338/* The Speedo3 Rx and Tx frame/buffer descriptors. */
339struct descriptor { /* A generic descriptor. */
340 volatile s32 cmd_status; /* All command and status fields. */
341 u32 link; /* struct descriptor * */
342 unsigned char params[0];
343};
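/* Illustrative sketch only -- the real sequence lives in speedo_start_xmit()
   and speedo_resume() below.  It shows the CmdSuspend/CU_RESUME handshake from
   section IIIB: the new tail command carries CmdSuspend, the previous command's
   CmdSuspend bit is cleared, and CU_RESUME is issued immediately afterwards.
   The caller is assumed to hold the Tx lock with interrupts disabled; the
   helper name is hypothetical. */
static inline void
example_queue_command(void __iomem *ioaddr, struct descriptor *prev_cmd,
		      struct descriptor *new_cmd)
{
	new_cmd->cmd_status |= cpu_to_le32(CmdSuspend);
	if (prev_cmd)
		clear_suspend(prev_cmd);	/* let the CU run past it */
	iowrite8(CUResume, ioaddr + SCBCmd);	/* resume without delay */
}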
344
345/* The Speedo3 Rx and Tx buffer descriptors. */
346struct RxFD { /* Receive frame descriptor. */
347 volatile s32 status;
348 u32 link; /* struct RxFD * */
349 u32 rx_buf_addr; /* void * */
350 u32 count;
351} RxFD_ALIGNMENT;
352
353/* Selected elements of the Tx/RxFD.status word. */
354enum RxFD_bits {
355 RxComplete=0x8000, RxOK=0x2000,
356 RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
357 RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
358 TxUnderrun=0x1000, StatusComplete=0x8000,
359};
360
361#define CONFIG_DATA_SIZE 22
362struct TxFD { /* Transmit frame descriptor set. */
363 s32 status;
364 u32 link; /* void * */
365 u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
366 s32 count; /* # of TBD (=1), Tx start thresh., etc. */
367 /* This constitutes two "TBD" entries -- we only use one. */
368#define TX_DESCR_BUF_OFFSET 16
369 u32 tx_buf_addr0; /* void *, frame to be transmitted. */
370 s32 tx_buf_size0; /* Length of Tx frame. */
371 u32 tx_buf_addr1; /* void *, frame to be transmitted. */
372 s32 tx_buf_size1; /* Length of Tx frame. */
373 /* the structure must have space for at least CONFIG_DATA_SIZE starting
374 * from tx_desc_addr field */
375};
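/* TX_DESCR_BUF_OFFSET (16) is simply the offset of tx_buf_addr0 from the start
   of the TxFD (four 32-bit words: status, link, tx_desc_addr, count), so
   pointing tx_desc_addr at "ring element DMA address + TX_DESCR_BUF_OFFSET"
   makes the chip fetch the buffer descriptor embedded in the same TxFD. */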
376
377/* Multicast filter setting block. --SAW */
378struct speedo_mc_block {
379 struct speedo_mc_block *next;
380 unsigned int tx;
381 dma_addr_t frame_dma;
382 unsigned int len;
383 struct descriptor frame __attribute__ ((__aligned__(16)));
384};
385
386/* Elements of the dump_statistics block. This block must be lword aligned. */
387struct speedo_stats {
388 u32 tx_good_frames;
389 u32 tx_coll16_errs;
390 u32 tx_late_colls;
391 u32 tx_underruns;
392 u32 tx_lost_carrier;
393 u32 tx_deferred;
394 u32 tx_one_colls;
395 u32 tx_multi_colls;
396 u32 tx_total_colls;
397 u32 rx_good_frames;
398 u32 rx_crc_errs;
399 u32 rx_align_errs;
400 u32 rx_resource_errs;
401 u32 rx_overrun_errs;
402 u32 rx_colls_errs;
403 u32 rx_runt_errs;
404 u32 done_marker;
405};
406
407enum Rx_ring_state_bits {
408 RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
409};
410
411/* Do not change the position (alignment) of the first few elements!
412 The later elements are grouped for cache locality.
413
414   Unfortunately, all the positions have been shifted since then.
415 A new re-alignment is required. 2000/03/06 SAW */
416struct speedo_private {
417 void __iomem *regs;
418 struct TxFD *tx_ring; /* Commands (usually CmdTxPacket). */
419 struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
420	/* The addresses of Tx/Rx-in-place packets/buffers. */
421 struct sk_buff *tx_skbuff[TX_RING_SIZE];
422 struct sk_buff *rx_skbuff[RX_RING_SIZE];
423 /* Mapped addresses of the rings. */
424 dma_addr_t tx_ring_dma;
425#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
426 dma_addr_t rx_ring_dma[RX_RING_SIZE];
427 struct descriptor *last_cmd; /* Last command sent. */
428 unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
429 spinlock_t lock; /* Group with Tx control cache line. */
430 u32 tx_threshold; /* The value for txdesc.count. */
431 struct RxFD *last_rxf; /* Last filled RX buffer. */
432 dma_addr_t last_rxf_dma;
433 unsigned int cur_rx, dirty_rx; /* The next free ring entry */
434 long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
435 struct net_device_stats stats;
436 struct speedo_stats *lstats;
437 dma_addr_t lstats_dma;
438 int chip_id;
439 struct pci_dev *pdev;
440 struct timer_list timer; /* Media selection timer. */
441 struct speedo_mc_block *mc_setup_head; /* Multicast setup frame list head. */
442 struct speedo_mc_block *mc_setup_tail; /* Multicast setup frame list tail. */
443 long in_interrupt; /* Word-aligned dev->interrupt */
444 unsigned char acpi_pwr;
445 signed char rx_mode; /* Current PROMISC/ALLMULTI setting. */
446 unsigned int tx_full:1; /* The Tx queue is full. */
447 unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
448 unsigned int rx_bug:1; /* Work around receiver hang errata. */
449 unsigned char default_port:8; /* Last dev->if_port value. */
450 unsigned char rx_ring_state; /* RX ring status flags. */
451 unsigned short phy[2]; /* PHY media interfaces available. */
452 unsigned short partner; /* Link partner caps. */
453 struct mii_if_info mii_if; /* MII API hooks, info */
454 u32 msg_enable; /* debug message level */
455};
456
457/* The parameters for a CmdConfigure operation.
458 There are so many options that it would be difficult to document each bit.
459 We mostly use the default or recommended settings. */
460static const char i82557_config_cmd[CONFIG_DATA_SIZE] = {
461 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
462 0, 0x2E, 0, 0x60, 0,
463 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
464 0x3f, 0x05, };
465static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
466 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
467 0, 0x2E, 0, 0x60, 0x08, 0x88,
468 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
469 0x31, 0x05, };
470
471/* PHY media interface chips. */
472static const char *phys[] = {
473 "None", "i82553-A/B", "i82553-C", "i82503",
474 "DP83840", "80c240", "80c24", "i82555",
475 "unknown-8", "unknown-9", "DP83840A", "unknown-11",
476 "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
477enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
478 S80C24, I82555, DP83840A=10, };
479static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
480#define EE_READ_CMD (6)
481
482static int eepro100_init_one(struct pci_dev *pdev,
483 const struct pci_device_id *ent);
484
485static int do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len);
486static int mdio_read(struct net_device *dev, int phy_id, int location);
487static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488static int speedo_open(struct net_device *dev);
489static void speedo_resume(struct net_device *dev);
490static void speedo_timer(unsigned long data);
491static void speedo_init_rx_ring(struct net_device *dev);
492static void speedo_tx_timeout(struct net_device *dev);
493static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
494static void speedo_refill_rx_buffers(struct net_device *dev, int force);
495static int speedo_rx(struct net_device *dev);
496static void speedo_tx_buffer_gc(struct net_device *dev);
497static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
498static int speedo_close(struct net_device *dev);
499static struct net_device_stats *speedo_get_stats(struct net_device *dev);
500static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
501static void set_rx_mode(struct net_device *dev);
502static void speedo_show_state(struct net_device *dev);
503static struct ethtool_ops ethtool_ops;
504
505
506
507#ifdef honor_default_port
508/* Optional driver feature to allow forcing the transceiver setting.
509 Not recommended. */
510static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
511 0x2000, 0x2100, 0x0400, 0x3100};
512#endif
513
514/* How to wait for the command unit to accept a command.
515 Typically this takes 0 ticks. */
516static inline unsigned char wait_for_cmd_done(struct net_device *dev,
517 struct speedo_private *sp)
518{
519 int wait = 1000;
520 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
521 unsigned char r;
522
523 do {
524 udelay(1);
525 r = ioread8(cmd_ioaddr);
526 } while(r && --wait >= 0);
527
528 if (wait < 0)
529 printk(KERN_ALERT "%s: wait_for_cmd_done timeout!\n", dev->name);
530 return r;
531}
532
533static int __devinit eepro100_init_one (struct pci_dev *pdev,
534 const struct pci_device_id *ent)
535{
536 void __iomem *ioaddr;
537 int irq, pci_bar;
538 int acpi_idle_state = 0, pm;
539 static int cards_found /* = 0 */;
540 unsigned long pci_base;
541
542#ifndef MODULE
543 /* when built-in, we only print version if device is found */
544 static int did_version;
545 if (did_version++ == 0)
546 printk(version);
547#endif
548
549 /* save power state before pci_enable_device overwrites it */
550 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
551 if (pm) {
552 u16 pwr_command;
553 pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
554 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
555 }
556
557 if (pci_enable_device(pdev))
558		goto err_out_none;	/* no resources have been requested yet */
559
560 pci_set_master(pdev);
561
562 if (!request_region(pci_resource_start(pdev, 1),
563 pci_resource_len(pdev, 1), "eepro100")) {
564 printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
565 goto err_out_none;
566 }
567 if (!request_mem_region(pci_resource_start(pdev, 0),
568 pci_resource_len(pdev, 0), "eepro100")) {
569 printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
570 goto err_out_free_pio_region;
571 }
572
573 irq = pdev->irq;
574 pci_bar = use_io ? 1 : 0;
575 pci_base = pci_resource_start(pdev, pci_bar);
576 if (DEBUG & NETIF_MSG_PROBE)
577 printk("Found Intel i82557 PCI Speedo at %#lx, IRQ %d.\n",
578 pci_base, irq);
579
580 ioaddr = pci_iomap(pdev, pci_bar, 0);
581 if (!ioaddr) {
582 printk (KERN_ERR "eepro100: cannot remap IO\n");
583 goto err_out_free_mmio_region;
584 }
585
586 if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) == 0)
587 cards_found++;
588 else
589 goto err_out_iounmap;
590
591 return 0;
592
593err_out_iounmap: ;
594 pci_iounmap(pdev, ioaddr);
595err_out_free_mmio_region:
596 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
597err_out_free_pio_region:
598 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
599err_out_none:
600 return -ENODEV;
601}
602
603#ifdef CONFIG_NET_POLL_CONTROLLER
604/*
605 * Polling 'interrupt' - used by things like netconsole to send skbs
606 * without having to re-enable interrupts. It's not called while
607 * the interrupt routine is executing.
608 */
609
610static void poll_speedo (struct net_device *dev)
611{
612 /* disable_irq is not very nice, but with the funny lockless design
613 we have no other choice. */
614 disable_irq(dev->irq);
615 speedo_interrupt (dev->irq, dev, NULL);
616 enable_irq(dev->irq);
617}
618#endif
619
620static int __devinit speedo_found1(struct pci_dev *pdev,
621 void __iomem *ioaddr, int card_idx, int acpi_idle_state)
622{
623 struct net_device *dev;
624 struct speedo_private *sp;
625 const char *product;
626 int i, option;
627 u16 eeprom[0x100];
628 int size;
629 void *tx_ring_space;
630 dma_addr_t tx_ring_dma;
631
632 size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
633 tx_ring_space = pci_alloc_consistent(pdev, size, &tx_ring_dma);
634 if (tx_ring_space == NULL)
635 return -1;
636
637 dev = alloc_etherdev(sizeof(struct speedo_private));
638 if (dev == NULL) {
639 printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
640 pci_free_consistent(pdev, size, tx_ring_space, tx_ring_dma);
641 return -1;
642 }
643
644 SET_MODULE_OWNER(dev);
645 SET_NETDEV_DEV(dev, &pdev->dev);
646
647 if (dev->mem_start > 0)
648 option = dev->mem_start;
649 else if (card_idx >= 0 && options[card_idx] >= 0)
650 option = options[card_idx];
651 else
652 option = 0;
653
654 rtnl_lock();
655 if (dev_alloc_name(dev, dev->name) < 0)
656 goto err_free_unlock;
657
658 /* Read the station address EEPROM before doing the reset.
659	   Nominally this should even be done before accepting the device, but
660 then we wouldn't have a device name with which to report the error.
661 The size test is for 6 bit vs. 8 bit address serial EEPROMs.
662 */
663 {
664 void __iomem *iobase;
665 int read_cmd, ee_size;
666 u16 sum;
667 int j;
668
669 /* Use IO only to avoid postponed writes and satisfy EEPROM timing
670 requirements. */
671 iobase = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
672 if (!iobase)
673 goto err_free_unlock;
674 if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
675 == 0xffe0000) {
676 ee_size = 0x100;
677 read_cmd = EE_READ_CMD << 24;
678 } else {
679 ee_size = 0x40;
680 read_cmd = EE_READ_CMD << 22;
681 }
682
683 for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
684 u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
685 eeprom[i] = value;
686 sum += value;
687 if (i < 3) {
688 dev->dev_addr[j++] = value;
689 dev->dev_addr[j++] = value >> 8;
690 }
691 }
692 if (sum != 0xBABA)
693 printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
694 "check settings before activating this device!\n",
695 dev->name, sum);
696 /* Don't unregister_netdev(dev); as the EEPro may actually be
697 usable, especially if the MAC address is set later.
698 On the other hand, it may be unusable if MDI data is corrupted. */
699
700 pci_iounmap(pdev, iobase);
701 }
702
703 /* Reset the chip: stop Tx and Rx processes and clear counters.
704 This takes less than 10usec and will easily finish before the next
705 action. */
706 iowrite32(PortReset, ioaddr + SCBPort);
707 ioread32(ioaddr + SCBPort);
708 udelay(10);
709
710 if (eeprom[3] & 0x0100)
711 product = "OEM i82557/i82558 10/100 Ethernet";
712 else
713 product = pci_name(pdev);
714
715 printk(KERN_INFO "%s: %s, ", dev->name, product);
716
717 for (i = 0; i < 5; i++)
718 printk("%2.2X:", dev->dev_addr[i]);
719 printk("%2.2X, ", dev->dev_addr[i]);
720 printk("IRQ %d.\n", pdev->irq);
721
722 sp = netdev_priv(dev);
723
724 /* we must initialize this early, for mdio_{read,write} */
725 sp->regs = ioaddr;
726
727#if 1 || defined(kernel_bloat)
728 /* OK, this is pure kernel bloat. I don't like it when other drivers
729 waste non-pageable kernel space to emit similar messages, but I need
730 them for bug reports. */
731 {
732 const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
733 /* The self-test results must be paragraph aligned. */
734 volatile s32 *self_test_results;
735		int boguscnt = 16000;	/* Timeout for self-test. */
736 if ((eeprom[3] & 0x03) != 0x03)
737 printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
738 " work-around.\n");
739 printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
740 " connectors present:",
741 eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
742 for (i = 0; i < 4; i++)
743 if (eeprom[5] & (1<<i))
744 printk(connectors[i]);
745 printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
746 phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
747 if (eeprom[7] & 0x0700)
748 printk(KERN_INFO " Secondary interface chip %s.\n",
749 phys[(eeprom[7]>>8)&7]);
750 if (((eeprom[6]>>8) & 0x3f) == DP83840
751 || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
752 int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
753 if (congenb)
754 mdi_reg23 |= 0x0100;
755 printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
756 mdi_reg23);
757 mdio_write(dev, eeprom[6] & 0x1f, 23, mdi_reg23);
758 }
759 if ((option >= 0) && (option & 0x70)) {
760 printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
761 (option & 0x20 ? 100 : 10),
762 (option & 0x10 ? "full" : "half"));
763 mdio_write(dev, eeprom[6] & 0x1f, MII_BMCR,
764 ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
765 ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
766 }
767
768 /* Perform a system self-test. */
769 self_test_results = (s32*) ((((long) tx_ring_space) + 15) & ~0xf);
770 self_test_results[0] = 0;
771 self_test_results[1] = -1;
772 iowrite32(tx_ring_dma | PortSelfTest, ioaddr + SCBPort);
773 do {
774 udelay(10);
775 } while (self_test_results[1] == -1 && --boguscnt >= 0);
776
777 if (boguscnt < 0) { /* Test optimized out. */
778 printk(KERN_ERR "Self test failed, status %8.8x:\n"
779 KERN_ERR " Failure to initialize the i82557.\n"
780 KERN_ERR " Verify that the card is a bus-master"
781 " capable slot.\n",
782 self_test_results[1]);
783 } else
784 printk(KERN_INFO " General self-test: %s.\n"
785 KERN_INFO " Serial sub-system self-test: %s.\n"
786 KERN_INFO " Internal registers self-test: %s.\n"
787 KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
788 self_test_results[1] & 0x1000 ? "failed" : "passed",
789 self_test_results[1] & 0x0020 ? "failed" : "passed",
790 self_test_results[1] & 0x0008 ? "failed" : "passed",
791 self_test_results[1] & 0x0004 ? "failed" : "passed",
792 self_test_results[0]);
793 }
794#endif /* kernel_bloat */
795
796 iowrite32(PortReset, ioaddr + SCBPort);
797 ioread32(ioaddr + SCBPort);
798 udelay(10);
799
800 /* Return the chip to its original power state. */
801 pci_set_power_state(pdev, acpi_idle_state);
802
803 pci_set_drvdata (pdev, dev);
804 SET_NETDEV_DEV(dev, &pdev->dev);
805
806 dev->irq = pdev->irq;
807
808 sp->pdev = pdev;
809 sp->msg_enable = DEBUG;
810 sp->acpi_pwr = acpi_idle_state;
811 sp->tx_ring = tx_ring_space;
812 sp->tx_ring_dma = tx_ring_dma;
813 sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
814 sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
815 init_timer(&sp->timer); /* used in ioctl() */
816 spin_lock_init(&sp->lock);
817
818 sp->mii_if.full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
819 if (card_idx >= 0) {
820 if (full_duplex[card_idx] >= 0)
821 sp->mii_if.full_duplex = full_duplex[card_idx];
822 }
823 sp->default_port = option >= 0 ? (option & 0x0f) : 0;
824
825 sp->phy[0] = eeprom[6];
826 sp->phy[1] = eeprom[7];
827
828 sp->mii_if.phy_id = eeprom[6] & 0x1f;
829 sp->mii_if.phy_id_mask = 0x1f;
830 sp->mii_if.reg_num_mask = 0x1f;
831 sp->mii_if.dev = dev;
832 sp->mii_if.mdio_read = mdio_read;
833 sp->mii_if.mdio_write = mdio_write;
834
835 sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
836 if (((pdev->device > 0x1030 && (pdev->device < 0x103F)))
837 || (pdev->device == 0x2449) || (pdev->device == 0x2459)
838 || (pdev->device == 0x245D)) {
839 sp->chip_id = 1;
840 }
841
842 if (sp->rx_bug)
843 printk(KERN_INFO " Receiver lock-up workaround activated.\n");
844
845 /* The Speedo-specific entries in the device structure. */
846 dev->open = &speedo_open;
847 dev->hard_start_xmit = &speedo_start_xmit;
848 netif_set_tx_timeout(dev, &speedo_tx_timeout, TX_TIMEOUT);
849 dev->stop = &speedo_close;
850 dev->get_stats = &speedo_get_stats;
851 dev->set_multicast_list = &set_rx_mode;
852 dev->do_ioctl = &speedo_ioctl;
853 SET_ETHTOOL_OPS(dev, &ethtool_ops);
854#ifdef CONFIG_NET_POLL_CONTROLLER
855 dev->poll_controller = &poll_speedo;
856#endif
857
858 if (register_netdevice(dev))
859 goto err_free_unlock;
860 rtnl_unlock();
861
862 return 0;
863
864 err_free_unlock:
865 rtnl_unlock();
866 free_netdev(dev);
867 return -1;
868}
869
870static void do_slow_command(struct net_device *dev, struct speedo_private *sp, int cmd)
871{
872 void __iomem *cmd_ioaddr = sp->regs + SCBCmd;
873 int wait = 0;
874 do
875 if (ioread8(cmd_ioaddr) == 0) break;
876 while(++wait <= 200);
877 if (wait > 100)
878 printk(KERN_ERR "Command %4.4x never accepted (%d polls)!\n",
879 ioread8(cmd_ioaddr), wait);
880
881 iowrite8(cmd, cmd_ioaddr);
882
883 for (wait = 0; wait <= 100; wait++)
884 if (ioread8(cmd_ioaddr) == 0) return;
885 for (; wait <= 20000; wait++)
886 if (ioread8(cmd_ioaddr) == 0) return;
887 else udelay(1);
888 printk(KERN_ERR "Command %4.4x was not accepted after %d polls!"
889 " Current status %8.8x.\n",
890 cmd, wait, ioread32(sp->regs + SCBStatus));
891}
892
893/* Serial EEPROM section.
894 A "bit" grungy, but we work our way through bit-by-bit :->. */
895/* EEPROM_Ctrl bits. */
896#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
897#define EE_CS 0x02 /* EEPROM chip select. */
898#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
899#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
900#define EE_ENB (0x4800 | EE_CS)
901#define EE_WRITE_0 0x4802
902#define EE_WRITE_1 0x4806
903#define EE_OFFSET SCBeeprom
904
905/* The fixes for the code were kindly provided by Dragan Stancevic
906 <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
907 access timing.
908 The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
909   interval for serial EEPROM. However, it looks like there is an
910   additional requirement dictating larger udelays in the code below.
911 2000/05/24 SAW */
912static int __devinit do_eeprom_cmd(void __iomem *ioaddr, int cmd, int cmd_len)
913{
914 unsigned retval = 0;
915 void __iomem *ee_addr = ioaddr + SCBeeprom;
916
917 iowrite16(EE_ENB, ee_addr); udelay(2);
918 iowrite16(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
919
920 /* Shift the command bits out. */
921 do {
922 short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
923 iowrite16(dataval, ee_addr); udelay(2);
924 iowrite16(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
925 retval = (retval << 1) | ((ioread16(ee_addr) & EE_DATA_READ) ? 1 : 0);
926 } while (--cmd_len >= 0);
927 iowrite16(EE_ENB, ee_addr); udelay(2);
928
929 /* Terminate the EEPROM access. */
930 iowrite16(EE_ENB & ~EE_CS, ee_addr);
931 return retval;
932}
933
934static int mdio_read(struct net_device *dev, int phy_id, int location)
935{
936 struct speedo_private *sp = netdev_priv(dev);
937 void __iomem *ioaddr = sp->regs;
938 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
939 iowrite32(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
940 do {
941 val = ioread32(ioaddr + SCBCtrlMDI);
942 if (--boguscnt < 0) {
943 printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
944 break;
945 }
946 } while (! (val & 0x10000000));
947 return val & 0xffff;
948}
949
950static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
951{
952 struct speedo_private *sp = netdev_priv(dev);
953 void __iomem *ioaddr = sp->regs;
954 int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
955 iowrite32(0x04000000 | (location<<16) | (phy_id<<21) | value,
956 ioaddr + SCBCtrlMDI);
957 do {
958 val = ioread32(ioaddr + SCBCtrlMDI);
959 if (--boguscnt < 0) {
960 printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
961 break;
962 }
963 } while (! (val & 0x10000000));
964}
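/* In the SCBCtrlMDI word written above, bits 26-27 select the MDIO opcode
   (0x08000000 for a read, 0x04000000 for a write), bits 21-25 carry the PHY
   address, bits 16-20 the register number and bits 0-15 the data; bit 28
   (0x10000000) is set by the chip when the transaction completes, which is
   what both polling loops wait for. */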
965
966static int
967speedo_open(struct net_device *dev)
968{
969 struct speedo_private *sp = netdev_priv(dev);
970 void __iomem *ioaddr = sp->regs;
971 int retval;
972
973 if (netif_msg_ifup(sp))
974 printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
975
976 pci_set_power_state(sp->pdev, PCI_D0);
977
978 /* Set up the Tx queue early.. */
979 sp->cur_tx = 0;
980 sp->dirty_tx = 0;
981 sp->last_cmd = NULL;
982 sp->tx_full = 0;
983 sp->in_interrupt = 0;
984
985 /* .. we can safely take handler calls during init. */
986 retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
987 if (retval) {
988 return retval;
989 }
990
991 dev->if_port = sp->default_port;
992
993#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
994 /* Retrigger negotiation to reset previous errors. */
995 if ((sp->phy[0] & 0x8000) == 0) {
996 int phy_addr = sp->phy[0] & 0x1f ;
997 /* Use 0x3300 for restarting NWay, other values to force xcvr:
998 0x0000 10-HD
999 0x0100 10-FD
1000 0x2000 100-HD
1001 0x2100 100-FD
1002 */
1003#ifdef honor_default_port
1004 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1005#else
1006 mdio_write(dev, phy_addr, MII_BMCR, 0x3300);
1007#endif
1008 }
1009#endif
1010
1011 speedo_init_rx_ring(dev);
1012
1013 /* Fire up the hardware. */
1014 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1015 speedo_resume(dev);
1016
1017 netdevice_start(dev);
1018 netif_start_queue(dev);
1019
1020 /* Setup the chip and configure the multicast list. */
1021 sp->mc_setup_head = NULL;
1022 sp->mc_setup_tail = NULL;
1023 sp->flow_ctrl = sp->partner = 0;
1024 sp->rx_mode = -1; /* Invalid -> always reset the mode. */
1025 set_rx_mode(dev);
1026 if ((sp->phy[0] & 0x8000) == 0)
1027 sp->mii_if.advertising = mdio_read(dev, sp->phy[0] & 0x1f, MII_ADVERTISE);
1028
1029 mii_check_link(&sp->mii_if);
1030
1031 if (netif_msg_ifup(sp)) {
1032 printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
1033 dev->name, ioread16(ioaddr + SCBStatus));
1034 }
1035
1036 /* Set the timer. The timer serves a dual purpose:
1037 1) to monitor the media interface (e.g. link beat) and perhaps switch
1038 to an alternate media type
1039 2) to monitor Rx activity, and restart the Rx process if the receiver
1040 hangs. */
1041 sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
1042 sp->timer.data = (unsigned long)dev;
1043 sp->timer.function = &speedo_timer; /* timer handler */
1044 add_timer(&sp->timer);
1045
1046 /* No need to wait for the command unit to accept here. */
1047 if ((sp->phy[0] & 0x8000) == 0)
1048 mdio_read(dev, sp->phy[0] & 0x1f, MII_BMCR);
1049
1050 return 0;
1051}
1052
1053/* Start the chip hardware after a full reset. */
1054static void speedo_resume(struct net_device *dev)
1055{
1056 struct speedo_private *sp = netdev_priv(dev);
1057 void __iomem *ioaddr = sp->regs;
1058
1059 /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
1060 sp->tx_threshold = 0x01208000;
1061
1062 /* Set the segment registers to '0'. */
1063 if (wait_for_cmd_done(dev, sp) != 0) {
1064 iowrite32(PortPartialReset, ioaddr + SCBPort);
1065 udelay(10);
1066 }
1067
1068 iowrite32(0, ioaddr + SCBPointer);
1069 ioread32(ioaddr + SCBPointer); /* Flush to PCI. */
1070 udelay(10); /* Bogus, but it avoids the bug. */
1071
1072 /* Note: these next two operations can take a while. */
1073 do_slow_command(dev, sp, RxAddrLoad);
1074 do_slow_command(dev, sp, CUCmdBase);
1075
1076 /* Load the statistics block and rx ring addresses. */
1077 iowrite32(sp->lstats_dma, ioaddr + SCBPointer);
1078 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1079
1080 iowrite8(CUStatsAddr, ioaddr + SCBCmd);
1081 sp->lstats->done_marker = 0;
1082 wait_for_cmd_done(dev, sp);
1083
1084 if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
1085 if (netif_msg_rx_err(sp))
1086 printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
1087 dev->name);
1088 } else {
1089 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1090 ioaddr + SCBPointer);
1091 ioread32(ioaddr + SCBPointer); /* Flush to PCI */
1092 }
1093
1094 /* Note: RxStart should complete instantly. */
1095 do_slow_command(dev, sp, RxStart);
1096 do_slow_command(dev, sp, CUDumpStats);
1097
1098 /* Fill the first command with our physical address. */
1099 {
1100 struct descriptor *ias_cmd;
1101
1102 ias_cmd =
1103 (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
1104 /* Avoid a bug(?!) here by marking the command already completed. */
1105 ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
1106 ias_cmd->link =
1107 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1108 memcpy(ias_cmd->params, dev->dev_addr, 6);
1109 if (sp->last_cmd)
1110 clear_suspend(sp->last_cmd);
1111 sp->last_cmd = ias_cmd;
1112 }
1113
1114 /* Start the chip's Tx process and unmask interrupts. */
1115 iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1116 ioaddr + SCBPointer);
1117 /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
1118 remain masked --Dragan */
1119 iowrite16(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
1120}
1121
1122/*
1123 * Sometimes the receiver stops making progress. This routine knows how to
1124 * get it going again, without losing packets or being otherwise nasty like
1125 * a chip reset would be. Previously the driver had a whole sequence
1126 * of if RxSuspended, if it's no buffers do one thing, if it's no resources,
1127 * do another, etc. But those things don't really matter. Separate logic
1128 * in the ISR provides for allocating buffers--the other half of operation
1129 * is just making sure the receiver is active. speedo_rx_soft_reset does that.
1130 * The problem with the old, more involved algorithm shows up under
1131 * ping floods on the order of 60K packets/second on a 100Mbps fdx network.
1132 */
1133static void
1134speedo_rx_soft_reset(struct net_device *dev)
1135{
1136 struct speedo_private *sp = netdev_priv(dev);
1137 struct RxFD *rfd;
1138 void __iomem *ioaddr;
1139
1140 ioaddr = sp->regs;
1141 if (wait_for_cmd_done(dev, sp) != 0) {
1142 printk("%s: previous command stalled\n", dev->name);
1143 return;
1144 }
1145 /*
1146 * Put the hardware into a known state.
1147 */
1148 iowrite8(RxAbort, ioaddr + SCBCmd);
1149
1150 rfd = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
1151
1152 rfd->rx_buf_addr = 0xffffffff;
1153
1154 if (wait_for_cmd_done(dev, sp) != 0) {
1155 printk("%s: RxAbort command stalled\n", dev->name);
1156 return;
1157 }
1158 iowrite32(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
1159 ioaddr + SCBPointer);
1160 iowrite8(RxStart, ioaddr + SCBCmd);
1161}
1162
1163
1164/* Media monitoring and control. */
1165static void speedo_timer(unsigned long data)
1166{
1167 struct net_device *dev = (struct net_device *)data;
1168 struct speedo_private *sp = netdev_priv(dev);
1169 void __iomem *ioaddr = sp->regs;
1170 int phy_num = sp->phy[0] & 0x1f;
1171
1172 /* We have MII and lost link beat. */
1173 if ((sp->phy[0] & 0x8000) == 0) {
1174 int partner = mdio_read(dev, phy_num, MII_LPA);
1175 if (partner != sp->partner) {
1176 int flow_ctrl = sp->mii_if.advertising & partner & 0x0400 ? 1 : 0;
1177 if (netif_msg_link(sp)) {
1178 printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
1179 printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
1180 dev->name, sp->partner, partner, sp->mii_if.advertising);
1181 }
1182 sp->partner = partner;
1183 if (flow_ctrl != sp->flow_ctrl) {
1184 sp->flow_ctrl = flow_ctrl;
1185 sp->rx_mode = -1; /* Trigger a reload. */
1186 }
1187 }
1188 }
1189 mii_check_link(&sp->mii_if);
1190 if (netif_msg_timer(sp)) {
1191 printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
1192 dev->name, ioread16(ioaddr + SCBStatus));
1193 }
1194 if (sp->rx_mode < 0 ||
1195 (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
1196 /* We haven't received a packet in a Long Time. We might have been
1197 bitten by the receiver hang bug. This can be cleared by sending
1198 a set multicast list command. */
1199 if (netif_msg_timer(sp))
1200 printk(KERN_DEBUG "%s: Sending a multicast list set command"
1201 " from a timer routine,"
1202 " m=%d, j=%ld, l=%ld.\n",
1203 dev->name, sp->rx_mode, jiffies, sp->last_rx_time);
1204 set_rx_mode(dev);
1205 }
1206 /* We must continue to monitor the media. */
1207 sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
1208 add_timer(&sp->timer);
1209}
1210
1211static void speedo_show_state(struct net_device *dev)
1212{
1213 struct speedo_private *sp = netdev_priv(dev);
1214 int i;
1215
1216 if (netif_msg_pktdata(sp)) {
1217 printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n",
1218 dev->name, sp->cur_tx, sp->dirty_tx);
1219 for (i = 0; i < TX_RING_SIZE; i++)
1220 printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
1221 i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
1222 i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
1223 i, sp->tx_ring[i].status);
1224
1225 printk(KERN_DEBUG "%s: Printing Rx ring"
1226 " (next to receive into %u, dirty index %u).\n",
1227 dev->name, sp->cur_rx, sp->dirty_rx);
1228 for (i = 0; i < RX_RING_SIZE; i++)
1229 printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
1230 sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
1231 i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
1232 i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
1233 i, (sp->rx_ringp[i] != NULL) ?
1234 (unsigned)sp->rx_ringp[i]->status : 0);
1235 }
1236
1237#if 0
1238 {
1239 void __iomem *ioaddr = sp->regs;
1240 int phy_num = sp->phy[0] & 0x1f;
1241 for (i = 0; i < 16; i++) {
1242 /* FIXME: what does it mean? --SAW */
1243 if (i == 6) i = 21;
1244 printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
1245 dev->name, phy_num, i, mdio_read(dev, phy_num, i));
1246 }
1247 }
1248#endif
1249
1250}
1251
1252/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1253static void
1254speedo_init_rx_ring(struct net_device *dev)
1255{
1256 struct speedo_private *sp = netdev_priv(dev);
1257 struct RxFD *rxf, *last_rxf = NULL;
1258 dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
1259 int i;
1260
1261 sp->cur_rx = 0;
1262
1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb;
1265 skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1266 /* XXX: do we really want to call this before the NULL check? --hch */
1267 rx_align(skb); /* Align IP on 16 byte boundary */
1268 sp->rx_skbuff[i] = skb;
1269 if (skb == NULL)
1270 break; /* OK. Just initially short of Rx bufs. */
1271 skb->dev = dev; /* Mark as being used by this device. */
1272 rxf = (struct RxFD *)skb->tail;
1273 sp->rx_ringp[i] = rxf;
1274 sp->rx_ring_dma[i] =
1275 pci_map_single(sp->pdev, rxf,
1276 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_BIDIRECTIONAL);
1277 skb_reserve(skb, sizeof(struct RxFD));
1278 if (last_rxf) {
1279 last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
1280 pci_dma_sync_single_for_device(sp->pdev, last_rxf_dma,
1281 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1282 }
1283 last_rxf = rxf;
1284 last_rxf_dma = sp->rx_ring_dma[i];
1285 rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
1286 rxf->link = 0; /* None yet. */
1287 /* This field unused by i82557. */
1288 rxf->rx_buf_addr = 0xffffffff;
1289 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1290 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[i],
1291 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1292 }
1293 sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1294 /* Mark the last entry as end-of-list. */
1295 last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
1296 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[RX_RING_SIZE-1],
1297 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1298 sp->last_rxf = last_rxf;
1299 sp->last_rxf_dma = last_rxf_dma;
1300}
1301
1302static void speedo_purge_tx(struct net_device *dev)
1303{
1304 struct speedo_private *sp = netdev_priv(dev);
1305 int entry;
1306
1307 while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
1308 entry = sp->dirty_tx % TX_RING_SIZE;
1309 if (sp->tx_skbuff[entry]) {
1310 sp->stats.tx_errors++;
1311 pci_unmap_single(sp->pdev,
1312 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1313 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1314 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1315 sp->tx_skbuff[entry] = NULL;
1316 }
1317 sp->dirty_tx++;
1318 }
1319 while (sp->mc_setup_head != NULL) {
1320 struct speedo_mc_block *t;
1321 if (netif_msg_tx_err(sp))
1322 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1323 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1324 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1325 t = sp->mc_setup_head->next;
1326 kfree(sp->mc_setup_head);
1327 sp->mc_setup_head = t;
1328 }
1329 sp->mc_setup_tail = NULL;
1330 sp->tx_full = 0;
1331 netif_wake_queue(dev);
1332}
1333
1334static void reset_mii(struct net_device *dev)
1335{
1336 struct speedo_private *sp = netdev_priv(dev);
1337
1338 /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
1339 if ((sp->phy[0] & 0x8000) == 0) {
1340 int phy_addr = sp->phy[0] & 0x1f;
1341 int advertising = mdio_read(dev, phy_addr, MII_ADVERTISE);
1342 int mii_bmcr = mdio_read(dev, phy_addr, MII_BMCR);
1343 mdio_write(dev, phy_addr, MII_BMCR, 0x0400);
1344 mdio_write(dev, phy_addr, MII_BMSR, 0x0000);
1345 mdio_write(dev, phy_addr, MII_ADVERTISE, 0x0000);
1346 mdio_write(dev, phy_addr, MII_BMCR, 0x8000);
1347#ifdef honor_default_port
1348 mdio_write(dev, phy_addr, MII_BMCR, mii_ctrl[dev->default_port & 7]);
1349#else
1350 mdio_read(dev, phy_addr, MII_BMCR);
1351 mdio_write(dev, phy_addr, MII_BMCR, mii_bmcr);
1352 mdio_write(dev, phy_addr, MII_ADVERTISE, advertising);
1353#endif
1354 }
1355}
1356
1357static void speedo_tx_timeout(struct net_device *dev)
1358{
1359 struct speedo_private *sp = netdev_priv(dev);
1360 void __iomem *ioaddr = sp->regs;
1361 int status = ioread16(ioaddr + SCBStatus);
1362 unsigned long flags;
1363
1364 if (netif_msg_tx_err(sp)) {
1365 printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
1366 " %4.4x at %d/%d command %8.8x.\n",
1367 dev->name, status, ioread16(ioaddr + SCBCmd),
1368 sp->dirty_tx, sp->cur_tx,
1369 sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
1370
1371 }
1372 speedo_show_state(dev);
1373#if 0
1374 if ((status & 0x00C0) != 0x0080
1375 && (status & 0x003C) == 0x0010) {
1376 /* Only the command unit has stopped. */
1377 printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
1378 dev->name);
1379		iowrite32(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
1380 ioaddr + SCBPointer);
1381 iowrite16(CUStart, ioaddr + SCBCmd);
1382 reset_mii(dev);
1383 } else {
1384#else
1385 {
1386#endif
1387 del_timer_sync(&sp->timer);
1388 /* Reset the Tx and Rx units. */
1389 iowrite32(PortReset, ioaddr + SCBPort);
1390		/* We may get spurious interrupts here, but I don't think they
1391		   can do much harm. 1999/12/09 SAW */
1392 udelay(10);
1393 /* Disable interrupts. */
1394 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1395 synchronize_irq(dev->irq);
1396 speedo_tx_buffer_gc(dev);
1397 /* Free as much as possible.
1398 It helps to recover from a hang because of out-of-memory.
1399 It also simplifies speedo_resume() in case TX ring is full or
1400		   close to full. */
1401 speedo_purge_tx(dev);
1402 speedo_refill_rx_buffers(dev, 1);
1403 spin_lock_irqsave(&sp->lock, flags);
1404 speedo_resume(dev);
1405 sp->rx_mode = -1;
1406 dev->trans_start = jiffies;
1407 spin_unlock_irqrestore(&sp->lock, flags);
1408 set_rx_mode(dev); /* it takes the spinlock itself --SAW */
1409 /* Reset MII transceiver. Do it before starting the timer to serialize
1410		   mdio_xxx operations. Yes, it's paranoia :-) 2000/05/09 SAW */
1411 reset_mii(dev);
1412 sp->timer.expires = RUN_AT(2*HZ);
1413 add_timer(&sp->timer);
1414 }
1415 return;
1416}
1417
1418static int
1419speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
1420{
1421 struct speedo_private *sp = netdev_priv(dev);
1422 void __iomem *ioaddr = sp->regs;
1423 int entry;
1424
1425 /* Prevent interrupts from changing the Tx ring from underneath us. */
1426 unsigned long flags;
1427
1428 spin_lock_irqsave(&sp->lock, flags);
1429
1430	/* Check if there is enough space. */
1431 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1432 printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
1433 netif_stop_queue(dev);
1434 sp->tx_full = 1;
1435 spin_unlock_irqrestore(&sp->lock, flags);
1436 return 1;
1437 }
1438
1439 /* Calculate the Tx descriptor entry. */
1440 entry = sp->cur_tx++ % TX_RING_SIZE;
1441
1442 sp->tx_skbuff[entry] = skb;
1443 sp->tx_ring[entry].status =
1444 cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
1445 if (!(entry & ((TX_RING_SIZE>>2)-1)))
1446 sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
1447 sp->tx_ring[entry].link =
1448 cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
1449 sp->tx_ring[entry].tx_desc_addr =
1450 cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
1451 /* The data region is always in one buffer descriptor. */
1452 sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
1453 sp->tx_ring[entry].tx_buf_addr0 =
1454 cpu_to_le32(pci_map_single(sp->pdev, skb->data,
1455 skb->len, PCI_DMA_TODEVICE));
1456 sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
1457
1458 /* workaround for hardware bug on 10 mbit half duplex */
1459
1460 if ((sp->partner == 0) && (sp->chip_id == 1)) {
1461 wait_for_cmd_done(dev, sp);
1462 iowrite8(0 , ioaddr + SCBCmd);
1463 udelay(1);
1464 }
1465
1466 /* Trigger the command unit resume. */
1467 wait_for_cmd_done(dev, sp);
1468 clear_suspend(sp->last_cmd);
1469 /* We want the time window between clearing suspend flag on the previous
1470 command and resuming CU to be as small as possible.
1471 Interrupts in between are very undesired. --SAW */
1472 iowrite8(CUResume, ioaddr + SCBCmd);
1473 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
1474
1475	/* Leave room for set_rx_mode(). If no more space remains than is reserved
1476	   for the multicast filter, mark the ring as full. */
1477 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
1478 netif_stop_queue(dev);
1479 sp->tx_full = 1;
1480 }
1481
1482 spin_unlock_irqrestore(&sp->lock, flags);
1483
1484 dev->trans_start = jiffies;
1485
1486 return 0;
1487}
1488
1489static void speedo_tx_buffer_gc(struct net_device *dev)
1490{
1491 unsigned int dirty_tx;
1492 struct speedo_private *sp = netdev_priv(dev);
1493
1494 dirty_tx = sp->dirty_tx;
1495 while ((int)(sp->cur_tx - dirty_tx) > 0) {
1496 int entry = dirty_tx % TX_RING_SIZE;
1497 int status = le32_to_cpu(sp->tx_ring[entry].status);
1498
1499 if (netif_msg_tx_done(sp))
1500 printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
1501 entry, status);
1502 if ((status & StatusComplete) == 0)
1503 break; /* It still hasn't been processed. */
1504		if ((status & TxUnderrun) &&
1505		    sp->tx_threshold < 0x01e08000) {
1506			if (netif_msg_tx_err(sp))
1507				printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
1508					dev->name);
1509			sp->tx_threshold += 0x00040000;
1510		}
1511 /* Free the original skb. */
1512 if (sp->tx_skbuff[entry]) {
1513 sp->stats.tx_packets++; /* Count only user packets. */
1514 sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
1515 pci_unmap_single(sp->pdev,
1516 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
1517 sp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE);
1518 dev_kfree_skb_irq(sp->tx_skbuff[entry]);
1519 sp->tx_skbuff[entry] = NULL;
1520 }
1521 dirty_tx++;
1522 }
1523
1524 if (netif_msg_tx_err(sp) && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
1525 printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
1526 " full=%d.\n",
1527 dirty_tx, sp->cur_tx, sp->tx_full);
1528 dirty_tx += TX_RING_SIZE;
1529 }
1530
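	/* Multicast setup frames live outside the Tx ring; each block records
	   the cur_tx value current when it was queued, so it can be unmapped
	   and freed once dirty_tx has advanced past that slot. */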
1531 while (sp->mc_setup_head != NULL
1532 && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
1533 struct speedo_mc_block *t;
1534 if (netif_msg_tx_err(sp))
1535 printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
1536 pci_unmap_single(sp->pdev, sp->mc_setup_head->frame_dma,
1537 sp->mc_setup_head->len, PCI_DMA_TODEVICE);
1538 t = sp->mc_setup_head->next;
1539 kfree(sp->mc_setup_head);
1540 sp->mc_setup_head = t;
1541 }
1542 if (sp->mc_setup_head == NULL)
1543 sp->mc_setup_tail = NULL;
1544
1545 sp->dirty_tx = dirty_tx;
1546}
1547
1548/* The interrupt handler does all of the Rx thread work and cleans up
1549 after the Tx thread. */
1550static irqreturn_t speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
1551{
1552 struct net_device *dev = (struct net_device *)dev_instance;
1553 struct speedo_private *sp;
1554 void __iomem *ioaddr;
1555 long boguscnt = max_interrupt_work;
1556 unsigned short status;
1557 unsigned int handled = 0;
1558
1559 sp = netdev_priv(dev);
1560 ioaddr = sp->regs;
1561
1562#ifndef final_version
1563 /* A lock to prevent simultaneous entry on SMP machines. */
1564 if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
1565		printk(KERN_ERR "%s: SMP simultaneous entry of an interrupt handler.\n",
1566 dev->name);
1567 sp->in_interrupt = 0; /* Avoid halting machine. */
1568 return IRQ_NONE;
1569 }
1570#endif
1571
1572 do {
1573 status = ioread16(ioaddr + SCBStatus);
1574 /* Acknowledge all of the current interrupt sources ASAP. */
1575 /* Will change from 0xfc00 to 0xff00 when we start handling
1576 FCP and ER interrupts --Dragan */
1577 iowrite16(status & 0xfc00, ioaddr + SCBStatus);
1578
1579 if (netif_msg_intr(sp))
1580 printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
1581 dev->name, status);
1582
1583 if ((status & 0xfc00) == 0)
1584 break;
1585 handled = 1;
1586
1587
1588 if ((status & 0x5000) || /* Packet received, or Rx error. */
1589 (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
1590 /* Need to gather the postponed packet. */
1591 speedo_rx(dev);
1592
1593 /* Always check if all rx buffers are allocated. --SAW */
1594 speedo_refill_rx_buffers(dev, 0);
1595
1596 spin_lock(&sp->lock);
1597 /*
1598 * The chip may have suspended reception for various reasons.
1599 * Check for that, and re-prime it should this be the case.
1600 */
1601 switch ((status >> 2) & 0xf) {
1602 case 0: /* Idle */
1603 break;
1604 case 1: /* Suspended */
1605 case 2: /* No resources (RxFDs) */
1606 case 9: /* Suspended with no more RBDs */
1607 case 10: /* No resources due to no RBDs */
1608 case 12: /* Ready with no RBDs */
1609 speedo_rx_soft_reset(dev);
1610 break;
1611 case 3: case 5: case 6: case 7: case 8:
1612 case 11: case 13: case 14: case 15:
1613 /* these are all reserved values */
1614 break;
1615 }
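		/* (status >> 2) & 0xf above is the receive unit state from the
		   SCB status word; the cases that call speedo_rx_soft_reset()
		   are, as far as I can tell, the ones where the RU has stalled
		   waiting for resources and needs to be restarted. */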
1616
1617
1618 /* User interrupt, Command/Tx unit interrupt or CU not active. */
1619 if (status & 0xA400) {
1620 speedo_tx_buffer_gc(dev);
1621 if (sp->tx_full
1622 && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
1623 /* The ring is no longer full. */
1624 sp->tx_full = 0;
1625 netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
1626 }
1627 }
1628
1629 spin_unlock(&sp->lock);
1630
1631 if (--boguscnt < 0) {
1632 printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
1633 dev->name, status);
1634 /* Clear all interrupt sources. */
1635 /* Will change from 0xfc00 to 0xff00 when we start handling
1636 FCP and ER interrupts --Dragan */
1637 iowrite16(0xfc00, ioaddr + SCBStatus);
1638 break;
1639 }
1640 } while (1);
1641
1642 if (netif_msg_intr(sp))
1643 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
1644 dev->name, ioread16(ioaddr + SCBStatus));
1645
1646 clear_bit(0, (void*)&sp->in_interrupt);
1647 return IRQ_RETVAL(handled);
1648}
1649
1650static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
1651{
1652 struct speedo_private *sp = netdev_priv(dev);
1653 struct RxFD *rxf;
1654 struct sk_buff *skb;
1655 /* Get a fresh skbuff to replace the consumed one. */
1656	skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
1657	sp->rx_skbuff[entry] = skb;
1658	if (skb == NULL) {
1659		sp->rx_ringp[entry] = NULL;
1660		return NULL;
1661	}
1662	/* rx_align() touches the skb, so do it only after the NULL check. */
1663	rx_align(skb);			/* Align IP on 16 byte boundary */
1664 rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1665 sp->rx_ring_dma[entry] =
1666 pci_map_single(sp->pdev, rxf,
1667 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1668 skb->dev = dev;
1669 skb_reserve(skb, sizeof(struct RxFD));
1670 rxf->rx_buf_addr = 0xffffffff;
1671 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1672 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1673 return rxf;
1674}
1675
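/* speedo_rx_link() appends a fresh RxFD to the tail of the receive list.
   The new descriptor is created with its top two status bits set (the
   0xC0000000 below, presumably the EL and suspend bits) plus a
   driver-private '1'; clearing those bits on the old tail then lets the
   receive unit run straight on into the new descriptor. */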
1676static inline void speedo_rx_link(struct net_device *dev, int entry,
1677 struct RxFD *rxf, dma_addr_t rxf_dma)
1678{
1679 struct speedo_private *sp = netdev_priv(dev);
1680 rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
1681 rxf->link = 0; /* None yet. */
1682 rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
1683 sp->last_rxf->link = cpu_to_le32(rxf_dma);
1684 sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
1685 pci_dma_sync_single_for_device(sp->pdev, sp->last_rxf_dma,
1686 sizeof(struct RxFD), PCI_DMA_TODEVICE);
1687 sp->last_rxf = rxf;
1688 sp->last_rxf_dma = rxf_dma;
1689}
1690
1691static int speedo_refill_rx_buf(struct net_device *dev, int force)
1692{
1693 struct speedo_private *sp = netdev_priv(dev);
1694 int entry;
1695 struct RxFD *rxf;
1696
1697 entry = sp->dirty_rx % RX_RING_SIZE;
1698 if (sp->rx_skbuff[entry] == NULL) {
1699 rxf = speedo_rx_alloc(dev, entry);
1700 if (rxf == NULL) {
1701 unsigned int forw;
1702 int forw_entry;
1703 if (netif_msg_rx_err(sp) || !(sp->rx_ring_state & RrOOMReported)) {
1704 printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
1705 dev->name, force);
1706 sp->rx_ring_state |= RrOOMReported;
1707 }
1708 speedo_show_state(dev);
1709 if (!force)
1710 return -1; /* Better luck next time! */
1711 /* Borrow an skb from one of next entries. */
1712 for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
1713 if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
1714 break;
1715 if (forw == sp->cur_rx)
1716 return -1;
1717 forw_entry = forw % RX_RING_SIZE;
1718 sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
1719 sp->rx_skbuff[forw_entry] = NULL;
1720 rxf = sp->rx_ringp[forw_entry];
1721 sp->rx_ringp[forw_entry] = NULL;
1722 sp->rx_ringp[entry] = rxf;
1723 }
1724 } else {
1725 rxf = sp->rx_ringp[entry];
1726 }
1727 speedo_rx_link(dev, entry, rxf, sp->rx_ring_dma[entry]);
1728 sp->dirty_rx++;
1729 sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
1730 return 0;
1731}
1732
1733static void speedo_refill_rx_buffers(struct net_device *dev, int force)
1734{
1735 struct speedo_private *sp = netdev_priv(dev);
1736
1737 /* Refill the RX ring. */
1738 while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
1739 speedo_refill_rx_buf(dev, force) != -1);
1740}
1741
1742static int
1743speedo_rx(struct net_device *dev)
1744{
1745 struct speedo_private *sp = netdev_priv(dev);
1746 int entry = sp->cur_rx % RX_RING_SIZE;
1747 int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
1748 int alloc_ok = 1;
1749 int npkts = 0;
1750
1751 if (netif_msg_intr(sp))
1752 printk(KERN_DEBUG " In speedo_rx().\n");
1753 /* If we own the next entry, it's a new packet. Send it up. */
1754 while (sp->rx_ringp[entry] != NULL) {
1755 int status;
1756 int pkt_len;
1757
1758 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1759 sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1760 status = le32_to_cpu(sp->rx_ringp[entry]->status);
1761 pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
1762
1763 if (!(status & RxComplete))
1764 break;
1765
1766 if (--rx_work_limit < 0)
1767 break;
1768
1769 /* Check for a rare out-of-memory case: the current buffer is
1770 the last buffer allocated in the RX ring. --SAW */
1771 if (sp->last_rxf == sp->rx_ringp[entry]) {
1772 /* Postpone the packet. It'll be reaped at an interrupt when this
1773 packet is no longer the last packet in the ring. */
1774 if (netif_msg_rx_err(sp))
1775 printk(KERN_DEBUG "%s: RX packet postponed!\n",
1776 dev->name);
1777 sp->rx_ring_state |= RrPostponed;
1778 break;
1779 }
1780
1781 if (netif_msg_rx_status(sp))
1782 printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
1783 pkt_len);
1784 if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
1785 if (status & RxErrTooBig)
1786 printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
1787 "status %8.8x!\n", dev->name, status);
1788 else if (! (status & RxOK)) {
1789 /* There was a fatal error. This *should* be impossible. */
1790 sp->stats.rx_errors++;
1791 printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
1792 "status %8.8x.\n",
1793 dev->name, status);
1794 }
1795 } else {
1796 struct sk_buff *skb;
1797
1798 /* Check if the packet is long enough to just accept without
1799 copying to a properly sized skbuff. */
1800 if (pkt_len < rx_copybreak
1801				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1802 skb->dev = dev;
1803 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1804 /* 'skb_put()' points to the start of sk_buff data area. */
1805 pci_dma_sync_single_for_cpu(sp->pdev, sp->rx_ring_dma[entry],
1806 sizeof(struct RxFD) + pkt_len,
1807 PCI_DMA_FROMDEVICE);
1808
1809#if 1 || USE_IP_CSUM
1810 /* Packet is in one chunk -- we can copy + cksum. */
1811 eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1812 skb_put(skb, pkt_len);
1813#else
1814 memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1815 pkt_len);
1816#endif
1817 pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
1818 sizeof(struct RxFD) + pkt_len,
1819 PCI_DMA_FROMDEVICE);
1820 npkts++;
1821 } else {
1822 /* Pass up the already-filled skbuff. */
1823 skb = sp->rx_skbuff[entry];
1824 if (skb == NULL) {
1825 printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
1826 dev->name);
1827 break;
1828 }
1829 sp->rx_skbuff[entry] = NULL;
1830 skb_put(skb, pkt_len);
1831 npkts++;
1832 sp->rx_ringp[entry] = NULL;
1833 pci_unmap_single(sp->pdev, sp->rx_ring_dma[entry],
1834 PKT_BUF_SZ + sizeof(struct RxFD),
1835 PCI_DMA_FROMDEVICE);
1836 }
1837 skb->protocol = eth_type_trans(skb, dev);
1838 netif_rx(skb);
1839 dev->last_rx = jiffies;
1840 sp->stats.rx_packets++;
1841 sp->stats.rx_bytes += pkt_len;
1842 }
1843 entry = (++sp->cur_rx) % RX_RING_SIZE;
1844 sp->rx_ring_state &= ~RrPostponed;
1845 /* Refill the recently taken buffers.
1846 Do it one-by-one to handle traffic bursts better. */
1847 if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
1848 alloc_ok = 0;
1849 }
1850
1851 /* Try hard to refill the recently taken buffers. */
1852 speedo_refill_rx_buffers(dev, 1);
1853
1854 if (npkts)
1855 sp->last_rx_time = jiffies;
1856
1857 return 0;
1858}
1859
1860static int
1861speedo_close(struct net_device *dev)
1862{
1863 struct speedo_private *sp = netdev_priv(dev);
1864 void __iomem *ioaddr = sp->regs;
1865 int i;
1866
1867 netdevice_stop(dev);
1868 netif_stop_queue(dev);
1869
1870 if (netif_msg_ifdown(sp))
1871 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
1872 dev->name, ioread16(ioaddr + SCBStatus));
1873
1874 /* Shut off the media monitoring timer. */
1875 del_timer_sync(&sp->timer);
1876
1877 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
1878
1879	/* Shutting down the chip nicely fails to disable flow control. So... */
1880 iowrite32(PortPartialReset, ioaddr + SCBPort);
1881 ioread32(ioaddr + SCBPort); /* flush posted write */
1882 /*
1883 * The chip requires a 10 microsecond quiet period. Wait here!
1884 */
1885 udelay(10);
1886
1887 free_irq(dev->irq, dev);
1888 speedo_show_state(dev);
1889
1890 /* Free all the skbuffs in the Rx and Tx queues. */
1891 for (i = 0; i < RX_RING_SIZE; i++) {
1892 struct sk_buff *skb = sp->rx_skbuff[i];
1893 sp->rx_skbuff[i] = NULL;
1894 /* Clear the Rx descriptors. */
1895 if (skb) {
1896 pci_unmap_single(sp->pdev,
1897 sp->rx_ring_dma[i],
1898 PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
1899 dev_kfree_skb(skb);
1900 }
1901 }
1902
1903 for (i = 0; i < TX_RING_SIZE; i++) {
1904 struct sk_buff *skb = sp->tx_skbuff[i];
1905 sp->tx_skbuff[i] = NULL;
1906 /* Clear the Tx descriptors. */
1907 if (skb) {
1908 pci_unmap_single(sp->pdev,
1909 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
1910 skb->len, PCI_DMA_TODEVICE);
1911 dev_kfree_skb(skb);
1912 }
1913 }
1914
1915 /* Free multicast setting blocks. */
1916 for (i = 0; sp->mc_setup_head != NULL; i++) {
1917 struct speedo_mc_block *t;
1918 t = sp->mc_setup_head->next;
1919 kfree(sp->mc_setup_head);
1920 sp->mc_setup_head = t;
1921 }
1922 sp->mc_setup_tail = NULL;
1923 if (netif_msg_ifdown(sp))
1924 printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
1925
1926 pci_set_power_state(sp->pdev, PCI_D2);
1927
1928 return 0;
1929}
1930
1931/* The Speedo-3 has an especially awkward and unusable method of getting
1932 statistics out of the chip. It takes an unpredictable length of time
1933 for the dump-stats command to complete. To avoid a busy-wait loop we
1934 update the stats with the previous dump results, and then trigger a
1935 new dump.
1936
1937 Oh, and incoming frames are dropped while executing dump-stats!
1938 */
1939static struct net_device_stats *
1940speedo_get_stats(struct net_device *dev)
1941{
1942 struct speedo_private *sp = netdev_priv(dev);
1943 void __iomem *ioaddr = sp->regs;
1944
1945 /* Update only if the previous dump finished. */
1946	if (sp->lstats->done_marker == cpu_to_le32(0xA007)) {
1947 sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats->tx_coll16_errs);
1948 sp->stats.tx_window_errors += le32_to_cpu(sp->lstats->tx_late_colls);
1949 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_underruns);
1950 sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats->tx_lost_carrier);
1951 /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats->tx_deferred);*/
1952 sp->stats.collisions += le32_to_cpu(sp->lstats->tx_total_colls);
1953 sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats->rx_crc_errs);
1954 sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats->rx_align_errs);
1955 sp->stats.rx_over_errors += le32_to_cpu(sp->lstats->rx_resource_errs);
1956 sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats->rx_overrun_errs);
1957 sp->stats.rx_length_errors += le32_to_cpu(sp->lstats->rx_runt_errs);
1958 sp->lstats->done_marker = 0x0000;
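		/* Re-arm the check above: the chip should rewrite the marker
		   (0xA007) once the dump-stats command triggered below completes. */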
1959 if (netif_running(dev)) {
1960 unsigned long flags;
1961 /* Take a spinlock to make wait_for_cmd_done and sending the
1962 command atomic. --SAW */
1963 spin_lock_irqsave(&sp->lock, flags);
1964 wait_for_cmd_done(dev, sp);
1965 iowrite8(CUDumpStats, ioaddr + SCBCmd);
1966 spin_unlock_irqrestore(&sp->lock, flags);
1967 }
1968 }
1969 return &sp->stats;
1970}
1971
1972static void speedo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1973{
1974 struct speedo_private *sp = netdev_priv(dev);
1975 strncpy(info->driver, "eepro100", sizeof(info->driver)-1);
1976 strncpy(info->version, version, sizeof(info->version)-1);
1977 if (sp->pdev)
1978 strcpy(info->bus_info, pci_name(sp->pdev));
1979}
1980
1981static int speedo_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1982{
1983 struct speedo_private *sp = netdev_priv(dev);
1984 spin_lock_irq(&sp->lock);
1985 mii_ethtool_gset(&sp->mii_if, ecmd);
1986 spin_unlock_irq(&sp->lock);
1987 return 0;
1988}
1989
1990static int speedo_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1991{
1992 struct speedo_private *sp = netdev_priv(dev);
1993 int res;
1994 spin_lock_irq(&sp->lock);
1995 res = mii_ethtool_sset(&sp->mii_if, ecmd);
1996 spin_unlock_irq(&sp->lock);
1997 return res;
1998}
1999
2000static int speedo_nway_reset(struct net_device *dev)
2001{
2002 struct speedo_private *sp = netdev_priv(dev);
2003 return mii_nway_restart(&sp->mii_if);
2004}
2005
2006static u32 speedo_get_link(struct net_device *dev)
2007{
2008 struct speedo_private *sp = netdev_priv(dev);
2009 return mii_link_ok(&sp->mii_if);
2010}
2011
2012static u32 speedo_get_msglevel(struct net_device *dev)
2013{
2014 struct speedo_private *sp = netdev_priv(dev);
2015 return sp->msg_enable;
2016}
2017
2018static void speedo_set_msglevel(struct net_device *dev, u32 v)
2019{
2020 struct speedo_private *sp = netdev_priv(dev);
2021 sp->msg_enable = v;
2022}
2023
2024static struct ethtool_ops ethtool_ops = {
2025 .get_drvinfo = speedo_get_drvinfo,
2026 .get_settings = speedo_get_settings,
2027 .set_settings = speedo_set_settings,
2028 .nway_reset = speedo_nway_reset,
2029 .get_link = speedo_get_link,
2030 .get_msglevel = speedo_get_msglevel,
2031 .set_msglevel = speedo_set_msglevel,
2032};
2033
2034static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2035{
2036 struct speedo_private *sp = netdev_priv(dev);
2037 struct mii_ioctl_data *data = if_mii(rq);
2038 int phy = sp->phy[0] & 0x1f;
2039 int saved_acpi;
2040 int t;
2041
2042 switch(cmd) {
2043 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
2044 data->phy_id = phy;
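		/* fall through */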
2045
2046 case SIOCGMIIREG: /* Read MII PHY register. */
2047 /* FIXME: these operations need to be serialized with MDIO
2048 access from the timeout handler.
2049 They are currently serialized only with MDIO access from the
2050 timer routine. 2000/05/09 SAW */
2051 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2052 t = del_timer_sync(&sp->timer);
2053 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
2054 if (t)
2055 add_timer(&sp->timer); /* may be set to the past --SAW */
2056 pci_set_power_state(sp->pdev, saved_acpi);
2057 return 0;
2058
2059 case SIOCSMIIREG: /* Write MII PHY register. */
2060 if (!capable(CAP_NET_ADMIN))
2061 return -EPERM;
2062 saved_acpi = pci_set_power_state(sp->pdev, PCI_D0);
2063 t = del_timer_sync(&sp->timer);
2064 mdio_write(dev, data->phy_id, data->reg_num, data->val_in);
2065 if (t)
2066 add_timer(&sp->timer); /* may be set to the past --SAW */
2067 pci_set_power_state(sp->pdev, saved_acpi);
2068 return 0;
2069 default:
2070 return -EOPNOTSUPP;
2071 }
2072}
2073
2074/* Set or clear the multicast filter for this adaptor.
2075 This is very ugly with Intel chips -- we usually have to execute an
2076 entire configuration command, plus process a multicast command.
2077 This is complicated. We must put a large configuration command and
2078 an arbitrarily-sized multicast command in the transmit list.
2079 To minimize the disruption -- the previous command might have already
2080 loaded the link -- we convert the current command block, normally a Tx
2081 command, into a no-op and link it to the new command.
2082*/
2083static void set_rx_mode(struct net_device *dev)
2084{
2085 struct speedo_private *sp = netdev_priv(dev);
2086 void __iomem *ioaddr = sp->regs;
2087 struct descriptor *last_cmd;
2088 char new_rx_mode;
2089 unsigned long flags;
2090 int entry, i;
2091
2092 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2093 new_rx_mode = 3;
2094 } else if ((dev->flags & IFF_ALLMULTI) ||
2095 dev->mc_count > multicast_filter_limit) {
2096 new_rx_mode = 1;
2097 } else
2098 new_rx_mode = 0;
2099
2100 if (netif_msg_rx_status(sp))
2101 printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
2102 sp->rx_mode, new_rx_mode);
2103
2104 if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
2105 /* The Tx ring is full -- don't add anything! Hope the mode will be
2106 * set again later. */
2107 sp->rx_mode = -1;
2108 return;
2109 }
2110
2111 if (new_rx_mode != sp->rx_mode) {
2112 u8 *config_cmd_data;
2113
2114 spin_lock_irqsave(&sp->lock, flags);
2115 entry = sp->cur_tx++ % TX_RING_SIZE;
2116 last_cmd = sp->last_cmd;
2117 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2118
2119 sp->tx_skbuff[entry] = NULL; /* Redundant. */
2120 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
2121 sp->tx_ring[entry].link =
2122 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2123 config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
2124 /* Construct a full CmdConfig frame. */
2125 memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
2126 config_cmd_data[1] = (txfifo << 4) | rxfifo;
2127 config_cmd_data[4] = rxdmacount;
2128 config_cmd_data[5] = txdmacount + 0x80;
2129 config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
2130		/* 0x80 doesn't disable FC; 0x84 does.
2131		   Disable flow control since we are not ACK-ing any FC interrupts
2132		   for now. --Dragan */
2133 config_cmd_data[19] = 0x84;
2134 config_cmd_data[19] |= sp->mii_if.full_duplex ? 0x40 : 0;
2135 config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
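		/* In terms of this driver's rx_mode values: bit 1 (set only for
		   promiscuous mode) drives the low bit of config byte 15, and
		   bit 0 (promiscuous or all-multicast) selects 0x0D instead of
		   0x05 for byte 21; the byte layout follows Intel's configure
		   command format. */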
2136 if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
2137 config_cmd_data[15] |= 0x80;
2138 config_cmd_data[8] = 0;
2139 }
2140 /* Trigger the command unit resume. */
2141 wait_for_cmd_done(dev, sp);
2142 clear_suspend(last_cmd);
2143 iowrite8(CUResume, ioaddr + SCBCmd);
2144 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2145 netif_stop_queue(dev);
2146 sp->tx_full = 1;
2147 }
2148 spin_unlock_irqrestore(&sp->lock, flags);
2149 }
2150
2151 if (new_rx_mode == 0 && dev->mc_count < 4) {
2152 /* The simple case of 0-3 multicast list entries occurs often, and
2153 fits within one tx_ring[] entry. */
2154 struct dev_mc_list *mclist;
2155 u16 *setup_params, *eaddrs;
2156
2157 spin_lock_irqsave(&sp->lock, flags);
2158 entry = sp->cur_tx++ % TX_RING_SIZE;
2159 last_cmd = sp->last_cmd;
2160 sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
2161
2162 sp->tx_skbuff[entry] = NULL;
2163 sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
2164 sp->tx_ring[entry].link =
2165 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
2166 sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
2167 setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
2168 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2169 /* Fill in the multicast addresses. */
2170 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2171 i++, mclist = mclist->next) {
2172 eaddrs = (u16 *)mclist->dmi_addr;
2173 *setup_params++ = *eaddrs++;
2174 *setup_params++ = *eaddrs++;
2175 *setup_params++ = *eaddrs++;
2176 }
2177
2178 wait_for_cmd_done(dev, sp);
2179 clear_suspend(last_cmd);
2180 /* Immediately trigger the command unit resume. */
2181 iowrite8(CUResume, ioaddr + SCBCmd);
2182
2183 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2184 netif_stop_queue(dev);
2185 sp->tx_full = 1;
2186 }
2187 spin_unlock_irqrestore(&sp->lock, flags);
2188 } else if (new_rx_mode == 0) {
2189 struct dev_mc_list *mclist;
2190 u16 *setup_params, *eaddrs;
2191 struct speedo_mc_block *mc_blk;
2192 struct descriptor *mc_setup_frm;
2193 int i;
2194
2195 mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
2196 GFP_ATOMIC);
2197 if (mc_blk == NULL) {
2198 printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
2199 dev->name);
2200 sp->rx_mode = -1; /* We failed, try again. */
2201 return;
2202 }
2203 mc_blk->next = NULL;
2204 mc_blk->len = 2 + multicast_filter_limit*6;
2205 mc_blk->frame_dma =
2206 pci_map_single(sp->pdev, &mc_blk->frame, mc_blk->len,
2207 PCI_DMA_TODEVICE);
2208 mc_setup_frm = &mc_blk->frame;
2209
2210 /* Fill the setup frame. */
2211 if (netif_msg_ifup(sp))
2212 printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
2213 dev->name, mc_setup_frm);
2214 mc_setup_frm->cmd_status =
2215 cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
2216 /* Link set below. */
2217 setup_params = (u16 *)&mc_setup_frm->params;
2218 *setup_params++ = cpu_to_le16(dev->mc_count*6);
2219 /* Fill in the multicast addresses. */
2220 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
2221 i++, mclist = mclist->next) {
2222 eaddrs = (u16 *)mclist->dmi_addr;
2223 *setup_params++ = *eaddrs++;
2224 *setup_params++ = *eaddrs++;
2225 *setup_params++ = *eaddrs++;
2226 }
2227
2228 /* Disable interrupts while playing with the Tx Cmd list. */
2229 spin_lock_irqsave(&sp->lock, flags);
2230
2231 if (sp->mc_setup_tail)
2232 sp->mc_setup_tail->next = mc_blk;
2233 else
2234 sp->mc_setup_head = mc_blk;
2235 sp->mc_setup_tail = mc_blk;
2236 mc_blk->tx = sp->cur_tx;
2237
2238 entry = sp->cur_tx++ % TX_RING_SIZE;
2239 last_cmd = sp->last_cmd;
2240 sp->last_cmd = mc_setup_frm;
2241
2242 /* Change the command to a NoOp, pointing to the CmdMulti command. */
2243 sp->tx_skbuff[entry] = NULL;
2244 sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
2245 sp->tx_ring[entry].link = cpu_to_le32(mc_blk->frame_dma);
2246
2247 /* Set the link in the setup frame. */
2248 mc_setup_frm->link =
2249 cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
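		/* This is the trick described above set_rx_mode(): the ring slot
		   becomes a NoOp whose link points at the out-of-ring setup frame,
		   and the setup frame links back to the next ring slot, so the
		   command unit walks through it transparently. */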
2250
2251 pci_dma_sync_single_for_device(sp->pdev, mc_blk->frame_dma,
2252 mc_blk->len, PCI_DMA_TODEVICE);
2253
2254 wait_for_cmd_done(dev, sp);
2255 clear_suspend(last_cmd);
2256 /* Immediately trigger the command unit resume. */
2257 iowrite8(CUResume, ioaddr + SCBCmd);
2258
2259 if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
2260 netif_stop_queue(dev);
2261 sp->tx_full = 1;
2262 }
2263 spin_unlock_irqrestore(&sp->lock, flags);
2264
2265 if (netif_msg_rx_status(sp))
2266			printk(KERN_DEBUG " CmdMCSetup with %d multicast addresses in entry %d.\n",
2267				dev->mc_count, entry);
2268 }
2269
2270 sp->rx_mode = new_rx_mode;
2271}
2272
2273#ifdef CONFIG_PM
2274static int eepro100_suspend(struct pci_dev *pdev, pm_message_t state)
2275{
2276 struct net_device *dev = pci_get_drvdata (pdev);
2277 struct speedo_private *sp = netdev_priv(dev);
2278 void __iomem *ioaddr = sp->regs;
2279
2280 pci_save_state(pdev);
2281
2282 if (!netif_running(dev))
2283 return 0;
2284
2285 del_timer_sync(&sp->timer);
2286
2287 netif_device_detach(dev);
2288 iowrite32(PortPartialReset, ioaddr + SCBPort);
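	/* The partial reset presumably quiesces the receive and transmit units
	   before the device is disabled and put into D3hot below. */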
2289
2290 /* XXX call pci_set_power_state ()? */
2291 pci_disable_device(pdev);
2292 pci_set_power_state (pdev, PCI_D3hot);
2293 return 0;
2294}
2295
2296static int eepro100_resume(struct pci_dev *pdev)
2297{
2298 struct net_device *dev = pci_get_drvdata (pdev);
2299 struct speedo_private *sp = netdev_priv(dev);
2300 void __iomem *ioaddr = sp->regs;
2301
2302 pci_set_power_state(pdev, PCI_D0);
2303 pci_restore_state(pdev);
2304 pci_enable_device(pdev);
2305 pci_set_master(pdev);
2306
2307 if (!netif_running(dev))
2308 return 0;
2309
2310	/* I'm absolutely uncertain whether this part of the code works.
2311 The problems are:
2312 - correct hardware reinitialization;
2313 - correct driver behavior between different steps of the
2314 reinitialization;
2315 - serialization with other driver calls.
2316 2000/03/08 SAW */
2317 iowrite16(SCBMaskAll, ioaddr + SCBCmd);
2318 speedo_resume(dev);
2319 netif_device_attach(dev);
2320 sp->rx_mode = -1;
2321 sp->flow_ctrl = sp->partner = 0;
2322 set_rx_mode(dev);
2323 sp->timer.expires = RUN_AT(2*HZ);
2324 add_timer(&sp->timer);
2325 return 0;
2326}
2327#endif /* CONFIG_PM */
2328
2329static void __devexit eepro100_remove_one (struct pci_dev *pdev)
2330{
2331 struct net_device *dev = pci_get_drvdata (pdev);
2332 struct speedo_private *sp = netdev_priv(dev);
2333
2334 unregister_netdev(dev);
2335
2336 release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
2337 release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
2338
2339 pci_iounmap(pdev, sp->regs);
2340 pci_free_consistent(pdev, TX_RING_SIZE * sizeof(struct TxFD)
2341 + sizeof(struct speedo_stats),
2342 sp->tx_ring, sp->tx_ring_dma);
2343 pci_disable_device(pdev);
2344 free_netdev(dev);
2345}
2346
2347static struct pci_device_id eepro100_pci_tbl[] = {
2348 { PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
2349 { PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
2350 { PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
2351 { PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
2352 { PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
2353 { PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
2354 { PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
2355 { PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
2356 { PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
2357 { PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
2358 { PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
2359 { PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
2360 { PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
2361 { PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
2362 { PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
2363 { PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
2364 { PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
2365 { PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
2366 { PCI_VENDOR_ID_INTEL, 0x1050, PCI_ANY_ID, PCI_ANY_ID, },
2367 { PCI_VENDOR_ID_INTEL, 0x1059, PCI_ANY_ID, PCI_ANY_ID, },
2368 { PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
2369 { PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
2370 { PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
2371 { PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
2372 { PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
2373 { PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
2374 { 0,}
2375};
2376MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
2377
2378static struct pci_driver eepro100_driver = {
2379 .name = "eepro100",
2380 .id_table = eepro100_pci_tbl,
2381 .probe = eepro100_init_one,
2382 .remove = __devexit_p(eepro100_remove_one),
2383#ifdef CONFIG_PM
2384 .suspend = eepro100_suspend,
2385 .resume = eepro100_resume,
2386#endif /* CONFIG_PM */
2387};
2388
2389static int __init eepro100_init_module(void)
2390{
2391#ifdef MODULE
2392 printk(version);
2393#endif
2394 return pci_module_init(&eepro100_driver);
2395}
2396
2397static void __exit eepro100_cleanup_module(void)
2398{
2399 pci_unregister_driver(&eepro100_driver);
2400}
2401
2402module_init(eepro100_init_module);
2403module_exit(eepro100_cleanup_module);
2404
2405/*
2406 * Local variables:
2407 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
2408 * c-indent-level: 4
2409 * c-basic-offset: 4
2410 * tab-width: 4
2411 * End:
2412 */