author     Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-05-20 10:15:19 -0400
committer  Jeff Kirsher <jeffrey.t.kirsher@intel.com>    2011-08-12 03:21:25 -0400
commit     a88394cfb58007cca945699545469017beb0d206 (patch)
tree       7efa1efcaf18d1b8f7bede1f25acdfc0c657c086 /drivers/net/ethernet
parent     5ff2241dd42ade03572753f9ed7743719b47c474 (diff)
ewrk3/tulip: Move the DEC - Tulip drivers
Move the DEC - Tulip driver into drivers/net/ethernet/dec/tulip/
and make the necessary Kconfig and Makefile changes.
The Digital Equipment (DEC) driver ewrk3 was moved into
drivers/net/ethernet/dec/ and the remaining drivers (Tulip)
were moved into drivers/net/ethernet/dec/tulip/
CC: Tobias Ringstrom <tori@unhappy.mine.nu>
CC: Grant Grundler <grundler@parisc-linux.org>
CC: David Davies <davies@maniac.ultranet.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: Grant Grundler <grundler@parisc-linux.org>
Diffstat (limited to 'drivers/net/ethernet')
24 files changed, 23629 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9410f20241f6..ed428501abba 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -19,6 +19,7 @@ source "drivers/net/ethernet/broadcom/Kconfig"
19 | source "drivers/net/ethernet/brocade/Kconfig" | 19 | source "drivers/net/ethernet/brocade/Kconfig" |
20 | source "drivers/net/ethernet/chelsio/Kconfig" | 20 | source "drivers/net/ethernet/chelsio/Kconfig" |
21 | source "drivers/net/ethernet/cisco/Kconfig" | 21 | source "drivers/net/ethernet/cisco/Kconfig" |
22 | source "drivers/net/ethernet/dec/Kconfig" | ||
22 | source "drivers/net/ethernet/dlink/Kconfig" | 23 | source "drivers/net/ethernet/dlink/Kconfig" |
23 | source "drivers/net/ethernet/emulex/Kconfig" | 24 | source "drivers/net/ethernet/emulex/Kconfig" |
24 | source "drivers/net/ethernet/neterion/Kconfig" | 25 | source "drivers/net/ethernet/neterion/Kconfig" |
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 5d89fd9d672b..3de82490adec 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
11 | obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ | 11 | obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/ |
12 | obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ | 12 | obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ |
13 | obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ | 13 | obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ |
14 | obj-$(CONFIG_NET_VENDOR_DEC) += dec/ | ||
14 | obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ | 15 | obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ |
15 | obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ | 16 | obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ |
16 | obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/ | 17 | obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/ |
diff --git a/drivers/net/ethernet/dec/Kconfig b/drivers/net/ethernet/dec/Kconfig
new file mode 100644
index 000000000000..40e8df9fde8d
--- /dev/null
+++ b/drivers/net/ethernet/dec/Kconfig
@@ -0,0 +1,36 @@
1 | # | ||
2 | # Digital Equipment Inc network device configuration | ||
3 | # | ||
4 | |||
5 | config NET_VENDOR_DEC | ||
6 | bool "Digital Equipment devices" | ||
7 | depends on PCI || EISA || CARDBUS | ||
8 | ---help--- | ||
9 | If you have a network (Ethernet) card belonging to this class, say Y | ||
10 | and read the Ethernet-HOWTO, available from | ||
11 | <http://www.tldp.org/docs.html#howto>. | ||
12 | |||
13 | Note that the answer to this question doesn't directly affect the | ||
14 | kernel: saying N will just cause the configurator to skip all | ||
15 | the questions about DEC cards. If you say Y, you will be asked for | ||
16 | your specific card in the following questions. | ||
17 | |||
18 | if NET_VENDOR_DEC | ||
19 | |||
20 | config EWRK3 | ||
21 | tristate "EtherWORKS 3 (DE203, DE204, DE205) support" | ||
22 | depends on ISA | ||
23 | select CRC32 | ||
24 | ---help--- | ||
25 | This driver supports the DE203, DE204 and DE205 network (Ethernet) | ||
26 | cards. If this is for you, say Y and read | ||
27 | <file:Documentation/networking/ewrk3.txt> in the kernel source as | ||
28 | well as the Ethernet-HOWTO, available from | ||
29 | <http://www.tldp.org/docs.html#howto>. | ||
30 | |||
31 | To compile this driver as a module, choose M here. The module | ||
32 | will be called ewrk3. | ||
33 | |||
34 | source "drivers/net/ethernet/dec/tulip/Kconfig" | ||
35 | |||
36 | endif # NET_VENDOR_DEC | ||
diff --git a/drivers/net/ethernet/dec/Makefile b/drivers/net/ethernet/dec/Makefile
new file mode 100644
index 000000000000..1b01ed8d42c8
--- /dev/null
+++ b/drivers/net/ethernet/dec/Makefile
@@ -0,0 +1,6 @@
1 | # | ||
2 | # Makefile for the Digital Equipment Inc. network device drivers. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_EWRK3) += ewrk3.o | ||
6 | obj-$(CONFIG_NET_TULIP) += tulip/ | ||
diff --git a/drivers/net/ethernet/dec/ewrk3.c b/drivers/net/ethernet/dec/ewrk3.c
new file mode 100644
index 000000000000..05a5f71451a7
--- /dev/null
+++ b/drivers/net/ethernet/dec/ewrk3.c
@@ -0,0 +1,1959 @@
1 | /* ewrk3.c: A DIGITAL EtherWORKS 3 ethernet driver for Linux. | ||
2 | |||
3 | Written 1994 by David C. Davies. | ||
4 | |||
5 | Copyright 1994 Digital Equipment Corporation. | ||
6 | |||
7 | This software may be used and distributed according to the terms of | ||
8 | the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | This driver is written for the Digital Equipment Corporation series | ||
11 | of EtherWORKS ethernet cards: | ||
12 | |||
13 | DE203 Turbo (BNC) | ||
14 | DE204 Turbo (TP) | ||
15 | DE205 Turbo (TP BNC) | ||
16 | |||
17 | The driver has been tested on a relatively busy network using the DE205 | ||
18 | card and benchmarked with 'ttcp': it transferred 16M of data at 975kB/s | ||
19 | (7.8Mb/s) to a DECstation 5000/200. | ||
20 | |||
21 | The author may be reached at davies@maniac.ultranet.com. | ||
22 | |||
23 | ========================================================================= | ||
24 | This driver has been written substantially from scratch, although its | ||
25 | inheritance of style and stack interface from 'depca.c' and in turn from | ||
26 | Donald Becker's 'lance.c' should be obvious. | ||
27 | |||
28 | The DE203/4/5 boards all use a new proprietary chip in place of the | ||
29 | LANCE chip used in prior cards (DEPCA, DE100, DE200/1/2, DE210, DE422). | ||
30 | Use the depca.c driver in the standard distribution for the LANCE based | ||
31 | cards from DIGITAL; this driver will not work with them. | ||
32 | |||
33 | The DE203/4/5 cards have 2 main modes: shared memory and I/O only. I/O | ||
34 | only makes all the card accesses through I/O transactions and no high | ||
35 | (shared) memory is used. This mode provides a >48% performance penalty | ||
36 | and is deprecated in this driver, although allowed to provide initial | ||
37 | setup when hardstrapped. | ||
38 | |||
39 | The shared memory mode comes in 3 flavours: 2kB, 32kB and 64kB. There is | ||
40 | no point in using any mode other than the 2kB mode - their performances | ||
41 | are virtually identical, although the driver has been tested in the 2kB | ||
42 | and 32kB modes. I would suggest you uncomment the line: | ||
43 | |||
44 | FORCE_2K_MODE; | ||
45 | |||
46 | to allow the driver to configure the card as a 2kB card at your current | ||
47 | base address, thus leaving more room to clutter your system box with | ||
48 | other memory hungry boards. | ||
49 | |||
50 | As many ISA and EISA cards can be supported under this driver as you | ||
51 | wish, limited primarily by the available IRQ lines, rather than by the | ||
52 | available I/O addresses (24 ISA, 16 EISA). I have checked different | ||
53 | configurations of multiple depca cards and ewrk3 cards and have not | ||
54 | found a problem yet (provided you have at least depca.c v0.38) ... | ||
55 | |||
56 | The board IRQ setting must be at an unused IRQ which is auto-probed | ||
57 | using Donald Becker's autoprobe routines. All these cards are at | ||
58 | {5,10,11,15}. | ||
59 | |||
60 | No 16MB memory limitation should exist with this driver as DMA is not | ||
61 | used and the common memory area is in low memory on the network card (my | ||
62 | current system has 20MB and I've not had problems yet). | ||
63 | |||
64 | The ability to load this driver as a loadable module has been included | ||
65 | and used extensively during the driver development (to save those long | ||
66 | reboot sequences). To utilise this ability, you have to do 8 things: | ||
67 | |||
68 | 0) have a copy of the loadable modules code installed on your system. | ||
69 | 1) copy ewrk3.c from the /linux/drivers/net directory to your favourite | ||
70 | temporary directory. | ||
71 | 2) edit the source code near line 1898 to reflect the I/O address and | ||
72 | IRQ you're using. | ||
73 | 3) compile ewrk3.c, but include -DMODULE in the command line to ensure | ||
74 | that the correct bits are compiled (see end of source code). | ||
75 | 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a | ||
76 | kernel with the ewrk3 configuration turned off and reboot. | ||
77 | 5) insmod ewrk3.o | ||
78 | [Alan Cox: Changed this so you can insmod ewrk3.o irq=x io=y] | ||
79 | [Adam Kropelin: now accepts irq=x1,x2 io=y1,y2 for multiple cards] | ||
80 | 6) run the net startup bits for your new eth?? interface manually | ||
81 | (usually /etc/rc.inet[12] at boot time). | ||
82 | 7) enjoy! | ||
83 | |||
84 | Note that autoprobing is not allowed in loadable modules - the system is | ||
85 | already up and running and you're messing with interrupts. | ||
86 | |||
87 | To unload a module, turn off the associated interface | ||
88 | 'ifconfig eth?? down' then 'rmmod ewrk3'. | ||
89 | |||
90 | Promiscuous mode has been turned off in this driver, but all the | ||
91 | multicast address bits have been turned on. This improved the send | ||
92 | performance on a busy network by about 13%. | ||
93 | |||
94 | Ioctl's have now been provided (primarily because I wanted to grab some | ||
95 | packet size statistics). They are patterned after 'plipconfig.c' from a | ||
96 | suggestion by Alan Cox. Using these ioctls, you can enable promiscuous | ||
97 | mode, add/delete multicast addresses, change the hardware address, get | ||
98 | packet size distribution statistics and muck around with the control and | ||
99 | status register. I'll add others if and when the need arises. | ||
100 | |||
101 | TO DO: | ||
102 | ------ | ||
103 | |||
104 | |||
105 | Revision History | ||
106 | ---------------- | ||
107 | |||
108 | Version Date Description | ||
109 | |||
110 | 0.1 26-aug-94 Initial writing. ALPHA code release. | ||
111 | 0.11 31-aug-94 Fixed: 2k mode memory base calc., | ||
112 | LeMAC version calc., | ||
113 | IRQ vector assignments during autoprobe. | ||
114 | 0.12 31-aug-94 Tested working on LeMAC2 (DE20[345]-AC) card. | ||
115 | Fixed up MCA hash table algorithm. | ||
116 | 0.20 4-sep-94 Added IOCTL functionality. | ||
117 | 0.21 14-sep-94 Added I/O mode. | ||
118 | 0.21axp 15-sep-94 Special version for ALPHA AXP Linux V1.0. | ||
119 | 0.22 16-sep-94 Added more IOCTLs & tidied up. | ||
120 | 0.23 21-sep-94 Added transmit cut through. | ||
121 | 0.24 31-oct-94 Added uid checks in some ioctls. | ||
122 | 0.30 1-nov-94 BETA code release. | ||
123 | 0.31 5-dec-94 Added check/allocate region code. | ||
124 | 0.32 16-jan-95 Broadcast packet fix. | ||
125 | 0.33 10-Feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>. | ||
126 | 0.40 27-Dec-95 Rationalise MODULE and autoprobe code. | ||
127 | Rewrite for portability & updated. | ||
128 | ALPHA support from <jestabro@amt.tay1.dec.com> | ||
129 | Added verify_area() calls in ewrk3_ioctl() from | ||
130 | suggestion by <heiko@colossus.escape.de>. | ||
131 | Add new multicasting code. | ||
132 | 0.41 20-Jan-96 Fix IRQ set up problem reported by | ||
133 | <kenneth@bbs.sas.ntu.ac.sg>. | ||
134 | 0.42 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi> | ||
135 | 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c | ||
136 | 0.44 08-Nov-01 use library crc32 functions <Matt_Domsch@dell.com> | ||
137 | 0.45 19-Jul-02 fix unaligned access on alpha <martin@bruli.net> | ||
138 | 0.46 10-Oct-02 Multiple NIC support when module <akropel1@rochester.rr.com> | ||
139 | 0.47 18-Oct-02 ethtool support <akropel1@rochester.rr.com> | ||
140 | 0.48 18-Oct-02 cli/sti removal for 2.5 <vda@port.imtp.ilyichevsk.odessa.ua> | ||
141 | ioctl locking, signature search cleanup <akropel1@rochester.rr.com> | ||
142 | |||
143 | ========================================================================= | ||
144 | */ | ||
145 | |||
146 | #include <linux/module.h> | ||
147 | #include <linux/kernel.h> | ||
148 | #include <linux/sched.h> | ||
149 | #include <linux/string.h> | ||
150 | #include <linux/errno.h> | ||
151 | #include <linux/ioport.h> | ||
152 | #include <linux/slab.h> | ||
153 | #include <linux/interrupt.h> | ||
154 | #include <linux/delay.h> | ||
155 | #include <linux/init.h> | ||
156 | #include <linux/crc32.h> | ||
157 | #include <linux/netdevice.h> | ||
158 | #include <linux/etherdevice.h> | ||
159 | #include <linux/skbuff.h> | ||
160 | #include <linux/ethtool.h> | ||
161 | #include <linux/time.h> | ||
162 | #include <linux/types.h> | ||
163 | #include <linux/unistd.h> | ||
164 | #include <linux/ctype.h> | ||
165 | #include <linux/bitops.h> | ||
166 | |||
167 | #include <asm/io.h> | ||
168 | #include <asm/dma.h> | ||
169 | #include <asm/uaccess.h> | ||
170 | |||
171 | #include "ewrk3.h" | ||
172 | |||
173 | #define DRV_NAME "ewrk3" | ||
174 | #define DRV_VERSION "0.48" | ||
175 | |||
176 | static char version[] __initdata = | ||
177 | DRV_NAME ":v" DRV_VERSION " 2002/10/18 davies@maniac.ultranet.com\n"; | ||
178 | |||
179 | #ifdef EWRK3_DEBUG | ||
180 | static int ewrk3_debug = EWRK3_DEBUG; | ||
181 | #else | ||
182 | static int ewrk3_debug = 1; | ||
183 | #endif | ||
184 | |||
185 | #define EWRK3_NDA 0xffe0 /* No Device Address */ | ||
186 | |||
187 | #define PROBE_LENGTH 32 | ||
188 | #define ETH_PROM_SIG 0xAA5500FFUL | ||
189 | |||
190 | #ifndef EWRK3_SIGNATURE | ||
191 | #define EWRK3_SIGNATURE {"DE203","DE204","DE205",""} | ||
192 | #define EWRK3_STRLEN 8 | ||
193 | #endif | ||
194 | |||
195 | #ifndef EWRK3_RAM_BASE_ADDRESSES | ||
196 | #define EWRK3_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0x00000} | ||
197 | #endif | ||
198 | |||
199 | /* | ||
200 | ** Sets up the I/O area for the autoprobe. | ||
201 | */ | ||
202 | #define EWRK3_IO_BASE 0x100 /* Start address for probe search */ | ||
203 | #define EWRK3_IOP_INC 0x20 /* I/O address increment */ | ||
204 | #define EWRK3_TOTAL_SIZE 0x20 /* required I/O address length */ | ||
205 | |||
206 | #ifndef MAX_NUM_EWRK3S | ||
207 | #define MAX_NUM_EWRK3S 21 | ||
208 | #endif | ||
209 | |||
210 | #ifndef EWRK3_EISA_IO_PORTS | ||
211 | #define EWRK3_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ | ||
212 | #endif | ||
213 | |||
214 | #ifndef MAX_EISA_SLOTS | ||
215 | #define MAX_EISA_SLOTS 16 | ||
216 | #define EISA_SLOT_INC 0x1000 | ||
217 | #endif | ||
218 | |||
219 | #define QUEUE_PKT_TIMEOUT (1*HZ) /* Jiffies */ | ||
220 | |||
221 | /* | ||
222 | ** EtherWORKS 3 shared memory window sizes | ||
223 | */ | ||
224 | #define IO_ONLY 0x00 | ||
225 | #define SHMEM_2K 0x800 | ||
226 | #define SHMEM_32K 0x8000 | ||
227 | #define SHMEM_64K 0x10000 | ||
228 | |||
229 | /* | ||
230 | ** EtherWORKS 3 IRQ ENABLE/DISABLE | ||
231 | */ | ||
232 | #define ENABLE_IRQs { \ | ||
233 | icr |= lp->irq_mask;\ | ||
234 | outb(icr, EWRK3_ICR); /* Enable the IRQs */\ | ||
235 | } | ||
236 | |||
237 | #define DISABLE_IRQs { \ | ||
238 | icr = inb(EWRK3_ICR);\ | ||
239 | icr &= ~lp->irq_mask;\ | ||
240 | outb(icr, EWRK3_ICR); /* Disable the IRQs */\ | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | ** EtherWORKS 3 START/STOP | ||
245 | */ | ||
246 | #define START_EWRK3 { \ | ||
247 | csr = inb(EWRK3_CSR);\ | ||
248 | csr &= ~(CSR_TXD|CSR_RXD);\ | ||
249 | outb(csr, EWRK3_CSR); /* Enable the TX and/or RX */\ | ||
250 | } | ||
251 | |||
252 | #define STOP_EWRK3 { \ | ||
253 | csr = (CSR_TXD|CSR_RXD);\ | ||
254 | outb(csr, EWRK3_CSR); /* Disable the TX and/or RX */\ | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | ** The EtherWORKS 3 private structure | ||
259 | */ | ||
260 | #define EWRK3_PKT_STAT_SZ 16 | ||
261 | #define EWRK3_PKT_BIN_SZ 128 /* Should be >=100 unless you | ||
262 | increase EWRK3_PKT_STAT_SZ */ | ||
263 | |||
264 | struct ewrk3_stats { | ||
265 | u32 bins[EWRK3_PKT_STAT_SZ]; | ||
266 | u32 unicast; | ||
267 | u32 multicast; | ||
268 | u32 broadcast; | ||
269 | u32 excessive_collisions; | ||
270 | u32 tx_underruns; | ||
271 | u32 excessive_underruns; | ||
272 | }; | ||
273 | |||
274 | struct ewrk3_private { | ||
275 | char adapter_name[80]; /* Name exported to /proc/ioports */ | ||
276 | u_long shmem_base; /* Shared memory start address */ | ||
277 | void __iomem *shmem; | ||
278 | u_long shmem_length; /* Shared memory window length */ | ||
279 | struct ewrk3_stats pktStats; /* Private stats counters */ | ||
280 | u_char irq_mask; /* Adapter IRQ mask bits */ | ||
281 | u_char mPage; /* Maximum 2kB Page number */ | ||
282 | u_char lemac; /* Chip rev. level */ | ||
283 | u_char hard_strapped; /* Don't allow a full open */ | ||
284 | u_char txc; /* Transmit cut through */ | ||
285 | void __iomem *mctbl; /* Pointer to the multicast table */ | ||
286 | u_char led_mask; /* Used to reserve LED access for ethtool */ | ||
287 | spinlock_t hw_lock; | ||
288 | }; | ||
289 | |||
290 | /* | ||
291 | ** Force the EtherWORKS 3 card to be in 2kB MODE | ||
292 | */ | ||
293 | #define FORCE_2K_MODE { \ | ||
294 | shmem_length = SHMEM_2K;\ | ||
295 | outb(((mem_start - 0x80000) >> 11), EWRK3_MBR);\ | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | ** Public Functions | ||
300 | */ | ||
301 | static int ewrk3_open(struct net_device *dev); | ||
302 | static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev); | ||
303 | static irqreturn_t ewrk3_interrupt(int irq, void *dev_id); | ||
304 | static int ewrk3_close(struct net_device *dev); | ||
305 | static void set_multicast_list(struct net_device *dev); | ||
306 | static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
307 | static const struct ethtool_ops ethtool_ops_203; | ||
308 | static const struct ethtool_ops ethtool_ops; | ||
309 | |||
310 | /* | ||
311 | ** Private functions | ||
312 | */ | ||
313 | static int ewrk3_hw_init(struct net_device *dev, u_long iobase); | ||
314 | static void ewrk3_init(struct net_device *dev); | ||
315 | static int ewrk3_rx(struct net_device *dev); | ||
316 | static int ewrk3_tx(struct net_device *dev); | ||
317 | static void ewrk3_timeout(struct net_device *dev); | ||
318 | |||
319 | static void EthwrkSignature(char *name, char *eeprom_image); | ||
320 | static int DevicePresent(u_long iobase); | ||
321 | static void SetMulticastFilter(struct net_device *dev); | ||
322 | static int EISA_signature(char *name, s32 eisa_id); | ||
323 | |||
324 | static int Read_EEPROM(u_long iobase, u_char eaddr); | ||
325 | static int Write_EEPROM(short data, u_long iobase, u_char eaddr); | ||
326 | static u_char get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType); | ||
327 | |||
328 | static int ewrk3_probe1(struct net_device *dev, u_long iobase, int irq); | ||
329 | static int isa_probe(struct net_device *dev, u_long iobase); | ||
330 | static int eisa_probe(struct net_device *dev, u_long iobase); | ||
331 | |||
332 | static u_char irq[MAX_NUM_EWRK3S+1] = {5, 0, 10, 3, 11, 9, 15, 12}; | ||
333 | |||
334 | static char name[EWRK3_STRLEN + 1]; | ||
335 | static int num_ewrks3s; | ||
336 | |||
337 | /* | ||
338 | ** Miscellaneous defines... | ||
339 | */ | ||
340 | #define INIT_EWRK3 {\ | ||
341 | outb(EEPROM_INIT, EWRK3_IOPR);\ | ||
342 | mdelay(1);\ | ||
343 | } | ||
344 | |||
345 | #ifndef MODULE | ||
346 | struct net_device * __init ewrk3_probe(int unit) | ||
347 | { | ||
348 | struct net_device *dev = alloc_etherdev(sizeof(struct ewrk3_private)); | ||
349 | int err; | ||
350 | |||
351 | if (!dev) | ||
352 | return ERR_PTR(-ENOMEM); | ||
353 | |||
354 | if (unit >= 0) { | ||
355 | sprintf(dev->name, "eth%d", unit); | ||
356 | netdev_boot_setup_check(dev); | ||
357 | } | ||
358 | |||
359 | err = ewrk3_probe1(dev, dev->base_addr, dev->irq); | ||
360 | if (err) | ||
361 | goto out; | ||
362 | return dev; | ||
363 | out: | ||
364 | free_netdev(dev); | ||
365 | return ERR_PTR(err); | ||
366 | |||
367 | } | ||
368 | #endif | ||
369 | |||
370 | static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq) | ||
371 | { | ||
372 | int err; | ||
373 | |||
374 | dev->base_addr = iobase; | ||
375 | dev->irq = irq; | ||
376 | |||
377 | /* Address PROM pattern */ | ||
378 | err = isa_probe(dev, iobase); | ||
379 | if (err != 0) | ||
380 | err = eisa_probe(dev, iobase); | ||
381 | |||
382 | if (err) | ||
383 | return err; | ||
384 | |||
385 | err = register_netdev(dev); | ||
386 | if (err) | ||
387 | release_region(dev->base_addr, EWRK3_TOTAL_SIZE); | ||
388 | |||
389 | return err; | ||
390 | } | ||
391 | |||
392 | static const struct net_device_ops ewrk3_netdev_ops = { | ||
393 | .ndo_open = ewrk3_open, | ||
394 | .ndo_start_xmit = ewrk3_queue_pkt, | ||
395 | .ndo_stop = ewrk3_close, | ||
396 | .ndo_set_multicast_list = set_multicast_list, | ||
397 | .ndo_do_ioctl = ewrk3_ioctl, | ||
398 | .ndo_tx_timeout = ewrk3_timeout, | ||
399 | .ndo_change_mtu = eth_change_mtu, | ||
400 | .ndo_set_mac_address = eth_mac_addr, | ||
401 | .ndo_validate_addr = eth_validate_addr, | ||
402 | }; | ||
403 | |||
404 | static int __init | ||
405 | ewrk3_hw_init(struct net_device *dev, u_long iobase) | ||
406 | { | ||
407 | struct ewrk3_private *lp; | ||
408 | int i, status = 0; | ||
409 | u_long mem_start, shmem_length; | ||
410 | u_char cr, cmr, icr, nicsr, lemac, hard_strapped = 0; | ||
411 | u_char eeprom_image[EEPROM_MAX], chksum, eisa_cr = 0; | ||
412 | |||
413 | /* | ||
414 | ** Stop the EWRK3. Enable the DBR ROM. Disable interrupts and remote boot. | ||
415 | ** This also disables the EISA_ENABLE bit in the EISA Control Register. | ||
416 | */ | ||
417 | if (iobase > 0x400) | ||
418 | eisa_cr = inb(EISA_CR); | ||
419 | INIT_EWRK3; | ||
420 | |||
421 | nicsr = inb(EWRK3_CSR); | ||
422 | |||
423 | icr = inb(EWRK3_ICR); | ||
424 | icr &= 0x70; | ||
425 | outb(icr, EWRK3_ICR); /* Disable all the IRQs */ | ||
426 | |||
427 | if (nicsr != (CSR_TXD | CSR_RXD)) | ||
428 | return -ENXIO; | ||
429 | |||
430 | /* Check that the EEPROM is alive and well and not living on Pluto... */ | ||
431 | for (chksum = 0, i = 0; i < EEPROM_MAX; i += 2) { | ||
432 | union { | ||
433 | short val; | ||
434 | char c[2]; | ||
435 | } tmp; | ||
436 | |||
437 | tmp.val = (short) Read_EEPROM(iobase, (i >> 1)); | ||
438 | eeprom_image[i] = tmp.c[0]; | ||
439 | eeprom_image[i + 1] = tmp.c[1]; | ||
440 | chksum += eeprom_image[i] + eeprom_image[i + 1]; | ||
441 | } | ||
442 | |||
443 | if (chksum != 0) { /* Bad EEPROM Data! */ | ||
444 | printk("%s: Device has a bad on-board EEPROM.\n", dev->name); | ||
445 | return -ENXIO; | ||
446 | } | ||
447 | |||
448 | EthwrkSignature(name, eeprom_image); | ||
449 | if (*name == '\0') | ||
450 | return -ENXIO; | ||
451 | |||
452 | dev->base_addr = iobase; | ||
453 | |||
454 | if (iobase > 0x400) { | ||
455 | outb(eisa_cr, EISA_CR); /* Rewrite the EISA CR */ | ||
456 | } | ||
457 | lemac = eeprom_image[EEPROM_CHIPVER]; | ||
458 | cmr = inb(EWRK3_CMR); | ||
459 | |||
460 | if (((lemac == LeMAC) && ((cmr & CMR_NO_EEPROM) != CMR_NO_EEPROM)) || | ||
461 | ((lemac == LeMAC2) && !(cmr & CMR_HS))) { | ||
462 | printk("%s: %s at %#4lx", dev->name, name, iobase); | ||
463 | hard_strapped = 1; | ||
464 | } else if ((iobase & 0x0fff) == EWRK3_EISA_IO_PORTS) { | ||
465 | /* EISA slot address */ | ||
466 | printk("%s: %s at %#4lx (EISA slot %ld)", | ||
467 | dev->name, name, iobase, ((iobase >> 12) & 0x0f)); | ||
468 | } else { /* ISA port address */ | ||
469 | printk("%s: %s at %#4lx", dev->name, name, iobase); | ||
470 | } | ||
471 | |||
472 | printk(", h/w address "); | ||
473 | if (lemac != LeMAC2) | ||
474 | DevicePresent(iobase); /* need after EWRK3_INIT */ | ||
475 | status = get_hw_addr(dev, eeprom_image, lemac); | ||
476 | printk("%pM\n", dev->dev_addr); | ||
477 | |||
478 | if (status) { | ||
479 | printk(" which has an EEPROM CRC error.\n"); | ||
480 | return -ENXIO; | ||
481 | } | ||
482 | |||
483 | if (lemac == LeMAC2) { /* Special LeMAC2 CMR things */ | ||
484 | cmr &= ~(CMR_RA | CMR_WB | CMR_LINK | CMR_POLARITY | CMR_0WS); | ||
485 | if (eeprom_image[EEPROM_MISC0] & READ_AHEAD) | ||
486 | cmr |= CMR_RA; | ||
487 | if (eeprom_image[EEPROM_MISC0] & WRITE_BEHIND) | ||
488 | cmr |= CMR_WB; | ||
489 | if (eeprom_image[EEPROM_NETMAN0] & NETMAN_POL) | ||
490 | cmr |= CMR_POLARITY; | ||
491 | if (eeprom_image[EEPROM_NETMAN0] & NETMAN_LINK) | ||
492 | cmr |= CMR_LINK; | ||
493 | if (eeprom_image[EEPROM_MISC0] & _0WS_ENA) | ||
494 | cmr |= CMR_0WS; | ||
495 | } | ||
496 | if (eeprom_image[EEPROM_SETUP] & SETUP_DRAM) | ||
497 | cmr |= CMR_DRAM; | ||
498 | outb(cmr, EWRK3_CMR); | ||
499 | |||
500 | cr = inb(EWRK3_CR); /* Set up the Control Register */ | ||
501 | cr |= eeprom_image[EEPROM_SETUP] & SETUP_APD; | ||
502 | if (cr & SETUP_APD) | ||
503 | cr |= eeprom_image[EEPROM_SETUP] & SETUP_PS; | ||
504 | cr |= eeprom_image[EEPROM_MISC0] & FAST_BUS; | ||
505 | cr |= eeprom_image[EEPROM_MISC0] & ENA_16; | ||
506 | outb(cr, EWRK3_CR); | ||
507 | |||
508 | /* | ||
509 | ** Determine the base address and window length for the EWRK3 | ||
510 | ** RAM from the memory base register. | ||
511 | */ | ||
512 | mem_start = inb(EWRK3_MBR); | ||
513 | shmem_length = 0; | ||
514 | if (mem_start != 0) { | ||
515 | if ((mem_start >= 0x0a) && (mem_start <= 0x0f)) { | ||
516 | mem_start *= SHMEM_64K; | ||
517 | shmem_length = SHMEM_64K; | ||
518 | } else if ((mem_start >= 0x14) && (mem_start <= 0x1f)) { | ||
519 | mem_start *= SHMEM_32K; | ||
520 | shmem_length = SHMEM_32K; | ||
521 | } else if ((mem_start >= 0x40) && (mem_start <= 0xff)) { | ||
522 | mem_start = mem_start * SHMEM_2K + 0x80000; | ||
523 | shmem_length = SHMEM_2K; | ||
524 | } else { | ||
525 | return -ENXIO; | ||
526 | } | ||
527 | } | ||
528 | /* | ||
529 | ** See the top of this source code for comments about | ||
530 | ** uncommenting this line. | ||
531 | */ | ||
532 | /* FORCE_2K_MODE; */ | ||
533 | |||
534 | if (hard_strapped) { | ||
535 | printk(" is hard strapped.\n"); | ||
536 | } else if (mem_start) { | ||
537 | printk(" has a %dk RAM window", (int) (shmem_length >> 10)); | ||
538 | printk(" at 0x%.5lx", mem_start); | ||
539 | } else { | ||
540 | printk(" is in I/O only mode"); | ||
541 | } | ||
542 | |||
543 | lp = netdev_priv(dev); | ||
544 | lp->shmem_base = mem_start; | ||
545 | lp->shmem = ioremap(mem_start, shmem_length); | ||
546 | if (!lp->shmem) | ||
547 | return -ENOMEM; | ||
548 | lp->shmem_length = shmem_length; | ||
549 | lp->lemac = lemac; | ||
550 | lp->hard_strapped = hard_strapped; | ||
551 | lp->led_mask = CR_LED; | ||
552 | spin_lock_init(&lp->hw_lock); | ||
553 | |||
554 | lp->mPage = 64; | ||
555 | if (cmr & CMR_DRAM) | ||
556 | lp->mPage <<= 1; /* 2 DRAMS on module */ | ||
557 | |||
558 | sprintf(lp->adapter_name, "%s (%s)", name, dev->name); | ||
559 | |||
560 | lp->irq_mask = ICR_TNEM | ICR_TXDM | ICR_RNEM | ICR_RXDM; | ||
561 | |||
562 | if (!hard_strapped) { | ||
563 | /* | ||
564 | ** Enable EWRK3 board interrupts for autoprobing | ||
565 | */ | ||
566 | icr |= ICR_IE; /* Enable interrupts */ | ||
567 | outb(icr, EWRK3_ICR); | ||
568 | |||
569 | /* The DMA channel may be passed in on this parameter. */ | ||
570 | dev->dma = 0; | ||
571 | |||
572 | /* To auto-IRQ we enable the initialization-done and DMA err, | ||
573 | interrupts. For now we will always get a DMA error. */ | ||
574 | if (dev->irq < 2) { | ||
575 | #ifndef MODULE | ||
576 | u_char irqnum; | ||
577 | unsigned long irq_mask; | ||
578 | |||
579 | |||
580 | irq_mask = probe_irq_on(); | ||
581 | |||
582 | /* | ||
583 | ** Trigger a TNE interrupt. | ||
584 | */ | ||
585 | icr |= ICR_TNEM; | ||
586 | outb(1, EWRK3_TDQ); /* Write to the TX done queue */ | ||
587 | outb(icr, EWRK3_ICR); /* Unmask the TXD interrupt */ | ||
588 | |||
589 | irqnum = irq[((icr & IRQ_SEL) >> 4)]; | ||
590 | |||
591 | mdelay(20); | ||
592 | dev->irq = probe_irq_off(irq_mask); | ||
593 | if ((dev->irq) && (irqnum == dev->irq)) { | ||
594 | printk(" and uses IRQ%d.\n", dev->irq); | ||
595 | } else { | ||
596 | if (!dev->irq) { | ||
597 | printk(" and failed to detect IRQ line.\n"); | ||
598 | } else if ((irqnum == 1) && (lemac == LeMAC2)) { | ||
599 | printk(" and an illegal IRQ line detected.\n"); | ||
600 | } else { | ||
601 | printk(", but incorrect IRQ line detected.\n"); | ||
602 | } | ||
603 | iounmap(lp->shmem); | ||
604 | return -ENXIO; | ||
605 | } | ||
606 | |||
607 | DISABLE_IRQs; /* Mask all interrupts */ | ||
608 | |||
609 | #endif /* MODULE */ | ||
610 | } else { | ||
611 | printk(" and requires IRQ%d.\n", dev->irq); | ||
612 | } | ||
613 | } | ||
614 | |||
615 | if (ewrk3_debug > 1) { | ||
616 | printk(version); | ||
617 | } | ||
618 | /* The EWRK3-specific entries in the device structure. */ | ||
619 | dev->netdev_ops = &ewrk3_netdev_ops; | ||
620 | if (lp->adapter_name[4] == '3') | ||
621 | SET_ETHTOOL_OPS(dev, ðtool_ops_203); | ||
622 | else | ||
623 | SET_ETHTOOL_OPS(dev, ðtool_ops); | ||
624 | dev->watchdog_timeo = QUEUE_PKT_TIMEOUT; | ||
625 | |||
626 | dev->mem_start = 0; | ||
627 | |||
628 | return 0; | ||
629 | } | ||
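[Editor's note, not part of this commit: a small standalone sketch of the EWRK3_MBR decoding that ewrk3_hw_init() performs above, plus the inverse encoding the FORCE_2K_MODE macro writes back. The helper names (decode_mbr, encode_2k_mbr) and the sample MBR values are invented for illustration; the window constants mirror SHMEM_2K/32K/64K from the driver.]

#include <stdio.h>

/* Window sizes as in ewrk3.c */
#define SHMEM_2K   0x800
#define SHMEM_32K  0x8000
#define SHMEM_64K  0x10000

/* Decode a memory base register value the way ewrk3_hw_init() does:
 * 0x0a-0x0f -> 64kB window, 0x14-0x1f -> 32kB window, 0x40-0xff -> 2kB
 * window at mbr*2kB above 0x80000. Returns the window length and writes
 * the RAM base to *mem_start (0 means I/O-only mode, -1 an illegal MBR). */
static long decode_mbr(unsigned char mbr, unsigned long *mem_start)
{
    if (mbr == 0) {
        *mem_start = 0;
        return 0;                               /* I/O only */
    } else if (mbr >= 0x0a && mbr <= 0x0f) {
        *mem_start = (unsigned long)mbr * SHMEM_64K;
        return SHMEM_64K;
    } else if (mbr >= 0x14 && mbr <= 0x1f) {
        *mem_start = (unsigned long)mbr * SHMEM_32K;
        return SHMEM_32K;
    } else if (mbr >= 0x40) {
        *mem_start = (unsigned long)mbr * SHMEM_2K + 0x80000;
        return SHMEM_2K;
    }
    return -1;
}

/* Inverse of the 2kB case: the value FORCE_2K_MODE writes to EWRK3_MBR. */
static unsigned char encode_2k_mbr(unsigned long mem_start)
{
    return (unsigned char)((mem_start - 0x80000) >> 11);
}

int main(void)
{
    unsigned long base;
    long len;

    len = decode_mbr(0x0c, &base);              /* 64kB window at 0xc0000 */
    printf("MBR 0x0c -> base 0x%05lx, %ldkB window\n", base, len >> 10);

    len = decode_mbr(0xc0, &base);              /* 2kB window at 0xe0000 */
    printf("MBR 0xc0 -> base 0x%05lx, %ldkB window\n", base, len >> 10);

    printf("2kB MBR for base 0x%05lx -> 0x%02x\n", base, encode_2k_mbr(base));
    return 0;
}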
630 | |||
631 | |||
632 | static int ewrk3_open(struct net_device *dev) | ||
633 | { | ||
634 | struct ewrk3_private *lp = netdev_priv(dev); | ||
635 | u_long iobase = dev->base_addr; | ||
636 | int status = 0; | ||
637 | u_char icr, csr; | ||
638 | |||
639 | /* | ||
640 | ** Stop the TX and RX... | ||
641 | */ | ||
642 | STOP_EWRK3; | ||
643 | |||
644 | if (!lp->hard_strapped) { | ||
645 | if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) { | ||
646 | printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq); | ||
647 | status = -EAGAIN; | ||
648 | } else { | ||
649 | |||
650 | /* | ||
651 | ** Re-initialize the EWRK3... | ||
652 | */ | ||
653 | ewrk3_init(dev); | ||
654 | |||
655 | if (ewrk3_debug > 1) { | ||
656 | printk("%s: ewrk3 open with irq %d\n", dev->name, dev->irq); | ||
657 | printk(" physical address: %pM\n", dev->dev_addr); | ||
658 | if (lp->shmem_length == 0) { | ||
659 | printk(" no shared memory, I/O only mode\n"); | ||
660 | } else { | ||
661 | printk(" start of shared memory: 0x%08lx\n", lp->shmem_base); | ||
662 | printk(" window length: 0x%04lx\n", lp->shmem_length); | ||
663 | } | ||
664 | printk(" # of DRAMS: %d\n", ((inb(EWRK3_CMR) & 0x02) ? 2 : 1)); | ||
665 | printk(" csr: 0x%02x\n", inb(EWRK3_CSR)); | ||
666 | printk(" cr: 0x%02x\n", inb(EWRK3_CR)); | ||
667 | printk(" icr: 0x%02x\n", inb(EWRK3_ICR)); | ||
668 | printk(" cmr: 0x%02x\n", inb(EWRK3_CMR)); | ||
669 | printk(" fmqc: 0x%02x\n", inb(EWRK3_FMQC)); | ||
670 | } | ||
671 | netif_start_queue(dev); | ||
672 | /* | ||
673 | ** Unmask EWRK3 board interrupts | ||
674 | */ | ||
675 | icr = inb(EWRK3_ICR); | ||
676 | ENABLE_IRQs; | ||
677 | |||
678 | } | ||
679 | } else { | ||
680 | printk(KERN_ERR "%s: ewrk3 available for hard strapped set up only.\n", dev->name); | ||
681 | printk(KERN_ERR " Run the 'ewrk3setup' utility or remove the hard straps.\n"); | ||
682 | return -EINVAL; | ||
683 | } | ||
684 | |||
685 | return status; | ||
686 | } | ||
687 | |||
688 | /* | ||
689 | ** Initialize the EtherWORKS 3 operating conditions | ||
690 | */ | ||
691 | static void ewrk3_init(struct net_device *dev) | ||
692 | { | ||
693 | struct ewrk3_private *lp = netdev_priv(dev); | ||
694 | u_char csr, page; | ||
695 | u_long iobase = dev->base_addr; | ||
696 | int i; | ||
697 | |||
698 | /* | ||
699 | ** Enable any multicasts | ||
700 | */ | ||
701 | set_multicast_list(dev); | ||
702 | |||
703 | /* | ||
704 | ** Set hardware MAC address. Address is initialized from the EEPROM | ||
705 | ** during startup but may have since been changed by the user. | ||
706 | */ | ||
707 | for (i=0; i<ETH_ALEN; i++) | ||
708 | outb(dev->dev_addr[i], EWRK3_PAR0 + i); | ||
709 | |||
710 | /* | ||
711 | ** Clean out any remaining entries in all the queues here | ||
712 | */ | ||
713 | while (inb(EWRK3_TQ)); | ||
714 | while (inb(EWRK3_TDQ)); | ||
715 | while (inb(EWRK3_RQ)); | ||
716 | while (inb(EWRK3_FMQ)); | ||
717 | |||
718 | /* | ||
719 | ** Write a clean free memory queue | ||
720 | */ | ||
721 | for (page = 1; page < lp->mPage; page++) { /* Write the free page numbers */ | ||
722 | outb(page, EWRK3_FMQ); /* to the Free Memory Queue */ | ||
723 | } | ||
724 | |||
725 | START_EWRK3; /* Enable the TX and/or RX */ | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * Transmit timeout | ||
730 | */ | ||
731 | |||
732 | static void ewrk3_timeout(struct net_device *dev) | ||
733 | { | ||
734 | struct ewrk3_private *lp = netdev_priv(dev); | ||
735 | u_char icr, csr; | ||
736 | u_long iobase = dev->base_addr; | ||
737 | |||
738 | if (!lp->hard_strapped) | ||
739 | { | ||
740 | printk(KERN_WARNING"%s: transmit timed/locked out, status %04x, resetting.\n", | ||
741 | dev->name, inb(EWRK3_CSR)); | ||
742 | |||
743 | /* | ||
744 | ** Mask all board interrupts | ||
745 | */ | ||
746 | DISABLE_IRQs; | ||
747 | |||
748 | /* | ||
749 | ** Stop the TX and RX... | ||
750 | */ | ||
751 | STOP_EWRK3; | ||
752 | |||
753 | ewrk3_init(dev); | ||
754 | |||
755 | /* | ||
756 | ** Unmask EWRK3 board interrupts | ||
757 | */ | ||
758 | ENABLE_IRQs; | ||
759 | |||
760 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
761 | netif_wake_queue(dev); | ||
762 | } | ||
763 | } | ||
764 | |||
765 | /* | ||
766 | ** Writes a socket buffer to the free page queue | ||
767 | */ | ||
768 | static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev) | ||
769 | { | ||
770 | struct ewrk3_private *lp = netdev_priv(dev); | ||
771 | u_long iobase = dev->base_addr; | ||
772 | void __iomem *buf = NULL; | ||
773 | u_char icr; | ||
774 | u_char page; | ||
775 | |||
776 | spin_lock_irq (&lp->hw_lock); | ||
777 | DISABLE_IRQs; | ||
778 | |||
779 | /* if no resources available, exit, request packet be queued */ | ||
780 | if (inb (EWRK3_FMQC) == 0) { | ||
781 | printk (KERN_WARNING "%s: ewrk3_queue_pkt(): No free resources...\n", | ||
782 | dev->name); | ||
783 | printk (KERN_WARNING "%s: ewrk3_queue_pkt(): CSR: %02x ICR: %02x FMQC: %02x\n", | ||
784 | dev->name, inb (EWRK3_CSR), inb (EWRK3_ICR), | ||
785 | inb (EWRK3_FMQC)); | ||
786 | goto err_out; | ||
787 | } | ||
788 | |||
789 | /* | ||
790 | ** Get a free page from the FMQ | ||
791 | */ | ||
792 | if ((page = inb (EWRK3_FMQ)) >= lp->mPage) { | ||
793 | printk ("ewrk3_queue_pkt(): Invalid free memory page (%d).\n", | ||
794 | (u_char) page); | ||
795 | goto err_out; | ||
796 | } | ||
797 | |||
798 | |||
799 | /* | ||
800 | ** Set up shared memory window and pointer into the window | ||
801 | */ | ||
802 | if (lp->shmem_length == IO_ONLY) { | ||
803 | outb (page, EWRK3_IOPR); | ||
804 | } else if (lp->shmem_length == SHMEM_2K) { | ||
805 | buf = lp->shmem; | ||
806 | outb (page, EWRK3_MPR); | ||
807 | } else if (lp->shmem_length == SHMEM_32K) { | ||
808 | buf = (((short) page << 11) & 0x7800) + lp->shmem; | ||
809 | outb ((page >> 4), EWRK3_MPR); | ||
810 | } else if (lp->shmem_length == SHMEM_64K) { | ||
811 | buf = (((short) page << 11) & 0xf800) + lp->shmem; | ||
812 | outb ((page >> 5), EWRK3_MPR); | ||
813 | } else { | ||
814 | printk (KERN_ERR "%s: Oops - your private data area is hosed!\n", | ||
815 | dev->name); | ||
816 | BUG (); | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | ** Set up the buffer control structures and copy the data from | ||
821 | ** the socket buffer to the shared memory . | ||
822 | */ | ||
823 | if (lp->shmem_length == IO_ONLY) { | ||
824 | int i; | ||
825 | u_char *p = skb->data; | ||
826 | outb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), EWRK3_DATA); | ||
827 | outb ((char) (skb->len & 0xff), EWRK3_DATA); | ||
828 | outb ((char) ((skb->len >> 8) & 0xff), EWRK3_DATA); | ||
829 | outb ((char) 0x04, EWRK3_DATA); | ||
830 | for (i = 0; i < skb->len; i++) { | ||
831 | outb (*p++, EWRK3_DATA); | ||
832 | } | ||
833 | outb (page, EWRK3_TQ); /* Start sending pkt */ | ||
834 | } else { | ||
835 | writeb ((char) (TCR_QMODE | TCR_PAD | TCR_IFC), buf); /* ctrl byte */ | ||
836 | buf += 1; | ||
837 | writeb ((char) (skb->len & 0xff), buf); /* length (16 bit xfer) */ | ||
838 | buf += 1; | ||
839 | if (lp->txc) { | ||
840 | writeb(((skb->len >> 8) & 0xff) | XCT, buf); | ||
841 | buf += 1; | ||
842 | writeb (0x04, buf); /* index byte */ | ||
843 | buf += 1; | ||
844 | writeb (0x00, (buf + skb->len)); /* Write the XCT flag */ | ||
845 | memcpy_toio (buf, skb->data, PRELOAD); /* Write PRELOAD bytes */ | ||
846 | outb (page, EWRK3_TQ); /* Start sending pkt */ | ||
847 | memcpy_toio (buf + PRELOAD, | ||
848 | skb->data + PRELOAD, | ||
849 | skb->len - PRELOAD); | ||
850 | writeb (0xff, (buf + skb->len)); /* Write the XCT flag */ | ||
851 | } else { | ||
852 | writeb ((skb->len >> 8) & 0xff, buf); | ||
853 | buf += 1; | ||
854 | writeb (0x04, buf); /* index byte */ | ||
855 | buf += 1; | ||
856 | memcpy_toio (buf, skb->data, skb->len); /* Write data bytes */ | ||
857 | outb (page, EWRK3_TQ); /* Start sending pkt */ | ||
858 | } | ||
859 | } | ||
860 | |||
861 | ENABLE_IRQs; | ||
862 | spin_unlock_irq (&lp->hw_lock); | ||
863 | |||
864 | dev->stats.tx_bytes += skb->len; | ||
865 | dev_kfree_skb (skb); | ||
866 | |||
867 | /* Check for free resources: stop Tx queue if there are none */ | ||
868 | if (inb (EWRK3_FMQC) == 0) | ||
869 | netif_stop_queue (dev); | ||
870 | |||
871 | return NETDEV_TX_OK; | ||
872 | |||
873 | err_out: | ||
874 | ENABLE_IRQs; | ||
875 | spin_unlock_irq (&lp->hw_lock); | ||
876 | return NETDEV_TX_BUSY; | ||
877 | } | ||
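[Editor's note, not part of this commit: the page-to-window mapping used in ewrk3_queue_pkt() above and in ewrk3_rx() below, pulled out as a standalone computation. Each free-memory-queue page is a 2kB buffer; with a 32kB or 64kB window the page number splits into a memory page register (EWRK3_MPR) value and a byte offset inside the window. The function and variable names here are invented for the sketch.]

#include <stdio.h>

#define SHMEM_2K   0x800
#define SHMEM_32K  0x8000
#define SHMEM_64K  0x10000

/* Mirror the SHMEM_2K/32K/64K branches of ewrk3_queue_pkt()/ewrk3_rx():
 * compute the MPR value and the byte offset of a 2kB page buffer inside
 * the shared-memory window. Returns 0 on success, -1 for an unsupported
 * window length (the I/O-only path has no window to map). */
static int page_to_window(unsigned long shmem_length, unsigned char page,
                          unsigned char *mpr, unsigned long *offset)
{
    if (shmem_length == SHMEM_2K) {
        *mpr = page;                            /* one page visible at a time */
        *offset = 0;
    } else if (shmem_length == SHMEM_32K) {
        *mpr = page >> 4;                       /* 16 x 2kB pages per window */
        *offset = ((unsigned)page << 11) & 0x7800;
    } else if (shmem_length == SHMEM_64K) {
        *mpr = page >> 5;                       /* 32 x 2kB pages per window */
        *offset = ((unsigned)page << 11) & 0xf800;
    } else {
        return -1;
    }
    return 0;
}

int main(void)
{
    unsigned char mpr;
    unsigned long off;

    if (page_to_window(SHMEM_32K, 0x2a, &mpr, &off) == 0)
        printf("32kB window, page 0x2a -> MPR %u, offset 0x%04lx\n", mpr, off);
    if (page_to_window(SHMEM_64K, 0x2a, &mpr, &off) == 0)
        printf("64kB window, page 0x2a -> MPR %u, offset 0x%04lx\n", mpr, off);
    return 0;
}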
878 | |||
879 | /* | ||
880 | ** The EWRK3 interrupt handler. | ||
881 | */ | ||
882 | static irqreturn_t ewrk3_interrupt(int irq, void *dev_id) | ||
883 | { | ||
884 | struct net_device *dev = dev_id; | ||
885 | struct ewrk3_private *lp; | ||
886 | u_long iobase; | ||
887 | u_char icr, cr, csr; | ||
888 | |||
889 | lp = netdev_priv(dev); | ||
890 | iobase = dev->base_addr; | ||
891 | |||
892 | /* get the interrupt information */ | ||
893 | csr = inb(EWRK3_CSR); | ||
894 | |||
895 | /* | ||
896 | ** Mask the EWRK3 board interrupts and turn on the LED | ||
897 | */ | ||
898 | spin_lock(&lp->hw_lock); | ||
899 | DISABLE_IRQs; | ||
900 | |||
901 | cr = inb(EWRK3_CR); | ||
902 | cr |= lp->led_mask; | ||
903 | outb(cr, EWRK3_CR); | ||
904 | |||
905 | if (csr & CSR_RNE) /* Rx interrupt (packet[s] arrived) */ | ||
906 | ewrk3_rx(dev); | ||
907 | |||
908 | if (csr & CSR_TNE) /* Tx interrupt (packet sent) */ | ||
909 | ewrk3_tx(dev); | ||
910 | |||
911 | /* | ||
912 | ** Now deal with the TX/RX disable flags. These are set when there | ||
913 | ** are no more resources. If resources free up then enable these | ||
914 | ** interrupts, otherwise mask them - failure to do this will result | ||
915 | ** in the system hanging in an interrupt loop. | ||
916 | */ | ||
917 | if (inb(EWRK3_FMQC)) { /* any resources available? */ | ||
918 | lp->irq_mask |= ICR_TXDM | ICR_RXDM; /* enable the interrupt source */ | ||
919 | csr &= ~(CSR_TXD | CSR_RXD); /* ensure restart of a stalled TX or RX */ | ||
920 | outb(csr, EWRK3_CSR); | ||
921 | netif_wake_queue(dev); | ||
922 | } else { | ||
923 | lp->irq_mask &= ~(ICR_TXDM | ICR_RXDM); /* disable the interrupt source */ | ||
924 | } | ||
925 | |||
926 | /* Unmask the EWRK3 board interrupts and turn off the LED */ | ||
927 | cr &= ~(lp->led_mask); | ||
928 | outb(cr, EWRK3_CR); | ||
929 | ENABLE_IRQs; | ||
930 | spin_unlock(&lp->hw_lock); | ||
931 | return IRQ_HANDLED; | ||
932 | } | ||
933 | |||
934 | /* Called with lp->hw_lock held */ | ||
935 | static int ewrk3_rx(struct net_device *dev) | ||
936 | { | ||
937 | struct ewrk3_private *lp = netdev_priv(dev); | ||
938 | u_long iobase = dev->base_addr; | ||
939 | int i, status = 0; | ||
940 | u_char page; | ||
941 | void __iomem *buf = NULL; | ||
942 | |||
943 | while (inb(EWRK3_RQC) && !status) { /* Whilst there's incoming data */ | ||
944 | if ((page = inb(EWRK3_RQ)) < lp->mPage) { /* Get next entry's buffer page */ | ||
945 | /* | ||
946 | ** Set up shared memory window and pointer into the window | ||
947 | */ | ||
948 | if (lp->shmem_length == IO_ONLY) { | ||
949 | outb(page, EWRK3_IOPR); | ||
950 | } else if (lp->shmem_length == SHMEM_2K) { | ||
951 | buf = lp->shmem; | ||
952 | outb(page, EWRK3_MPR); | ||
953 | } else if (lp->shmem_length == SHMEM_32K) { | ||
954 | buf = (((short) page << 11) & 0x7800) + lp->shmem; | ||
955 | outb((page >> 4), EWRK3_MPR); | ||
956 | } else if (lp->shmem_length == SHMEM_64K) { | ||
957 | buf = (((short) page << 11) & 0xf800) + lp->shmem; | ||
958 | outb((page >> 5), EWRK3_MPR); | ||
959 | } else { | ||
960 | status = -1; | ||
961 | printk("%s: Oops - your private data area is hosed!\n", dev->name); | ||
962 | } | ||
963 | |||
964 | if (!status) { | ||
965 | char rx_status; | ||
966 | int pkt_len; | ||
967 | |||
968 | if (lp->shmem_length == IO_ONLY) { | ||
969 | rx_status = inb(EWRK3_DATA); | ||
970 | pkt_len = inb(EWRK3_DATA); | ||
971 | pkt_len |= ((u_short) inb(EWRK3_DATA) << 8); | ||
972 | } else { | ||
973 | rx_status = readb(buf); | ||
974 | buf += 1; | ||
975 | pkt_len = readw(buf); | ||
976 | buf += 3; | ||
977 | } | ||
978 | |||
979 | if (!(rx_status & R_ROK)) { /* There was an error. */ | ||
980 | dev->stats.rx_errors++; /* Update the error stats. */ | ||
981 | if (rx_status & R_DBE) | ||
982 | dev->stats.rx_frame_errors++; | ||
983 | if (rx_status & R_CRC) | ||
984 | dev->stats.rx_crc_errors++; | ||
985 | if (rx_status & R_PLL) | ||
986 | dev->stats.rx_fifo_errors++; | ||
987 | } else { | ||
988 | struct sk_buff *skb; | ||
989 | |||
990 | if ((skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | ||
991 | unsigned char *p; | ||
992 | skb_reserve(skb, 2); /* Align to 16 bytes */ | ||
993 | p = skb_put(skb, pkt_len); | ||
994 | |||
995 | if (lp->shmem_length == IO_ONLY) { | ||
996 | *p = inb(EWRK3_DATA); /* dummy read */ | ||
997 | for (i = 0; i < pkt_len; i++) { | ||
998 | *p++ = inb(EWRK3_DATA); | ||
999 | } | ||
1000 | } else { | ||
1001 | memcpy_fromio(p, buf, pkt_len); | ||
1002 | } | ||
1003 | |||
1004 | for (i = 1; i < EWRK3_PKT_STAT_SZ - 1; i++) { | ||
1005 | if (pkt_len < i * EWRK3_PKT_BIN_SZ) { | ||
1006 | lp->pktStats.bins[i]++; | ||
1007 | i = EWRK3_PKT_STAT_SZ; | ||
1008 | } | ||
1009 | } | ||
1010 | p = skb->data; /* Look at the dest addr */ | ||
1011 | if (is_multicast_ether_addr(p)) { | ||
1012 | if (is_broadcast_ether_addr(p)) { | ||
1013 | lp->pktStats.broadcast++; | ||
1014 | } else { | ||
1015 | lp->pktStats.multicast++; | ||
1016 | } | ||
1017 | } else if (compare_ether_addr(p, dev->dev_addr) == 0) { | ||
1018 | lp->pktStats.unicast++; | ||
1019 | } | ||
1020 | lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ | ||
1021 | if (lp->pktStats.bins[0] == 0) { /* Reset counters */ | ||
1022 | memset(&lp->pktStats, 0, sizeof(lp->pktStats)); | ||
1023 | } | ||
1024 | /* | ||
1025 | ** Notify the upper protocol layers that there is another | ||
1026 | ** packet to handle | ||
1027 | */ | ||
1028 | skb->protocol = eth_type_trans(skb, dev); | ||
1029 | netif_rx(skb); | ||
1030 | |||
1031 | /* | ||
1032 | ** Update stats | ||
1033 | */ | ||
1034 | dev->stats.rx_packets++; | ||
1035 | dev->stats.rx_bytes += pkt_len; | ||
1036 | } else { | ||
1037 | printk("%s: Insufficient memory; nuking packet.\n", dev->name); | ||
1038 | dev->stats.rx_dropped++; /* Really, deferred. */ | ||
1039 | break; | ||
1040 | } | ||
1041 | } | ||
1042 | } | ||
1043 | /* | ||
1044 | ** Return the received buffer to the free memory queue | ||
1045 | */ | ||
1046 | outb(page, EWRK3_FMQ); | ||
1047 | } else { | ||
1048 | printk("ewrk3_rx(): Illegal page number, page %d\n", page); | ||
1049 | printk("ewrk3_rx(): CSR: %02x ICR: %02x FMQC: %02x\n", inb(EWRK3_CSR), inb(EWRK3_ICR), inb(EWRK3_FMQC)); | ||
1050 | } | ||
1051 | } | ||
1052 | return status; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | ** Buffer sent - check for TX buffer errors. | ||
1057 | ** Called with lp->hw_lock held | ||
1058 | */ | ||
1059 | static int ewrk3_tx(struct net_device *dev) | ||
1060 | { | ||
1061 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1062 | u_long iobase = dev->base_addr; | ||
1063 | u_char tx_status; | ||
1064 | |||
1065 | while ((tx_status = inb(EWRK3_TDQ)) > 0) { /* Whilst there's old buffers */ | ||
1066 | if (tx_status & T_VSTS) { /* The status is valid */ | ||
1067 | if (tx_status & T_TXE) { | ||
1068 | dev->stats.tx_errors++; | ||
1069 | if (tx_status & T_NCL) | ||
1070 | dev->stats.tx_carrier_errors++; | ||
1071 | if (tx_status & T_LCL) | ||
1072 | dev->stats.tx_window_errors++; | ||
1073 | if (tx_status & T_CTU) { | ||
1074 | if ((tx_status & T_COLL) ^ T_XUR) { | ||
1075 | lp->pktStats.tx_underruns++; | ||
1076 | } else { | ||
1077 | lp->pktStats.excessive_underruns++; | ||
1078 | } | ||
1079 | } else if (tx_status & T_COLL) { | ||
1080 | if ((tx_status & T_COLL) ^ T_XCOLL) { | ||
1081 | dev->stats.collisions++; | ||
1082 | } else { | ||
1083 | lp->pktStats.excessive_collisions++; | ||
1084 | } | ||
1085 | } | ||
1086 | } else { | ||
1087 | dev->stats.tx_packets++; | ||
1088 | } | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | return 0; | ||
1093 | } | ||
1094 | |||
1095 | static int ewrk3_close(struct net_device *dev) | ||
1096 | { | ||
1097 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1098 | u_long iobase = dev->base_addr; | ||
1099 | u_char icr, csr; | ||
1100 | |||
1101 | netif_stop_queue(dev); | ||
1102 | |||
1103 | if (ewrk3_debug > 1) { | ||
1104 | printk("%s: Shutting down ethercard, status was %2.2x.\n", | ||
1105 | dev->name, inb(EWRK3_CSR)); | ||
1106 | } | ||
1107 | /* | ||
1108 | ** We stop the EWRK3 here... mask interrupts and stop TX & RX | ||
1109 | */ | ||
1110 | DISABLE_IRQs; | ||
1111 | |||
1112 | STOP_EWRK3; | ||
1113 | |||
1114 | /* | ||
1115 | ** Clean out the TX and RX queues here (note that one entry | ||
1116 | ** may get added to either the TXD or RX queues if the TX or RX | ||
1117 | ** just starts processing a packet before the STOP_EWRK3 command | ||
1118 | ** is received. This will be flushed in the ewrk3_open() call). | ||
1119 | */ | ||
1120 | while (inb(EWRK3_TQ)); | ||
1121 | while (inb(EWRK3_TDQ)); | ||
1122 | while (inb(EWRK3_RQ)); | ||
1123 | |||
1124 | if (!lp->hard_strapped) { | ||
1125 | free_irq(dev->irq, dev); | ||
1126 | } | ||
1127 | return 0; | ||
1128 | } | ||
1129 | |||
1130 | /* | ||
1131 | ** Set or clear the multicast filter for this adapter. | ||
1132 | */ | ||
1133 | static void set_multicast_list(struct net_device *dev) | ||
1134 | { | ||
1135 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1136 | u_long iobase = dev->base_addr; | ||
1137 | u_char csr; | ||
1138 | |||
1139 | csr = inb(EWRK3_CSR); | ||
1140 | |||
1141 | if (lp->shmem_length == IO_ONLY) { | ||
1142 | lp->mctbl = NULL; | ||
1143 | } else { | ||
1144 | lp->mctbl = lp->shmem + PAGE0_HTE; | ||
1145 | } | ||
1146 | |||
1147 | csr &= ~(CSR_PME | CSR_MCE); | ||
1148 | if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ | ||
1149 | csr |= CSR_PME; | ||
1150 | outb(csr, EWRK3_CSR); | ||
1151 | } else { | ||
1152 | SetMulticastFilter(dev); | ||
1153 | csr |= CSR_MCE; | ||
1154 | outb(csr, EWRK3_CSR); | ||
1155 | } | ||
1156 | } | ||
1157 | |||
1158 | /* | ||
1159 | ** Calculate the hash code and update the logical address filter | ||
1160 | ** from a list of ethernet multicast addresses. | ||
1161 | ** Little endian crc one liner from Matt Thomas, DEC. | ||
1162 | ** | ||
1163 | ** Note that when clearing the table, the broadcast bit must remain asserted | ||
1164 | ** to receive broadcast messages. | ||
1165 | */ | ||
1166 | static void SetMulticastFilter(struct net_device *dev) | ||
1167 | { | ||
1168 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1169 | struct netdev_hw_addr *ha; | ||
1170 | u_long iobase = dev->base_addr; | ||
1171 | int i; | ||
1172 | char bit, byte; | ||
1173 | short __iomem *p = lp->mctbl; | ||
1174 | u16 hashcode; | ||
1175 | u32 crc; | ||
1176 | |||
1177 | spin_lock_irq(&lp->hw_lock); | ||
1178 | |||
1179 | if (lp->shmem_length == IO_ONLY) { | ||
1180 | outb(0, EWRK3_IOPR); | ||
1181 | outw(PAGE0_HTE, EWRK3_PIR1); | ||
1182 | } else { | ||
1183 | outb(0, EWRK3_MPR); | ||
1184 | } | ||
1185 | |||
1186 | if (dev->flags & IFF_ALLMULTI) { | ||
1187 | for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { | ||
1188 | if (lp->shmem_length == IO_ONLY) { | ||
1189 | outb(0xff, EWRK3_DATA); | ||
1190 | } else { /* memset didn't work here */ | ||
1191 | writew(0xffff, p); | ||
1192 | p++; | ||
1193 | i++; | ||
1194 | } | ||
1195 | } | ||
1196 | } else { | ||
1197 | /* Clear table except for broadcast bit */ | ||
1198 | if (lp->shmem_length == IO_ONLY) { | ||
1199 | for (i = 0; i < (HASH_TABLE_LEN >> 4) - 1; i++) { | ||
1200 | outb(0x00, EWRK3_DATA); | ||
1201 | } | ||
1202 | outb(0x80, EWRK3_DATA); | ||
1203 | i++; /* insert the broadcast bit */ | ||
1204 | for (; i < (HASH_TABLE_LEN >> 3); i++) { | ||
1205 | outb(0x00, EWRK3_DATA); | ||
1206 | } | ||
1207 | } else { | ||
1208 | memset_io(lp->mctbl, 0, HASH_TABLE_LEN >> 3); | ||
1209 | writeb(0x80, lp->mctbl + (HASH_TABLE_LEN >> 4) - 1); | ||
1210 | } | ||
1211 | |||
1212 | /* Update table */ | ||
1213 | netdev_for_each_mc_addr(ha, dev) { | ||
1214 | crc = ether_crc_le(ETH_ALEN, ha->addr); | ||
1215 | hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ | ||
1216 | |||
1217 | byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ | ||
1218 | bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */ | ||
1219 | |||
1220 | if (lp->shmem_length == IO_ONLY) { | ||
1221 | u_char tmp; | ||
1222 | |||
1223 | outw(PAGE0_HTE + byte, EWRK3_PIR1); | ||
1224 | tmp = inb(EWRK3_DATA); | ||
1225 | tmp |= bit; | ||
1226 | outw(PAGE0_HTE + byte, EWRK3_PIR1); | ||
1227 | outb(tmp, EWRK3_DATA); | ||
1228 | } else { | ||
1229 | writeb(readb(lp->mctbl + byte) | bit, lp->mctbl + byte); | ||
1230 | } | ||
1231 | } | ||
1232 | } | ||
1233 | |||
1234 | spin_unlock_irq(&lp->hw_lock); | ||
1235 | } | ||
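[Editor's note, not part of this commit: how SetMulticastFilter() above maps a multicast address to a bit in the hash filter (a 9-bit hash, so 512 positions). The CRC routine below is a plain userspace stand-in for the kernel's ether_crc_le(), written only for illustration; the example address is arbitrary.]

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

/* Little-endian (bit-reversed) CRC-32 over the address bytes, an
 * illustrative stand-in for the kernel's ether_crc_le(). */
static uint32_t crc32_le_bytes(const uint8_t *data, int len)
{
    uint32_t crc = 0xffffffffu;
    int bit;

    while (len-- > 0) {
        uint8_t byte = *data++;
        for (bit = 0; bit < 8; bit++, byte >>= 1) {
            if ((crc ^ byte) & 1)
                crc = (crc >> 1) ^ 0xedb88320u;
            else
                crc >>= 1;
        }
    }
    return crc;
}

int main(void)
{
    /* Arbitrary example multicast address (IPv4 multicast MAC prefix). */
    uint8_t addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    uint32_t crc = crc32_le_bytes(addr, ETH_ALEN);
    uint16_t hashcode = crc & ((1 << 9) - 1);   /* 9 LSBs of the CRC */
    int byte = hashcode >> 3;                   /* byte index in the filter */
    uint8_t bit = 1 << (hashcode & 0x07);       /* bit within that byte */

    printf("hashcode 0x%03x -> filter byte %d, bit mask 0x%02x\n",
           hashcode, byte, bit);
    return 0;
}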
1236 | |||
1237 | /* | ||
1238 | ** ISA bus I/O device probe | ||
1239 | */ | ||
1240 | static int __init isa_probe(struct net_device *dev, u_long ioaddr) | ||
1241 | { | ||
1242 | int i = num_ewrks3s, maxSlots; | ||
1243 | int ret = -ENODEV; | ||
1244 | |||
1245 | u_long iobase; | ||
1246 | |||
1247 | if (ioaddr >= 0x400) | ||
1248 | goto out; | ||
1249 | |||
1250 | if (ioaddr == 0) { /* Autoprobing */ | ||
1251 | iobase = EWRK3_IO_BASE; /* Get the first slot address */ | ||
1252 | maxSlots = 24; | ||
1253 | } else { /* Probe a specific location */ | ||
1254 | iobase = ioaddr; | ||
1255 | maxSlots = i + 1; | ||
1256 | } | ||
1257 | |||
1258 | for (; (i < maxSlots) && (dev != NULL); | ||
1259 | iobase += EWRK3_IOP_INC, i++) | ||
1260 | { | ||
1261 | if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME)) { | ||
1262 | if (DevicePresent(iobase) == 0) { | ||
1263 | int irq = dev->irq; | ||
1264 | ret = ewrk3_hw_init(dev, iobase); | ||
1265 | if (!ret) | ||
1266 | break; | ||
1267 | dev->irq = irq; | ||
1268 | } | ||
1269 | release_region(iobase, EWRK3_TOTAL_SIZE); | ||
1270 | } | ||
1271 | } | ||
1272 | out: | ||
1273 | |||
1274 | return ret; | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1278 | ** EISA bus I/O device probe. Probe from slot 1 since slot 0 is usually | ||
1279 | ** the motherboard. | ||
1280 | */ | ||
1281 | static int __init eisa_probe(struct net_device *dev, u_long ioaddr) | ||
1282 | { | ||
1283 | int i, maxSlots; | ||
1284 | u_long iobase; | ||
1285 | int ret = -ENODEV; | ||
1286 | |||
1287 | if (ioaddr < 0x1000) | ||
1288 | goto out; | ||
1289 | |||
1290 | iobase = ioaddr; | ||
1291 | i = (ioaddr >> 12); | ||
1292 | maxSlots = i + 1; | ||
1293 | |||
1294 | for (i = 1; (i < maxSlots) && (dev != NULL); i++, iobase += EISA_SLOT_INC) { | ||
1295 | if (EISA_signature(name, EISA_ID) == 0) { | ||
1296 | if (request_region(iobase, EWRK3_TOTAL_SIZE, DRV_NAME) && | ||
1297 | DevicePresent(iobase) == 0) { | ||
1298 | int irq = dev->irq; | ||
1299 | ret = ewrk3_hw_init(dev, iobase); | ||
1300 | if (!ret) | ||
1301 | break; | ||
1302 | dev->irq = irq; | ||
1303 | } | ||
1304 | release_region(iobase, EWRK3_TOTAL_SIZE); | ||
1305 | } | ||
1306 | } | ||
1307 | |||
1308 | out: | ||
1309 | return ret; | ||
1310 | } | ||
1311 | |||
1312 | |||
1313 | /* | ||
1314 | ** Read the EWRK3 EEPROM using this routine | ||
1315 | */ | ||
1316 | static int Read_EEPROM(u_long iobase, u_char eaddr) | ||
1317 | { | ||
1318 | int i; | ||
1319 | |||
1320 | outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ | ||
1321 | outb(EEPROM_RD, EWRK3_IOPR); /* issue read command */ | ||
1322 | for (i = 0; i < 5000; i++) | ||
1323 | inb(EWRK3_CSR); /* wait 1msec */ | ||
1324 | |||
1325 | return inw(EWRK3_EPROM1); /* 16 bits data return */ | ||
1326 | } | ||
1327 | |||
1328 | /* | ||
1329 | ** Write the EWRK3 EEPROM using this routine | ||
1330 | */ | ||
1331 | static int Write_EEPROM(short data, u_long iobase, u_char eaddr) | ||
1332 | { | ||
1333 | int i; | ||
1334 | |||
1335 | outb(EEPROM_WR_EN, EWRK3_IOPR); /* issue write enable command */ | ||
1336 | for (i = 0; i < 5000; i++) | ||
1337 | inb(EWRK3_CSR); /* wait 1msec */ | ||
1338 | outw(data, EWRK3_EPROM1); /* write data to register */ | ||
1339 | outb((eaddr & 0x3f), EWRK3_PIR1); /* set up 6 bits of address info */ | ||
1340 | outb(EEPROM_WR, EWRK3_IOPR); /* issue write command */ | ||
1341 | for (i = 0; i < 75000; i++) | ||
1342 | inb(EWRK3_CSR); /* wait 15msec */ | ||
1343 | outb(EEPROM_WR_DIS, EWRK3_IOPR); /* issue write disable command */ | ||
1344 | for (i = 0; i < 5000; i++) | ||
1345 | inb(EWRK3_CSR); /* wait 1msec */ | ||
1346 | |||
1347 | return 0; | ||
1348 | } | ||
1349 | |||
1350 | /* | ||
1351 | ** Look for a particular board name in the on-board EEPROM. | ||
1352 | */ | ||
1353 | static void __init EthwrkSignature(char *name, char *eeprom_image) | ||
1354 | { | ||
1355 | int i; | ||
1356 | char *signatures[] = EWRK3_SIGNATURE; | ||
1357 | |||
1358 | for (i=0; *signatures[i] != '\0'; i++) | ||
1359 | if( !strncmp(eeprom_image+EEPROM_PNAME7, signatures[i], strlen(signatures[i])) ) | ||
1360 | break; | ||
1361 | |||
1362 | if (*signatures[i] != '\0') { | ||
1363 | memcpy(name, eeprom_image+EEPROM_PNAME7, EWRK3_STRLEN); | ||
1364 | name[EWRK3_STRLEN] = '\0'; | ||
1365 | } else | ||
1366 | name[0] = '\0'; | ||
1367 | } | ||
1368 | |||
1369 | /* | ||
1370 | ** Look for a special sequence in the Ethernet station address PROM that | ||
1371 | ** is common across all EWRK3 products. | ||
1372 | ** | ||
1373 | ** Search the Ethernet address ROM for the signature. Since the ROM address | ||
1374 | ** counter can start at an arbitrary point, the search must include the entire | ||
1375 | ** probe sequence length plus the (length_of_the_signature - 1). | ||
1376 | ** Stop the search IMMEDIATELY after the signature is found so that the | ||
1377 | ** PROM address counter is correctly positioned at the start of the | ||
1378 | ** ethernet address for later read out. | ||
1379 | */ | ||
1380 | |||
1381 | static int __init DevicePresent(u_long iobase) | ||
1382 | { | ||
1383 | union { | ||
1384 | struct { | ||
1385 | u32 a; | ||
1386 | u32 b; | ||
1387 | } llsig; | ||
1388 | char Sig[sizeof(u32) << 1]; | ||
1389 | } | ||
1390 | dev; | ||
1391 | short sigLength; | ||
1392 | char data; | ||
1393 | int i, j, status = 0; | ||
1394 | |||
1395 | dev.llsig.a = ETH_PROM_SIG; | ||
1396 | dev.llsig.b = ETH_PROM_SIG; | ||
1397 | sigLength = sizeof(u32) << 1; | ||
1398 | |||
1399 | for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) { | ||
1400 | data = inb(EWRK3_APROM); | ||
1401 | if (dev.Sig[j] == data) { /* track signature */ | ||
1402 | j++; | ||
1403 | } else { /* lost signature; begin search again */ | ||
1404 | if (data == dev.Sig[0]) { | ||
1405 | j = 1; | ||
1406 | } else { | ||
1407 | j = 0; | ||
1408 | } | ||
1409 | } | ||
1410 | } | ||
1411 | |||
1412 | if (j != sigLength) { | ||
1413 | status = -ENODEV; /* search failed */ | ||
1414 | } | ||
1415 | return status; | ||
1416 | } | ||
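[Editor's note, not part of this commit: the rolling signature match that DevicePresent() above performs, exercised against a simulated address-PROM byte stream. The ROM layout below is hypothetical (the 8-byte 0xAA5500FF/0xAA5500FF pattern placed at an arbitrary offset in a 32-byte cyclic image); only the matching loop mirrors the driver, and the byte order of the signature words assumes a little-endian host, as the driver's union does.]

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PROBE_LENGTH 32

/* Byte image of two 0xAA5500FF words as a little-endian host lays them
 * out in DevicePresent()'s union. */
static const uint8_t sig[8] = { 0xff, 0x00, 0x55, 0xaa, 0xff, 0x00, 0x55, 0xaa };

/* Rolling match, as in DevicePresent(): read at most
 * PROBE_LENGTH + sizeof(sig) - 1 bytes and track how much of the
 * signature has been seen so far. 'rom' is a cyclic 32-byte image and
 * 'start' the arbitrary position of its address counter. */
static int device_present(const uint8_t *rom, int start)
{
    int i, j = 0;
    int siglen = (int)sizeof(sig);

    for (i = 0; j < siglen && i < PROBE_LENGTH + siglen - 1; i++) {
        uint8_t data = rom[(start + i) % PROBE_LENGTH];
        if (sig[j] == data)
            j++;                                /* track signature */
        else
            j = (data == sig[0]) ? 1 : 0;       /* lost it, restart */
    }
    return (j == siglen) ? 0 : -1;              /* 0: signature found */
}

int main(void)
{
    uint8_t rom[PROBE_LENGTH];
    int start;

    /* Hypothetical ROM image: zeros with the signature at offset 12. */
    memset(rom, 0, sizeof(rom));
    memcpy(rom + 12, sig, sizeof(sig));

    for (start = 0; start < PROBE_LENGTH; start += 7)
        printf("counter at %2d: %s\n", start,
               device_present(rom, start) == 0 ? "found" : "not found");
    return 0;
}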
1417 | |||
1418 | static u_char __init get_hw_addr(struct net_device *dev, u_char * eeprom_image, char chipType) | ||
1419 | { | ||
1420 | int i, j, k; | ||
1421 | u_short chksum; | ||
1422 | u_char crc, lfsr, sd, status = 0; | ||
1423 | u_long iobase = dev->base_addr; | ||
1424 | u16 tmp; | ||
1425 | |||
1426 | if (chipType == LeMAC2) { | ||
1427 | for (crc = 0x6a, j = 0; j < ETH_ALEN; j++) { | ||
1428 | sd = dev->dev_addr[j] = eeprom_image[EEPROM_PADDR0 + j]; | ||
1429 | outb(dev->dev_addr[j], EWRK3_PAR0 + j); | ||
1430 | for (k = 0; k < 8; k++, sd >>= 1) { | ||
1431 | lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7; | ||
1432 | crc = (crc >> 1) + lfsr; | ||
1433 | } | ||
1434 | } | ||
1435 | if (crc != eeprom_image[EEPROM_PA_CRC]) | ||
1436 | status = -1; | ||
1437 | } else { | ||
1438 | for (i = 0, k = 0; i < ETH_ALEN;) { | ||
1439 | k <<= 1; | ||
1440 | if (k > 0xffff) | ||
1441 | k -= 0xffff; | ||
1442 | |||
1443 | k += (u_char) (tmp = inb(EWRK3_APROM)); | ||
1444 | dev->dev_addr[i] = (u_char) tmp; | ||
1445 | outb(dev->dev_addr[i], EWRK3_PAR0 + i); | ||
1446 | i++; | ||
1447 | k += (u_short) ((tmp = inb(EWRK3_APROM)) << 8); | ||
1448 | dev->dev_addr[i] = (u_char) tmp; | ||
1449 | outb(dev->dev_addr[i], EWRK3_PAR0 + i); | ||
1450 | i++; | ||
1451 | |||
1452 | if (k > 0xffff) | ||
1453 | k -= 0xffff; | ||
1454 | } | ||
1455 | if (k == 0xffff) | ||
1456 | k = 0; | ||
1457 | chksum = inb(EWRK3_APROM); | ||
1458 | chksum |= (inb(EWRK3_APROM) << 8); | ||
1459 | if (k != chksum) | ||
1460 | status = -1; | ||
1461 | } | ||
1462 | |||
1463 | return status; | ||
1464 | } | ||
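On LeMAC2 boards the first branch above validates the station address with an 8-bit LFSR checksum: the register is seeded with 0x6a and clocked once per address bit, LSB first, and the result must equal the EEPROM_PA_CRC byte. A hypothetical user-space rendering of that loop, handy for checking an EEPROM image offline:

	/* Compute the LeMAC2 address CRC exactly as get_hw_addr() does. */
	static unsigned char lemac2_addr_crc(const unsigned char addr[6])
	{
		unsigned char crc = 0x6a, lfsr, sd;
		int j, k;

		for (j = 0; j < 6; j++) {
			sd = addr[j];
			for (k = 0; k < 8; k++, sd >>= 1) {
				lfsr = ((((crc & 0x02) >> 1) ^ (crc & 0x01)) ^ (sd & 0x01)) << 7;
				crc = (crc >> 1) + lfsr;
			}
		}
		return crc;	/* compare against eeprom_image[EEPROM_PA_CRC] */
	}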
1465 | |||
1466 | /* | ||
1467 | ** Look for a particular board name in the EISA configuration space | ||
1468 | */ | ||
1469 | static int __init EISA_signature(char *name, s32 eisa_id) | ||
1470 | { | ||
1471 | u_long i; | ||
1472 | char *signatures[] = EWRK3_SIGNATURE; | ||
1473 | char ManCode[EWRK3_STRLEN]; | ||
1474 | union { | ||
1475 | s32 ID; | ||
1476 | char Id[4]; | ||
1477 | } Eisa; | ||
1478 | int status = 0; | ||
1479 | |||
1480 | *name = '\0'; | ||
1481 | for (i = 0; i < 4; i++) { | ||
1482 | Eisa.Id[i] = inb(eisa_id + i); | ||
1483 | } | ||
1484 | |||
1485 | ManCode[0] = (((Eisa.Id[0] >> 2) & 0x1f) + 0x40); | ||
1486 | ManCode[1] = (((Eisa.Id[1] & 0xe0) >> 5) + ((Eisa.Id[0] & 0x03) << 3) + 0x40); | ||
1487 | ManCode[2] = (((Eisa.Id[2] >> 4) & 0x0f) + 0x30); | ||
1488 | ManCode[3] = ((Eisa.Id[2] & 0x0f) + 0x30); | ||
1489 | ManCode[4] = (((Eisa.Id[3] >> 4) & 0x0f) + 0x30); | ||
1490 | ManCode[5] = '\0'; | ||
1491 | |||
1492 | for (i = 0; (*signatures[i] != '\0') && (*name == '\0'); i++) { | ||
1493 | if (strstr(ManCode, signatures[i]) != NULL) { | ||
1494 | strcpy(name, ManCode); | ||
1495 | status = 1; | ||
1496 | } | ||
1497 | } | ||
1498 | |||
1499 | return status; /* return the device name string */ | ||
1500 | } | ||
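The shifts above unpack the compressed EISA ID read from the board: two 5-bit letters (offset from 0x40) followed by three hex digits of the product code, which is then matched against the EWRK3_SIGNATURE strings (presumably names of the DE20x form, judging from the adapter_name[4] cases used later in this file). A hypothetical stand-alone version of the same decode:

	/* Decode a 4-byte EISA ID into the manufacturer/product string that
	 * EISA_signature() compares: two compressed letters, three hex digits. */
	static void decode_eisa_id(const unsigned char id[4], char mancode[6])
	{
		mancode[0] = ((id[0] >> 2) & 0x1f) + 0x40;
		mancode[1] = (((id[1] & 0xe0) >> 5) + ((id[0] & 0x03) << 3)) + 0x40;
		mancode[2] = ((id[2] >> 4) & 0x0f) + 0x30;
		mancode[3] = (id[2] & 0x0f) + 0x30;
		mancode[4] = ((id[3] >> 4) & 0x0f) + 0x30;
		mancode[5] = '\0';
	}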
1501 | |||
1502 | static void ewrk3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
1503 | { | ||
1504 | int fwrev = Read_EEPROM(dev->base_addr, EEPROM_REVLVL); | ||
1505 | |||
1506 | strcpy(info->driver, DRV_NAME); | ||
1507 | strcpy(info->version, DRV_VERSION); | ||
1508 | sprintf(info->fw_version, "%d", fwrev); | ||
1509 | strcpy(info->bus_info, "N/A"); | ||
1510 | info->eedump_len = EEPROM_MAX; | ||
1511 | } | ||
1512 | |||
1513 | static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
1514 | { | ||
1515 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1516 | unsigned long iobase = dev->base_addr; | ||
1517 | u8 cr = inb(EWRK3_CR); | ||
1518 | |||
1519 | switch (lp->adapter_name[4]) { | ||
1520 | case '3': /* DE203 */ | ||
1521 | ecmd->supported = SUPPORTED_BNC; | ||
1522 | ecmd->port = PORT_BNC; | ||
1523 | break; | ||
1524 | |||
1525 | case '4': /* DE204 */ | ||
1526 | ecmd->supported = SUPPORTED_TP; | ||
1527 | ecmd->port = PORT_TP; | ||
1528 | break; | ||
1529 | |||
1530 | case '5': /* DE205 */ | ||
1531 | ecmd->supported = SUPPORTED_TP | SUPPORTED_BNC | SUPPORTED_AUI; | ||
1532 | ecmd->autoneg = !(cr & CR_APD); | ||
1533 | /* | ||
1534 | ** Port is only valid if autoneg is disabled | ||
1535 | ** and even then we don't know if AUI is jumpered. | ||
1536 | */ | ||
1537 | if (!ecmd->autoneg) | ||
1538 | ecmd->port = (cr & CR_PSEL) ? PORT_BNC : PORT_TP; | ||
1539 | break; | ||
1540 | } | ||
1541 | |||
1542 | ecmd->supported |= SUPPORTED_10baseT_Half; | ||
1543 | ethtool_cmd_speed_set(ecmd, SPEED_10); | ||
1544 | ecmd->duplex = DUPLEX_HALF; | ||
1545 | return 0; | ||
1546 | } | ||
1547 | |||
1548 | static int ewrk3_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
1549 | { | ||
1550 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1551 | unsigned long iobase = dev->base_addr; | ||
1552 | unsigned long flags; | ||
1553 | u8 cr; | ||
1554 | |||
1555 | /* DE205 is the only card with anything to set */ | ||
1556 | if (lp->adapter_name[4] != '5') | ||
1557 | return -EOPNOTSUPP; | ||
1558 | |||
1559 | /* Sanity-check parameters */ | ||
1560 | if (ecmd->speed != SPEED_10) | ||
1561 | return -EINVAL; | ||
1562 | if (ecmd->port != PORT_TP && ecmd->port != PORT_BNC) | ||
1563 | return -EINVAL; /* AUI is not software-selectable */ | ||
1564 | if (ecmd->transceiver != XCVR_INTERNAL) | ||
1565 | return -EINVAL; | ||
1566 | if (ecmd->duplex != DUPLEX_HALF) | ||
1567 | return -EINVAL; | ||
1568 | if (ecmd->phy_address != 0) | ||
1569 | return -EINVAL; | ||
1570 | |||
1571 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1572 | cr = inb(EWRK3_CR); | ||
1573 | |||
1574 | /* If Autoneg is set, change to Auto Port mode */ | ||
1575 | /* Otherwise, disable Auto Port and set port explicitly */ | ||
1576 | if (ecmd->autoneg) { | ||
1577 | cr &= ~CR_APD; | ||
1578 | } else { | ||
1579 | cr |= CR_APD; | ||
1580 | if (ecmd->port == PORT_TP) | ||
1581 | cr &= ~CR_PSEL; /* Force TP */ | ||
1582 | else | ||
1583 | cr |= CR_PSEL; /* Force BNC */ | ||
1584 | } | ||
1585 | |||
1586 | /* Commit the changes */ | ||
1587 | outb(cr, EWRK3_CR); | ||
1588 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1589 | return 0; | ||
1590 | } | ||
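From user space these two handlers are reached through ethtool. On a DE205, for example, something like the following would force the BNC port and then hand port selection back to the hardware (the interface name is only an example):

	ethtool -s eth0 autoneg off port bnc
	ethtool -s eth0 autoneg on

Anything other than 10 Mb/s half duplex on the internal transceiver is rejected by the sanity checks above, and AUI cannot be selected from software at all.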
1591 | |||
1592 | static u32 ewrk3_get_link(struct net_device *dev) | ||
1593 | { | ||
1594 | unsigned long iobase = dev->base_addr; | ||
1595 | u8 cmr = inb(EWRK3_CMR); | ||
1596 | /* DE203 has BNC only and link status does not apply */ | ||
1597 | /* On DE204 this is always valid since TP is the only port. */ | ||
1598 | /* On DE205 this reflects TP status even if BNC or AUI is selected. */ | ||
1599 | return !(cmr & CMR_LINK); | ||
1600 | } | ||
1601 | |||
1602 | static int ewrk3_set_phys_id(struct net_device *dev, | ||
1603 | enum ethtool_phys_id_state state) | ||
1604 | { | ||
1605 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1606 | unsigned long iobase = dev->base_addr; | ||
1607 | u8 cr; | ||
1608 | |||
1609 | spin_lock_irq(&lp->hw_lock); | ||
1610 | |||
1611 | switch (state) { | ||
1612 | case ETHTOOL_ID_ACTIVE: | ||
1613 | /* Prevent ISR from twiddling the LED */ | ||
1614 | lp->led_mask = 0; | ||
1615 | spin_unlock_irq(&lp->hw_lock); | ||
1616 | return 2; /* cycle on/off twice per second */ | ||
1617 | |||
1618 | case ETHTOOL_ID_ON: | ||
1619 | cr = inb(EWRK3_CR); | ||
1620 | outb(cr | CR_LED, EWRK3_CR); | ||
1621 | break; | ||
1622 | |||
1623 | case ETHTOOL_ID_OFF: | ||
1624 | cr = inb(EWRK3_CR); | ||
1625 | outb(cr & ~CR_LED, EWRK3_CR); | ||
1626 | break; | ||
1627 | |||
1628 | case ETHTOOL_ID_INACTIVE: | ||
1629 | lp->led_mask = CR_LED; | ||
1630 | cr = inb(EWRK3_CR); | ||
1631 | outb(cr & ~CR_LED, EWRK3_CR); | ||
1632 | } | ||
1633 | spin_unlock_irq(&lp->hw_lock); | ||
1634 | |||
1635 | return 0; | ||
1636 | } | ||
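This hook backs ethtool's identify function: the ACTIVE case hands LED control from the ISR to ethtool and its return value of 2 asks the core to toggle the LED twice per second, while INACTIVE gives the LED back to the ISR via led_mask. A typical invocation (interface name illustrative) would be:

	ethtool -p eth0 5

which blinks the adapter LED for five seconds.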
1637 | |||
1638 | static const struct ethtool_ops ethtool_ops_203 = { | ||
1639 | .get_drvinfo = ewrk3_get_drvinfo, | ||
1640 | .get_settings = ewrk3_get_settings, | ||
1641 | .set_settings = ewrk3_set_settings, | ||
1642 | .set_phys_id = ewrk3_set_phys_id, | ||
1643 | }; | ||
1644 | |||
1645 | static const struct ethtool_ops ethtool_ops = { | ||
1646 | .get_drvinfo = ewrk3_get_drvinfo, | ||
1647 | .get_settings = ewrk3_get_settings, | ||
1648 | .set_settings = ewrk3_set_settings, | ||
1649 | .get_link = ewrk3_get_link, | ||
1650 | .set_phys_id = ewrk3_set_phys_id, | ||
1651 | }; | ||
1652 | |||
1653 | /* | ||
1654 | ** Perform IOCTL call functions here. Some are privileged operations and the | ||
1655 | ** effective uid is checked in those cases. | ||
1656 | */ | ||
1657 | static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1658 | { | ||
1659 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1660 | struct ewrk3_ioctl *ioc = (struct ewrk3_ioctl *) &rq->ifr_ifru; | ||
1661 | u_long iobase = dev->base_addr; | ||
1662 | int i, j, status = 0; | ||
1663 | u_char csr; | ||
1664 | unsigned long flags; | ||
1665 | union ewrk3_addr { | ||
1666 | u_char addr[HASH_TABLE_LEN * ETH_ALEN]; | ||
1667 | u_short val[(HASH_TABLE_LEN * ETH_ALEN) >> 1]; | ||
1668 | }; | ||
1669 | |||
1670 | union ewrk3_addr *tmp; | ||
1671 | |||
1672 | /* All we handle are private IOCTLs */ | ||
1673 | if (cmd != EWRK3IOCTL) | ||
1674 | return -EOPNOTSUPP; | ||
1675 | |||
1676 | tmp = kmalloc(sizeof(union ewrk3_addr), GFP_KERNEL); | ||
1677 | if (tmp == NULL) | ||
1678 | return -ENOMEM; | ||
1679 | |||
1680 | switch (ioc->cmd) { | ||
1681 | case EWRK3_GET_HWADDR: /* Get the hardware address */ | ||
1682 | for (i = 0; i < ETH_ALEN; i++) { | ||
1683 | tmp->addr[i] = dev->dev_addr[i]; | ||
1684 | } | ||
1685 | ioc->len = ETH_ALEN; | ||
1686 | if (copy_to_user(ioc->data, tmp->addr, ioc->len)) | ||
1687 | status = -EFAULT; | ||
1688 | break; | ||
1689 | |||
1690 | case EWRK3_SET_HWADDR: /* Set the hardware address */ | ||
1691 | if (capable(CAP_NET_ADMIN)) { | ||
1692 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1693 | csr = inb(EWRK3_CSR); | ||
1694 | csr |= (CSR_TXD | CSR_RXD); | ||
1695 | outb(csr, EWRK3_CSR); /* Disable the TX and RX */ | ||
1696 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1697 | |||
1698 | if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN)) { | ||
1699 | status = -EFAULT; | ||
1700 | break; | ||
1701 | } | ||
1702 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1703 | for (i = 0; i < ETH_ALEN; i++) { | ||
1704 | dev->dev_addr[i] = tmp->addr[i]; | ||
1705 | outb(tmp->addr[i], EWRK3_PAR0 + i); | ||
1706 | } | ||
1707 | |||
1708 | csr = inb(EWRK3_CSR); | ||
1709 | csr &= ~(CSR_TXD | CSR_RXD); /* Enable the TX and RX */ | ||
1710 | outb(csr, EWRK3_CSR); | ||
1711 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1712 | } else { | ||
1713 | status = -EPERM; | ||
1714 | } | ||
1715 | |||
1716 | break; | ||
1717 | case EWRK3_SET_PROM: /* Set Promiscuous Mode */ | ||
1718 | if (capable(CAP_NET_ADMIN)) { | ||
1719 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1720 | csr = inb(EWRK3_CSR); | ||
1721 | csr |= CSR_PME; | ||
1722 | csr &= ~CSR_MCE; | ||
1723 | outb(csr, EWRK3_CSR); | ||
1724 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1725 | } else { | ||
1726 | status = -EPERM; | ||
1727 | } | ||
1728 | |||
1729 | break; | ||
1730 | case EWRK3_CLR_PROM: /* Clear Promiscuous Mode */ | ||
1731 | if (capable(CAP_NET_ADMIN)) { | ||
1732 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1733 | csr = inb(EWRK3_CSR); | ||
1734 | csr &= ~CSR_PME; | ||
1735 | outb(csr, EWRK3_CSR); | ||
1736 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1737 | } else { | ||
1738 | status = -EPERM; | ||
1739 | } | ||
1740 | |||
1741 | break; | ||
1742 | case EWRK3_GET_MCA: /* Get the multicast address table */ | ||
1743 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1744 | if (lp->shmem_length == IO_ONLY) { | ||
1745 | outb(0, EWRK3_IOPR); | ||
1746 | outw(PAGE0_HTE, EWRK3_PIR1); | ||
1747 | for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { | ||
1748 | tmp->addr[i] = inb(EWRK3_DATA); | ||
1749 | } | ||
1750 | } else { | ||
1751 | outb(0, EWRK3_MPR); | ||
1752 | memcpy_fromio(tmp->addr, lp->shmem + PAGE0_HTE, (HASH_TABLE_LEN >> 3)); | ||
1753 | } | ||
1754 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1755 | |||
1756 | ioc->len = (HASH_TABLE_LEN >> 3); | ||
1757 | if (copy_to_user(ioc->data, tmp->addr, ioc->len)) | ||
1758 | status = -EFAULT; | ||
1759 | |||
1760 | break; | ||
1761 | case EWRK3_SET_MCA: /* Set a multicast address */ | ||
1762 | if (capable(CAP_NET_ADMIN)) { | ||
1763 | if (ioc->len > HASH_TABLE_LEN) { | ||
1764 | status = -EINVAL; | ||
1765 | break; | ||
1766 | } | ||
1767 | if (copy_from_user(tmp->addr, ioc->data, ETH_ALEN * ioc->len)) { | ||
1768 | status = -EFAULT; | ||
1769 | break; | ||
1770 | } | ||
1771 | set_multicast_list(dev); | ||
1772 | } else { | ||
1773 | status = -EPERM; | ||
1774 | } | ||
1775 | |||
1776 | break; | ||
1777 | case EWRK3_CLR_MCA: /* Clear all multicast addresses */ | ||
1778 | if (capable(CAP_NET_ADMIN)) { | ||
1779 | set_multicast_list(dev); | ||
1780 | } else { | ||
1781 | status = -EPERM; | ||
1782 | } | ||
1783 | |||
1784 | break; | ||
1785 | case EWRK3_MCA_EN: /* Enable multicast addressing */ | ||
1786 | if (capable(CAP_NET_ADMIN)) { | ||
1787 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1788 | csr = inb(EWRK3_CSR); | ||
1789 | csr |= CSR_MCE; | ||
1790 | csr &= ~CSR_PME; | ||
1791 | outb(csr, EWRK3_CSR); | ||
1792 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1793 | } else { | ||
1794 | status = -EPERM; | ||
1795 | } | ||
1796 | |||
1797 | break; | ||
1798 | case EWRK3_GET_STATS: { /* Get the driver statistics */ | ||
1799 | struct ewrk3_stats *tmp_stats = | ||
1800 | kmalloc(sizeof(lp->pktStats), GFP_KERNEL); | ||
1801 | if (!tmp_stats) { | ||
1802 | status = -ENOMEM; | ||
1803 | break; | ||
1804 | } | ||
1805 | |||
1806 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1807 | memcpy(tmp_stats, &lp->pktStats, sizeof(lp->pktStats)); | ||
1808 | spin_unlock_irqrestore(&lp->hw_lock, flags); | ||
1809 | |||
1810 | ioc->len = sizeof(lp->pktStats); | ||
1811 | if (copy_to_user(ioc->data, tmp_stats, sizeof(lp->pktStats))) | ||
1812 | status = -EFAULT; | ||
1813 | kfree(tmp_stats); | ||
1814 | break; | ||
1815 | } | ||
1816 | case EWRK3_CLR_STATS: /* Zero out the driver statistics */ | ||
1817 | if (capable(CAP_NET_ADMIN)) { | ||
1818 | spin_lock_irqsave(&lp->hw_lock, flags); | ||
1819 | memset(&lp->pktStats, 0, sizeof(lp->pktStats)); | ||
1820 | spin_unlock_irqrestore(&lp->hw_lock,flags); | ||
1821 | } else { | ||
1822 | status = -EPERM; | ||
1823 | } | ||
1824 | |||
1825 | break; | ||
1826 | case EWRK3_GET_CSR: /* Get the CSR Register contents */ | ||
1827 | tmp->addr[0] = inb(EWRK3_CSR); | ||
1828 | ioc->len = 1; | ||
1829 | if (copy_to_user(ioc->data, tmp->addr, ioc->len)) | ||
1830 | status = -EFAULT; | ||
1831 | break; | ||
1832 | case EWRK3_SET_CSR: /* Set the CSR Register contents */ | ||
1833 | if (capable(CAP_NET_ADMIN)) { | ||
1834 | if (copy_from_user(tmp->addr, ioc->data, 1)) { | ||
1835 | status = -EFAULT; | ||
1836 | break; | ||
1837 | } | ||
1838 | outb(tmp->addr[0], EWRK3_CSR); | ||
1839 | } else { | ||
1840 | status = -EPERM; | ||
1841 | } | ||
1842 | |||
1843 | break; | ||
1844 | case EWRK3_GET_EEPROM: /* Get the EEPROM contents */ | ||
1845 | if (capable(CAP_NET_ADMIN)) { | ||
1846 | for (i = 0; i < (EEPROM_MAX >> 1); i++) { | ||
1847 | tmp->val[i] = (short) Read_EEPROM(iobase, i); | ||
1848 | } | ||
1849 | i = EEPROM_MAX; | ||
1850 | tmp->addr[i++] = inb(EWRK3_CMR); /* Config/Management Reg. */ | ||
1851 | for (j = 0; j < ETH_ALEN; j++) { | ||
1852 | tmp->addr[i++] = inb(EWRK3_PAR0 + j); | ||
1853 | } | ||
1854 | ioc->len = EEPROM_MAX + 1 + ETH_ALEN; | ||
1855 | if (copy_to_user(ioc->data, tmp->addr, ioc->len)) | ||
1856 | status = -EFAULT; | ||
1857 | } else { | ||
1858 | status = -EPERM; | ||
1859 | } | ||
1860 | |||
1861 | break; | ||
1862 | case EWRK3_SET_EEPROM: /* Set the EEPROM contents */ | ||
1863 | if (capable(CAP_NET_ADMIN)) { | ||
1864 | if (copy_from_user(tmp->addr, ioc->data, EEPROM_MAX)) { | ||
1865 | status = -EFAULT; | ||
1866 | break; | ||
1867 | } | ||
1868 | for (i = 0; i < (EEPROM_MAX >> 1); i++) { | ||
1869 | Write_EEPROM(tmp->val[i], iobase, i); | ||
1870 | } | ||
1871 | } else { | ||
1872 | status = -EPERM; | ||
1873 | } | ||
1874 | |||
1875 | break; | ||
1876 | case EWRK3_GET_CMR: /* Get the CMR Register contents */ | ||
1877 | tmp->addr[0] = inb(EWRK3_CMR); | ||
1878 | ioc->len = 1; | ||
1879 | if (copy_to_user(ioc->data, tmp->addr, ioc->len)) | ||
1880 | status = -EFAULT; | ||
1881 | break; | ||
1882 | case EWRK3_SET_TX_CUT_THRU: /* Set TX cut through mode */ | ||
1883 | if (capable(CAP_NET_ADMIN)) { | ||
1884 | lp->txc = 1; | ||
1885 | } else { | ||
1886 | status = -EPERM; | ||
1887 | } | ||
1888 | |||
1889 | break; | ||
1890 | case EWRK3_CLR_TX_CUT_THRU: /* Clear TX cut through mode */ | ||
1891 | if (capable(CAP_NET_ADMIN)) { | ||
1892 | lp->txc = 0; | ||
1893 | } else { | ||
1894 | status = -EPERM; | ||
1895 | } | ||
1896 | |||
1897 | break; | ||
1898 | default: | ||
1899 | status = -EOPNOTSUPP; | ||
1900 | } | ||
1901 | kfree(tmp); | ||
1902 | return status; | ||
1903 | } | ||
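From user space these commands travel through SIOCDEVPRIVATE with a struct ewrk3_ioctl (defined in ewrk3.h below) overlaid on the ifr_ifru union of struct ifreq. A hypothetical user-space sketch that fetches the hardware address; it assumes the 0x01 command code from the header and that the user and kernel pointer widths match:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/sockios.h>

	struct ewrk3_ioctl {		/* mirrors drivers/net/ethernet/dec/ewrk3.h */
		unsigned short cmd;	/* command to run */
		unsigned short len;	/* length of the data buffer */
		unsigned char *data;	/* pointer to the data buffer */
	};

	int main(void)
	{
		unsigned char addr[6];
		struct ewrk3_ioctl ioc = { .cmd = 0x01 /* EWRK3_GET_HWADDR */,
					   .len = sizeof(addr), .data = addr };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* name is an example */
		memcpy(&ifr.ifr_ifru, &ioc, sizeof(ioc));

		if (fd >= 0 && ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
			printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			       addr[0], addr[1], addr[2],
			       addr[3], addr[4], addr[5]);
		return 0;
	}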
1904 | |||
1905 | #ifdef MODULE | ||
1906 | static struct net_device *ewrk3_devs[MAX_NUM_EWRK3S]; | ||
1907 | static int ndevs; | ||
1908 | static int io[MAX_NUM_EWRK3S+1] = { 0x300, 0, }; | ||
1909 | |||
1910 | /* '21' below should really be 'MAX_NUM_EWRK3S' */ | ||
1911 | module_param_array(io, int, NULL, 0); | ||
1912 | module_param_array(irq, int, NULL, 0); | ||
1913 | MODULE_PARM_DESC(io, "EtherWORKS 3 I/O base address(es)"); | ||
1914 | MODULE_PARM_DESC(irq, "EtherWORKS 3 IRQ number(s)"); | ||
1915 | |||
1916 | static __exit void ewrk3_exit_module(void) | ||
1917 | { | ||
1918 | int i; | ||
1919 | |||
1920 | for (i = 0; i < ndevs; i++) { | ||
1921 | struct net_device *dev = ewrk3_devs[i]; | ||
1922 | struct ewrk3_private *lp = netdev_priv(dev); | ||
1923 | ewrk3_devs[i] = NULL; | ||
1924 | unregister_netdev(dev); | ||
1925 | release_region(dev->base_addr, EWRK3_TOTAL_SIZE); | ||
1926 | iounmap(lp->shmem); | ||
1927 | free_netdev(dev); | ||
1928 | } | ||
1929 | } | ||
1930 | |||
1931 | static __init int ewrk3_init_module(void) | ||
1932 | { | ||
1933 | int i=0; | ||
1934 | |||
1935 | while (io[i] && irq[i]) { | ||
1936 | struct net_device *dev | ||
1937 | = alloc_etherdev(sizeof(struct ewrk3_private)); | ||
1938 | |||
1939 | if (!dev) | ||
1940 | break; | ||
1941 | |||
1942 | if (ewrk3_probe1(dev, io[i], irq[i]) != 0) { | ||
1943 | free_netdev(dev); | ||
1944 | break; | ||
1945 | } | ||
1946 | |||
1947 | ewrk3_devs[ndevs++] = dev; | ||
1948 | i++; | ||
1949 | } | ||
1950 | |||
1951 | return ndevs ? 0 : -EIO; | ||
1952 | } | ||
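When built as a module, the probe loop above walks io[] and irq[] in lockstep and stops at the first empty slot, so both parameters must be given per card. A hypothetical two-card load line (addresses and IRQs are only examples):

	modprobe ewrk3 io=0x300,0x320 irq=5,10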
1953 | |||
1954 | |||
1955 | /* Hack for breakage in new module stuff */ | ||
1956 | module_exit(ewrk3_exit_module); | ||
1957 | module_init(ewrk3_init_module); | ||
1958 | #endif /* MODULE */ | ||
1959 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/ethernet/dec/ewrk3.h b/drivers/net/ethernet/dec/ewrk3.h new file mode 100644 index 000000000000..8e0ee906567b --- /dev/null +++ b/drivers/net/ethernet/dec/ewrk3.h | |||
@@ -0,0 +1,322 @@ | |||
1 | /* | ||
2 | Written 1994 by David C. Davies. | ||
3 | |||
4 | Copyright 1994 Digital Equipment Corporation. | ||
5 | |||
6 | This software may be used and distributed according to the terms of the | ||
7 | GNU General Public License, incorporated herein by reference. | ||
8 | |||
9 | The author may be reached as davies@wanton.lkg.dec.com or Digital | ||
10 | Equipment Corporation, 550 King Street, Littleton MA 01460. | ||
11 | |||
12 | ========================================================================= | ||
13 | */ | ||
14 | |||
15 | /* | ||
16 | ** I/O Address Register Map | ||
17 | */ | ||
18 | #define EWRK3_CSR iobase+0x00 /* Control and Status Register */ | ||
19 | #define EWRK3_CR iobase+0x01 /* Control Register */ | ||
20 | #define EWRK3_ICR iobase+0x02 /* Interrupt Control Register */ | ||
21 | #define EWRK3_TSR iobase+0x03 /* Transmit Status Register */ | ||
22 | #define EWRK3_RSVD1 iobase+0x04 /* RESERVED */ | ||
23 | #define EWRK3_RSVD2 iobase+0x05 /* RESERVED */ | ||
24 | #define EWRK3_FMQ iobase+0x06 /* Free Memory Queue */ | ||
25 | #define EWRK3_FMQC iobase+0x07 /* Free Memory Queue Counter */ | ||
26 | #define EWRK3_RQ iobase+0x08 /* Receive Queue */ | ||
27 | #define EWRK3_RQC iobase+0x09 /* Receive Queue Counter */ | ||
28 | #define EWRK3_TQ iobase+0x0a /* Transmit Queue */ | ||
29 | #define EWRK3_TQC iobase+0x0b /* Transmit Queue Counter */ | ||
30 | #define EWRK3_TDQ iobase+0x0c /* Transmit Done Queue */ | ||
31 | #define EWRK3_TDQC iobase+0x0d /* Transmit Done Queue Counter */ | ||
32 | #define EWRK3_PIR1 iobase+0x0e /* Page Index Register 1 */ | ||
33 | #define EWRK3_PIR2 iobase+0x0f /* Page Index Register 2 */ | ||
34 | #define EWRK3_DATA iobase+0x10 /* Data Register */ | ||
35 | #define EWRK3_IOPR iobase+0x11 /* I/O Page Register */ | ||
36 | #define EWRK3_IOBR iobase+0x12 /* I/O Base Register */ | ||
37 | #define EWRK3_MPR iobase+0x13 /* Memory Page Register */ | ||
38 | #define EWRK3_MBR iobase+0x14 /* Memory Base Register */ | ||
39 | #define EWRK3_APROM iobase+0x15 /* Address PROM */ | ||
40 | #define EWRK3_EPROM1 iobase+0x16 /* EEPROM Data Register 1 */ | ||
41 | #define EWRK3_EPROM2 iobase+0x17 /* EEPROM Data Register 2 */ | ||
42 | #define EWRK3_PAR0 iobase+0x18 /* Physical Address Register 0 */ | ||
43 | #define EWRK3_PAR1 iobase+0x19 /* Physical Address Register 1 */ | ||
44 | #define EWRK3_PAR2 iobase+0x1a /* Physical Address Register 2 */ | ||
45 | #define EWRK3_PAR3 iobase+0x1b /* Physical Address Register 3 */ | ||
46 | #define EWRK3_PAR4 iobase+0x1c /* Physical Address Register 4 */ | ||
47 | #define EWRK3_PAR5 iobase+0x1d /* Physical Address Register 5 */ | ||
48 | #define EWRK3_CMR iobase+0x1e /* Configuration/Management Register */ | ||
49 | |||
50 | /* | ||
51 | ** Control Page Map | ||
52 | */ | ||
53 | #define PAGE0_FMQ 0x000 /* Free Memory Queue */ | ||
54 | #define PAGE0_RQ 0x080 /* Receive Queue */ | ||
55 | #define PAGE0_TQ 0x100 /* Transmit Queue */ | ||
56 | #define PAGE0_TDQ 0x180 /* Transmit Done Queue */ | ||
57 | #define PAGE0_HTE 0x200 /* Hash Table Entries */ | ||
58 | #define PAGE0_RSVD 0x240 /* RESERVED */ | ||
59 | #define PAGE0_USRD 0x600 /* User Data */ | ||
60 | |||
61 | /* | ||
62 | ** Control and Status Register bit definitions (EWRK3_CSR) | ||
63 | */ | ||
64 | #define CSR_RA 0x80 /* Runt Accept */ | ||
65 | #define CSR_PME 0x40 /* Promiscuous Mode Enable */ | ||
66 | #define CSR_MCE 0x20 /* Multicast Enable */ | ||
67 | #define CSR_TNE 0x08 /* TX Done Queue Not Empty */ | ||
68 | #define CSR_RNE 0x04 /* RX Queue Not Empty */ | ||
69 | #define CSR_TXD 0x02 /* TX Disable */ | ||
70 | #define CSR_RXD 0x01 /* RX Disable */ | ||
71 | |||
72 | /* | ||
73 | ** Control Register bit definitions (EWRK3_CR) | ||
74 | */ | ||
75 | #define CR_APD 0x80 /* Auto Port Disable */ | ||
76 | #define CR_PSEL 0x40 /* Port Select (0->TP port) */ | ||
77 | #define CR_LBCK 0x20 /* LoopBaCK enable */ | ||
78 | #define CR_FDUP 0x10 /* Full DUPlex enable */ | ||
79 | #define CR_FBUS 0x08 /* Fast BUS enable (ISA clk > 8.33MHz) */ | ||
80 | #define CR_EN_16 0x04 /* ENable 16 bit memory accesses */ | ||
81 | #define CR_LED 0x02 /* LED (1-> turn on) */ | ||
82 | |||
83 | /* | ||
84 | ** Interrupt Control Register bit definitions (EWRK3_ICR) | ||
85 | */ | ||
86 | #define ICR_IE 0x80 /* Interrupt Enable */ | ||
87 | #define ICR_IS 0x60 /* Interrupt Selected */ | ||
88 | #define ICR_TNEM 0x08 /* TNE Mask (0->mask) */ | ||
89 | #define ICR_RNEM 0x04 /* RNE Mask (0->mask) */ | ||
90 | #define ICR_TXDM 0x02 /* TXD Mask (0->mask) */ | ||
91 | #define ICR_RXDM 0x01 /* RXD Mask (0->mask) */ | ||
92 | |||
93 | /* | ||
94 | ** Transmit Status Register bit definitions (EWRK3_TSR) | ||
95 | */ | ||
96 | #define TSR_NCL 0x80 /* No Carrier Loopback */ | ||
97 | #define TSR_ID 0x40 /* Initially Deferred */ | ||
98 | #define TSR_LCL 0x20 /* Late CoLlision */ | ||
99 | #define TSR_ECL 0x10 /* Excessive CoLlisions */ | ||
100 | #define TSR_RCNTR 0x0f /* Retries CouNTeR */ | ||
101 | |||
102 | /* | ||
103 | ** I/O Page Register bit definitions (EWRK3_IOPR) | ||
104 | */ | ||
105 | #define EEPROM_INIT 0xc0 /* EEPROM INIT command */ | ||
106 | #define EEPROM_WR_EN 0xc8 /* EEPROM WRITE ENABLE command */ | ||
107 | #define EEPROM_WR 0xd0 /* EEPROM WRITE command */ | ||
108 | #define EEPROM_WR_DIS 0xd8 /* EEPROM WRITE DISABLE command */ | ||
109 | #define EEPROM_RD 0xe0 /* EEPROM READ command */ | ||
110 | |||
111 | /* | ||
112 | ** I/O Base Register bit definitions (EWRK3_IOBR) | ||
113 | */ | ||
114 | #define EISA_REGS_EN 0x20 /* Enable EISA ID and Control Registers */ | ||
115 | #define EISA_IOB 0x1f /* Compare bits for I/O Base Address */ | ||
116 | |||
117 | /* | ||
118 | ** I/O Configuration/Management Register bit definitions (EWRK3_CMR) | ||
119 | */ | ||
120 | #define CMR_RA 0x80 /* Read Ahead */ | ||
121 | #define CMR_WB 0x40 /* Write Behind */ | ||
122 | #define CMR_LINK 0x20 /* 0->TP */ | ||
123 | #define CMR_POLARITY 0x10 /* Informational */ | ||
124 | #define CMR_NO_EEPROM 0x0c /* NO_EEPROM<1:0> pin status */ | ||
125 | #define CMR_HS 0x08 /* Hard Strapped pin status (LeMAC2) */ | ||
126 | #define CMR_PNP 0x04 /* Plug 'n Play */ | ||
127 | #define CMR_DRAM 0x02 /* 0-> 1DRAM, 1-> 2 DRAM on board */ | ||
128 | #define CMR_0WS 0x01 /* Zero Wait State */ | ||
129 | |||
130 | /* | ||
131 | ** MAC Receive Status Register bit definitions | ||
132 | */ | ||
133 | |||
134 | #define R_ROK 0x80 /* Receive OK summary */ | ||
135 | #define R_IAM 0x10 /* Individual Address Match */ | ||
136 | #define R_MCM 0x08 /* MultiCast Match */ | ||
137 | #define R_DBE 0x04 /* Dribble Bit Error */ | ||
138 | #define R_CRC 0x02 /* CRC error */ | ||
139 | #define R_PLL 0x01 /* Phase Lock Lost */ | ||
140 | |||
141 | /* | ||
142 | ** MAC Transmit Control Register bit definitions | ||
143 | */ | ||
144 | |||
145 | #define TCR_SQEE 0x40 /* SQE Enable - look for heartbeat */ | ||
146 | #define TCR_SED 0x20 /* Stop when Error Detected */ | ||
147 | #define TCR_QMODE 0x10 /* Q_MODE */ | ||
148 | #define TCR_LAB 0x08 /* Less Aggressive Backoff */ | ||
149 | #define TCR_PAD 0x04 /* PAD Runt Packets */ | ||
150 | #define TCR_IFC 0x02 /* Insert Frame Check */ | ||
151 | #define TCR_ISA 0x01 /* Insert Source Address */ | ||
152 | |||
153 | /* | ||
154 | ** MAC Transmit Status Register bit definitions | ||
155 | */ | ||
156 | |||
157 | #define T_VSTS 0x80 /* Valid STatuS */ | ||
158 | #define T_CTU 0x40 /* Cut Through Used */ | ||
159 | #define T_SQE 0x20 /* Signal Quality Error */ | ||
160 | #define T_NCL 0x10 /* No Carrier Loopback */ | ||
161 | #define T_LCL 0x08 /* Late Collision */ | ||
162 | #define T_ID 0x04 /* Initially Deferred */ | ||
163 | #define T_COLL 0x03 /* COLLision status */ | ||
164 | #define T_XCOLL 0x03 /* Excessive Collisions */ | ||
165 | #define T_MCOLL 0x02 /* Multiple Collisions */ | ||
166 | #define T_OCOLL 0x01 /* One Collision */ | ||
167 | #define T_NOCOLL 0x00 /* No Collisions */ | ||
168 | #define T_XUR 0x03 /* Excessive Underruns */ | ||
169 | #define T_TXE 0x7f /* TX Errors */ | ||
170 | |||
171 | /* | ||
172 | ** EISA Configuration Register bit definitions | ||
173 | */ | ||
174 | |||
175 | #define EISA_ID iobase + 0x0c80 /* EISA ID Registers */ | ||
176 | #define EISA_ID0 iobase + 0x0c80 /* EISA ID Register 0 */ | ||
177 | #define EISA_ID1 iobase + 0x0c81 /* EISA ID Register 1 */ | ||
178 | #define EISA_ID2 iobase + 0x0c82 /* EISA ID Register 2 */ | ||
179 | #define EISA_ID3 iobase + 0x0c83 /* EISA ID Register 3 */ | ||
180 | #define EISA_CR iobase + 0x0c84 /* EISA Control Register */ | ||
181 | |||
182 | /* | ||
183 | ** EEPROM BYTES | ||
184 | */ | ||
185 | #define EEPROM_MEMB 0x00 | ||
186 | #define EEPROM_IOB 0x01 | ||
187 | #define EEPROM_EISA_ID0 0x02 | ||
188 | #define EEPROM_EISA_ID1 0x03 | ||
189 | #define EEPROM_EISA_ID2 0x04 | ||
190 | #define EEPROM_EISA_ID3 0x05 | ||
191 | #define EEPROM_MISC0 0x06 | ||
192 | #define EEPROM_MISC1 0x07 | ||
193 | #define EEPROM_PNAME7 0x08 | ||
194 | #define EEPROM_PNAME6 0x09 | ||
195 | #define EEPROM_PNAME5 0x0a | ||
196 | #define EEPROM_PNAME4 0x0b | ||
197 | #define EEPROM_PNAME3 0x0c | ||
198 | #define EEPROM_PNAME2 0x0d | ||
199 | #define EEPROM_PNAME1 0x0e | ||
200 | #define EEPROM_PNAME0 0x0f | ||
201 | #define EEPROM_SWFLAGS 0x10 | ||
202 | #define EEPROM_HWCAT 0x11 | ||
203 | #define EEPROM_NETMAN2 0x12 | ||
204 | #define EEPROM_REVLVL 0x13 | ||
205 | #define EEPROM_NETMAN0 0x14 | ||
206 | #define EEPROM_NETMAN1 0x15 | ||
207 | #define EEPROM_CHIPVER 0x16 | ||
208 | #define EEPROM_SETUP 0x17 | ||
209 | #define EEPROM_PADDR0 0x18 | ||
210 | #define EEPROM_PADDR1 0x19 | ||
211 | #define EEPROM_PADDR2 0x1a | ||
212 | #define EEPROM_PADDR3 0x1b | ||
213 | #define EEPROM_PADDR4 0x1c | ||
214 | #define EEPROM_PADDR5 0x1d | ||
215 | #define EEPROM_PA_CRC 0x1e | ||
216 | #define EEPROM_CHKSUM 0x1f | ||
217 | |||
218 | /* | ||
219 | ** EEPROM bytes for checksumming | ||
220 | */ | ||
221 | #define EEPROM_MAX 32 /* bytes */ | ||
222 | |||
223 | /* | ||
224 | ** EEPROM MISCELLANEOUS FLAGS | ||
225 | */ | ||
226 | #define RBE_SHADOW 0x0100 /* Remote Boot Enable Shadow */ | ||
227 | #define READ_AHEAD 0x0080 /* Read Ahead feature */ | ||
228 | #define IRQ_SEL2 0x0070 /* IRQ line selection (LeMAC2) */ | ||
229 | #define IRQ_SEL 0x0060 /* IRQ line selection */ | ||
230 | #define FAST_BUS 0x0008 /* ISA Bus speeds > 8.33MHz */ | ||
231 | #define ENA_16 0x0004 /* Enables 16 bit memory transfers */ | ||
232 | #define WRITE_BEHIND 0x0002 /* Write Behind feature */ | ||
233 | #define _0WS_ENA 0x0001 /* Zero Wait State Enable */ | ||
234 | |||
235 | /* | ||
236 | ** EEPROM NETWORK MANAGEMENT FLAGS | ||
237 | */ | ||
238 | #define NETMAN_POL 0x04 /* Polarity defeat */ | ||
239 | #define NETMAN_LINK 0x02 /* Link defeat */ | ||
240 | #define NETMAN_CCE 0x01 /* Custom Counters Enable */ | ||
241 | |||
242 | /* | ||
243 | ** EEPROM SW FLAGS | ||
244 | */ | ||
245 | #define SW_SQE 0x10 /* Signal Quality Error */ | ||
246 | #define SW_LAB 0x08 /* Less Aggressive Backoff */ | ||
247 | #define SW_INIT 0x04 /* Initialized */ | ||
248 | #define SW_TIMEOUT 0x02 /* 0:2.5 mins, 1: 30 secs */ | ||
249 | #define SW_REMOTE 0x01 /* Remote Boot Enable -> 1 */ | ||
250 | |||
251 | /* | ||
252 | ** EEPROM SETUP FLAGS | ||
253 | */ | ||
254 | #define SETUP_APD 0x80 /* AutoPort Disable */ | ||
255 | #define SETUP_PS 0x40 /* Port Select */ | ||
256 | #define SETUP_MP 0x20 /* MultiPort */ | ||
257 | #define SETUP_1TP 0x10 /* 1 port, TP */ | ||
258 | #define SETUP_1COAX 0x00 /* 1 port, Coax */ | ||
259 | #define SETUP_DRAM 0x02 /* Number of DRAMS on board */ | ||
260 | |||
261 | /* | ||
262 | ** EEPROM MANAGEMENT FLAGS | ||
263 | */ | ||
264 | #define MGMT_CCE 0x01 /* Custom Counters Enable */ | ||
265 | |||
266 | /* | ||
267 | ** EEPROM VERSIONS | ||
268 | */ | ||
269 | #define LeMAC 0x11 | ||
270 | #define LeMAC2 0x12 | ||
271 | |||
272 | /* | ||
273 | ** Miscellaneous | ||
274 | */ | ||
275 | |||
276 | #define EEPROM_WAIT_TIME 1000 /* Number of microseconds */ | ||
277 | #define EISA_EN 0x0001 /* Enable EISA bus buffers */ | ||
278 | |||
279 | #define HASH_TABLE_LEN 512 /* Bits */ | ||
280 | |||
281 | #define XCT 0x80 /* Transmit Cut Through */ | ||
282 | #define PRELOAD 16 /* 4 long words */ | ||
283 | |||
284 | #define MASK_INTERRUPTS 1 | ||
285 | #define UNMASK_INTERRUPTS 0 | ||
286 | |||
287 | #define EEPROM_OFFSET(a) ((u_short)((u_long)(a))) | ||
288 | |||
289 | /* | ||
290 | ** Include the IOCTL stuff | ||
291 | */ | ||
292 | #include <linux/sockios.h> | ||
293 | |||
294 | #define EWRK3IOCTL SIOCDEVPRIVATE | ||
295 | |||
296 | struct ewrk3_ioctl { | ||
297 | unsigned short cmd; /* Command to run */ | ||
298 | unsigned short len; /* Length of the data buffer */ | ||
299 | unsigned char __user *data; /* Pointer to the data buffer */ | ||
300 | }; | ||
301 | |||
302 | /* | ||
303 | ** Recognised commands for the driver | ||
304 | */ | ||
305 | #define EWRK3_GET_HWADDR 0x01 /* Get the hardware address */ | ||
306 | #define EWRK3_SET_HWADDR 0x02 /* Set the hardware address */ | ||
307 | #define EWRK3_SET_PROM 0x03 /* Set Promiscuous Mode */ | ||
308 | #define EWRK3_CLR_PROM 0x04 /* Clear Promiscuous Mode */ | ||
309 | #define EWRK3_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ | ||
310 | #define EWRK3_GET_MCA 0x06 /* Get the multicast address table */ | ||
311 | #define EWRK3_SET_MCA 0x07 /* Set a multicast address */ | ||
312 | #define EWRK3_CLR_MCA 0x08 /* Clear all multicast addresses */ | ||
313 | #define EWRK3_MCA_EN 0x09 /* Enable multicast addressing */ | ||
314 | #define EWRK3_GET_STATS 0x0a /* Get the driver statistics */ | ||
315 | #define EWRK3_CLR_STATS 0x0b /* Zero out the driver statistics */ | ||
316 | #define EWRK3_GET_CSR 0x0c /* Get the CSR Register contents */ | ||
317 | #define EWRK3_SET_CSR 0x0d /* Set the CSR Register contents */ | ||
318 | #define EWRK3_GET_EEPROM 0x0e /* Get the EEPROM contents */ | ||
319 | #define EWRK3_SET_EEPROM 0x0f /* Set the EEPROM contents */ | ||
320 | #define EWRK3_GET_CMR 0x10 /* Get the CMR Register contents */ | ||
321 | #define EWRK3_CLR_TX_CUT_THRU 0x11 /* Clear the TX cut through mode */ | ||
322 | #define EWRK3_SET_TX_CUT_THRU 0x12 /* Set the TX cut through mode */ | ||
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c new file mode 100644 index 000000000000..092c3faa882a --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/21142.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/21142.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | |||
13 | DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller | ||
14 | Hardware Reference Manual" is currently available at : | ||
15 | http://developer.intel.com/design/network/manuals/278074.htm | ||
16 | |||
17 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
18 | */ | ||
19 | |||
20 | #include <linux/delay.h> | ||
21 | #include "tulip.h" | ||
22 | |||
23 | |||
24 | static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, }; | ||
25 | u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, }; | ||
26 | static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; | ||
27 | |||
28 | |||
29 | /* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list | ||
30 | of available transceivers. */ | ||
31 | void t21142_media_task(struct work_struct *work) | ||
32 | { | ||
33 | struct tulip_private *tp = | ||
34 | container_of(work, struct tulip_private, media_work); | ||
35 | struct net_device *dev = tp->dev; | ||
36 | void __iomem *ioaddr = tp->base_addr; | ||
37 | int csr12 = ioread32(ioaddr + CSR12); | ||
38 | int next_tick = 60*HZ; | ||
39 | int new_csr6 = 0; | ||
40 | int csr14 = ioread32(ioaddr + CSR14); | ||
41 | |||
42 | /* CSR12[LS10,LS100] are not reliable during autonegotiation */ | ||
43 | if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) | ||
44 | csr12 |= 6; | ||
45 | if (tulip_debug > 2) | ||
46 | dev_info(&dev->dev, "21143 negotiation status %08x, %s\n", | ||
47 | csr12, medianame[dev->if_port]); | ||
48 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
49 | if (tulip_check_duplex(dev) < 0) { | ||
50 | netif_carrier_off(dev); | ||
51 | next_tick = 3*HZ; | ||
52 | } else { | ||
53 | netif_carrier_on(dev); | ||
54 | next_tick = 60*HZ; | ||
55 | } | ||
56 | } else if (tp->nwayset) { | ||
57 | /* Don't screw up a negotiated session! */ | ||
58 | if (tulip_debug > 1) | ||
59 | dev_info(&dev->dev, | ||
60 | "Using NWay-set %s media, csr12 %08x\n", | ||
61 | medianame[dev->if_port], csr12); | ||
62 | } else if (tp->medialock) { | ||
63 | ; | ||
64 | } else if (dev->if_port == 3) { | ||
65 | if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */ | ||
66 | if (tulip_debug > 1) | ||
67 | dev_info(&dev->dev, | ||
68 | "No 21143 100baseTx link beat, %08x, trying NWay\n", | ||
69 | csr12); | ||
70 | t21142_start_nway(dev); | ||
71 | next_tick = 3*HZ; | ||
72 | } | ||
73 | } else if ((csr12 & 0x7000) != 0x5000) { | ||
74 | /* Negotiation failed. Search media types. */ | ||
75 | if (tulip_debug > 1) | ||
76 | dev_info(&dev->dev, | ||
77 | "21143 negotiation failed, status %08x\n", | ||
78 | csr12); | ||
79 | if (!(csr12 & 4)) { /* 10mbps link beat good. */ | ||
80 | new_csr6 = 0x82420000; | ||
81 | dev->if_port = 0; | ||
82 | iowrite32(0, ioaddr + CSR13); | ||
83 | iowrite32(0x0003FFFF, ioaddr + CSR14); | ||
84 | iowrite16(t21142_csr15[dev->if_port], ioaddr + CSR15); | ||
85 | iowrite32(t21142_csr13[dev->if_port], ioaddr + CSR13); | ||
86 | } else { | ||
87 | /* Select 100mbps port to check for link beat. */ | ||
88 | new_csr6 = 0x83860000; | ||
89 | dev->if_port = 3; | ||
90 | iowrite32(0, ioaddr + CSR13); | ||
91 | iowrite32(0x0003FFFF, ioaddr + CSR14); | ||
92 | iowrite16(8, ioaddr + CSR15); | ||
93 | iowrite32(1, ioaddr + CSR13); | ||
94 | } | ||
95 | if (tulip_debug > 1) | ||
96 | dev_info(&dev->dev, "Testing new 21143 media %s\n", | ||
97 | medianame[dev->if_port]); | ||
98 | if (new_csr6 != (tp->csr6 & ~0x00D5)) { | ||
99 | tp->csr6 &= 0x00D5; | ||
100 | tp->csr6 |= new_csr6; | ||
101 | iowrite32(0x0301, ioaddr + CSR12); | ||
102 | tulip_restart_rxtx(tp); | ||
103 | } | ||
104 | next_tick = 3*HZ; | ||
105 | } | ||
106 | |||
107 | /* mod_timer synchronizes us with potential add_timer calls | ||
108 | * from interrupts. | ||
109 | */ | ||
110 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
111 | } | ||
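Both this task and t21142_lnk_change() below make the same two tests on the SIA registers: while CSR14 bit 7 (autonegotiation) is set and CSR12 bits 14:12 have not reached 0x5000, the LS10/LS100 link bits are forced to "no link"; 0x5000 is the value the driver treats as "NWay finished". Two trivial helpers, added here only for readability and not part of the driver, would spell that out:

	/* Hypothetical readability helpers for the CSR12/CSR14 tests above. */
	static inline int t21142_nway_enabled(int csr14)
	{
		return csr14 & 0x80;			/* autonegotiation enabled */
	}

	static inline int t21142_nway_done(int csr12)
	{
		return (csr12 & 0x7000) == 0x5000;	/* negotiation complete */
	}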
112 | |||
113 | |||
114 | void t21142_start_nway(struct net_device *dev) | ||
115 | { | ||
116 | struct tulip_private *tp = netdev_priv(dev); | ||
117 | void __iomem *ioaddr = tp->base_addr; | ||
118 | int csr14 = ((tp->sym_advertise & 0x0780) << 9) | | ||
119 | ((tp->sym_advertise & 0x0020) << 1) | 0xffbf; | ||
120 | |||
121 | dev->if_port = 0; | ||
122 | tp->nway = tp->mediasense = 1; | ||
123 | tp->nwayset = tp->lpar = 0; | ||
124 | if (tulip_debug > 1) | ||
125 | netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n", | ||
126 | csr14); | ||
127 | iowrite32(0x0001, ioaddr + CSR13); | ||
128 | udelay(100); | ||
129 | iowrite32(csr14, ioaddr + CSR14); | ||
130 | tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0); | ||
131 | iowrite32(tp->csr6, ioaddr + CSR6); | ||
132 | if (tp->mtable && tp->mtable->csr15dir) { | ||
133 | iowrite32(tp->mtable->csr15dir, ioaddr + CSR15); | ||
134 | iowrite32(tp->mtable->csr15val, ioaddr + CSR15); | ||
135 | } else | ||
136 | iowrite16(0x0008, ioaddr + CSR15); | ||
137 | iowrite32(0x1301, ioaddr + CSR12); /* Trigger NWAY. */ | ||
138 | } | ||
139 | |||
140 | |||
141 | |||
142 | void t21142_lnk_change(struct net_device *dev, int csr5) | ||
143 | { | ||
144 | struct tulip_private *tp = netdev_priv(dev); | ||
145 | void __iomem *ioaddr = tp->base_addr; | ||
146 | int csr12 = ioread32(ioaddr + CSR12); | ||
147 | int csr14 = ioread32(ioaddr + CSR14); | ||
148 | |||
149 | /* CSR12[LS10,LS100] are not reliable during autonegotiation */ | ||
150 | if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000) | ||
151 | csr12 |= 6; | ||
152 | if (tulip_debug > 1) | ||
153 | dev_info(&dev->dev, | ||
154 | "21143 link status interrupt %08x, CSR5 %x, %08x\n", | ||
155 | csr12, csr5, csr14); | ||
156 | |||
157 | /* If NWay finished and we have a negotiated partner capability. */ | ||
158 | if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) { | ||
159 | int setup_done = 0; | ||
160 | int negotiated = tp->sym_advertise & (csr12 >> 16); | ||
161 | tp->lpar = csr12 >> 16; | ||
162 | tp->nwayset = 1; | ||
163 | /* If partner cannot negotiate, it is 10Mbps Half Duplex */ | ||
164 | if (!(csr12 & 0x8000)) dev->if_port = 0; | ||
165 | else if (negotiated & 0x0100) dev->if_port = 5; | ||
166 | else if (negotiated & 0x0080) dev->if_port = 3; | ||
167 | else if (negotiated & 0x0040) dev->if_port = 4; | ||
168 | else if (negotiated & 0x0020) dev->if_port = 0; | ||
169 | else { | ||
170 | tp->nwayset = 0; | ||
171 | if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) | ||
172 | dev->if_port = 3; | ||
173 | } | ||
174 | tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0; | ||
175 | |||
176 | if (tulip_debug > 1) { | ||
177 | if (tp->nwayset) | ||
178 | dev_info(&dev->dev, | ||
179 | "Switching to %s based on link negotiation %04x & %04x = %04x\n", | ||
180 | medianame[dev->if_port], | ||
181 | tp->sym_advertise, tp->lpar, | ||
182 | negotiated); | ||
183 | else | ||
184 | dev_info(&dev->dev, | ||
185 | "Autonegotiation failed, using %s, link beat status %04x\n", | ||
186 | medianame[dev->if_port], csr12); | ||
187 | } | ||
188 | |||
189 | if (tp->mtable) { | ||
190 | int i; | ||
191 | for (i = 0; i < tp->mtable->leafcount; i++) | ||
192 | if (tp->mtable->mleaf[i].media == dev->if_port) { | ||
193 | int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65))); | ||
194 | tp->cur_index = i; | ||
195 | tulip_select_media(dev, startup); | ||
196 | setup_done = 1; | ||
197 | break; | ||
198 | } | ||
199 | } | ||
200 | if ( ! setup_done) { | ||
201 | tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff); | ||
202 | if (tp->full_duplex) | ||
203 | tp->csr6 |= 0x0200; | ||
204 | iowrite32(1, ioaddr + CSR13); | ||
205 | } | ||
206 | #if 0 /* Restart shouldn't be needed. */ | ||
207 | iowrite32(tp->csr6 | RxOn, ioaddr + CSR6); | ||
208 | if (tulip_debug > 2) | ||
209 | netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n", | ||
210 | ioread32(ioaddr + CSR5)); | ||
211 | #endif | ||
212 | tulip_start_rxtx(tp); | ||
213 | if (tulip_debug > 2) | ||
214 | netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n", | ||
215 | tp->csr6, ioread32(ioaddr + CSR6), | ||
216 | ioread32(ioaddr + CSR12)); | ||
217 | } else if ((tp->nwayset && (csr5 & 0x08000000) && | ||
218 | (dev->if_port == 3 || dev->if_port == 5) && | ||
219 | (csr12 & 2) == 2) || | ||
220 | (tp->nway && (csr5 & (TPLnkFail)))) { | ||
221 | /* Link blew? Maybe restart NWay. */ | ||
222 | del_timer_sync(&tp->timer); | ||
223 | t21142_start_nway(dev); | ||
224 | tp->timer.expires = RUN_AT(3*HZ); | ||
225 | add_timer(&tp->timer); | ||
226 | } else if (dev->if_port == 3 || dev->if_port == 5) { | ||
227 | if (tulip_debug > 1) | ||
228 | dev_info(&dev->dev, "21143 %s link beat %s\n", | ||
229 | medianame[dev->if_port], | ||
230 | (csr12 & 2) ? "failed" : "good"); | ||
231 | if ((csr12 & 2) && ! tp->medialock) { | ||
232 | del_timer_sync(&tp->timer); | ||
233 | t21142_start_nway(dev); | ||
234 | tp->timer.expires = RUN_AT(3*HZ); | ||
235 | add_timer(&tp->timer); | ||
236 | } else if (dev->if_port == 5) | ||
237 | iowrite32(csr14 & ~0x080, ioaddr + CSR14); | ||
238 | } else if (dev->if_port == 0 || dev->if_port == 4) { | ||
239 | if ((csr12 & 4) == 0) | ||
240 | dev_info(&dev->dev, "21143 10baseT link beat good\n"); | ||
241 | } else if (!(csr12 & 4)) { /* 10mbps link beat good. */ | ||
242 | if (tulip_debug) | ||
243 | dev_info(&dev->dev, "21143 10mbps sensed media\n"); | ||
244 | dev->if_port = 0; | ||
245 | } else if (tp->nwayset) { | ||
246 | if (tulip_debug) | ||
247 | dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n", | ||
248 | medianame[dev->if_port], tp->csr6); | ||
249 | } else { /* 100mbps link beat good. */ | ||
250 | if (tulip_debug) | ||
251 | dev_info(&dev->dev, "21143 100baseTx sensed media\n"); | ||
252 | dev->if_port = 3; | ||
253 | tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff); | ||
254 | iowrite32(0x0003FF7F, ioaddr + CSR14); | ||
255 | iowrite32(0x0301, ioaddr + CSR12); | ||
256 | tulip_restart_rxtx(tp); | ||
257 | } | ||
258 | } | ||
259 | |||
260 | |||
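The priority chain in t21142_lnk_change() above takes the partner ability word from CSR12 bits 31:16, ANDs it with our advertisement, and prefers full duplex over half and 100 Mb/s over 10. Restated as a hypothetical helper (the bit values are the standard 802.3 link code word bits; the returned numbers are the if_port indices the driver itself uses):

	static int t21142_pick_if_port(int csr12, int sym_advertise)
	{
		int negotiated = sym_advertise & (csr12 >> 16);

		if (!(csr12 & 0x8000))	/* partner cannot negotiate: 10Mbps half duplex */
			return 0;
		if (negotiated & 0x0100)
			return 5;	/* 100baseTx full duplex */
		if (negotiated & 0x0080)
			return 3;	/* 100baseTx */
		if (negotiated & 0x0040)
			return 4;	/* 10baseT full duplex */
		if (negotiated & 0x0020)
			return 0;	/* 10baseT */
		return -1;		/* nothing in common; caller falls back */
	}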
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig new file mode 100644 index 000000000000..f6af772b12c9 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/Kconfig | |||
@@ -0,0 +1,171 @@ | |||
1 | # | ||
2 | # Tulip family network device configuration | ||
3 | # | ||
4 | |||
5 | config NET_TULIP | ||
6 | bool "DEC - Tulip devices" | ||
7 | depends on (PCI || EISA || CARDBUS) | ||
8 | ---help--- | ||
9 | This selects the "Tulip" family of EISA/PCI network cards. | ||
10 | |||
11 | if NET_TULIP | ||
12 | |||
13 | config DE2104X | ||
14 | tristate "Early DECchip Tulip (dc2104x) PCI support" | ||
15 | depends on PCI | ||
16 | select CRC32 | ||
17 | ---help--- | ||
18 | This driver is developed for the SMC EtherPower series Ethernet | ||
19 | cards and also works with cards based on the DECchip | ||
20 | 21040 (Tulip series) chips. Some LinkSys PCI cards are | ||
21 | of this type. (If your card is NOT SMC EtherPower 10/100 PCI | ||
22 | (smc9332dst), you can also try the driver for "Generic DECchip" | ||
23 | cards, below. However, most people with a network card of this type | ||
24 | will say Y here.) Do read the Ethernet-HOWTO, available from | ||
25 | <http://www.tldp.org/docs.html#howto>. | ||
26 | |||
27 | To compile this driver as a module, choose M here. The module will | ||
28 | be called de2104x. | ||
29 | |||
30 | config DE2104X_DSL | ||
31 | int "Descriptor Skip Length in 32 bit longwords" | ||
32 | depends on DE2104X | ||
33 | range 0 31 | ||
34 | default 0 | ||
35 | ---help--- | ||
36 | Setting this value allows the ring buffer descriptors to be aligned | ||
37 | on their own cache lines. A value of 4 corresponds to the typical | ||
38 | 32 byte line (the descriptor is 16 bytes). This is necessary on | ||
39 | systems that lack cache coherence, for example the PowerMac 5500; | ||
40 | otherwise 0 is safe. The default is 0 and the range is 0 to 31. | ||
41 | |||
42 | config TULIP | ||
43 | tristate "DECchip Tulip (dc2114x) PCI support" | ||
44 | depends on PCI | ||
45 | select CRC32 | ||
46 | ---help--- | ||
47 | This driver is developed for the SMC EtherPower series Ethernet | ||
48 | cards and also works with cards based on the DECchip | ||
49 | 21140 (Tulip series) chips. Some LinkSys PCI cards are | ||
50 | of this type. (If your card is NOT SMC EtherPower 10/100 PCI | ||
51 | (smc9332dst), you can also try the driver for "Generic DECchip" | ||
52 | cards, below. However, most people with a network card of this type | ||
53 | will say Y here.) Do read the Ethernet-HOWTO, available from | ||
54 | <http://www.tldp.org/docs.html#howto>. | ||
55 | |||
56 | To compile this driver as a module, choose M here. The module will | ||
57 | be called tulip. | ||
58 | |||
59 | config TULIP_MWI | ||
60 | bool "New bus configuration (EXPERIMENTAL)" | ||
61 | depends on TULIP && EXPERIMENTAL | ||
62 | ---help--- | ||
63 | This configures your Tulip card specifically for the card and | ||
64 | system cache line size type you are using. | ||
65 | |||
66 | This is experimental code, not yet tested on many boards. | ||
67 | |||
68 | If unsure, say N. | ||
69 | |||
70 | config TULIP_MMIO | ||
71 | bool "Use PCI shared mem for NIC registers" | ||
72 | depends on TULIP | ||
73 | ---help--- | ||
74 | Use PCI shared memory for the NIC registers, rather than going through | ||
75 | the Tulip's PIO (programmed I/O ports). Faster, but could produce | ||
76 | obscure bugs if your mainboard has memory controller timing issues. | ||
77 | If in doubt, say N. | ||
78 | |||
79 | config TULIP_NAPI | ||
80 | bool "Use RX polling (NAPI)" | ||
81 | depends on TULIP | ||
82 | ---help--- | ||
83 | NAPI is a new driver API designed to reduce CPU and interrupt load | ||
84 | when the driver is receiving lots of packets from the card. It is | ||
85 | still somewhat experimental and thus not yet enabled by default. | ||
86 | |||
87 | If your estimated Rx load is 10kpps or more, or if the card will be | ||
88 | deployed on potentially unfriendly networks (e.g. in a firewall), | ||
89 | then say Y here. | ||
90 | |||
91 | If in doubt, say N. | ||
92 | |||
93 | config TULIP_NAPI_HW_MITIGATION | ||
94 | bool "Use Interrupt Mitigation" | ||
95 | depends on TULIP_NAPI | ||
96 | ---help--- | ||
97 | Use HW to reduce RX interrupts. Not strictly necessary since NAPI | ||
98 | reduces RX interrupts by itself. Interrupt mitigation reduces RX | ||
99 | interrupts even at low levels of traffic at the cost of a small | ||
100 | latency. | ||
101 | |||
102 | If in doubt, say Y. | ||
103 | |||
104 | config TULIP_DM910X | ||
105 | def_bool y | ||
106 | depends on TULIP && SPARC | ||
107 | |||
108 | config DE4X5 | ||
109 | tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" | ||
110 | depends on (PCI || EISA) | ||
111 | select CRC32 | ||
112 | ---help--- | ||
113 | This is support for the DIGITAL series of PCI/EISA Ethernet cards. | ||
114 | These include the DE425, DE434, DE435, DE450 and DE500 models. If | ||
115 | you have a network card of this type, say Y and read the | ||
116 | Ethernet-HOWTO, available from | ||
117 | <http://www.tldp.org/docs.html#howto>. More specific | ||
118 | information is contained in | ||
119 | <file:Documentation/networking/de4x5.txt>. | ||
120 | |||
121 | To compile this driver as a module, choose M here. The module will | ||
122 | be called de4x5. | ||
123 | |||
124 | config WINBOND_840 | ||
125 | tristate "Winbond W89c840 Ethernet support" | ||
126 | depends on PCI | ||
127 | select CRC32 | ||
128 | select MII | ||
129 | ---help--- | ||
130 | This driver is for the Winbond W89c840 chip. It also works with | ||
131 | the TX9882 chip on the Compex RL100-ATX board. | ||
132 | More specific information and updates are available from | ||
133 | <http://www.scyld.com/network/drivers.html>. | ||
134 | |||
135 | config DM9102 | ||
136 | tristate "Davicom DM910x/DM980x support" | ||
137 | depends on PCI | ||
138 | select CRC32 | ||
139 | ---help--- | ||
140 | This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from | ||
141 | Davicom (<http://www.davicom.com.tw/>). If you have such a network | ||
142 | (Ethernet) card, say Y. Some information is contained in the file | ||
143 | <file:Documentation/networking/dmfe.txt>. | ||
144 | |||
145 | To compile this driver as a module, choose M here. The module will | ||
146 | be called dmfe. | ||
147 | |||
148 | config ULI526X | ||
149 | tristate "ULi M526x controller support" | ||
150 | depends on PCI | ||
151 | select CRC32 | ||
152 | ---help--- | ||
153 | This driver is for ULi M5261/M5263 10/100M Ethernet Controller | ||
154 | (<http://www.nvidia.com/page/uli_drivers.html>). | ||
155 | |||
156 | To compile this driver as a module, choose M here. The module will | ||
157 | be called uli526x. | ||
158 | |||
159 | config PCMCIA_XIRCOM | ||
160 | tristate "Xircom CardBus support" | ||
161 | depends on CARDBUS | ||
162 | ---help--- | ||
163 | This driver is for the Digital "Tulip" Ethernet CardBus adapters. | ||
164 | It should work with most DEC 21*4*-based chips/ethercards, as well | ||
165 | as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and | ||
166 | ASIX. | ||
167 | |||
168 | To compile this driver as a module, choose M here. The module will | ||
169 | be called xircom_cb. If unsure, say N. | ||
170 | |||
171 | endif # NET_TULIP | ||
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile new file mode 100644 index 000000000000..5e8be38b45bb --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/Makefile | |||
@@ -0,0 +1,19 @@ | |||
1 | # | ||
2 | # Makefile for the Linux "Tulip" family network device drivers. | ||
3 | # | ||
4 | |||
5 | ccflags-$(CONFIG_NET_TULIP) := -DDEBUG | ||
6 | |||
7 | obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o | ||
8 | obj-$(CONFIG_DM9102) += dmfe.o | ||
9 | obj-$(CONFIG_WINBOND_840) += winbond-840.o | ||
10 | obj-$(CONFIG_DE2104X) += de2104x.o | ||
11 | obj-$(CONFIG_TULIP) += tulip.o | ||
12 | obj-$(CONFIG_DE4X5) += de4x5.o | ||
13 | obj-$(CONFIG_ULI526X) += uli526x.o | ||
14 | |||
15 | # Declare multi-part drivers. | ||
16 | |||
17 | tulip-objs := eeprom.o interrupt.o media.o \ | ||
18 | timer.o tulip_core.o \ | ||
19 | 21142.o pnic.o pnic2.o | ||
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c new file mode 100644 index 000000000000..ce90efc6ba3c --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de2104x.c | |||
@@ -0,0 +1,2215 @@ | |||
1 | /* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */ | ||
2 | /* | ||
3 | Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com> | ||
4 | |||
5 | Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c] | ||
6 | Written/copyright 1994-2001 by Donald Becker. [tulip.c] | ||
7 | |||
8 | This software may be used and distributed according to the terms of | ||
9 | the GNU General Public License (GPL), incorporated herein by reference. | ||
10 | Drivers based on or derived from this code fall under the GPL and must | ||
11 | retain the authorship, copyright and license notice. This file is not | ||
12 | a complete program and may only be used when the entire operating | ||
13 | system is licensed under the GPL. | ||
14 | |||
15 | See the file COPYING in this distribution for more information. | ||
16 | |||
17 | TODO, in rough priority order: | ||
18 | * Support forcing media type with a module parameter, | ||
19 | like dl2k.c/sundance.c | ||
20 | * Constants (module parms?) for Rx work limit | ||
21 | * Complete reset on PciErr | ||
22 | * Jumbo frames / dev->change_mtu | ||
23 | * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error | ||
24 | * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error | ||
25 | * Implement Tx software interrupt mitigation via | ||
26 | Tx descriptor bit | ||
27 | |||
28 | */ | ||
29 | |||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
32 | #define DRV_NAME "de2104x" | ||
33 | #define DRV_VERSION "0.7" | ||
34 | #define DRV_RELDATE "Mar 17, 2004" | ||
35 | |||
36 | #include <linux/module.h> | ||
37 | #include <linux/kernel.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/etherdevice.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/interrupt.h> | ||
42 | #include <linux/pci.h> | ||
43 | #include <linux/delay.h> | ||
44 | #include <linux/ethtool.h> | ||
45 | #include <linux/compiler.h> | ||
46 | #include <linux/rtnetlink.h> | ||
47 | #include <linux/crc32.h> | ||
48 | #include <linux/slab.h> | ||
49 | |||
50 | #include <asm/io.h> | ||
51 | #include <asm/irq.h> | ||
52 | #include <asm/uaccess.h> | ||
53 | #include <asm/unaligned.h> | ||
54 | |||
55 | /* These identify the driver base version and may not be removed. */ | ||
56 | static char version[] = | ||
57 | "PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")"; | ||
58 | |||
59 | MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>"); | ||
60 | MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver"); | ||
61 | MODULE_LICENSE("GPL"); | ||
62 | MODULE_VERSION(DRV_VERSION); | ||
63 | |||
64 | static int debug = -1; | ||
65 | module_param (debug, int, 0); | ||
66 | MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number"); | ||
67 | |||
68 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | ||
69 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ | ||
70 | defined(CONFIG_SPARC) || defined(__ia64__) || \ | ||
71 | defined(__sh__) || defined(__mips__) | ||
72 | static int rx_copybreak = 1518; | ||
73 | #else | ||
74 | static int rx_copybreak = 100; | ||
75 | #endif | ||
76 | module_param (rx_copybreak, int, 0); | ||
77 | MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied"); | ||
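Both knobs are ordinary module parameters, so a hypothetical load line that raises the copy threshold and turns on verbose logging might read (values are only examples; debug is the bitmapped message-enable mask and -1 keeps the built-in default):

	modprobe de2104x rx_copybreak=1518 debug=7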
78 | |||
79 | #define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ | ||
80 | NETIF_MSG_PROBE | \ | ||
81 | NETIF_MSG_LINK | \ | ||
82 | NETIF_MSG_IFDOWN | \ | ||
83 | NETIF_MSG_IFUP | \ | ||
84 | NETIF_MSG_RX_ERR | \ | ||
85 | NETIF_MSG_TX_ERR) | ||
86 | |||
87 | /* Descriptor skip length in 32 bit longwords. */ | ||
88 | #ifndef CONFIG_DE2104X_DSL | ||
89 | #define DSL 0 | ||
90 | #else | ||
91 | #define DSL CONFIG_DE2104X_DSL | ||
92 | #endif | ||
93 | |||
94 | #define DE_RX_RING_SIZE 64 | ||
95 | #define DE_TX_RING_SIZE 64 | ||
96 | #define DE_RING_BYTES \ | ||
97 | ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ | ||
98 | (sizeof(struct de_desc) * DE_TX_RING_SIZE)) | ||
99 | #define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1)) | ||
100 | #define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1)) | ||
101 | #define TX_BUFFS_AVAIL(CP) \ | ||
102 | (((CP)->tx_tail <= (CP)->tx_head) ? \ | ||
103 | (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \ | ||
104 | (CP)->tx_tail - (CP)->tx_head - 1) | ||
105 | |||
106 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | ||
107 | #define RX_OFFSET 2 | ||
108 | |||
109 | #define DE_SETUP_SKB ((struct sk_buff *) 1) | ||
110 | #define DE_DUMMY_SKB ((struct sk_buff *) 2) | ||
111 | #define DE_SETUP_FRAME_WORDS 96 | ||
112 | #define DE_EEPROM_WORDS 256 | ||
113 | #define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16)) | ||
114 | #define DE_MAX_MEDIA 5 | ||
115 | |||
116 | #define DE_MEDIA_TP_AUTO 0 | ||
117 | #define DE_MEDIA_BNC 1 | ||
118 | #define DE_MEDIA_AUI 2 | ||
119 | #define DE_MEDIA_TP 3 | ||
120 | #define DE_MEDIA_TP_FD 4 | ||
121 | #define DE_MEDIA_INVALID DE_MAX_MEDIA | ||
122 | #define DE_MEDIA_FIRST 0 | ||
123 | #define DE_MEDIA_LAST (DE_MAX_MEDIA - 1) | ||
124 | #define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC) | ||
125 | |||
126 | #define DE_TIMER_LINK (60 * HZ) | ||
127 | #define DE_TIMER_NO_LINK (5 * HZ) | ||
128 | |||
129 | #define DE_NUM_REGS 16 | ||
130 | #define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32)) | ||
131 | #define DE_REGS_VER 1 | ||
132 | |||
133 | /* Time in jiffies before concluding the transmitter is hung. */ | ||
134 | #define TX_TIMEOUT (6*HZ) | ||
135 | |||
136 | /* This is a mysterious value that can be written to CSR11 in the 21040 (only) | ||
137 | to support a pre-NWay full-duplex signaling mechanism using short frames. | ||
138 | No one knows what it should be, but if left at its default value some | ||
139 | 10base2(!) packets trigger a full-duplex-request interrupt. */ | ||
140 | #define FULL_DUPLEX_MAGIC 0x6969 | ||
141 | |||
142 | enum { | ||
143 | /* NIC registers */ | ||
144 | BusMode = 0x00, | ||
145 | TxPoll = 0x08, | ||
146 | RxPoll = 0x10, | ||
147 | RxRingAddr = 0x18, | ||
148 | TxRingAddr = 0x20, | ||
149 | MacStatus = 0x28, | ||
150 | MacMode = 0x30, | ||
151 | IntrMask = 0x38, | ||
152 | RxMissed = 0x40, | ||
153 | ROMCmd = 0x48, | ||
154 | CSR11 = 0x58, | ||
155 | SIAStatus = 0x60, | ||
156 | CSR13 = 0x68, | ||
157 | CSR14 = 0x70, | ||
158 | CSR15 = 0x78, | ||
159 | PCIPM = 0x40, | ||
160 | |||
161 | /* BusMode bits */ | ||
162 | CmdReset = (1 << 0), | ||
163 | CacheAlign16 = 0x00008000, | ||
164 | BurstLen4 = 0x00000400, | ||
165 | DescSkipLen = (DSL << 2), | ||
166 | |||
167 | /* Rx/TxPoll bits */ | ||
168 | NormalTxPoll = (1 << 0), | ||
169 | NormalRxPoll = (1 << 0), | ||
170 | |||
171 | /* Tx/Rx descriptor status bits */ | ||
172 | DescOwn = (1 << 31), | ||
173 | RxError = (1 << 15), | ||
174 | RxErrLong = (1 << 7), | ||
175 | RxErrCRC = (1 << 1), | ||
176 | RxErrFIFO = (1 << 0), | ||
177 | RxErrRunt = (1 << 11), | ||
178 | RxErrFrame = (1 << 14), | ||
179 | RingEnd = (1 << 25), | ||
180 | FirstFrag = (1 << 29), | ||
181 | LastFrag = (1 << 30), | ||
182 | TxError = (1 << 15), | ||
183 | TxFIFOUnder = (1 << 1), | ||
184 | TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11), | ||
185 | TxMaxCol = (1 << 8), | ||
186 | TxOWC = (1 << 9), | ||
187 | TxJabber = (1 << 14), | ||
188 | SetupFrame = (1 << 27), | ||
189 | TxSwInt = (1 << 31), | ||
190 | |||
191 | /* MacStatus bits */ | ||
192 | IntrOK = (1 << 16), | ||
193 | IntrErr = (1 << 15), | ||
194 | RxIntr = (1 << 6), | ||
195 | RxEmpty = (1 << 7), | ||
196 | TxIntr = (1 << 0), | ||
197 | TxEmpty = (1 << 2), | ||
198 | PciErr = (1 << 13), | ||
199 | TxState = (1 << 22) | (1 << 21) | (1 << 20), | ||
200 | RxState = (1 << 19) | (1 << 18) | (1 << 17), | ||
201 | LinkFail = (1 << 12), | ||
202 | LinkPass = (1 << 4), | ||
203 | RxStopped = (1 << 8), | ||
204 | TxStopped = (1 << 1), | ||
205 | |||
206 | /* MacMode bits */ | ||
207 | TxEnable = (1 << 13), | ||
208 | RxEnable = (1 << 1), | ||
209 | RxTx = TxEnable | RxEnable, | ||
210 | FullDuplex = (1 << 9), | ||
211 | AcceptAllMulticast = (1 << 7), | ||
212 | AcceptAllPhys = (1 << 6), | ||
213 | BOCnt = (1 << 5), | ||
214 | MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) | | ||
215 | RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast, | ||
216 | |||
217 | /* ROMCmd bits */ | ||
218 | EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */ | ||
219 | EE_CS = 0x01, /* EEPROM chip select. */ | ||
220 | EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */ | ||
221 | EE_WRITE_0 = 0x01, | ||
222 | EE_WRITE_1 = 0x05, | ||
223 | EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */ | ||
224 | EE_ENB = (0x4800 | EE_CS), | ||
225 | |||
226 | /* The EEPROM commands include the always-set leading bit. */ | ||
227 | EE_READ_CMD = 6, | ||
228 | |||
229 | /* RxMissed bits */ | ||
230 | RxMissedOver = (1 << 16), | ||
231 | RxMissedMask = 0xffff, | ||
232 | |||
233 | /* SROM-related bits */ | ||
234 | SROMC0InfoLeaf = 27, | ||
235 | MediaBlockMask = 0x3f, | ||
236 | MediaCustomCSRs = (1 << 6), | ||
237 | |||
238 | /* PCIPM bits */ | ||
239 | PM_Sleep = (1 << 31), | ||
240 | PM_Snooze = (1 << 30), | ||
241 | PM_Mask = PM_Sleep | PM_Snooze, | ||
242 | |||
243 | /* SIAStatus bits */ | ||
244 | NWayState = (1 << 14) | (1 << 13) | (1 << 12), | ||
245 | NWayRestart = (1 << 12), | ||
246 | NonselPortActive = (1 << 9), | ||
247 | SelPortActive = (1 << 8), | ||
248 | LinkFailStatus = (1 << 2), | ||
249 | NetCxnErr = (1 << 1), | ||
250 | }; | ||
251 | |||
252 | static const u32 de_intr_mask = | ||
253 | IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty | | ||
254 | LinkPass | LinkFail | PciErr; | ||
255 | |||
256 | /* | ||
257 | * Set the programmable burst length to 4 longwords for all chips; | ||
258 | * DMA errors result without these values. Cache-align to 16 longwords. | ||
259 | */ | ||
260 | static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen; | ||
261 | |||
262 | struct de_srom_media_block { | ||
263 | u8 opts; | ||
264 | u16 csr13; | ||
265 | u16 csr14; | ||
266 | u16 csr15; | ||
267 | } __packed; | ||
268 | |||
269 | struct de_srom_info_leaf { | ||
270 | u16 default_media; | ||
271 | u8 n_blocks; | ||
272 | u8 unused; | ||
273 | } __packed; | ||
274 | |||
275 | struct de_desc { | ||
276 | __le32 opts1; | ||
277 | __le32 opts2; | ||
278 | __le32 addr1; | ||
279 | __le32 addr2; | ||
280 | #if DSL | ||
281 | __le32 skip[DSL]; | ||
282 | #endif | ||
283 | }; | ||
284 | |||
285 | struct media_info { | ||
286 | u16 type; /* DE_MEDIA_xxx */ | ||
287 | u16 csr13; | ||
288 | u16 csr14; | ||
289 | u16 csr15; | ||
290 | }; | ||
291 | |||
292 | struct ring_info { | ||
293 | struct sk_buff *skb; | ||
294 | dma_addr_t mapping; | ||
295 | }; | ||
296 | |||
297 | struct de_private { | ||
298 | unsigned tx_head; | ||
299 | unsigned tx_tail; | ||
300 | unsigned rx_tail; | ||
301 | |||
302 | void __iomem *regs; | ||
303 | struct net_device *dev; | ||
304 | spinlock_t lock; | ||
305 | |||
306 | struct de_desc *rx_ring; | ||
307 | struct de_desc *tx_ring; | ||
308 | struct ring_info tx_skb[DE_TX_RING_SIZE]; | ||
309 | struct ring_info rx_skb[DE_RX_RING_SIZE]; | ||
310 | unsigned rx_buf_sz; | ||
311 | dma_addr_t ring_dma; | ||
312 | |||
313 | u32 msg_enable; | ||
314 | |||
315 | struct net_device_stats net_stats; | ||
316 | |||
317 | struct pci_dev *pdev; | ||
318 | |||
319 | u16 setup_frame[DE_SETUP_FRAME_WORDS]; | ||
320 | |||
321 | u32 media_type; | ||
322 | u32 media_supported; | ||
323 | u32 media_advertise; | ||
324 | struct media_info media[DE_MAX_MEDIA]; | ||
325 | struct timer_list media_timer; | ||
326 | |||
327 | u8 *ee_data; | ||
328 | unsigned board_idx; | ||
329 | unsigned de21040 : 1; | ||
330 | unsigned media_lock : 1; | ||
331 | }; | ||
332 | |||
333 | |||
334 | static void de_set_rx_mode (struct net_device *dev); | ||
335 | static void de_tx (struct de_private *de); | ||
336 | static void de_clean_rings (struct de_private *de); | ||
337 | static void de_media_interrupt (struct de_private *de, u32 status); | ||
338 | static void de21040_media_timer (unsigned long data); | ||
339 | static void de21041_media_timer (unsigned long data); | ||
340 | static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media); | ||
341 | |||
342 | |||
343 | static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = { | ||
344 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, | ||
345 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
346 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, | ||
347 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, | ||
348 | { }, | ||
349 | }; | ||
350 | MODULE_DEVICE_TABLE(pci, de_pci_tbl); | ||
351 | |||
352 | static const char * const media_name[DE_MAX_MEDIA] = { | ||
353 | "10baseT auto", | ||
354 | "BNC", | ||
355 | "AUI", | ||
356 | "10baseT-HD", | ||
357 | "10baseT-FD" | ||
358 | }; | ||
359 | |||
360 | /* 21040 transceiver register settings: | ||
361 | * TP AUTO (unused), BNC (unused), AUI, TP, TP FD */ | ||
362 | static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, }; | ||
363 | static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, }; | ||
364 | static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, }; | ||
365 | |||
366 | /* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD*/ | ||
367 | static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, }; | ||
368 | static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; | ||
369 | /* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */ | ||
370 | static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, }; | ||
371 | static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, }; | ||
372 | |||
373 | |||
374 | #define dr32(reg) ioread32(de->regs + (reg)) | ||
375 | #define dw32(reg, val) iowrite32((val), de->regs + (reg)) | ||
376 | |||
377 | |||
378 | static void de_rx_err_acct (struct de_private *de, unsigned rx_tail, | ||
379 | u32 status, u32 len) | ||
380 | { | ||
381 | netif_dbg(de, rx_err, de->dev, | ||
382 | "rx err, slot %d status 0x%x len %d\n", | ||
383 | rx_tail, status, len); | ||
384 | |||
385 | if ((status & 0x38000300) != 0x0300) { | ||
386 | /* Ignore earlier buffers. */ | ||
387 | if ((status & 0xffff) != 0x7fff) { | ||
388 | netif_warn(de, rx_err, de->dev, | ||
389 | "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", | ||
390 | status); | ||
391 | de->net_stats.rx_length_errors++; | ||
392 | } | ||
393 | } else if (status & RxError) { | ||
394 | /* There was a fatal error. */ | ||
395 | de->net_stats.rx_errors++; /* end of a packet.*/ | ||
396 | if (status & 0x0890) de->net_stats.rx_length_errors++; | ||
397 | if (status & RxErrCRC) de->net_stats.rx_crc_errors++; | ||
398 | if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++; | ||
399 | } | ||
400 | } | ||
401 | |||
402 | static void de_rx (struct de_private *de) | ||
403 | { | ||
404 | unsigned rx_tail = de->rx_tail; | ||
405 | unsigned rx_work = DE_RX_RING_SIZE; | ||
406 | unsigned drop = 0; | ||
407 | int rc; | ||
408 | |||
409 | while (--rx_work) { | ||
410 | u32 status, len; | ||
411 | dma_addr_t mapping; | ||
412 | struct sk_buff *skb, *copy_skb; | ||
413 | unsigned copying_skb, buflen; | ||
414 | |||
415 | skb = de->rx_skb[rx_tail].skb; | ||
416 | BUG_ON(!skb); | ||
417 | rmb(); | ||
418 | status = le32_to_cpu(de->rx_ring[rx_tail].opts1); | ||
419 | if (status & DescOwn) | ||
420 | break; | ||
421 | |||
422 | len = ((status >> 16) & 0x7ff) - 4; | ||
423 | mapping = de->rx_skb[rx_tail].mapping; | ||
424 | |||
425 | if (unlikely(drop)) { | ||
426 | de->net_stats.rx_dropped++; | ||
427 | goto rx_next; | ||
428 | } | ||
429 | |||
430 | if (unlikely((status & 0x38008300) != 0x0300)) { | ||
431 | de_rx_err_acct(de, rx_tail, status, len); | ||
432 | goto rx_next; | ||
433 | } | ||
434 | |||
435 | copying_skb = (len <= rx_copybreak); | ||
436 | |||
437 | netif_dbg(de, rx_status, de->dev, | ||
438 | "rx slot %d status 0x%x len %d copying? %d\n", | ||
439 | rx_tail, status, len, copying_skb); | ||
440 | |||
441 | buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz; | ||
442 | copy_skb = dev_alloc_skb (buflen); | ||
443 | if (unlikely(!copy_skb)) { | ||
444 | de->net_stats.rx_dropped++; | ||
445 | drop = 1; | ||
446 | rx_work = 100; | ||
447 | goto rx_next; | ||
448 | } | ||
449 | |||
450 | if (!copying_skb) { | ||
451 | pci_unmap_single(de->pdev, mapping, | ||
452 | buflen, PCI_DMA_FROMDEVICE); | ||
453 | skb_put(skb, len); | ||
454 | |||
455 | mapping = | ||
456 | de->rx_skb[rx_tail].mapping = | ||
457 | pci_map_single(de->pdev, copy_skb->data, | ||
458 | buflen, PCI_DMA_FROMDEVICE); | ||
459 | de->rx_skb[rx_tail].skb = copy_skb; | ||
460 | } else { | ||
461 | pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | ||
462 | skb_reserve(copy_skb, RX_OFFSET); | ||
463 | skb_copy_from_linear_data(skb, skb_put(copy_skb, len), | ||
464 | len); | ||
465 | pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); | ||
466 | |||
467 | /* We'll reuse the original ring buffer. */ | ||
468 | skb = copy_skb; | ||
469 | } | ||
470 | |||
471 | skb->protocol = eth_type_trans (skb, de->dev); | ||
472 | |||
473 | de->net_stats.rx_packets++; | ||
474 | de->net_stats.rx_bytes += skb->len; | ||
475 | rc = netif_rx (skb); | ||
476 | if (rc == NET_RX_DROP) | ||
477 | drop = 1; | ||
478 | |||
479 | rx_next: | ||
480 | if (rx_tail == (DE_RX_RING_SIZE - 1)) | ||
481 | de->rx_ring[rx_tail].opts2 = | ||
482 | cpu_to_le32(RingEnd | de->rx_buf_sz); | ||
483 | else | ||
484 | de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz); | ||
485 | de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); | ||
486 | wmb(); | ||
487 | de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn); | ||
488 | rx_tail = NEXT_RX(rx_tail); | ||
489 | } | ||
490 | |||
491 | if (!rx_work) | ||
492 | netdev_warn(de->dev, "rx work limit reached\n"); | ||
493 | |||
494 | de->rx_tail = rx_tail; | ||
495 | } | ||
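de_rx() also shows the descriptor ownership handshake used throughout the driver: the body of a descriptor (opts2, addr1) is written first, a write barrier orders those stores, and only then is DescOwn handed back to the chip; the read side pairs this with the rmb() issued before opts1 is inspected. A minimal userspace sketch of the same publish/consume pattern, with C11 atomics standing in for the kernel barriers (all sketch_* names are hypothetical):

	#include <stdatomic.h>
	#include <stdint.h>

	#define SKETCH_DESC_OWN (1u << 31)

	struct sketch_desc {
		_Atomic uint32_t opts1;		/* DescOwn lives in bit 31 */
		uint32_t opts2;			/* buffer length / ring flags */
		uint32_t addr1;			/* DMA address of the buffer */
	};

	/* producer: fill the body, then publish ownership to the "chip" */
	static void sketch_give_to_chip(struct sketch_desc *d, uint32_t opts2, uint32_t addr1)
	{
		d->opts2 = opts2;
		d->addr1 = addr1;
		/* release ordering plays the role of the driver's wmb() */
		atomic_store_explicit(&d->opts1, SKETCH_DESC_OWN, memory_order_release);
	}

	/* consumer: acquire the ownership bit before trusting the body */
	static int sketch_desc_is_ours(struct sketch_desc *d)
	{
		/* acquire ordering plays the role of the driver's rmb() */
		uint32_t opts1 = atomic_load_explicit(&d->opts1, memory_order_acquire);

		return !(opts1 & SKETCH_DESC_OWN);
	}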
496 | |||
497 | static irqreturn_t de_interrupt (int irq, void *dev_instance) | ||
498 | { | ||
499 | struct net_device *dev = dev_instance; | ||
500 | struct de_private *de = netdev_priv(dev); | ||
501 | u32 status; | ||
502 | |||
503 | status = dr32(MacStatus); | ||
504 | if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF)) | ||
505 | return IRQ_NONE; | ||
506 | |||
507 | netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n", | ||
508 | status, dr32(MacMode), | ||
509 | de->rx_tail, de->tx_head, de->tx_tail); | ||
510 | |||
511 | dw32(MacStatus, status); | ||
512 | |||
513 | if (status & (RxIntr | RxEmpty)) { | ||
514 | de_rx(de); | ||
515 | if (status & RxEmpty) | ||
516 | dw32(RxPoll, NormalRxPoll); | ||
517 | } | ||
518 | |||
519 | spin_lock(&de->lock); | ||
520 | |||
521 | if (status & (TxIntr | TxEmpty)) | ||
522 | de_tx(de); | ||
523 | |||
524 | if (status & (LinkPass | LinkFail)) | ||
525 | de_media_interrupt(de, status); | ||
526 | |||
527 | spin_unlock(&de->lock); | ||
528 | |||
529 | if (status & PciErr) { | ||
530 | u16 pci_status; | ||
531 | |||
532 | pci_read_config_word(de->pdev, PCI_STATUS, &pci_status); | ||
533 | pci_write_config_word(de->pdev, PCI_STATUS, pci_status); | ||
534 | netdev_err(de->dev, | ||
535 | "PCI bus error, status=%08x, PCI status=%04x\n", | ||
536 | status, pci_status); | ||
537 | } | ||
538 | |||
539 | return IRQ_HANDLED; | ||
540 | } | ||
541 | |||
542 | static void de_tx (struct de_private *de) | ||
543 | { | ||
544 | unsigned tx_head = de->tx_head; | ||
545 | unsigned tx_tail = de->tx_tail; | ||
546 | |||
547 | while (tx_tail != tx_head) { | ||
548 | struct sk_buff *skb; | ||
549 | u32 status; | ||
550 | |||
551 | rmb(); | ||
552 | status = le32_to_cpu(de->tx_ring[tx_tail].opts1); | ||
553 | if (status & DescOwn) | ||
554 | break; | ||
555 | |||
556 | skb = de->tx_skb[tx_tail].skb; | ||
557 | BUG_ON(!skb); | ||
558 | if (unlikely(skb == DE_DUMMY_SKB)) | ||
559 | goto next; | ||
560 | |||
561 | if (unlikely(skb == DE_SETUP_SKB)) { | ||
562 | pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, | ||
563 | sizeof(de->setup_frame), PCI_DMA_TODEVICE); | ||
564 | goto next; | ||
565 | } | ||
566 | |||
567 | pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, | ||
568 | skb->len, PCI_DMA_TODEVICE); | ||
569 | |||
570 | if (status & LastFrag) { | ||
571 | if (status & TxError) { | ||
572 | netif_dbg(de, tx_err, de->dev, | ||
573 | "tx err, status 0x%x\n", | ||
574 | status); | ||
575 | de->net_stats.tx_errors++; | ||
576 | if (status & TxOWC) | ||
577 | de->net_stats.tx_window_errors++; | ||
578 | if (status & TxMaxCol) | ||
579 | de->net_stats.tx_aborted_errors++; | ||
580 | if (status & TxLinkFail) | ||
581 | de->net_stats.tx_carrier_errors++; | ||
582 | if (status & TxFIFOUnder) | ||
583 | de->net_stats.tx_fifo_errors++; | ||
584 | } else { | ||
585 | de->net_stats.tx_packets++; | ||
586 | de->net_stats.tx_bytes += skb->len; | ||
587 | netif_dbg(de, tx_done, de->dev, | ||
588 | "tx done, slot %d\n", tx_tail); | ||
589 | } | ||
590 | dev_kfree_skb_irq(skb); | ||
591 | } | ||
592 | |||
593 | next: | ||
594 | de->tx_skb[tx_tail].skb = NULL; | ||
595 | |||
596 | tx_tail = NEXT_TX(tx_tail); | ||
597 | } | ||
598 | |||
599 | de->tx_tail = tx_tail; | ||
600 | |||
601 | if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4))) | ||
602 | netif_wake_queue(de->dev); | ||
603 | } | ||
604 | |||
605 | static netdev_tx_t de_start_xmit (struct sk_buff *skb, | ||
606 | struct net_device *dev) | ||
607 | { | ||
608 | struct de_private *de = netdev_priv(dev); | ||
609 | unsigned int entry, tx_free; | ||
610 | u32 mapping, len, flags = FirstFrag | LastFrag; | ||
611 | struct de_desc *txd; | ||
612 | |||
613 | spin_lock_irq(&de->lock); | ||
614 | |||
615 | tx_free = TX_BUFFS_AVAIL(de); | ||
616 | if (tx_free == 0) { | ||
617 | netif_stop_queue(dev); | ||
618 | spin_unlock_irq(&de->lock); | ||
619 | return NETDEV_TX_BUSY; | ||
620 | } | ||
621 | tx_free--; | ||
622 | |||
623 | entry = de->tx_head; | ||
624 | |||
625 | txd = &de->tx_ring[entry]; | ||
626 | |||
627 | len = skb->len; | ||
628 | mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
629 | if (entry == (DE_TX_RING_SIZE - 1)) | ||
630 | flags |= RingEnd; | ||
631 | if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2))) | ||
632 | flags |= TxSwInt; | ||
633 | flags |= len; | ||
634 | txd->opts2 = cpu_to_le32(flags); | ||
635 | txd->addr1 = cpu_to_le32(mapping); | ||
636 | |||
637 | de->tx_skb[entry].skb = skb; | ||
638 | de->tx_skb[entry].mapping = mapping; | ||
639 | wmb(); | ||
640 | |||
641 | txd->opts1 = cpu_to_le32(DescOwn); | ||
642 | wmb(); | ||
643 | |||
644 | de->tx_head = NEXT_TX(entry); | ||
645 | netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n", | ||
646 | entry, skb->len); | ||
647 | |||
648 | if (tx_free == 0) | ||
649 | netif_stop_queue(dev); | ||
650 | |||
651 | spin_unlock_irq(&de->lock); | ||
652 | |||
653 | /* Trigger an immediate transmit demand. */ | ||
654 | dw32(TxPoll, NormalTxPoll); | ||
655 | |||
656 | return NETDEV_TX_OK; | ||
657 | } | ||
658 | |||
659 | /* Set or clear the multicast filter for this adaptor. | ||
660 | Note that we only use exclusion around actually queueing the | ||
661 | new frame, not around filling de->setup_frame. This is non-deterministic | ||
662 | when re-entered but still correct. */ | ||
663 | |||
664 | #undef set_bit_le | ||
665 | #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0) | ||
666 | |||
667 | static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) | ||
668 | { | ||
669 | struct de_private *de = netdev_priv(dev); | ||
670 | u16 hash_table[32]; | ||
671 | struct netdev_hw_addr *ha; | ||
672 | int i; | ||
673 | u16 *eaddrs; | ||
674 | |||
675 | memset(hash_table, 0, sizeof(hash_table)); | ||
676 | set_bit_le(255, hash_table); /* Broadcast entry */ | ||
677 | /* This should work on big-endian machines as well. */ | ||
678 | netdev_for_each_mc_addr(ha, dev) { | ||
679 | int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; | ||
680 | |||
681 | set_bit_le(index, hash_table); | ||
682 | } | ||
683 | |||
684 | for (i = 0; i < 32; i++) { | ||
685 | *setup_frm++ = hash_table[i]; | ||
686 | *setup_frm++ = hash_table[i]; | ||
687 | } | ||
688 | setup_frm = &de->setup_frame[13*6]; | ||
689 | |||
690 | /* Fill the final entry with our physical address. */ | ||
691 | eaddrs = (u16 *)dev->dev_addr; | ||
692 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; | ||
693 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; | ||
694 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; | ||
695 | } | ||
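The hash variant above programs the chip's 512-bit imperfect filter: the low 9 bits of the little-endian CRC-32 of each multicast address select one bit (bit 255 being the broadcast entry), and the 32 resulting 16-bit words are each written into the setup frame twice. A minimal sketch of the bit selection (hypothetical name; a little-endian host is assumed, whereas the byte-based macro above works either way):

	#include <stdint.h>

	/* crc9 is assumed to be ether_crc_le(ETH_ALEN, addr) & 0x1ff, as above */
	static void sketch_set_hash_bit(uint16_t table[32], unsigned int crc9)
	{
		crc9 &= 0x1ff;				/* 512-bit table */
		table[crc9 / 16] |= (uint16_t)(1u << (crc9 % 16));
	}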
696 | |||
697 | static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) | ||
698 | { | ||
699 | struct de_private *de = netdev_priv(dev); | ||
700 | struct netdev_hw_addr *ha; | ||
701 | u16 *eaddrs; | ||
702 | |||
703 | /* We have <= 14 addresses so we can use the wonderful | ||
704 | 16-address perfect filtering of the Tulip. */ | ||
705 | netdev_for_each_mc_addr(ha, dev) { | ||
706 | eaddrs = (u16 *) ha->addr; | ||
707 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
708 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
709 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
710 | } | ||
711 | /* Fill the unused entries with the broadcast address. */ | ||
712 | memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12); | ||
713 | setup_frm = &de->setup_frame[15*6]; | ||
714 | |||
715 | /* Fill the final entry with our physical address. */ | ||
716 | eaddrs = (u16 *)dev->dev_addr; | ||
717 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; | ||
718 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; | ||
719 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; | ||
720 | } | ||
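Both setup-frame builders rely on the same Tulip layout: the 192-byte frame is read by the chip as 16 perfect-filter slots of three 32-bit longwords, and only the low 16 bits of each longword are significant, which is why every halfword above is stored twice. A minimal standalone sketch of packing one slot (pack_filter_slot() is a hypothetical name, not driver code):

	#include <stdint.h>
	#include <string.h>

	static void pack_filter_slot(uint16_t slot[6], const uint8_t mac[6])
	{
		int i;

		for (i = 0; i < 3; i++) {
			uint16_t half;

			memcpy(&half, mac + 2 * i, sizeof(half));	/* one 16-bit half of the address */
			slot[2 * i]     = half;		/* halfword the chip matches on */
			slot[2 * i + 1] = half;		/* duplicate fills the other half */
		}
	}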
721 | |||
722 | |||
723 | static void __de_set_rx_mode (struct net_device *dev) | ||
724 | { | ||
725 | struct de_private *de = netdev_priv(dev); | ||
726 | u32 macmode; | ||
727 | unsigned int entry; | ||
728 | u32 mapping; | ||
729 | struct de_desc *txd; | ||
730 | struct de_desc *dummy_txd = NULL; | ||
731 | |||
732 | macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys); | ||
733 | |||
734 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | ||
735 | macmode |= AcceptAllMulticast | AcceptAllPhys; | ||
736 | goto out; | ||
737 | } | ||
738 | |||
739 | if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) { | ||
740 | /* Too many to filter well -- accept all multicasts. */ | ||
741 | macmode |= AcceptAllMulticast; | ||
742 | goto out; | ||
743 | } | ||
744 | |||
745 | /* Note that only the low-address shortword of setup_frame is valid! | ||
746 | The values are doubled for big-endian architectures. */ | ||
747 | if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */ | ||
748 | build_setup_frame_hash (de->setup_frame, dev); | ||
749 | else | ||
750 | build_setup_frame_perfect (de->setup_frame, dev); | ||
751 | |||
752 | /* | ||
753 | * Now add this frame to the Tx list. | ||
754 | */ | ||
755 | |||
756 | entry = de->tx_head; | ||
757 | |||
758 | /* Avoid a chip erratum by prefixing a dummy entry. */ | ||
759 | if (entry != 0) { | ||
760 | de->tx_skb[entry].skb = DE_DUMMY_SKB; | ||
761 | |||
762 | dummy_txd = &de->tx_ring[entry]; | ||
763 | dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ? | ||
764 | cpu_to_le32(RingEnd) : 0; | ||
765 | dummy_txd->addr1 = 0; | ||
766 | |||
767 | /* Must set DescOwn later to avoid a race with the chip */ | ||
768 | |||
769 | entry = NEXT_TX(entry); | ||
770 | } | ||
771 | |||
772 | de->tx_skb[entry].skb = DE_SETUP_SKB; | ||
773 | de->tx_skb[entry].mapping = mapping = | ||
774 | pci_map_single (de->pdev, de->setup_frame, | ||
775 | sizeof (de->setup_frame), PCI_DMA_TODEVICE); | ||
776 | |||
777 | /* Put the setup frame on the Tx list. */ | ||
778 | txd = &de->tx_ring[entry]; | ||
779 | if (entry == (DE_TX_RING_SIZE - 1)) | ||
780 | txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame)); | ||
781 | else | ||
782 | txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame)); | ||
783 | txd->addr1 = cpu_to_le32(mapping); | ||
784 | wmb(); | ||
785 | |||
786 | txd->opts1 = cpu_to_le32(DescOwn); | ||
787 | wmb(); | ||
788 | |||
789 | if (dummy_txd) { | ||
790 | dummy_txd->opts1 = cpu_to_le32(DescOwn); | ||
791 | wmb(); | ||
792 | } | ||
793 | |||
794 | de->tx_head = NEXT_TX(entry); | ||
795 | |||
796 | if (TX_BUFFS_AVAIL(de) == 0) | ||
797 | netif_stop_queue(dev); | ||
798 | |||
799 | /* Trigger an immediate transmit demand. */ | ||
800 | dw32(TxPoll, NormalTxPoll); | ||
801 | |||
802 | out: | ||
803 | if (macmode != dr32(MacMode)) | ||
804 | dw32(MacMode, macmode); | ||
805 | } | ||
806 | |||
807 | static void de_set_rx_mode (struct net_device *dev) | ||
808 | { | ||
809 | unsigned long flags; | ||
810 | struct de_private *de = netdev_priv(dev); | ||
811 | |||
812 | spin_lock_irqsave (&de->lock, flags); | ||
813 | __de_set_rx_mode(dev); | ||
814 | spin_unlock_irqrestore (&de->lock, flags); | ||
815 | } | ||
816 | |||
817 | static inline void de_rx_missed(struct de_private *de, u32 rx_missed) | ||
818 | { | ||
819 | if (unlikely(rx_missed & RxMissedOver)) | ||
820 | de->net_stats.rx_missed_errors += RxMissedMask; | ||
821 | else | ||
822 | de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask); | ||
823 | } | ||
824 | |||
825 | static void __de_get_stats(struct de_private *de) | ||
826 | { | ||
827 | u32 tmp = dr32(RxMissed); /* self-clearing */ | ||
828 | |||
829 | de_rx_missed(de, tmp); | ||
830 | } | ||
831 | |||
832 | static struct net_device_stats *de_get_stats(struct net_device *dev) | ||
833 | { | ||
834 | struct de_private *de = netdev_priv(dev); | ||
835 | |||
836 | /* The only statistic the chip itself reports is silently dropped frames. */ | ||
837 | spin_lock_irq(&de->lock); | ||
838 | if (netif_running(dev) && netif_device_present(dev)) | ||
839 | __de_get_stats(de); | ||
840 | spin_unlock_irq(&de->lock); | ||
841 | |||
842 | return &de->net_stats; | ||
843 | } | ||
844 | |||
845 | static inline int de_is_running (struct de_private *de) | ||
846 | { | ||
847 | return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0; | ||
848 | } | ||
849 | |||
850 | static void de_stop_rxtx (struct de_private *de) | ||
851 | { | ||
852 | u32 macmode; | ||
853 | unsigned int i = 1300/100; | ||
854 | |||
855 | macmode = dr32(MacMode); | ||
856 | if (macmode & RxTx) { | ||
857 | dw32(MacMode, macmode & ~RxTx); | ||
858 | dr32(MacMode); | ||
859 | } | ||
860 | |||
861 | /* wait until in-flight frame completes. | ||
862 | * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin) | ||
863 | * Typically expect this loop to end in < 50 us on 100BT. | ||
864 | */ | ||
865 | while (--i) { | ||
866 | if (!de_is_running(de)) | ||
867 | return; | ||
868 | udelay(100); | ||
869 | } | ||
870 | |||
871 | netdev_warn(de->dev, "timeout expired, stopping DMA\n"); | ||
872 | } | ||
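The 1300/100 poll budget above is just the arithmetic from the preceding comment, spelled out in a minimal sketch (the SKETCH_* names are hypothetical):

	enum {
		SKETCH_FRAME_US  = 1500 * 8 / 10,	/* one 1500-byte frame at 10 Mb/s: 1200 us */
		SKETCH_MARGIN_US = 100,
		SKETCH_POLLS     = (SKETCH_FRAME_US + SKETCH_MARGIN_US) / 100,	/* 13 polls of udelay(100) */
	};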
873 | |||
874 | static inline void de_start_rxtx (struct de_private *de) | ||
875 | { | ||
876 | u32 macmode; | ||
877 | |||
878 | macmode = dr32(MacMode); | ||
879 | if ((macmode & RxTx) != RxTx) { | ||
880 | dw32(MacMode, macmode | RxTx); | ||
881 | dr32(MacMode); | ||
882 | } | ||
883 | } | ||
884 | |||
885 | static void de_stop_hw (struct de_private *de) | ||
886 | { | ||
887 | |||
888 | udelay(5); | ||
889 | dw32(IntrMask, 0); | ||
890 | |||
891 | de_stop_rxtx(de); | ||
892 | |||
893 | dw32(MacStatus, dr32(MacStatus)); | ||
894 | |||
895 | udelay(10); | ||
896 | |||
897 | de->rx_tail = 0; | ||
898 | de->tx_head = de->tx_tail = 0; | ||
899 | } | ||
900 | |||
901 | static void de_link_up(struct de_private *de) | ||
902 | { | ||
903 | if (!netif_carrier_ok(de->dev)) { | ||
904 | netif_carrier_on(de->dev); | ||
905 | netif_info(de, link, de->dev, "link up, media %s\n", | ||
906 | media_name[de->media_type]); | ||
907 | } | ||
908 | } | ||
909 | |||
910 | static void de_link_down(struct de_private *de) | ||
911 | { | ||
912 | if (netif_carrier_ok(de->dev)) { | ||
913 | netif_carrier_off(de->dev); | ||
914 | netif_info(de, link, de->dev, "link down\n"); | ||
915 | } | ||
916 | } | ||
917 | |||
918 | static void de_set_media (struct de_private *de) | ||
919 | { | ||
920 | unsigned media = de->media_type; | ||
921 | u32 macmode = dr32(MacMode); | ||
922 | |||
923 | if (de_is_running(de)) | ||
924 | netdev_warn(de->dev, "chip is running while changing media!\n"); | ||
925 | |||
926 | if (de->de21040) | ||
927 | dw32(CSR11, FULL_DUPLEX_MAGIC); | ||
928 | dw32(CSR13, 0); /* Reset phy */ | ||
929 | dw32(CSR14, de->media[media].csr14); | ||
930 | dw32(CSR15, de->media[media].csr15); | ||
931 | dw32(CSR13, de->media[media].csr13); | ||
932 | |||
933 | /* must delay 10ms before writing to other registers, | ||
934 | * especially CSR6 | ||
935 | */ | ||
936 | mdelay(10); | ||
937 | |||
938 | if (media == DE_MEDIA_TP_FD) | ||
939 | macmode |= FullDuplex; | ||
940 | else | ||
941 | macmode &= ~FullDuplex; | ||
942 | |||
943 | netif_info(de, link, de->dev, "set link %s\n", media_name[media]); | ||
944 | netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n", | ||
945 | dr32(MacMode), dr32(SIAStatus), | ||
946 | dr32(CSR13), dr32(CSR14), dr32(CSR15)); | ||
947 | netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n", | ||
948 | macmode, de->media[media].csr13, | ||
949 | de->media[media].csr14, de->media[media].csr15); | ||
950 | if (macmode != dr32(MacMode)) | ||
951 | dw32(MacMode, macmode); | ||
952 | } | ||
953 | |||
954 | static void de_next_media (struct de_private *de, const u32 *media, | ||
955 | unsigned int n_media) | ||
956 | { | ||
957 | unsigned int i; | ||
958 | |||
959 | for (i = 0; i < n_media; i++) { | ||
960 | if (de_ok_to_advertise(de, media[i])) { | ||
961 | de->media_type = media[i]; | ||
962 | return; | ||
963 | } | ||
964 | } | ||
965 | } | ||
966 | |||
967 | static void de21040_media_timer (unsigned long data) | ||
968 | { | ||
969 | struct de_private *de = (struct de_private *) data; | ||
970 | struct net_device *dev = de->dev; | ||
971 | u32 status = dr32(SIAStatus); | ||
972 | unsigned int carrier; | ||
973 | unsigned long flags; | ||
974 | |||
975 | carrier = (status & NetCxnErr) ? 0 : 1; | ||
976 | |||
977 | if (carrier) { | ||
978 | if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus)) | ||
979 | goto no_link_yet; | ||
980 | |||
981 | de->media_timer.expires = jiffies + DE_TIMER_LINK; | ||
982 | add_timer(&de->media_timer); | ||
983 | if (!netif_carrier_ok(dev)) | ||
984 | de_link_up(de); | ||
985 | else | ||
986 | netif_info(de, timer, dev, "%s link ok, status %x\n", | ||
987 | media_name[de->media_type], status); | ||
988 | return; | ||
989 | } | ||
990 | |||
991 | de_link_down(de); | ||
992 | |||
993 | if (de->media_lock) | ||
994 | return; | ||
995 | |||
996 | if (de->media_type == DE_MEDIA_AUI) { | ||
997 | static const u32 next_state = DE_MEDIA_TP; | ||
998 | de_next_media(de, &next_state, 1); | ||
999 | } else { | ||
1000 | static const u32 next_state = DE_MEDIA_AUI; | ||
1001 | de_next_media(de, &next_state, 1); | ||
1002 | } | ||
1003 | |||
1004 | spin_lock_irqsave(&de->lock, flags); | ||
1005 | de_stop_rxtx(de); | ||
1006 | spin_unlock_irqrestore(&de->lock, flags); | ||
1007 | de_set_media(de); | ||
1008 | de_start_rxtx(de); | ||
1009 | |||
1010 | no_link_yet: | ||
1011 | de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; | ||
1012 | add_timer(&de->media_timer); | ||
1013 | |||
1014 | netif_info(de, timer, dev, "no link, trying media %s, status %x\n", | ||
1015 | media_name[de->media_type], status); | ||
1016 | } | ||
1017 | |||
1018 | static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media) | ||
1019 | { | ||
1020 | switch (new_media) { | ||
1021 | case DE_MEDIA_TP_AUTO: | ||
1022 | if (!(de->media_advertise & ADVERTISED_Autoneg)) | ||
1023 | return 0; | ||
1024 | if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full))) | ||
1025 | return 0; | ||
1026 | break; | ||
1027 | case DE_MEDIA_BNC: | ||
1028 | if (!(de->media_advertise & ADVERTISED_BNC)) | ||
1029 | return 0; | ||
1030 | break; | ||
1031 | case DE_MEDIA_AUI: | ||
1032 | if (!(de->media_advertise & ADVERTISED_AUI)) | ||
1033 | return 0; | ||
1034 | break; | ||
1035 | case DE_MEDIA_TP: | ||
1036 | if (!(de->media_advertise & ADVERTISED_10baseT_Half)) | ||
1037 | return 0; | ||
1038 | break; | ||
1039 | case DE_MEDIA_TP_FD: | ||
1040 | if (!(de->media_advertise & ADVERTISED_10baseT_Full)) | ||
1041 | return 0; | ||
1042 | break; | ||
1043 | } | ||
1044 | |||
1045 | return 1; | ||
1046 | } | ||
1047 | |||
1048 | static void de21041_media_timer (unsigned long data) | ||
1049 | { | ||
1050 | struct de_private *de = (struct de_private *) data; | ||
1051 | struct net_device *dev = de->dev; | ||
1052 | u32 status = dr32(SIAStatus); | ||
1053 | unsigned int carrier; | ||
1054 | unsigned long flags; | ||
1055 | |||
1056 | /* clear port active bits */ | ||
1057 | dw32(SIAStatus, NonselPortActive | SelPortActive); | ||
1058 | |||
1059 | carrier = (status & NetCxnErr) ? 0 : 1; | ||
1060 | |||
1061 | if (carrier) { | ||
1062 | if ((de->media_type == DE_MEDIA_TP_AUTO || | ||
1063 | de->media_type == DE_MEDIA_TP || | ||
1064 | de->media_type == DE_MEDIA_TP_FD) && | ||
1065 | (status & LinkFailStatus)) | ||
1066 | goto no_link_yet; | ||
1067 | |||
1068 | de->media_timer.expires = jiffies + DE_TIMER_LINK; | ||
1069 | add_timer(&de->media_timer); | ||
1070 | if (!netif_carrier_ok(dev)) | ||
1071 | de_link_up(de); | ||
1072 | else | ||
1073 | netif_info(de, timer, dev, | ||
1074 | "%s link ok, mode %x status %x\n", | ||
1075 | media_name[de->media_type], | ||
1076 | dr32(MacMode), status); | ||
1077 | return; | ||
1078 | } | ||
1079 | |||
1080 | de_link_down(de); | ||
1081 | |||
1082 | /* if media type locked, don't switch media */ | ||
1083 | if (de->media_lock) | ||
1084 | goto set_media; | ||
1085 | |||
1086 | /* if activity detected, use it as a hint for the new media type */ | ||
1087 | if (status & NonselPortActive) { | ||
1088 | unsigned int have_media = 1; | ||
1089 | |||
1090 | /* if AUI/BNC selected, then activity is on TP port */ | ||
1091 | if (de->media_type == DE_MEDIA_AUI || | ||
1092 | de->media_type == DE_MEDIA_BNC) { | ||
1093 | if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)) | ||
1094 | de->media_type = DE_MEDIA_TP_AUTO; | ||
1095 | else | ||
1096 | have_media = 0; | ||
1097 | } | ||
1098 | |||
1099 | /* TP selected. If there is only TP and BNC, then it's BNC */ | ||
1100 | else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) && | ||
1101 | de_ok_to_advertise(de, DE_MEDIA_BNC)) | ||
1102 | de->media_type = DE_MEDIA_BNC; | ||
1103 | |||
1104 | /* TP selected. If there is only TP and AUI, then it's AUI */ | ||
1105 | else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) && | ||
1106 | de_ok_to_advertise(de, DE_MEDIA_AUI)) | ||
1107 | de->media_type = DE_MEDIA_AUI; | ||
1108 | |||
1109 | /* otherwise, ignore the hint */ | ||
1110 | else | ||
1111 | have_media = 0; | ||
1112 | |||
1113 | if (have_media) | ||
1114 | goto set_media; | ||
1115 | } | ||
1116 | |||
1117 | /* | ||
1118 | * Absent or ambiguous activity hint, move to next advertised | ||
1119 | * media state. If de->media_type is left unchanged, this | ||
1120 | * simply resets the PHY and reloads the current media settings. | ||
1121 | */ | ||
1122 | if (de->media_type == DE_MEDIA_AUI) { | ||
1123 | static const u32 next_states[] = { | ||
1124 | DE_MEDIA_BNC, DE_MEDIA_TP_AUTO | ||
1125 | }; | ||
1126 | de_next_media(de, next_states, ARRAY_SIZE(next_states)); | ||
1127 | } else if (de->media_type == DE_MEDIA_BNC) { | ||
1128 | static const u32 next_states[] = { | ||
1129 | DE_MEDIA_TP_AUTO, DE_MEDIA_AUI | ||
1130 | }; | ||
1131 | de_next_media(de, next_states, ARRAY_SIZE(next_states)); | ||
1132 | } else { | ||
1133 | static const u32 next_states[] = { | ||
1134 | DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO | ||
1135 | }; | ||
1136 | de_next_media(de, next_states, ARRAY_SIZE(next_states)); | ||
1137 | } | ||
1138 | |||
1139 | set_media: | ||
1140 | spin_lock_irqsave(&de->lock, flags); | ||
1141 | de_stop_rxtx(de); | ||
1142 | spin_unlock_irqrestore(&de->lock, flags); | ||
1143 | de_set_media(de); | ||
1144 | de_start_rxtx(de); | ||
1145 | |||
1146 | no_link_yet: | ||
1147 | de->media_timer.expires = jiffies + DE_TIMER_NO_LINK; | ||
1148 | add_timer(&de->media_timer); | ||
1149 | |||
1150 | netif_info(de, timer, dev, "no link, trying media %s, status %x\n", | ||
1151 | media_name[de->media_type], status); | ||
1152 | } | ||
1153 | |||
1154 | static void de_media_interrupt (struct de_private *de, u32 status) | ||
1155 | { | ||
1156 | if (status & LinkPass) { | ||
1157 | /* Ignore if current media is AUI or BNC and we can't use TP */ | ||
1158 | if ((de->media_type == DE_MEDIA_AUI || | ||
1159 | de->media_type == DE_MEDIA_BNC) && | ||
1160 | (de->media_lock || | ||
1161 | !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))) | ||
1162 | return; | ||
1163 | /* If current media is not TP, change it to TP */ | ||
1164 | if ((de->media_type == DE_MEDIA_AUI || | ||
1165 | de->media_type == DE_MEDIA_BNC)) { | ||
1166 | de->media_type = DE_MEDIA_TP_AUTO; | ||
1167 | de_stop_rxtx(de); | ||
1168 | de_set_media(de); | ||
1169 | de_start_rxtx(de); | ||
1170 | } | ||
1171 | de_link_up(de); | ||
1172 | mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK); | ||
1173 | return; | ||
1174 | } | ||
1175 | |||
1176 | BUG_ON(!(status & LinkFail)); | ||
1177 | /* Mark the link as down only if current media is TP */ | ||
1178 | if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI && | ||
1179 | de->media_type != DE_MEDIA_BNC) { | ||
1180 | de_link_down(de); | ||
1181 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); | ||
1182 | } | ||
1183 | } | ||
1184 | |||
1185 | static int de_reset_mac (struct de_private *de) | ||
1186 | { | ||
1187 | u32 status, tmp; | ||
1188 | |||
1189 | /* | ||
1190 | * Reset MAC. de4x5.c and tulip.c examined for "advice" | ||
1191 | * in this area. | ||
1192 | */ | ||
1193 | |||
1194 | if (dr32(BusMode) == 0xffffffff) | ||
1195 | return -EBUSY; | ||
1196 | |||
1197 | /* Reset the chip, holding bit 0 set for at least 50 PCI cycles. */ | ||
1198 | dw32 (BusMode, CmdReset); | ||
1199 | mdelay (1); | ||
1200 | |||
1201 | dw32 (BusMode, de_bus_mode); | ||
1202 | mdelay (1); | ||
1203 | |||
1204 | for (tmp = 0; tmp < 5; tmp++) { | ||
1205 | dr32 (BusMode); | ||
1206 | mdelay (1); | ||
1207 | } | ||
1208 | |||
1209 | mdelay (1); | ||
1210 | |||
1211 | status = dr32(MacStatus); | ||
1212 | if (status & (RxState | TxState)) | ||
1213 | return -EBUSY; | ||
1214 | if (status == 0xffffffff) | ||
1215 | return -ENODEV; | ||
1216 | return 0; | ||
1217 | } | ||
1218 | |||
1219 | static void de_adapter_wake (struct de_private *de) | ||
1220 | { | ||
1221 | u32 pmctl; | ||
1222 | |||
1223 | if (de->de21040) | ||
1224 | return; | ||
1225 | |||
1226 | pci_read_config_dword(de->pdev, PCIPM, &pmctl); | ||
1227 | if (pmctl & PM_Mask) { | ||
1228 | pmctl &= ~PM_Mask; | ||
1229 | pci_write_config_dword(de->pdev, PCIPM, pmctl); | ||
1230 | |||
1231 | /* de4x5.c delays, so we do too */ | ||
1232 | msleep(10); | ||
1233 | } | ||
1234 | } | ||
1235 | |||
1236 | static void de_adapter_sleep (struct de_private *de) | ||
1237 | { | ||
1238 | u32 pmctl; | ||
1239 | |||
1240 | if (de->de21040) | ||
1241 | return; | ||
1242 | |||
1243 | dw32(CSR13, 0); /* Reset phy */ | ||
1244 | pci_read_config_dword(de->pdev, PCIPM, &pmctl); | ||
1245 | pmctl |= PM_Sleep; | ||
1246 | pci_write_config_dword(de->pdev, PCIPM, pmctl); | ||
1247 | } | ||
1248 | |||
1249 | static int de_init_hw (struct de_private *de) | ||
1250 | { | ||
1251 | struct net_device *dev = de->dev; | ||
1252 | u32 macmode; | ||
1253 | int rc; | ||
1254 | |||
1255 | de_adapter_wake(de); | ||
1256 | |||
1257 | macmode = dr32(MacMode) & ~MacModeClear; | ||
1258 | |||
1259 | rc = de_reset_mac(de); | ||
1260 | if (rc) | ||
1261 | return rc; | ||
1262 | |||
1263 | de_set_media(de); /* reset phy */ | ||
1264 | |||
1265 | dw32(RxRingAddr, de->ring_dma); | ||
1266 | dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE)); | ||
1267 | |||
1268 | dw32(MacMode, RxTx | macmode); | ||
1269 | |||
1270 | dr32(RxMissed); /* self-clearing */ | ||
1271 | |||
1272 | dw32(IntrMask, de_intr_mask); | ||
1273 | |||
1274 | de_set_rx_mode(dev); | ||
1275 | |||
1276 | return 0; | ||
1277 | } | ||
1278 | |||
1279 | static int de_refill_rx (struct de_private *de) | ||
1280 | { | ||
1281 | unsigned i; | ||
1282 | |||
1283 | for (i = 0; i < DE_RX_RING_SIZE; i++) { | ||
1284 | struct sk_buff *skb; | ||
1285 | |||
1286 | skb = dev_alloc_skb(de->rx_buf_sz); | ||
1287 | if (!skb) | ||
1288 | goto err_out; | ||
1289 | |||
1290 | skb->dev = de->dev; | ||
1291 | |||
1292 | de->rx_skb[i].mapping = pci_map_single(de->pdev, | ||
1293 | skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1294 | de->rx_skb[i].skb = skb; | ||
1295 | |||
1296 | de->rx_ring[i].opts1 = cpu_to_le32(DescOwn); | ||
1297 | if (i == (DE_RX_RING_SIZE - 1)) | ||
1298 | de->rx_ring[i].opts2 = | ||
1299 | cpu_to_le32(RingEnd | de->rx_buf_sz); | ||
1300 | else | ||
1301 | de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz); | ||
1302 | de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping); | ||
1303 | de->rx_ring[i].addr2 = 0; | ||
1304 | } | ||
1305 | |||
1306 | return 0; | ||
1307 | |||
1308 | err_out: | ||
1309 | de_clean_rings(de); | ||
1310 | return -ENOMEM; | ||
1311 | } | ||
1312 | |||
1313 | static int de_init_rings (struct de_private *de) | ||
1314 | { | ||
1315 | memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); | ||
1316 | de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); | ||
1317 | |||
1318 | de->rx_tail = 0; | ||
1319 | de->tx_head = de->tx_tail = 0; | ||
1320 | |||
1321 | return de_refill_rx (de); | ||
1322 | } | ||
1323 | |||
1324 | static int de_alloc_rings (struct de_private *de) | ||
1325 | { | ||
1326 | de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma); | ||
1327 | if (!de->rx_ring) | ||
1328 | return -ENOMEM; | ||
1329 | de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE]; | ||
1330 | return de_init_rings(de); | ||
1331 | } | ||
1332 | |||
1333 | static void de_clean_rings (struct de_private *de) | ||
1334 | { | ||
1335 | unsigned i; | ||
1336 | |||
1337 | memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE); | ||
1338 | de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); | ||
1339 | wmb(); | ||
1340 | memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE); | ||
1341 | de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd); | ||
1342 | wmb(); | ||
1343 | |||
1344 | for (i = 0; i < DE_RX_RING_SIZE; i++) { | ||
1345 | if (de->rx_skb[i].skb) { | ||
1346 | pci_unmap_single(de->pdev, de->rx_skb[i].mapping, | ||
1347 | de->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1348 | dev_kfree_skb(de->rx_skb[i].skb); | ||
1349 | } | ||
1350 | } | ||
1351 | |||
1352 | for (i = 0; i < DE_TX_RING_SIZE; i++) { | ||
1353 | struct sk_buff *skb = de->tx_skb[i].skb; | ||
1354 | if ((skb) && (skb != DE_DUMMY_SKB)) { | ||
1355 | if (skb != DE_SETUP_SKB) { | ||
1356 | de->net_stats.tx_dropped++; | ||
1357 | pci_unmap_single(de->pdev, | ||
1358 | de->tx_skb[i].mapping, | ||
1359 | skb->len, PCI_DMA_TODEVICE); | ||
1360 | dev_kfree_skb(skb); | ||
1361 | } else { | ||
1362 | pci_unmap_single(de->pdev, | ||
1363 | de->tx_skb[i].mapping, | ||
1364 | sizeof(de->setup_frame), | ||
1365 | PCI_DMA_TODEVICE); | ||
1366 | } | ||
1367 | } | ||
1368 | } | ||
1369 | |||
1370 | memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE); | ||
1371 | memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE); | ||
1372 | } | ||
1373 | |||
1374 | static void de_free_rings (struct de_private *de) | ||
1375 | { | ||
1376 | de_clean_rings(de); | ||
1377 | pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma); | ||
1378 | de->rx_ring = NULL; | ||
1379 | de->tx_ring = NULL; | ||
1380 | } | ||
1381 | |||
1382 | static int de_open (struct net_device *dev) | ||
1383 | { | ||
1384 | struct de_private *de = netdev_priv(dev); | ||
1385 | int rc; | ||
1386 | |||
1387 | netif_dbg(de, ifup, dev, "enabling interface\n"); | ||
1388 | |||
1389 | de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); | ||
1390 | |||
1391 | rc = de_alloc_rings(de); | ||
1392 | if (rc) { | ||
1393 | netdev_err(dev, "ring allocation failure, err=%d\n", rc); | ||
1394 | return rc; | ||
1395 | } | ||
1396 | |||
1397 | dw32(IntrMask, 0); | ||
1398 | |||
1399 | rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); | ||
1400 | if (rc) { | ||
1401 | netdev_err(dev, "IRQ %d request failure, err=%d\n", | ||
1402 | dev->irq, rc); | ||
1403 | goto err_out_free; | ||
1404 | } | ||
1405 | |||
1406 | rc = de_init_hw(de); | ||
1407 | if (rc) { | ||
1408 | netdev_err(dev, "h/w init failure, err=%d\n", rc); | ||
1409 | goto err_out_free_irq; | ||
1410 | } | ||
1411 | |||
1412 | netif_start_queue(dev); | ||
1413 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); | ||
1414 | |||
1415 | return 0; | ||
1416 | |||
1417 | err_out_free_irq: | ||
1418 | free_irq(dev->irq, dev); | ||
1419 | err_out_free: | ||
1420 | de_free_rings(de); | ||
1421 | return rc; | ||
1422 | } | ||
1423 | |||
1424 | static int de_close (struct net_device *dev) | ||
1425 | { | ||
1426 | struct de_private *de = netdev_priv(dev); | ||
1427 | unsigned long flags; | ||
1428 | |||
1429 | netif_dbg(de, ifdown, dev, "disabling interface\n"); | ||
1430 | |||
1431 | del_timer_sync(&de->media_timer); | ||
1432 | |||
1433 | spin_lock_irqsave(&de->lock, flags); | ||
1434 | de_stop_hw(de); | ||
1435 | netif_stop_queue(dev); | ||
1436 | netif_carrier_off(dev); | ||
1437 | spin_unlock_irqrestore(&de->lock, flags); | ||
1438 | |||
1439 | free_irq(dev->irq, dev); | ||
1440 | |||
1441 | de_free_rings(de); | ||
1442 | de_adapter_sleep(de); | ||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static void de_tx_timeout (struct net_device *dev) | ||
1447 | { | ||
1448 | struct de_private *de = netdev_priv(dev); | ||
1449 | |||
1450 | netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", | ||
1451 | dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), | ||
1452 | de->rx_tail, de->tx_head, de->tx_tail); | ||
1453 | |||
1454 | del_timer_sync(&de->media_timer); | ||
1455 | |||
1456 | disable_irq(dev->irq); | ||
1457 | spin_lock_irq(&de->lock); | ||
1458 | |||
1459 | de_stop_hw(de); | ||
1460 | netif_stop_queue(dev); | ||
1461 | netif_carrier_off(dev); | ||
1462 | |||
1463 | spin_unlock_irq(&de->lock); | ||
1464 | enable_irq(dev->irq); | ||
1465 | |||
1466 | /* Update the error counts. */ | ||
1467 | __de_get_stats(de); | ||
1468 | |||
1469 | synchronize_irq(dev->irq); | ||
1470 | de_clean_rings(de); | ||
1471 | |||
1472 | de_init_rings(de); | ||
1473 | |||
1474 | de_init_hw(de); | ||
1475 | |||
1476 | netif_wake_queue(dev); | ||
1477 | } | ||
1478 | |||
1479 | static void __de_get_regs(struct de_private *de, u8 *buf) | ||
1480 | { | ||
1481 | int i; | ||
1482 | u32 *rbuf = (u32 *)buf; | ||
1483 | |||
1484 | /* read all CSRs */ | ||
1485 | for (i = 0; i < DE_NUM_REGS; i++) | ||
1486 | rbuf[i] = dr32(i * 8); | ||
1487 | |||
1488 | /* handle self-clearing RxMissed counter, CSR8 */ | ||
1489 | de_rx_missed(de, rbuf[8]); | ||
1490 | } | ||
1491 | |||
1492 | static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd) | ||
1493 | { | ||
1494 | ecmd->supported = de->media_supported; | ||
1495 | ecmd->transceiver = XCVR_INTERNAL; | ||
1496 | ecmd->phy_address = 0; | ||
1497 | ecmd->advertising = de->media_advertise; | ||
1498 | |||
1499 | switch (de->media_type) { | ||
1500 | case DE_MEDIA_AUI: | ||
1501 | ecmd->port = PORT_AUI; | ||
1502 | break; | ||
1503 | case DE_MEDIA_BNC: | ||
1504 | ecmd->port = PORT_BNC; | ||
1505 | break; | ||
1506 | default: | ||
1507 | ecmd->port = PORT_TP; | ||
1508 | break; | ||
1509 | } | ||
1510 | |||
1511 | ethtool_cmd_speed_set(ecmd, 10); | ||
1512 | |||
1513 | if (dr32(MacMode) & FullDuplex) | ||
1514 | ecmd->duplex = DUPLEX_FULL; | ||
1515 | else | ||
1516 | ecmd->duplex = DUPLEX_HALF; | ||
1517 | |||
1518 | if (de->media_lock) | ||
1519 | ecmd->autoneg = AUTONEG_DISABLE; | ||
1520 | else | ||
1521 | ecmd->autoneg = AUTONEG_ENABLE; | ||
1522 | |||
1523 | /* ignore maxtxpkt, maxrxpkt for now */ | ||
1524 | |||
1525 | return 0; | ||
1526 | } | ||
1527 | |||
1528 | static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd) | ||
1529 | { | ||
1530 | u32 new_media; | ||
1531 | unsigned int media_lock; | ||
1532 | |||
1533 | if (ethtool_cmd_speed(ecmd) != 10) | ||
1534 | return -EINVAL; | ||
1535 | if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) | ||
1536 | return -EINVAL; | ||
1537 | if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC) | ||
1538 | return -EINVAL; | ||
1539 | if (de->de21040 && ecmd->port == PORT_BNC) | ||
1540 | return -EINVAL; | ||
1541 | if (ecmd->transceiver != XCVR_INTERNAL) | ||
1542 | return -EINVAL; | ||
1543 | if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) | ||
1544 | return -EINVAL; | ||
1545 | if (ecmd->advertising & ~de->media_supported) | ||
1546 | return -EINVAL; | ||
1547 | if (ecmd->autoneg == AUTONEG_ENABLE && | ||
1548 | (!(ecmd->advertising & ADVERTISED_Autoneg))) | ||
1549 | return -EINVAL; | ||
1550 | |||
1551 | switch (ecmd->port) { | ||
1552 | case PORT_AUI: | ||
1553 | new_media = DE_MEDIA_AUI; | ||
1554 | if (!(ecmd->advertising & ADVERTISED_AUI)) | ||
1555 | return -EINVAL; | ||
1556 | break; | ||
1557 | case PORT_BNC: | ||
1558 | new_media = DE_MEDIA_BNC; | ||
1559 | if (!(ecmd->advertising & ADVERTISED_BNC)) | ||
1560 | return -EINVAL; | ||
1561 | break; | ||
1562 | default: | ||
1563 | if (ecmd->autoneg == AUTONEG_ENABLE) | ||
1564 | new_media = DE_MEDIA_TP_AUTO; | ||
1565 | else if (ecmd->duplex == DUPLEX_FULL) | ||
1566 | new_media = DE_MEDIA_TP_FD; | ||
1567 | else | ||
1568 | new_media = DE_MEDIA_TP; | ||
1569 | if (!(ecmd->advertising & ADVERTISED_TP)) | ||
1570 | return -EINVAL; | ||
1571 | if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half))) | ||
1572 | return -EINVAL; | ||
1573 | break; | ||
1574 | } | ||
1575 | |||
1576 | media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1; | ||
1577 | |||
1578 | if ((new_media == de->media_type) && | ||
1579 | (media_lock == de->media_lock) && | ||
1580 | (ecmd->advertising == de->media_advertise)) | ||
1581 | return 0; /* nothing to change */ | ||
1582 | |||
1583 | de_link_down(de); | ||
1584 | mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK); | ||
1585 | de_stop_rxtx(de); | ||
1586 | |||
1587 | de->media_type = new_media; | ||
1588 | de->media_lock = media_lock; | ||
1589 | de->media_advertise = ecmd->advertising; | ||
1590 | de_set_media(de); | ||
1591 | if (netif_running(de->dev)) | ||
1592 | de_start_rxtx(de); | ||
1593 | |||
1594 | return 0; | ||
1595 | } | ||
1596 | |||
1597 | static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info) | ||
1598 | { | ||
1599 | struct de_private *de = netdev_priv(dev); | ||
1600 | |||
1601 | strcpy (info->driver, DRV_NAME); | ||
1602 | strcpy (info->version, DRV_VERSION); | ||
1603 | strcpy (info->bus_info, pci_name(de->pdev)); | ||
1604 | info->eedump_len = DE_EEPROM_SIZE; | ||
1605 | } | ||
1606 | |||
1607 | static int de_get_regs_len(struct net_device *dev) | ||
1608 | { | ||
1609 | return DE_REGS_SIZE; | ||
1610 | } | ||
1611 | |||
1612 | static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
1613 | { | ||
1614 | struct de_private *de = netdev_priv(dev); | ||
1615 | int rc; | ||
1616 | |||
1617 | spin_lock_irq(&de->lock); | ||
1618 | rc = __de_get_settings(de, ecmd); | ||
1619 | spin_unlock_irq(&de->lock); | ||
1620 | |||
1621 | return rc; | ||
1622 | } | ||
1623 | |||
1624 | static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) | ||
1625 | { | ||
1626 | struct de_private *de = netdev_priv(dev); | ||
1627 | int rc; | ||
1628 | |||
1629 | spin_lock_irq(&de->lock); | ||
1630 | rc = __de_set_settings(de, ecmd); | ||
1631 | spin_unlock_irq(&de->lock); | ||
1632 | |||
1633 | return rc; | ||
1634 | } | ||
1635 | |||
1636 | static u32 de_get_msglevel(struct net_device *dev) | ||
1637 | { | ||
1638 | struct de_private *de = netdev_priv(dev); | ||
1639 | |||
1640 | return de->msg_enable; | ||
1641 | } | ||
1642 | |||
1643 | static void de_set_msglevel(struct net_device *dev, u32 msglvl) | ||
1644 | { | ||
1645 | struct de_private *de = netdev_priv(dev); | ||
1646 | |||
1647 | de->msg_enable = msglvl; | ||
1648 | } | ||
1649 | |||
1650 | static int de_get_eeprom(struct net_device *dev, | ||
1651 | struct ethtool_eeprom *eeprom, u8 *data) | ||
1652 | { | ||
1653 | struct de_private *de = netdev_priv(dev); | ||
1654 | |||
1655 | if (!de->ee_data) | ||
1656 | return -EOPNOTSUPP; | ||
1657 | if ((eeprom->offset != 0) || (eeprom->magic != 0) || | ||
1658 | (eeprom->len != DE_EEPROM_SIZE)) | ||
1659 | return -EINVAL; | ||
1660 | memcpy(data, de->ee_data, eeprom->len); | ||
1661 | |||
1662 | return 0; | ||
1663 | } | ||
1664 | |||
1665 | static int de_nway_reset(struct net_device *dev) | ||
1666 | { | ||
1667 | struct de_private *de = netdev_priv(dev); | ||
1668 | u32 status; | ||
1669 | |||
1670 | if (de->media_type != DE_MEDIA_TP_AUTO) | ||
1671 | return -EINVAL; | ||
1672 | if (netif_carrier_ok(de->dev)) | ||
1673 | de_link_down(de); | ||
1674 | |||
1675 | status = dr32(SIAStatus); | ||
1676 | dw32(SIAStatus, (status & ~NWayState) | NWayRestart); | ||
1677 | netif_info(de, link, dev, "link nway restart, status %x,%x\n", | ||
1678 | status, dr32(SIAStatus)); | ||
1679 | return 0; | ||
1680 | } | ||
1681 | |||
1682 | static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs, | ||
1683 | void *data) | ||
1684 | { | ||
1685 | struct de_private *de = netdev_priv(dev); | ||
1686 | |||
1687 | regs->version = (DE_REGS_VER << 2) | de->de21040; | ||
1688 | |||
1689 | spin_lock_irq(&de->lock); | ||
1690 | __de_get_regs(de, data); | ||
1691 | spin_unlock_irq(&de->lock); | ||
1692 | } | ||
1693 | |||
1694 | static const struct ethtool_ops de_ethtool_ops = { | ||
1695 | .get_link = ethtool_op_get_link, | ||
1696 | .get_drvinfo = de_get_drvinfo, | ||
1697 | .get_regs_len = de_get_regs_len, | ||
1698 | .get_settings = de_get_settings, | ||
1699 | .set_settings = de_set_settings, | ||
1700 | .get_msglevel = de_get_msglevel, | ||
1701 | .set_msglevel = de_set_msglevel, | ||
1702 | .get_eeprom = de_get_eeprom, | ||
1703 | .nway_reset = de_nway_reset, | ||
1704 | .get_regs = de_get_regs, | ||
1705 | }; | ||
1706 | |||
1707 | static void __devinit de21040_get_mac_address (struct de_private *de) | ||
1708 | { | ||
1709 | unsigned i; | ||
1710 | |||
1711 | dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */ | ||
1712 | udelay(5); | ||
1713 | |||
1714 | for (i = 0; i < 6; i++) { | ||
1715 | int value, boguscnt = 100000; | ||
1716 | do { | ||
1717 | value = dr32(ROMCmd); | ||
1718 | rmb(); | ||
1719 | } while (value < 0 && --boguscnt > 0); | ||
1720 | de->dev->dev_addr[i] = value; | ||
1721 | udelay(1); | ||
1722 | if (boguscnt <= 0) | ||
1723 | pr_warn("timeout reading 21040 MAC address byte %u\n", | ||
1724 | i); | ||
1725 | } | ||
1726 | } | ||
1727 | |||
1728 | static void __devinit de21040_get_media_info(struct de_private *de) | ||
1729 | { | ||
1730 | unsigned int i; | ||
1731 | |||
1732 | de->media_type = DE_MEDIA_TP; | ||
1733 | de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full | | ||
1734 | SUPPORTED_10baseT_Half | SUPPORTED_AUI; | ||
1735 | de->media_advertise = de->media_supported; | ||
1736 | |||
1737 | for (i = 0; i < DE_MAX_MEDIA; i++) { | ||
1738 | switch (i) { | ||
1739 | case DE_MEDIA_AUI: | ||
1740 | case DE_MEDIA_TP: | ||
1741 | case DE_MEDIA_TP_FD: | ||
1742 | de->media[i].type = i; | ||
1743 | de->media[i].csr13 = t21040_csr13[i]; | ||
1744 | de->media[i].csr14 = t21040_csr14[i]; | ||
1745 | de->media[i].csr15 = t21040_csr15[i]; | ||
1746 | break; | ||
1747 | default: | ||
1748 | de->media[i].type = DE_MEDIA_INVALID; | ||
1749 | break; | ||
1750 | } | ||
1751 | } | ||
1752 | } | ||
1753 | |||
1754 | /* Note: this routine returns extra data bits for size detection. */ | ||
1755 | static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len) | ||
1756 | { | ||
1757 | int i; | ||
1758 | unsigned retval = 0; | ||
1759 | void __iomem *ee_addr = regs + ROMCmd; | ||
1760 | int read_cmd = location | (EE_READ_CMD << addr_len); | ||
1761 | |||
1762 | writel(EE_ENB & ~EE_CS, ee_addr); | ||
1763 | writel(EE_ENB, ee_addr); | ||
1764 | |||
1765 | /* Shift the read command bits out. */ | ||
1766 | for (i = 4 + addr_len; i >= 0; i--) { | ||
1767 | short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; | ||
1768 | writel(EE_ENB | dataval, ee_addr); | ||
1769 | readl(ee_addr); | ||
1770 | writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); | ||
1771 | readl(ee_addr); | ||
1772 | retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); | ||
1773 | } | ||
1774 | writel(EE_ENB, ee_addr); | ||
1775 | readl(ee_addr); | ||
1776 | |||
1777 | for (i = 16; i > 0; i--) { | ||
1778 | writel(EE_ENB | EE_SHIFT_CLK, ee_addr); | ||
1779 | readl(ee_addr); | ||
1780 | retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0); | ||
1781 | writel(EE_ENB, ee_addr); | ||
1782 | readl(ee_addr); | ||
1783 | } | ||
1784 | |||
1785 | /* Terminate the EEPROM access. */ | ||
1786 | writel(EE_ENB & ~EE_CS, ee_addr); | ||
1787 | return retval; | ||
1788 | } | ||
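tulip_read_eeprom() bit-bangs a Microwire (93Cxx-style) serial EEPROM: the start bit and read opcode are shifted out just above the address bits, then 16 data bits are clocked back in MSB first. Below is a small sketch of how the command word is framed; the EE_READ_CMD value used here (6, i.e. start bit plus the read opcode) is an assumption based on common Tulip-family headers and is not visible in this hunk:

    #include <stdio.h>

    #define EE_READ_CMD 6   /* assumed: binary 110 = start bit + "read" opcode */

    /* Same framing as read_cmd in tulip_read_eeprom(): the opcode sits
     * immediately above addr_len address bits. */
    static unsigned frame_read_cmd(unsigned location, int addr_len)
    {
            return location | (EE_READ_CMD << addr_len);
    }

    int main(void)
    {
            /* word 3 of a 6-bit-address part -> 0x183 (binary 1 1000 0011) */
            printf("0x%x\n", frame_read_cmd(3, 6));
            return 0;
    }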
1789 | |||
1790 | static void __devinit de21041_get_srom_info (struct de_private *de) | ||
1791 | { | ||
1792 | unsigned i, sa_offset = 0, ofs; | ||
1793 | u8 ee_data[DE_EEPROM_SIZE + 6] = {}; | ||
1794 | unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6; | ||
1795 | struct de_srom_info_leaf *il; | ||
1796 | void *bufp; | ||
1797 | |||
1798 | /* download entire eeprom */ | ||
1799 | for (i = 0; i < DE_EEPROM_WORDS; i++) | ||
1800 | ((__le16 *)ee_data)[i] = | ||
1801 | cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size)); | ||
1802 | |||
1803 | /* DEC now has a specification but early board makers | ||
1804 | just put the address in the first EEPROM locations. */ | ||
1805 | /* This does memcmp(eedata, eedata+16, 8) */ | ||
1806 | |||
1807 | #ifndef CONFIG_MIPS_COBALT | ||
1808 | |||
1809 | for (i = 0; i < 8; i ++) | ||
1810 | if (ee_data[i] != ee_data[16+i]) | ||
1811 | sa_offset = 20; | ||
1812 | |||
1813 | #endif | ||
1814 | |||
1815 | /* store MAC address */ | ||
1816 | for (i = 0; i < 6; i ++) | ||
1817 | de->dev->dev_addr[i] = ee_data[i + sa_offset]; | ||
1818 | |||
1819 | /* get offset of controller 0 info leaf. ignore 2nd byte. */ | ||
1820 | ofs = ee_data[SROMC0InfoLeaf]; | ||
1821 | if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block))) | ||
1822 | goto bad_srom; | ||
1823 | |||
1824 | /* get pointer to info leaf */ | ||
1825 | il = (struct de_srom_info_leaf *) &ee_data[ofs]; | ||
1826 | |||
1827 | /* paranoia checks */ | ||
1828 | if (il->n_blocks == 0) | ||
1829 | goto bad_srom; | ||
1830 | if ((sizeof(ee_data) - ofs) < | ||
1831 | (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks))) | ||
1832 | goto bad_srom; | ||
1833 | |||
1834 | /* get default media type */ | ||
1835 | switch (get_unaligned(&il->default_media)) { | ||
1836 | case 0x0001: de->media_type = DE_MEDIA_BNC; break; | ||
1837 | case 0x0002: de->media_type = DE_MEDIA_AUI; break; | ||
1838 | case 0x0204: de->media_type = DE_MEDIA_TP_FD; break; | ||
1839 | default: de->media_type = DE_MEDIA_TP_AUTO; break; | ||
1840 | } | ||
1841 | |||
1842 | if (netif_msg_probe(de)) | ||
1843 | pr_info("de%d: SROM leaf offset %u, default media %s\n", | ||
1844 | de->board_idx, ofs, media_name[de->media_type]); | ||
1845 | |||
1846 | /* init SIA register values to defaults */ | ||
1847 | for (i = 0; i < DE_MAX_MEDIA; i++) { | ||
1848 | de->media[i].type = DE_MEDIA_INVALID; | ||
1849 | de->media[i].csr13 = 0xffff; | ||
1850 | de->media[i].csr14 = 0xffff; | ||
1851 | de->media[i].csr15 = 0xffff; | ||
1852 | } | ||
1853 | |||
1854 | /* parse media blocks to see which media are supported, | ||
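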
1855 | * and if any custom CSR values are provided | ||
1856 | */ | ||
1857 | bufp = ((void *)il) + sizeof(*il); | ||
1858 | for (i = 0; i < il->n_blocks; i++) { | ||
1859 | struct de_srom_media_block *ib = bufp; | ||
1860 | unsigned idx; | ||
1861 | |||
1862 | /* index based on media type in media block */ | ||
1863 | switch(ib->opts & MediaBlockMask) { | ||
1864 | case 0: /* 10baseT */ | ||
1865 | de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half | ||
1866 | | SUPPORTED_Autoneg; | ||
1867 | idx = DE_MEDIA_TP; | ||
1868 | de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; | ||
1869 | break; | ||
1870 | case 1: /* BNC */ | ||
1871 | de->media_supported |= SUPPORTED_BNC; | ||
1872 | idx = DE_MEDIA_BNC; | ||
1873 | break; | ||
1874 | case 2: /* AUI */ | ||
1875 | de->media_supported |= SUPPORTED_AUI; | ||
1876 | idx = DE_MEDIA_AUI; | ||
1877 | break; | ||
1878 | case 4: /* 10baseT-FD */ | ||
1879 | de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full | ||
1880 | | SUPPORTED_Autoneg; | ||
1881 | idx = DE_MEDIA_TP_FD; | ||
1882 | de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO; | ||
1883 | break; | ||
1884 | default: | ||
1885 | goto bad_srom; | ||
1886 | } | ||
1887 | |||
1888 | de->media[idx].type = idx; | ||
1889 | |||
1890 | if (netif_msg_probe(de)) | ||
1891 | pr_info("de%d: media block #%u: %s", | ||
1892 | de->board_idx, i, | ||
1893 | media_name[de->media[idx].type]); | ||
1894 | |||
1895 | bufp += sizeof (ib->opts); | ||
1896 | |||
1897 | if (ib->opts & MediaCustomCSRs) { | ||
1898 | de->media[idx].csr13 = get_unaligned(&ib->csr13); | ||
1899 | de->media[idx].csr14 = get_unaligned(&ib->csr14); | ||
1900 | de->media[idx].csr15 = get_unaligned(&ib->csr15); | ||
1901 | bufp += sizeof(ib->csr13) + sizeof(ib->csr14) + | ||
1902 | sizeof(ib->csr15); | ||
1903 | |||
1904 | if (netif_msg_probe(de)) | ||
1905 | pr_cont(" (%x,%x,%x)\n", | ||
1906 | de->media[idx].csr13, | ||
1907 | de->media[idx].csr14, | ||
1908 | de->media[idx].csr15); | ||
1909 | |||
1910 | } else { | ||
1911 | if (netif_msg_probe(de)) | ||
1912 | pr_cont("\n"); | ||
1913 | } | ||
1914 | |||
1915 | if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3])) | ||
1916 | break; | ||
1917 | } | ||
1918 | |||
1919 | de->media_advertise = de->media_supported; | ||
1920 | |||
1921 | fill_defaults: | ||
1922 | /* fill in defaults, for cases where custom CSRs not used */ | ||
1923 | for (i = 0; i < DE_MAX_MEDIA; i++) { | ||
1924 | if (de->media[i].csr13 == 0xffff) | ||
1925 | de->media[i].csr13 = t21041_csr13[i]; | ||
1926 | if (de->media[i].csr14 == 0xffff) { | ||
1927 | /* autonegotiation is broken at least on some chip | ||
1928 | revisions - rev. 0x21 works, 0x11 does not */ | ||
1929 | if (de->pdev->revision < 0x20) | ||
1930 | de->media[i].csr14 = t21041_csr14_brk[i]; | ||
1931 | else | ||
1932 | de->media[i].csr14 = t21041_csr14[i]; | ||
1933 | } | ||
1934 | if (de->media[i].csr15 == 0xffff) | ||
1935 | de->media[i].csr15 = t21041_csr15[i]; | ||
1936 | } | ||
1937 | |||
1938 | de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL); | ||
1939 | |||
1940 | return; | ||
1941 | |||
1942 | bad_srom: | ||
1943 | /* for error cases, it's ok to assume we support all these */ | ||
1944 | for (i = 0; i < DE_MAX_MEDIA; i++) | ||
1945 | de->media[i].type = i; | ||
1946 | de->media_supported = | ||
1947 | SUPPORTED_10baseT_Half | | ||
1948 | SUPPORTED_10baseT_Full | | ||
1949 | SUPPORTED_Autoneg | | ||
1950 | SUPPORTED_TP | | ||
1951 | SUPPORTED_AUI | | ||
1952 | SUPPORTED_BNC; | ||
1953 | goto fill_defaults; | ||
1954 | } | ||
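The media-block walk in de21041_get_srom_info() steps through variable-length records: every block starts with one opts byte, and the three 16-bit CSR values follow only when the custom-CSR flag is set. A sketch of that length rule follows; the numeric value of MediaCustomCSRs is assumed here (it lives in the driver's header, outside this hunk):

    #include <stdint.h>
    #include <stdio.h>

    #define MEDIA_CUSTOM_CSRS 0x40   /* assumed value of MediaCustomCSRs */

    /* 1 opts byte, then csr13/csr14/csr15 (2 bytes each) when present */
    static size_t media_block_len(uint8_t opts)
    {
            return 1 + ((opts & MEDIA_CUSTOM_CSRS) ? 3 * sizeof(uint16_t) : 0);
    }

    int main(void)
    {
            printf("%zu %zu\n",
                   media_block_len(0x00),                /* 1 */
                   media_block_len(MEDIA_CUSTOM_CSRS));  /* 7 */
            return 0;
    }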
1955 | |||
1956 | static const struct net_device_ops de_netdev_ops = { | ||
1957 | .ndo_open = de_open, | ||
1958 | .ndo_stop = de_close, | ||
1959 | .ndo_set_multicast_list = de_set_rx_mode, | ||
1960 | .ndo_start_xmit = de_start_xmit, | ||
1961 | .ndo_get_stats = de_get_stats, | ||
1962 | .ndo_tx_timeout = de_tx_timeout, | ||
1963 | .ndo_change_mtu = eth_change_mtu, | ||
1964 | .ndo_set_mac_address = eth_mac_addr, | ||
1965 | .ndo_validate_addr = eth_validate_addr, | ||
1966 | }; | ||
1967 | |||
1968 | static int __devinit de_init_one (struct pci_dev *pdev, | ||
1969 | const struct pci_device_id *ent) | ||
1970 | { | ||
1971 | struct net_device *dev; | ||
1972 | struct de_private *de; | ||
1973 | int rc; | ||
1974 | void __iomem *regs; | ||
1975 | unsigned long pciaddr; | ||
1976 | static int board_idx = -1; | ||
1977 | |||
1978 | board_idx++; | ||
1979 | |||
1980 | #ifndef MODULE | ||
1981 | if (board_idx == 0) | ||
1982 | pr_info("%s\n", version); | ||
1983 | #endif | ||
1984 | |||
1985 | /* allocate a new ethernet device structure, and fill in defaults */ | ||
1986 | dev = alloc_etherdev(sizeof(struct de_private)); | ||
1987 | if (!dev) | ||
1988 | return -ENOMEM; | ||
1989 | |||
1990 | dev->netdev_ops = &de_netdev_ops; | ||
1991 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1992 | dev->ethtool_ops = &de_ethtool_ops; | ||
1993 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1994 | |||
1995 | de = netdev_priv(dev); | ||
1996 | de->de21040 = ent->driver_data == 0 ? 1 : 0; | ||
1997 | de->pdev = pdev; | ||
1998 | de->dev = dev; | ||
1999 | de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug); | ||
2000 | de->board_idx = board_idx; | ||
2001 | spin_lock_init (&de->lock); | ||
2002 | init_timer(&de->media_timer); | ||
2003 | if (de->de21040) | ||
2004 | de->media_timer.function = de21040_media_timer; | ||
2005 | else | ||
2006 | de->media_timer.function = de21041_media_timer; | ||
2007 | de->media_timer.data = (unsigned long) de; | ||
2008 | |||
2009 | netif_carrier_off(dev); | ||
2010 | |||
2011 | /* wake up device, assign resources */ | ||
2012 | rc = pci_enable_device(pdev); | ||
2013 | if (rc) | ||
2014 | goto err_out_free; | ||
2015 | |||
2016 | /* reserve PCI resources to ensure driver atomicity */ | ||
2017 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2018 | if (rc) | ||
2019 | goto err_out_disable; | ||
2020 | |||
2021 | /* check for invalid IRQ value */ | ||
2022 | if (pdev->irq < 2) { | ||
2023 | rc = -EIO; | ||
2024 | pr_err("invalid irq (%d) for pci dev %s\n", | ||
2025 | pdev->irq, pci_name(pdev)); | ||
2026 | goto err_out_res; | ||
2027 | } | ||
2028 | |||
2029 | dev->irq = pdev->irq; | ||
2030 | |||
2031 | /* obtain and check validity of PCI I/O address */ | ||
2032 | pciaddr = pci_resource_start(pdev, 1); | ||
2033 | if (!pciaddr) { | ||
2034 | rc = -EIO; | ||
2035 | pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev)); | ||
2036 | goto err_out_res; | ||
2037 | } | ||
2038 | if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) { | ||
2039 | rc = -EIO; | ||
2040 | pr_err("MMIO resource (%llx) too small on pci dev %s\n", | ||
2041 | (unsigned long long)pci_resource_len(pdev, 1), | ||
2042 | pci_name(pdev)); | ||
2043 | goto err_out_res; | ||
2044 | } | ||
2045 | |||
2046 | /* remap CSR registers */ | ||
2047 | regs = ioremap_nocache(pciaddr, DE_REGS_SIZE); | ||
2048 | if (!regs) { | ||
2049 | rc = -EIO; | ||
2050 | pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n", | ||
2051 | (unsigned long long)pci_resource_len(pdev, 1), | ||
2052 | pciaddr, pci_name(pdev)); | ||
2053 | goto err_out_res; | ||
2054 | } | ||
2055 | dev->base_addr = (unsigned long) regs; | ||
2056 | de->regs = regs; | ||
2057 | |||
2058 | de_adapter_wake(de); | ||
2059 | |||
2060 | /* make sure hardware is not running */ | ||
2061 | rc = de_reset_mac(de); | ||
2062 | if (rc) { | ||
2063 | pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev)); | ||
2064 | goto err_out_iomap; | ||
2065 | } | ||
2066 | |||
2067 | /* get MAC address, initialize default media type and | ||
2068 | * get list of supported media | ||
2069 | */ | ||
2070 | if (de->de21040) { | ||
2071 | de21040_get_mac_address(de); | ||
2072 | de21040_get_media_info(de); | ||
2073 | } else { | ||
2074 | de21041_get_srom_info(de); | ||
2075 | } | ||
2076 | |||
2077 | /* register new network interface with kernel */ | ||
2078 | rc = register_netdev(dev); | ||
2079 | if (rc) | ||
2080 | goto err_out_iomap; | ||
2081 | |||
2082 | /* print info about board and interface just registered */ | ||
2083 | netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", | ||
2084 | de->de21040 ? "21040" : "21041", | ||
2085 | dev->base_addr, | ||
2086 | dev->dev_addr, | ||
2087 | dev->irq); | ||
2088 | |||
2089 | pci_set_drvdata(pdev, dev); | ||
2090 | |||
2091 | /* enable busmastering */ | ||
2092 | pci_set_master(pdev); | ||
2093 | |||
2094 | /* put adapter to sleep */ | ||
2095 | de_adapter_sleep(de); | ||
2096 | |||
2097 | return 0; | ||
2098 | |||
2099 | err_out_iomap: | ||
2100 | kfree(de->ee_data); | ||
2101 | iounmap(regs); | ||
2102 | err_out_res: | ||
2103 | pci_release_regions(pdev); | ||
2104 | err_out_disable: | ||
2105 | pci_disable_device(pdev); | ||
2106 | err_out_free: | ||
2107 | free_netdev(dev); | ||
2108 | return rc; | ||
2109 | } | ||
2110 | |||
2111 | static void __devexit de_remove_one (struct pci_dev *pdev) | ||
2112 | { | ||
2113 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2114 | struct de_private *de = netdev_priv(dev); | ||
2115 | |||
2116 | BUG_ON(!dev); | ||
2117 | unregister_netdev(dev); | ||
2118 | kfree(de->ee_data); | ||
2119 | iounmap(de->regs); | ||
2120 | pci_release_regions(pdev); | ||
2121 | pci_disable_device(pdev); | ||
2122 | pci_set_drvdata(pdev, NULL); | ||
2123 | free_netdev(dev); | ||
2124 | } | ||
2125 | |||
2126 | #ifdef CONFIG_PM | ||
2127 | |||
2128 | static int de_suspend (struct pci_dev *pdev, pm_message_t state) | ||
2129 | { | ||
2130 | struct net_device *dev = pci_get_drvdata (pdev); | ||
2131 | struct de_private *de = netdev_priv(dev); | ||
2132 | |||
2133 | rtnl_lock(); | ||
2134 | if (netif_running (dev)) { | ||
2135 | del_timer_sync(&de->media_timer); | ||
2136 | |||
2137 | disable_irq(dev->irq); | ||
2138 | spin_lock_irq(&de->lock); | ||
2139 | |||
2140 | de_stop_hw(de); | ||
2141 | netif_stop_queue(dev); | ||
2142 | netif_device_detach(dev); | ||
2143 | netif_carrier_off(dev); | ||
2144 | |||
2145 | spin_unlock_irq(&de->lock); | ||
2146 | enable_irq(dev->irq); | ||
2147 | |||
2148 | /* Update the error counts. */ | ||
2149 | __de_get_stats(de); | ||
2150 | |||
2151 | synchronize_irq(dev->irq); | ||
2152 | de_clean_rings(de); | ||
2153 | |||
2154 | de_adapter_sleep(de); | ||
2155 | pci_disable_device(pdev); | ||
2156 | } else { | ||
2157 | netif_device_detach(dev); | ||
2158 | } | ||
2159 | rtnl_unlock(); | ||
2160 | return 0; | ||
2161 | } | ||
2162 | |||
2163 | static int de_resume (struct pci_dev *pdev) | ||
2164 | { | ||
2165 | struct net_device *dev = pci_get_drvdata (pdev); | ||
2166 | struct de_private *de = netdev_priv(dev); | ||
2167 | int retval = 0; | ||
2168 | |||
2169 | rtnl_lock(); | ||
2170 | if (netif_device_present(dev)) | ||
2171 | goto out; | ||
2172 | if (!netif_running(dev)) | ||
2173 | goto out_attach; | ||
2174 | if ((retval = pci_enable_device(pdev))) { | ||
2175 | netdev_err(dev, "pci_enable_device failed in resume\n"); | ||
2176 | goto out; | ||
2177 | } | ||
2178 | pci_set_master(pdev); | ||
2179 | de_init_rings(de); | ||
2180 | de_init_hw(de); | ||
2181 | out_attach: | ||
2182 | netif_device_attach(dev); | ||
2183 | out: | ||
2184 | rtnl_unlock(); | ||
2185 | return 0; | ||
2186 | } | ||
2187 | |||
2188 | #endif /* CONFIG_PM */ | ||
2189 | |||
2190 | static struct pci_driver de_driver = { | ||
2191 | .name = DRV_NAME, | ||
2192 | .id_table = de_pci_tbl, | ||
2193 | .probe = de_init_one, | ||
2194 | .remove = __devexit_p(de_remove_one), | ||
2195 | #ifdef CONFIG_PM | ||
2196 | .suspend = de_suspend, | ||
2197 | .resume = de_resume, | ||
2198 | #endif | ||
2199 | }; | ||
2200 | |||
2201 | static int __init de_init (void) | ||
2202 | { | ||
2203 | #ifdef MODULE | ||
2204 | pr_info("%s\n", version); | ||
2205 | #endif | ||
2206 | return pci_register_driver(&de_driver); | ||
2207 | } | ||
2208 | |||
2209 | static void __exit de_exit (void) | ||
2210 | { | ||
2211 | pci_unregister_driver (&de_driver); | ||
2212 | } | ||
2213 | |||
2214 | module_init(de_init); | ||
2215 | module_exit(de_exit); | ||
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c new file mode 100644 index 000000000000..959b41021a65 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de4x5.c | |||
@@ -0,0 +1,5599 @@ | |||
1 | /* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500 | ||
2 | ethernet driver for Linux. | ||
3 | |||
4 | Copyright 1994, 1995 Digital Equipment Corporation. | ||
5 | |||
6 | Testing resources for this driver have been made available | ||
7 | in part by NASA Ames Research Center (mjacob@nas.nasa.gov). | ||
8 | |||
9 | The author may be reached at davies@maniac.ultranet.com. | ||
10 | |||
11 | This program is free software; you can redistribute it and/or modify it | ||
12 | under the terms of the GNU General Public License as published by the | ||
13 | Free Software Foundation; either version 2 of the License, or (at your | ||
14 | option) any later version. | ||
15 | |||
16 | THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
17 | WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
18 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | ||
19 | NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, | ||
20 | INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | ||
21 | NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | ||
22 | USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | ||
23 | ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | ||
25 | THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
26 | |||
27 | You should have received a copy of the GNU General Public License along | ||
28 | with this program; if not, write to the Free Software Foundation, Inc., | ||
29 | 675 Mass Ave, Cambridge, MA 02139, USA. | ||
30 | |||
31 | Originally, this driver was written for the Digital Equipment | ||
32 | Corporation series of EtherWORKS ethernet cards: | ||
33 | |||
34 | DE425 TP/COAX EISA | ||
35 | DE434 TP PCI | ||
36 | DE435 TP/COAX/AUI PCI | ||
37 | DE450 TP/COAX/AUI PCI | ||
38 | DE500 10/100 PCI Fasternet | ||
39 | |||
40 | but it will now attempt to support all cards which conform to the | ||
41 | Digital Semiconductor SROM Specification. The driver currently | ||
42 | recognises the following chips: | ||
43 | |||
44 | DC21040 (no SROM) | ||
45 | DC21041[A] | ||
46 | DC21140[A] | ||
47 | DC21142 | ||
48 | DC21143 | ||
49 | |||
50 | So far the driver is known to work with the following cards: | ||
51 | |||
52 | KINGSTON | ||
53 | Linksys | ||
54 | ZNYX342 | ||
55 | SMC8432 | ||
56 | SMC9332 (w/new SROM) | ||
57 | ZNYX31[45] | ||
58 | ZNYX346 10/100 4 port (can act as a 10/100 bridge!) | ||
59 | |||
60 | The driver has been tested on a relatively busy network using the DE425, | ||
61 | DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred | ||
62 | 16M of data to a DECstation 5000/200 as follows: | ||
63 | |||
64 | TCP UDP | ||
65 | TX RX TX RX | ||
66 | DE425 1030k 997k 1170k 1128k | ||
67 | DE434 1063k 995k 1170k 1125k | ||
68 | DE435 1063k 995k 1170k 1125k | ||
69 | DE500 1063k 998k 1170k 1125k in 10Mb/s mode | ||
70 | |||
71 | All values are typical (in kBytes/sec) from a sample of 4 for each | ||
72 | measurement. Their error is +/-20k on a quiet (private) network and also | ||
73 | depends on what load the CPU has. | ||
74 | |||
75 | ========================================================================= | ||
76 | This driver has been written substantially from scratch, although its | ||
77 | inheritance of style and stack interface from 'ewrk3.c' and in turn from | ||
78 | Donald Becker's 'lance.c' should be obvious. With the module autoload of | ||
79 | every usable DECchip board, I pinched Donald's 'next_module' field to | ||
80 | link my modules together. | ||
81 | |||
82 | Up to 15 EISA cards can be supported under this driver, limited primarily | ||
83 | by the available IRQ lines. I have checked different configurations of | ||
84 | multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a | ||
85 | problem yet (provided you have at least depca.c v0.38) ... | ||
86 | |||
87 | PCI support has been added to allow the driver to work with the DE434, | ||
88 | DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due | ||
89 | to the differences in the EISA and PCI CSR address offsets from the base | ||
90 | address. | ||
91 | |||
92 | The ability to load this driver as a loadable module has been included | ||
93 | and used extensively during the driver development (to save those long | ||
94 | reboot sequences). Loadable module support under PCI and EISA has been | ||
95 | achieved by letting the driver autoprobe as if it were compiled into the | ||
96 | kernel. Do make sure you're not sharing interrupts with anything that | ||
97 | cannot accommodate interrupt sharing! | ||
98 | |||
99 | To utilise this ability, you have to do 8 things: | ||
100 | |||
101 | 0) have a copy of the loadable modules code installed on your system. | ||
102 | 1) copy de4x5.c from the /linux/drivers/net directory to your favourite | ||
103 | temporary directory. | ||
104 | 2) for fixed autoprobes (not recommended), edit the source code near | ||
105 | line 5594 to reflect the I/O address you're using, or assign these when | ||
106 | loading by: | ||
107 | |||
108 | insmod de4x5 io=0xghh where g = bus number | ||
109 | hh = device number | ||
110 | |||
111 | NB: autoprobing for modules is now supported by default. You may just | ||
112 | use: | ||
113 | |||
114 | insmod de4x5 | ||
115 | |||
116 | to load all available boards. For a specific board, still use | ||
117 | the 'io=?' above. | ||
118 | 3) compile de4x5.c, but include -DMODULE in the command line to ensure | ||
119 | that the correct bits are compiled (see end of source code). | ||
120 | 4) if you want to add a new card, goto 5. Otherwise, recompile a | ||
121 | kernel with the de4x5 configuration turned off and reboot. | ||
122 | 5) insmod de4x5 [io=0xghh] | ||
123 | 6) run the net startup bits for your new eth?? interface(s) manually | ||
124 | (usually /etc/rc.inet[12] at boot time). | ||
125 | 7) enjoy! | ||
126 | |||
127 | To unload a module, turn off the associated interface(s) | ||
128 | 'ifconfig eth?? down' then 'rmmod de4x5'. | ||
129 | |||
130 | Automedia detection is included so that in principle you can disconnect | ||
131 | from, e.g. TP, reconnect to BNC and things will still work (after a | ||
132 | pause whilst the driver figures out where its media went). My tests | ||
133 | using ping showed that it appears to work.... | ||
134 | |||
135 | By default, the driver will now autodetect any DECchip based card. | ||
136 | Should you have a need to restrict the driver to DIGITAL only cards, you | ||
137 | can compile with a DEC_ONLY define, or if loading as a module, use the | ||
138 | 'dec_only=1' parameter. | ||
139 | |||
140 | I've changed the timing routines to use the kernel timer and scheduling | ||
141 | functions so that the hangs and other assorted problems that occurred | ||
142 | while autosensing the media should be gone. A bonus for the DC21040 | ||
143 | auto media sense algorithm is that it can now use one that is more in | ||
144 | line with the rest (the DC21040 chip doesn't have a hardware timer). | ||
145 | The downside is the 1 'jiffies' (10ms) resolution. | ||
146 | |||
147 | IEEE 802.3u MII interface code has been added in anticipation that some | ||
148 | products may use it in the future. | ||
149 | |||
150 | The SMC9332 card has a non-compliant SROM which needs fixing - I have | ||
151 | patched this driver to detect it because the SROM format used conforms | ||
152 | to a previous DEC-STD format. | ||
153 | |||
154 | I have removed the buffer copies needed for receive on Intels. I cannot | ||
155 | remove them for Alphas since the Tulip hardware only does longword | ||
156 | aligned DMA transfers and the Alphas get alignment traps with non | ||
157 | longword aligned data copies (which makes them really slow). No comment. | ||
158 | |||
159 | I have added SROM decoding routines to make this driver work with any | ||
160 | card that supports the Digital Semiconductor SROM spec. This will help | ||
161 | all cards running the dc2114x series chips in particular. Cards using | ||
162 | the dc2104x chips should run correctly with the basic driver. I'm in | ||
163 | debt to <mjacob@feral.com> for the testing and feedback that helped get | ||
164 | this feature working. So far we have tested KINGSTON, SMC8432, SMC9332 | ||
165 | (with the latest SROM complying with the SROM spec V3: their first was | ||
166 | broken), ZNYX342 and LinkSys. ZYNX314 (dual 21041 MAC) and ZNYX 315 | ||
167 | (quad 21041 MAC) cards also appear to work despite their incorrectly | ||
168 | wired IRQs. | ||
169 | |||
170 | I have added a temporary fix for interrupt problems when some SCSI cards | ||
171 | share the same interrupt as the DECchip based cards. The problem occurs | ||
172 | because the SCSI card wants to grab the interrupt as a fast interrupt | ||
173 | (runs the service routine with interrupts turned off) vs. this card | ||
174 | which really needs to run the service routine with interrupts turned on. | ||
175 | This driver will now add the interrupt service routine as a fast | ||
176 | interrupt if it is bounced from the slow interrupt. THIS IS NOT A | ||
177 | RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time | ||
178 | until people sort out their compatibility issues and the kernel | ||
179 | interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST | ||
180 | INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not | ||
181 | run on the same interrupt. PCMCIA/CardBus is another can of worms... | ||
182 | |||
183 | Finally, I think I have really fixed the module loading problem with | ||
184 | more than one DECchip based card. As a side effect, I don't mess with | ||
185 | the device structure any more which means that if more than 1 card in | ||
186 | 2.0.x is installed (4 in 2.1.x), the user will have to edit | ||
187 | linux/drivers/net/Space.c to make room for them. Hence, module loading | ||
188 | is the preferred way to use this driver, since it doesn't have this | ||
189 | limitation. | ||
190 | |||
191 | Where SROM media detection is used and full duplex is specified in the | ||
192 | SROM, the feature is ignored unless lp->params.fdx is set at compile | ||
193 | time OR during a module load (insmod de4x5 args='eth??:fdx' [see | ||
194 | below]). This is because there is no way to automatically detect full | ||
195 | duplex links except through autonegotiation. When I include the | ||
196 | autonegotiation feature in the SROM autoconf code, this detection will | ||
197 | occur automatically for that case. | ||
198 | |||
199 | Command line arguments are now allowed, similar to passing arguments | ||
200 | through LILO. This will allow a per adapter board set up of full duplex | ||
201 | and media. The only lexical constraints are: the board name (dev->name) | ||
202 | appears in the list before its parameters. The list of parameters ends | ||
203 | either at the end of the parameter list or with another board name. The | ||
204 | following parameters are allowed: | ||
205 | |||
206 | fdx for full duplex | ||
207 | autosense to set the media/speed; with the following | ||
208 | sub-parameters: | ||
209 | TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO | ||
210 | |||
211 | Case sensitivity is important for the sub-parameters. They *must* be | ||
212 | upper case. Examples: | ||
213 | |||
214 | insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. | ||
215 | |||
216 | For a compiled in driver, at or above line 548, place e.g. | ||
217 | #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" | ||
218 | |||
219 | Yes, I know full duplex isn't permissible on BNC or AUI; they're just | ||
220 | examples. By default, full duplex is turned off and AUTO is the default | ||
221 | autosense setting. In reality, I expect only the full duplex option to | ||
222 | be used. Note the use of single quotes in the two examples above and the | ||
223 | lack of commas to separate items. ALSO, you must get the requested media | ||
224 | correct in relation to what the adapter SROM says it has. There's no way | ||
225 | to determine this in advance other than by trial and error and common | ||
226 | sense, e.g. call a BNC connectored port 'BNC', not '10Mb'. | ||
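As a rough illustration of the lexical rules described above (a board name introduces a section, its parameters follow, separated by spaces with no commas), here is a small user-space sketch that splits such an argument string into board sections and parameters. It is not the driver's own parser, only a demonstration of the format:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char args[] = "eth1:fdx autosense=BNC eth0:autosense=100Mb";
            char *tok;

            for (tok = strtok(args, " "); tok; tok = strtok(NULL, " ")) {
                    char *colon = strchr(tok, ':');

                    if (colon) {            /* "ethX:" starts a new section */
                            printf("board %.*s\n", (int)(colon - tok), tok);
                            tok = colon + 1;
                    }
                    printf("  param: %s\n", tok);
            }
            return 0;
    }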
227 | |||
228 | Changed the bus probing. EISA used to be done first, followed by PCI. | ||
229 | Most people probably don't even know what a de425 is today and the EISA | ||
230 | probe has messed up some SCSI cards in the past, so now PCI is always | ||
231 | probed first followed by EISA if a) the architecture allows EISA and | ||
232 | either b) there have been no PCI cards detected or c) an EISA probe is | ||
233 | forced by the user. To force a probe include "force_eisa" in your | ||
234 | insmod "args" line; for built-in kernels either change the driver to do | ||
235 | this automatically or include #define DE4X5_FORCE_EISA on or before | ||
236 | line 1040 in the driver. | ||
237 | |||
238 | TO DO: | ||
239 | ------ | ||
240 | |||
241 | Revision History | ||
242 | ---------------- | ||
243 | |||
244 | Version Date Description | ||
245 | |||
246 | 0.1 17-Nov-94 Initial writing. ALPHA code release. | ||
247 | 0.2 13-Jan-95 Added PCI support for DE435's. | ||
248 | 0.21 19-Jan-95 Added auto media detection. | ||
249 | 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>. | ||
250 | Fix recognition bug reported by <bkm@star.rl.ac.uk>. | ||
251 | Add request/release_region code. | ||
252 | Add loadable modules support for PCI. | ||
253 | Clean up loadable modules support. | ||
254 | 0.23 28-Feb-95 Added DC21041 and DC21140 support. | ||
255 | Fix missed frame counter value and initialisation. | ||
256 | Fixed EISA probe. | ||
257 | 0.24 11-Apr-95 Change delay routine to use <linux/udelay>. | ||
258 | Change TX_BUFFS_AVAIL macro. | ||
259 | Change media autodetection to allow manual setting. | ||
260 | Completed DE500 (DC21140) support. | ||
261 | 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm. | ||
262 | 0.242 10-May-95 Minor changes. | ||
263 | 0.30 12-Jun-95 Timer fix for DC21140. | ||
264 | Portability changes. | ||
265 | Add ALPHA changes from <jestabro@ant.tay1.dec.com>. | ||
266 | Add DE500 semi automatic autosense. | ||
267 | Add Link Fail interrupt TP failure detection. | ||
268 | Add timer based link change detection. | ||
269 | Plugged a memory leak in de4x5_queue_pkt(). | ||
270 | 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1. | ||
271 | 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a | ||
272 | suggestion by <heiko@colossus.escape.de>. | ||
273 | 0.33 8-Aug-95 Add shared interrupt support (not released yet). | ||
274 | 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs. | ||
275 | Fix de4x5_interrupt(). | ||
276 | Fix dc21140_autoconf() mess. | ||
277 | No shared interrupt support. | ||
278 | 0.332 11-Sep-95 Added MII management interface routines. | ||
279 | 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>. | ||
280 | Add kernel timer code (h/w is too flaky). | ||
281 | Add MII based PHY autosense. | ||
282 | Add new multicasting code. | ||
283 | Add new autosense algorithms for media/mode | ||
284 | selection using kernel scheduling/timing. | ||
285 | Re-formatted. | ||
286 | Made changes suggested by <jeff@router.patch.net>: | ||
287 | Change driver to detect all DECchip based cards | ||
288 | with DEC_ONLY restriction a special case. | ||
289 | Changed driver to autoprobe as a module. No irq | ||
290 | checking is done now - assume BIOS is good! | ||
291 | Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp> | ||
292 | 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card | ||
293 | only <niles@axp745gsfc.nasa.gov> | ||
294 | Fix for multiple PCI cards reported by <jos@xos.nl> | ||
295 | Duh, put the IRQF_SHARED flag into request_interrupt(). | ||
296 | Fix SMC ethernet address in enet_det[]. | ||
297 | Print chip name instead of "UNKNOWN" during boot. | ||
298 | 0.42 26-Apr-96 Fix MII write TA bit error. | ||
299 | Fix bug in dc21040 and dc21041 autosense code. | ||
300 | Remove buffer copies on receive for Intels. | ||
301 | Change sk_buff handling during media disconnects to | ||
302 | eliminate DUP packets. | ||
303 | Add dynamic TX thresholding. | ||
304 | Change all chips to use perfect multicast filtering. | ||
305 | Fix alloc_device() bug <jari@markkus2.fimr.fi> | ||
306 | 0.43 21-Jun-96 Fix unconnected media TX retry bug. | ||
307 | Add Accton to the list of broken cards. | ||
308 | Fix TX under-run bug for non DC21140 chips. | ||
309 | Fix boot command probe bug in alloc_device() as | ||
310 | reported by <koen.gadeyne@barco.com> and | ||
311 | <orava@nether.tky.hut.fi>. | ||
312 | Add cache locks to prevent a race condition as | ||
313 | reported by <csd@microplex.com> and | ||
314 | <baba@beckman.uiuc.edu>. | ||
315 | Upgraded alloc_device() code. | ||
316 | 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion | ||
317 | with <csd@microplex.com> | ||
318 | 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips. | ||
319 | Fix EISA probe bugs reported by <os2@kpi.kharkov.ua> | ||
320 | and <michael@compurex.com>. | ||
321 | 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media | ||
322 | with a loopback packet. | ||
323 | 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported | ||
324 | by <bhat@mundook.cs.mu.OZ.AU> | ||
325 | 0.45 8-Dec-96 Include endian functions for PPC use, from work | ||
326 | by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>. | ||
327 | 0.451 28-Dec-96 Added fix to allow autoprobe for modules after | ||
328 | suggestion from <mjacob@feral.com>. | ||
329 | 0.5 30-Jan-97 Added SROM decoding functions. | ||
330 | Updated debug flags. | ||
331 | Fix sleep/wakeup calls for PCI cards, bug reported | ||
332 | by <cross@gweep.lkg.dec.com>. | ||
333 | Added multi-MAC, one SROM feature from discussion | ||
334 | with <mjacob@feral.com>. | ||
335 | Added full module autoprobe capability. | ||
336 | Added attempt to use an SMC9332 with broken SROM. | ||
337 | Added fix for ZYNX multi-mac cards that didn't | ||
338 | get their IRQs wired correctly. | ||
339 | 0.51 13-Feb-97 Added endian fixes for the SROM accesses from | ||
340 | <paubert@iram.es> | ||
341 | Fix init_connection() to remove extra device reset. | ||
342 | Fix MAC/PHY reset ordering in dc21140m_autoconf(). | ||
343 | Fix initialisation problem with lp->timeout in | ||
344 | typeX_infoblock() from <paubert@iram.es>. | ||
345 | Fix MII PHY reset problem from work done by | ||
346 | <paubert@iram.es>. | ||
347 | 0.52 26-Apr-97 Some changes may not credit the right people - | ||
348 | a disk crash meant I lost some mail. | ||
349 | Change RX interrupt routine to drop rather than | ||
350 | defer packets to avoid hang reported by | ||
351 | <g.thomas@opengroup.org>. | ||
352 | Fix srom_exec() to return for COMPACT and type 1 | ||
353 | infoblocks. | ||
354 | Added DC21142 and DC21143 functions. | ||
355 | Added byte counters from <phil@tazenda.demon.co.uk> | ||
356 | Added IRQF_DISABLED temporary fix from | ||
357 | <mjacob@feral.com>. | ||
358 | 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during | ||
359 | module load: bug reported by | ||
360 | <Piete.Brooks@cl.cam.ac.uk> | ||
361 | Fix multi-MAC, one SROM, to work with 2114x chips: | ||
362 | bug reported by <cmetz@inner.net>. | ||
363 | Make above search independent of BIOS device scan | ||
364 | direction. | ||
365 | Completed DC2114[23] autosense functions. | ||
366 | 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by | ||
367 | <robin@intercore.com> | ||
368 | Fix type1_infoblock() bug introduced in 0.53, from | ||
369 | problem reports by | ||
370 | <parmee@postecss.ncrfran.france.ncr.com> and | ||
371 | <jo@ice.dillingen.baynet.de>. | ||
372 | Added argument list to set up each board from either | ||
373 | a module's command line or a compiled in #define. | ||
374 | Added generic MII PHY functionality to deal with | ||
375 | newer PHY chips. | ||
376 | Fix the mess in 2.1.67. | ||
377 | 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by | ||
378 | <redhat@cococo.net>. | ||
379 | Fix bug in pci_probe() for 64 bit systems reported | ||
380 | by <belliott@accessone.com>. | ||
381 | 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>. | ||
382 | 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org> | ||
383 | 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040. | ||
384 | 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure. | ||
385 | **Incompatible with 2.0.x from here.** | ||
386 | 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP | ||
387 | from <lma@varesearch.com> | ||
388 | Add TP, AUI and BNC cases to 21140m_autoconf() for | ||
389 | case where a 21140 under SROM control uses, e.g. AUI | ||
390 | from problem report by <delchini@lpnp09.in2p3.fr> | ||
391 | Add MII parallel detection to 2114x_autoconf() for | ||
392 | case where no autonegotiation partner exists from | ||
393 | problem report by <mlapsley@ndirect.co.uk>. | ||
394 | Add ability to force connection type directly even | ||
395 | when using SROM control from problem report by | ||
396 | <earl@exis.net>. | ||
397 | Updated the PCI interface to conform with the latest | ||
398 | version. I hope nothing is broken... | ||
399 | Add TX done interrupt modification from suggestion | ||
400 | by <Austin.Donnelly@cl.cam.ac.uk>. | ||
401 | Fix is_anc_capable() bug reported by | ||
402 | <Austin.Donnelly@cl.cam.ac.uk>. | ||
403 | Fix type[13]_infoblock() bug: during MII search, PHY | ||
404 | lp->rst not run because lp->ibn not initialised - | ||
405 | from report & fix by <paubert@iram.es>. | ||
406 | Fix probe bug with EISA & PCI cards present from | ||
407 | report by <eirik@netcom.com>. | ||
408 | 0.541 24-Aug-98 Fix compiler problems associated with i386-string | ||
409 | ops from multiple bug reports and temporary fix | ||
410 | from <paubert@iram.es>. | ||
411 | Fix pci_probe() to correctly emulate the old | ||
412 | pcibios_find_class() function. | ||
413 | Add an_exception() for old ZYNX346 and fix compile | ||
414 | warning on PPC & SPARC, from <ecd@skynet.be>. | ||
415 | Fix lastPCI to correctly work with compiled in | ||
416 | kernels and modules from bug report by | ||
417 | <Zlatko.Calusic@CARNet.hr> et al. | ||
418 | 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages | ||
419 | when media is unconnected. | ||
420 | Change dev->interrupt to lp->interrupt to ensure | ||
421 | alignment for Alpha's and avoid their unaligned | ||
422 | access traps. This flag is merely for log messages: | ||
423 | should do something more definitive though... | ||
424 | 0.543 30-Dec-98 Add SMP spin locking. | ||
425 | 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using | ||
426 | a 21143 by <mmporter@home.com>. | ||
427 | Change PCI/EISA bus probing order. | ||
428 | 0.545 28-Nov-99 Further Moto SROM bug fix from | ||
429 | <mporter@eng.mcd.mot.com> | ||
430 | Remove double checking for DEBUG_RX in de4x5_dbg_rx() | ||
431 | from report by <geert@linux-m68k.org> | ||
432 | 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function | ||
433 | was causing a page fault when initializing the | ||
434 | variable 'pb', on a non de4x5 PCI device, in this | ||
435 | case a PCI bridge (DEC chip 21152). The value of | ||
436 | 'pb' is now only initialized if a de4x5 chip is | ||
437 | present. | ||
438 | <france@handhelds.org> | ||
439 | 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com> | ||
440 | 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and | ||
441 | generic DMA APIs. Fixed DE425 support on Alpha. | ||
442 | <maz@wild-wind.fr.eu.org> | ||
443 | ========================================================================= | ||
444 | */ | ||
445 | |||
446 | #include <linux/module.h> | ||
447 | #include <linux/kernel.h> | ||
448 | #include <linux/string.h> | ||
449 | #include <linux/interrupt.h> | ||
450 | #include <linux/ptrace.h> | ||
451 | #include <linux/errno.h> | ||
452 | #include <linux/ioport.h> | ||
453 | #include <linux/pci.h> | ||
454 | #include <linux/eisa.h> | ||
455 | #include <linux/delay.h> | ||
456 | #include <linux/init.h> | ||
457 | #include <linux/spinlock.h> | ||
458 | #include <linux/crc32.h> | ||
459 | #include <linux/netdevice.h> | ||
460 | #include <linux/etherdevice.h> | ||
461 | #include <linux/skbuff.h> | ||
462 | #include <linux/time.h> | ||
463 | #include <linux/types.h> | ||
464 | #include <linux/unistd.h> | ||
465 | #include <linux/ctype.h> | ||
466 | #include <linux/dma-mapping.h> | ||
467 | #include <linux/moduleparam.h> | ||
468 | #include <linux/bitops.h> | ||
469 | #include <linux/gfp.h> | ||
470 | |||
471 | #include <asm/io.h> | ||
472 | #include <asm/dma.h> | ||
473 | #include <asm/byteorder.h> | ||
474 | #include <asm/unaligned.h> | ||
475 | #include <asm/uaccess.h> | ||
476 | #ifdef CONFIG_PPC_PMAC | ||
477 | #include <asm/machdep.h> | ||
478 | #endif /* CONFIG_PPC_PMAC */ | ||
479 | |||
480 | #include "de4x5.h" | ||
481 | |||
482 | static const char version[] __devinitconst = | ||
483 | KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; | ||
484 | |||
485 | #define c_char const char | ||
486 | |||
487 | /* | ||
488 | ** MII Information | ||
489 | */ | ||
490 | struct phy_table { | ||
491 | int reset; /* Hard reset required? */ | ||
492 | int id; /* IEEE OUI */ | ||
493 | int ta; /* One cycle TA time - 802.3u is confusing here */ | ||
494 | struct { /* Non autonegotiation (parallel) speed det. */ | ||
495 | int reg; | ||
496 | int mask; | ||
497 | int value; | ||
498 | } spd; | ||
499 | }; | ||
500 | |||
501 | struct mii_phy { | ||
502 | int reset; /* Hard reset required? */ | ||
503 | int id; /* IEEE OUI */ | ||
504 | int ta; /* One cycle TA time */ | ||
505 | struct { /* Non autonegotiation (parallel) speed det. */ | ||
506 | int reg; | ||
507 | int mask; | ||
508 | int value; | ||
509 | } spd; | ||
510 | int addr; /* MII address for the PHY */ | ||
511 | u_char *gep; /* Start of GEP sequence block in SROM */ | ||
512 | u_char *rst; /* Start of reset sequence in SROM */ | ||
513 | u_int mc; /* Media Capabilities */ | ||
514 | u_int ana; /* NWay Advertisement */ | ||
515 | u_int fdx; /* Full DupleX capabilities for each media */ | ||
516 | u_int ttm; /* Transmit Threshold Mode for each media */ | ||
517 | u_int mci; /* 21142 MII Connector Interrupt info */ | ||
518 | }; | ||
519 | |||
520 | #define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */ | ||
521 | |||
522 | struct sia_phy { | ||
523 | u_char mc; /* Media Code */ | ||
524 | u_char ext; /* csr13-15 valid when set */ | ||
525 | int csr13; /* SIA Connectivity Register */ | ||
526 | int csr14; /* SIA TX/RX Register */ | ||
527 | int csr15; /* SIA General Register */ | ||
528 | int gepc; /* SIA GEP Control Information */ | ||
529 | int gep; /* SIA GEP Data */ | ||
530 | }; | ||
531 | |||
532 | /* | ||
533 | ** Define the known universe of PHY devices that can be | ||
534 | ** recognised by this driver. | ||
535 | */ | ||
536 | static struct phy_table phy_info[] = { | ||
537 | {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */ | ||
538 | {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */ | ||
539 | {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */ | ||
540 | {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */ | ||
541 | {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */ | ||
542 | }; | ||
543 | |||
544 | /* | ||
545 | ** These GENERIC values assume that the PHY devices follow 802.3u and | ||
546 | ** allow parallel detection to set the link partner ability register. | ||
547 | ** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported. | ||
548 | */ | ||
549 | #define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */ | ||
550 | #define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */ | ||
551 | #define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */ | ||
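The spd.{reg, mask, value} triples in phy_table, together with the GENERIC defines above, encode one test: read the named MII register, apply the mask, and compare against the expected value to decide whether the link came up at 100Mb/s without autonegotiation. A minimal sketch of how such a triple can be applied; mii_read() and the sample register value are stand-ins, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    /* stand-in for an MII register read; 0x50 is an arbitrary sample value */
    static int mii_read(int reg)
    {
            (void)reg;
            return 0x50;
    }

    /* how a spd.{reg,mask,value} triple is applied */
    static bool spd_match(int reg, int mask, int value)
    {
            return (mii_read(reg) & mask) == value;
    }

    int main(void)
    {
            /* e.g. the SEEQ T4 entry {0x12, 0x10, 0x10}: matches when bit 4
             * of register 0x12 reads back set */
            printf("%s\n", spd_match(0x12, 0x10, 0x10) ? "100Mb" : "10Mb");
            return 0;
    }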
552 | |||
553 | /* | ||
554 | ** Define special SROM detection cases | ||
555 | */ | ||
556 | static c_char enet_det[][ETH_ALEN] = { | ||
557 | {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00}, | ||
558 | {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00} | ||
559 | }; | ||
560 | |||
561 | #define SMC 1 | ||
562 | #define ACCTON 2 | ||
563 | |||
564 | /* | ||
565 | ** SROM Repair definitions. If a broken SROM is detected a card may | ||
566 | ** use this information to help figure out what to do. This is a | ||
567 | ** "stab in the dark" and so far for SMC9332's only. | ||
568 | */ | ||
569 | static c_char srom_repair_info[][100] = { | ||
570 | {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */ | ||
571 | 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02, | ||
572 | 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50, | ||
573 | 0x00,0x18,} | ||
574 | }; | ||
575 | |||
576 | |||
577 | #ifdef DE4X5_DEBUG | ||
578 | static int de4x5_debug = DE4X5_DEBUG; | ||
579 | #else | ||
580 | /*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/ | ||
581 | static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION); | ||
582 | #endif | ||
583 | |||
584 | /* | ||
585 | ** Allow per adapter set up. For modules this is simply a command line | ||
586 | ** parameter, e.g.: | ||
587 | ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'. | ||
588 | ** | ||
589 | ** For a compiled in driver, place e.g. | ||
590 | ** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP" | ||
591 | ** here | ||
592 | */ | ||
593 | #ifdef DE4X5_PARM | ||
594 | static char *args = DE4X5_PARM; | ||
595 | #else | ||
596 | static char *args; | ||
597 | #endif | ||
598 | |||
599 | struct parameters { | ||
600 | bool fdx; | ||
601 | int autosense; | ||
602 | }; | ||
603 | |||
604 | #define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */ | ||
605 | |||
606 | #define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */ | ||
607 | |||
608 | /* | ||
609 | ** Ethernet PROM defines | ||
610 | */ | ||
611 | #define PROBE_LENGTH 32 | ||
612 | #define ETH_PROM_SIG 0xAA5500FFUL | ||
613 | |||
614 | /* | ||
615 | ** Ethernet Info | ||
616 | */ | ||
617 | #define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */ | ||
618 | #define IEEE802_3_SZ 1518 /* Packet + CRC */ | ||
619 | #define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */ | ||
620 | #define MAX_DAT_SZ 1500 /* Maximum ethernet data length */ | ||
621 | #define MIN_DAT_SZ 1 /* Minimum ethernet data length */ | ||
622 | #define PKT_HDR_LEN 14 /* Addresses and data length info */ | ||
623 | #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1) | ||
624 | #define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */ | ||
625 | |||
626 | |||
627 | /* | ||
628 | ** EISA bus defines | ||
629 | */ | ||
630 | #define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */ | ||
631 | #define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */ | ||
632 | |||
633 | #define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11} | ||
634 | |||
635 | #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"} | ||
636 | #define DE4X5_NAME_LENGTH 8 | ||
637 | |||
638 | static c_char *de4x5_signatures[] = DE4X5_SIGNATURE; | ||
639 | |||
640 | /* | ||
641 | ** Ethernet PROM defines for DC21040 | ||
642 | */ | ||
643 | #define PROBE_LENGTH 32 | ||
644 | #define ETH_PROM_SIG 0xAA5500FFUL | ||
645 | |||
646 | /* | ||
647 | ** PCI Bus defines | ||
648 | */ | ||
649 | #define PCI_MAX_BUS_NUM 8 | ||
650 | #define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */ | ||
651 | #define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */ | ||
652 | |||
653 | /* | ||
654 | ** Memory Alignment. Each descriptor is 4 longwords long. To force a | ||
655 | ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and | ||
656 | ** DESC_ALIGN. ALIGN aligns the start address of the private memory area | ||
657 | ** and hence the RX descriptor ring's first entry. | ||
658 | */ | ||
659 | #define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */ | ||
660 | #define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */ | ||
661 | #define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */ | ||
662 | #define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */ | ||
663 | #define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */ | ||
664 | #define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */ | ||
665 | |||
666 | #define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */ | ||
667 | #define DE4X5_CACHE_ALIGN CAL_16LONG | ||
668 | #define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */ | ||
669 | /*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */ | ||
670 | #define DESC_ALIGN | ||
671 | |||
672 | #ifndef DEC_ONLY /* See README.de4x5 for using this */ | ||
673 | static int dec_only; | ||
674 | #else | ||
675 | static int dec_only = 1; | ||
676 | #endif | ||
677 | |||
678 | /* | ||
679 | ** DE4X5 IRQ ENABLE/DISABLE | ||
680 | */ | ||
681 | #define ENABLE_IRQs { \ | ||
682 | imr |= lp->irq_en;\ | ||
683 | outl(imr, DE4X5_IMR); /* Enable the IRQs */\ | ||
684 | } | ||
685 | |||
686 | #define DISABLE_IRQs {\ | ||
687 | imr = inl(DE4X5_IMR);\ | ||
688 | imr &= ~lp->irq_en;\ | ||
689 | outl(imr, DE4X5_IMR); /* Disable the IRQs */\ | ||
690 | } | ||
691 | |||
692 | #define UNMASK_IRQs {\ | ||
693 | imr |= lp->irq_mask;\ | ||
694 | outl(imr, DE4X5_IMR); /* Unmask the IRQs */\ | ||
695 | } | ||
696 | |||
697 | #define MASK_IRQs {\ | ||
698 | imr = inl(DE4X5_IMR);\ | ||
699 | imr &= ~lp->irq_mask;\ | ||
700 | outl(imr, DE4X5_IMR); /* Mask the IRQs */\ | ||
701 | } | ||
702 | |||
703 | /* | ||
704 | ** DE4X5 START/STOP | ||
705 | */ | ||
706 | #define START_DE4X5 {\ | ||
707 | omr = inl(DE4X5_OMR);\ | ||
708 | omr |= OMR_ST | OMR_SR;\ | ||
709 | outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\ | ||
710 | } | ||
711 | |||
712 | #define STOP_DE4X5 {\ | ||
713 | omr = inl(DE4X5_OMR);\ | ||
714 | omr &= ~(OMR_ST|OMR_SR);\ | ||
715 | outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \ | ||
716 | } | ||
717 | |||
718 | /* | ||
719 | ** DE4X5 SIA RESET | ||
720 | */ | ||
721 | #define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */ | ||
722 | |||
723 | /* | ||
724 | ** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS) | ||
725 | */ | ||
726 | #define DE4X5_AUTOSENSE_MS 250 | ||
727 | |||
728 | /* | ||
729 | ** SROM Structure | ||
730 | */ | ||
731 | struct de4x5_srom { | ||
732 | char sub_vendor_id[2]; | ||
733 | char sub_system_id[2]; | ||
734 | char reserved[12]; | ||
735 | char id_block_crc; | ||
736 | char reserved2; | ||
737 | char version; | ||
738 | char num_controllers; | ||
739 | char ieee_addr[6]; | ||
740 | char info[100]; | ||
741 | short chksum; | ||
742 | }; | ||
743 | #define SUB_VENDOR_ID 0x500a | ||
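For reference, the fields of struct de4x5_srom above sum to 128 bytes (2 + 2 + 12 + 1 + 1 + 1 + 1 + 6 + 100 + 2), which matches a 1-Kbit (128-byte) serial EEPROM image; the final short lands on an even offset (126), so no padding is introduced on the usual ABIs. A compile-time sanity check of that arithmetic, using a mirrored layout rather than the driver's own definition:

    /* Mirrors struct de4x5_srom; a sketch, not the driver's definition. */
    struct srom_layout {
            char  sub_vendor_id[2];
            char  sub_system_id[2];
            char  reserved[12];
            char  id_block_crc;
            char  reserved2;
            char  version;
            char  num_controllers;
            char  ieee_addr[6];
            char  info[100];
            short chksum;            /* offset 126, already 2-byte aligned */
    };

    _Static_assert(sizeof(struct srom_layout) == 128, "SROM image is 128 bytes");

    int main(void) { return 0; }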
744 | |||
745 | /* | ||
746 | ** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous | ||
747 | ** and have sizes of both a power of 2 and a multiple of 4. | ||
748 | ** A size of 256 bytes for each buffer could be chosen because over 90% of | ||
749 | ** all packets in our network are <256 bytes long and 64 longword alignment | ||
750 | ** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX | ||
751 | ** descriptors are needed for machines with an ALPHA CPU. | ||
752 | */ | ||
753 | #define NUM_RX_DESC 8 /* Number of RX descriptors */ | ||
754 | #define NUM_TX_DESC 32 /* Number of TX descriptors */ | ||
755 | #define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */ | ||
756 | /* Multiple of 4 for DC21040 */ | ||
757 | /* Allows 512 byte alignment */ | ||
758 | struct de4x5_desc { | ||
759 | volatile __le32 status; | ||
760 | __le32 des1; | ||
761 | __le32 buf; | ||
762 | __le32 next; | ||
763 | DESC_ALIGN | ||
764 | }; | ||
765 | |||
766 | /* | ||
767 | ** The DE4X5 private structure | ||
768 | */ | ||
769 | #define DE4X5_PKT_STAT_SZ 16 | ||
770 | #define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you | ||
771 | increase DE4X5_PKT_STAT_SZ */ | ||
772 | |||
773 | struct pkt_stats { | ||
774 | u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */ | ||
775 | u_int unicast; | ||
776 | u_int multicast; | ||
777 | u_int broadcast; | ||
778 | u_int excessive_collisions; | ||
779 | u_int tx_underruns; | ||
780 | u_int excessive_underruns; | ||
781 | u_int rx_runt_frames; | ||
782 | u_int rx_collision; | ||
783 | u_int rx_dribble; | ||
784 | u_int rx_overflow; | ||
785 | }; | ||
786 | |||
787 | struct de4x5_private { | ||
788 | char adapter_name[80]; /* Adapter name */ | ||
789 | u_long interrupt; /* Aligned ISR flag */ | ||
790 | struct de4x5_desc *rx_ring; /* RX descriptor ring */ | ||
791 | struct de4x5_desc *tx_ring; /* TX descriptor ring */ | ||
792 | struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */ | ||
793 | struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */ | ||
794 | int rx_new, rx_old; /* RX descriptor ring pointers */ | ||
795 | int tx_new, tx_old; /* TX descriptor ring pointers */ | ||
796 | char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */ | ||
797 | char frame[64]; /* Min sized packet for loopback*/ | ||
798 | spinlock_t lock; /* Adapter specific spinlock */ | ||
799 | struct net_device_stats stats; /* Public stats */ | ||
800 | struct pkt_stats pktStats; /* Private stats counters */ | ||
801 | char rxRingSize; | ||
802 | char txRingSize; | ||
803 | int bus; /* EISA or PCI */ | ||
804 | int bus_num; /* PCI Bus number */ | ||
805 | int device; /* Device number on PCI bus */ | ||
806 | int state; /* Adapter OPENED or CLOSED */ | ||
807 | int chipset; /* DC21040, DC21041 or DC21140 */ | ||
808 | s32 irq_mask; /* Interrupt Mask (Enable) bits */ | ||
809 | s32 irq_en; /* Summary interrupt bits */ | ||
810 | int media; /* Media (eg TP), mode (eg 100B)*/ | ||
811 | int c_media; /* Remember the last media conn */ | ||
812 | bool fdx; /* media full duplex flag */ | ||
813 | int linkOK; /* Link is OK */ | ||
814 | int autosense; /* Allow/disallow autosensing */ | ||
815 | bool tx_enable; /* Enable descriptor polling */ | ||
816 | int setup_f; /* Setup frame filtering type */ | ||
817 | int local_state; /* State within a 'media' state */ | ||
818 | struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */ | ||
819 | struct sia_phy sia; /* SIA PHY Information */ | ||
820 | int active; /* Index to active PHY device */ | ||
821 | int mii_cnt; /* Number of attached PHY's */ | ||
822 | int timeout; /* Scheduling counter */ | ||
823 | struct timer_list timer; /* Timer info for kernel */ | ||
824 | int tmp; /* Temporary global per card */ | ||
825 | struct { | ||
826 | u_long lock; /* Lock the cache accesses */ | ||
827 | s32 csr0; /* Saved Bus Mode Register */ | ||
828 | s32 csr6; /* Saved Operating Mode Reg. */ | ||
829 | s32 csr7; /* Saved IRQ Mask Register */ | ||
830 | s32 gep; /* Saved General Purpose Reg. */ | ||
831 | s32 gepc; /* Control info for GEP */ | ||
832 | s32 csr13; /* Saved SIA Connectivity Reg. */ | ||
833 | s32 csr14; /* Saved SIA TX/RX Register */ | ||
834 | s32 csr15; /* Saved SIA General Register */ | ||
835 | int save_cnt; /* Flag if state already saved */ | ||
836 | struct sk_buff_head queue; /* Save the (re-ordered) skb's */ | ||
837 | } cache; | ||
838 | struct de4x5_srom srom; /* A copy of the SROM */ | ||
839 | int cfrv; /* Card CFRV copy */ | ||
840 | int rx_ovf; /* Check for 'RX overflow' tag */ | ||
841 | bool useSROM; /* For non-DEC card use SROM */ | ||
842 | bool useMII; /* Infoblock using the MII */ | ||
843 | int asBitValid; /* Autosense bits in GEP? */ | ||
844 | int asPolarity; /* 0 => asserted high */ | ||
845 | int asBit; /* Autosense bit number in GEP */ | ||
846 | int defMedium; /* SROM default medium */ | ||
847 | int tcount; /* Last infoblock number */ | ||
848 | int infoblock_init; /* Initialised this infoblock? */ | ||
849 | int infoleaf_offset; /* SROM infoleaf for controller */ | ||
850 | s32 infoblock_csr6; /* csr6 value in SROM infoblock */ | ||
851 | int infoblock_media; /* infoblock media */ | ||
852 | int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */ | ||
853 | u_char *rst; /* Pointer to Type 5 reset info */ | ||
854 | u_char ibn; /* Infoblock number */ | ||
855 | struct parameters params; /* Command line/ #defined params */ | ||
856 | struct device *gendev; /* Generic device */ | ||
857 | dma_addr_t dma_rings; /* DMA handle for rings */ | ||
858 | int dma_size; /* Size of the DMA area */ | ||
859 | char *rx_bufs; /* rx bufs on alpha, sparc, ... */ | ||
860 | }; | ||
861 | |||
862 | /* | ||
863 | ** To get around certain poxy cards that don't provide an SROM | ||
864 | ** for the second and subsequent DECchips, I have to key off the first | ||
865 | ** chip's address. I'll assume there's not a bad SROM iff: | ||
866 | ** | ||
867 | ** o the chipset is the same | ||
868 | ** o the bus number is the same and > 0 | ||
869 | ** o the sum of all the returned hw address bytes is 0 or 0x5fa | ||
870 | ** | ||
871 | ** Also have to save the irq for those cards whose hardware designers | ||
872 | ** can't follow the PCI to PCI Bridge Architecture spec. | ||
873 | */ | ||
874 | static struct { | ||
875 | int chipset; | ||
876 | int bus; | ||
877 | int irq; | ||
878 | u_char addr[ETH_ALEN]; | ||
879 | } last = {0,}; | ||
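The third heuristic in the comment above ("sum of all the returned hw address bytes is 0 or 0x5fa") corresponds to an address that reads back as all zeros or all ones: six 0xff bytes sum to 6 * 255 = 1530 = 0x5fa. A trivial standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int i, sum = 0;

            for (i = 0; i < 6; i++)
                    sum += 0xff;            /* an all-ones address */

            printf("0x%x\n", sum);          /* prints 0x5fa */
            return 0;
    }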
880 | |||
881 | /* | ||
882 | ** The transmit ring full condition is described by the tx_old and tx_new | ||
883 | ** pointers as follows: | ||
884 | ** tx_old = tx_new Empty ring | ||
885 | ** tx_old = tx_new+1 Full ring | ||
886 | ** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition) | ||
887 | */ | ||
888 | #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\ | ||
889 | lp->tx_old+lp->txRingSize-lp->tx_new-1:\ | ||
890 | lp->tx_old -lp->tx_new-1) | ||
891 | |||
892 | #define TX_PKT_PENDING (lp->tx_old != lp->tx_new) | ||
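A small standalone illustration of the ring arithmetic behind TX_BUFFS_AVAIL, assuming a ring of four descriptors. The helper simply mirrors the macro: one slot is always kept unused so that tx_old == tx_new can unambiguously mean an empty ring rather than a full one.

#include <stdio.h>

/* Mirrors TX_BUFFS_AVAIL: free descriptors remaining, with one slot
 * always kept unused so "old == new" means empty rather than full.
 */
static int tx_buffs_avail(int tx_old, int tx_new, int ring_size)
{
    return (tx_old <= tx_new) ? tx_old + ring_size - tx_new - 1
                              : tx_old - tx_new - 1;
}

int main(void)
{
    printf("%d\n", tx_buffs_avail(0, 0, 4)); /* empty ring                 -> 3 */
    printf("%d\n", tx_buffs_avail(1, 0, 4)); /* full: tx_old == tx_new + 1 -> 0 */
    printf("%d\n", tx_buffs_avail(0, 3, 4)); /* full, wrapped              -> 0 */
    return 0;
}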
893 | |||
894 | /* | ||
895 | ** Public Functions | ||
896 | */ | ||
897 | static int de4x5_open(struct net_device *dev); | ||
898 | static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb, | ||
899 | struct net_device *dev); | ||
900 | static irqreturn_t de4x5_interrupt(int irq, void *dev_id); | ||
901 | static int de4x5_close(struct net_device *dev); | ||
902 | static struct net_device_stats *de4x5_get_stats(struct net_device *dev); | ||
903 | static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len); | ||
904 | static void set_multicast_list(struct net_device *dev); | ||
905 | static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
906 | |||
907 | /* | ||
908 | ** Private functions | ||
909 | */ | ||
910 | static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev); | ||
911 | static int de4x5_init(struct net_device *dev); | ||
912 | static int de4x5_sw_reset(struct net_device *dev); | ||
913 | static int de4x5_rx(struct net_device *dev); | ||
914 | static int de4x5_tx(struct net_device *dev); | ||
915 | static void de4x5_ast(struct net_device *dev); | ||
916 | static int de4x5_txur(struct net_device *dev); | ||
917 | static int de4x5_rx_ovfc(struct net_device *dev); | ||
918 | |||
919 | static int autoconf_media(struct net_device *dev); | ||
920 | static void create_packet(struct net_device *dev, char *frame, int len); | ||
921 | static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb); | ||
922 | static int dc21040_autoconf(struct net_device *dev); | ||
923 | static int dc21041_autoconf(struct net_device *dev); | ||
924 | static int dc21140m_autoconf(struct net_device *dev); | ||
925 | static int dc2114x_autoconf(struct net_device *dev); | ||
926 | static int srom_autoconf(struct net_device *dev); | ||
927 | static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *)); | ||
928 | static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int)); | ||
929 | static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec); | ||
930 | static int test_for_100Mb(struct net_device *dev, int msec); | ||
931 | static int wait_for_link(struct net_device *dev); | ||
932 | static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec); | ||
933 | static int is_spd_100(struct net_device *dev); | ||
934 | static int is_100_up(struct net_device *dev); | ||
935 | static int is_10_up(struct net_device *dev); | ||
936 | static int is_anc_capable(struct net_device *dev); | ||
937 | static int ping_media(struct net_device *dev, int msec); | ||
938 | static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len); | ||
939 | static void de4x5_free_rx_buffs(struct net_device *dev); | ||
940 | static void de4x5_free_tx_buffs(struct net_device *dev); | ||
941 | static void de4x5_save_skbs(struct net_device *dev); | ||
942 | static void de4x5_rst_desc_ring(struct net_device *dev); | ||
943 | static void de4x5_cache_state(struct net_device *dev, int flag); | ||
944 | static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb); | ||
945 | static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb); | ||
946 | static struct sk_buff *de4x5_get_cache(struct net_device *dev); | ||
947 | static void de4x5_setup_intr(struct net_device *dev); | ||
948 | static void de4x5_init_connection(struct net_device *dev); | ||
949 | static int de4x5_reset_phy(struct net_device *dev); | ||
950 | static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr); | ||
951 | static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec); | ||
952 | static int test_tp(struct net_device *dev, s32 msec); | ||
953 | static int EISA_signature(char *name, struct device *device); | ||
954 | static int PCI_signature(char *name, struct de4x5_private *lp); | ||
955 | static void DevicePresent(struct net_device *dev, u_long iobase); | ||
956 | static void enet_addr_rst(u_long aprom_addr); | ||
957 | static int de4x5_bad_srom(struct de4x5_private *lp); | ||
958 | static short srom_rd(u_long address, u_char offset); | ||
959 | static void srom_latch(u_int command, u_long address); | ||
960 | static void srom_command(u_int command, u_long address); | ||
961 | static void srom_address(u_int command, u_long address, u_char offset); | ||
962 | static short srom_data(u_int command, u_long address); | ||
963 | /*static void srom_busy(u_int command, u_long address);*/ | ||
964 | static void sendto_srom(u_int command, u_long addr); | ||
965 | static int getfrom_srom(u_long addr); | ||
966 | static int srom_map_media(struct net_device *dev); | ||
967 | static int srom_infoleaf_info(struct net_device *dev); | ||
968 | static void srom_init(struct net_device *dev); | ||
969 | static void srom_exec(struct net_device *dev, u_char *p); | ||
970 | static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr); | ||
971 | static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr); | ||
972 | static int mii_rdata(u_long ioaddr); | ||
973 | static void mii_wdata(int data, int len, u_long ioaddr); | ||
974 | static void mii_ta(u_long rw, u_long ioaddr); | ||
975 | static int mii_swap(int data, int len); | ||
976 | static void mii_address(u_char addr, u_long ioaddr); | ||
977 | static void sendto_mii(u32 command, int data, u_long ioaddr); | ||
978 | static int getfrom_mii(u32 command, u_long ioaddr); | ||
979 | static int mii_get_oui(u_char phyaddr, u_long ioaddr); | ||
980 | static int mii_get_phy(struct net_device *dev); | ||
981 | static void SetMulticastFilter(struct net_device *dev); | ||
982 | static int get_hw_addr(struct net_device *dev); | ||
983 | static void srom_repair(struct net_device *dev, int card); | ||
984 | static int test_bad_enet(struct net_device *dev, int status); | ||
985 | static int an_exception(struct de4x5_private *lp); | ||
986 | static char *build_setup_frame(struct net_device *dev, int mode); | ||
987 | static void disable_ast(struct net_device *dev); | ||
988 | static long de4x5_switch_mac_port(struct net_device *dev); | ||
989 | static int gep_rd(struct net_device *dev); | ||
990 | static void gep_wr(s32 data, struct net_device *dev); | ||
991 | static void yawn(struct net_device *dev, int state); | ||
992 | static void de4x5_parse_params(struct net_device *dev); | ||
993 | static void de4x5_dbg_open(struct net_device *dev); | ||
994 | static void de4x5_dbg_mii(struct net_device *dev, int k); | ||
995 | static void de4x5_dbg_media(struct net_device *dev); | ||
996 | static void de4x5_dbg_srom(struct de4x5_srom *p); | ||
997 | static void de4x5_dbg_rx(struct sk_buff *skb, int len); | ||
998 | static int de4x5_strncmp(char *a, char *b, int n); | ||
999 | static int dc21041_infoleaf(struct net_device *dev); | ||
1000 | static int dc21140_infoleaf(struct net_device *dev); | ||
1001 | static int dc21142_infoleaf(struct net_device *dev); | ||
1002 | static int dc21143_infoleaf(struct net_device *dev); | ||
1003 | static int type0_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1004 | static int type1_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1005 | static int type2_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1006 | static int type3_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1007 | static int type4_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1008 | static int type5_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1009 | static int compact_infoblock(struct net_device *dev, u_char count, u_char *p); | ||
1010 | |||
1011 | /* | ||
1012 | ** Note now that module autoprobing is allowed under EISA and PCI. The | ||
1013 | ** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes | ||
1014 | ** to "do the right thing". | ||
1015 | */ | ||
1016 | |||
1017 | static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */ | ||
1018 | |||
1019 | module_param(io, int, 0); | ||
1020 | module_param(de4x5_debug, int, 0); | ||
1021 | module_param(dec_only, int, 0); | ||
1022 | module_param(args, charp, 0); | ||
1023 | |||
1024 | MODULE_PARM_DESC(io, "de4x5 I/O base address"); | ||
1025 | MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask"); | ||
1026 | MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)"); | ||
1027 | MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details"); | ||
1028 | MODULE_LICENSE("GPL"); | ||
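When probing PCI boards, de4x5_pci_probe() further down treats a non-zero io value as a bus/slot selector rather than a raw port address: the PCI bus number sits in the high byte and the slot number in the low byte, and zero means "probe everything". A hypothetical helper mirroring that decode:

#include <stdio.h>

/* Hypothetical helper mirroring the decode in de4x5_pci_probe():
 * io = (bus << 8) | slot, so io=0x104 restricts the probe to
 * bus 1, slot 4; io=0 leaves the probe unrestricted.
 */
static void decode_io_param(int io, int *bus, int *slot)
{
    *bus  = io >> 8;     /* PCI bus number  */
    *slot = io & 0xff;   /* PCI slot number */
}

int main(void)
{
    int bus, slot;

    decode_io_param(0x104, &bus, &slot);
    printf("bus=%d slot=%d\n", bus, slot);   /* bus=1 slot=4 */
    return 0;
}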
1029 | |||
1030 | /* | ||
1031 | ** List the SROM infoleaf functions and chipsets | ||
1032 | */ | ||
1033 | struct InfoLeaf { | ||
1034 | int chipset; | ||
1035 | int (*fn)(struct net_device *); | ||
1036 | }; | ||
1037 | static struct InfoLeaf infoleaf_array[] = { | ||
1038 | {DC21041, dc21041_infoleaf}, | ||
1039 | {DC21140, dc21140_infoleaf}, | ||
1040 | {DC21142, dc21142_infoleaf}, | ||
1041 | {DC21143, dc21143_infoleaf} | ||
1042 | }; | ||
1043 | #define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array) | ||
1044 | |||
1045 | /* | ||
1046 | ** List the SROM info block functions | ||
1047 | */ | ||
1048 | static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = { | ||
1049 | type0_infoblock, | ||
1050 | type1_infoblock, | ||
1051 | type2_infoblock, | ||
1052 | type3_infoblock, | ||
1053 | type4_infoblock, | ||
1054 | type5_infoblock, | ||
1055 | compact_infoblock | ||
1056 | }; | ||
1057 | |||
1058 | #define COMPACT (ARRAY_SIZE(dc_infoblock) - 1) | ||
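Both tables are plain dispatch tables: infoleaf_array maps a chipset id to its infoleaf parser, while dc_infoblock is indexed directly by SROM info block type, with COMPACT naming the final, compact-format entry. A hedged sketch of the chipset lookup, written against the file's own definitions; the driver's srom_infoleaf_info() performs a comparable scan to set lp->infoleaf_fn rather than calling the handler on the spot.

/* Sketch only: scan infoleaf_array for the matching chipset and call
 * its handler.  The function name is illustrative and not part of the
 * driver.
 */
static int run_infoleaf_for(struct net_device *dev, int chipset)
{
    unsigned int i;

    for (i = 0; i < INFOLEAF_SIZE; i++) {
        if (infoleaf_array[i].chipset == chipset)
            return infoleaf_array[i].fn(dev);
    }

    return -ENOENT;     /* no parser for this chipset */
}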
1059 | |||
1060 | /* | ||
1061 | ** Miscellaneous defines... | ||
1062 | */ | ||
1063 | #define RESET_DE4X5 {\ | ||
1064 | int i;\ | ||
1065 | i=inl(DE4X5_BMR);\ | ||
1066 | mdelay(1);\ | ||
1067 | outl(i | BMR_SWR, DE4X5_BMR);\ | ||
1068 | mdelay(1);\ | ||
1069 | outl(i, DE4X5_BMR);\ | ||
1070 | mdelay(1);\ | ||
1071 | for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\ | ||
1072 | mdelay(1);\ | ||
1073 | } | ||
1074 | |||
1075 | #define PHY_HARD_RESET {\ | ||
1076 | outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\ | ||
1077 | mdelay(1); /* Assert for 1ms */\ | ||
1078 | outl(0x00, DE4X5_GEP);\ | ||
1079 | mdelay(2); /* Wait for 2ms */\ | ||
1080 | } | ||
1081 | |||
1082 | static const struct net_device_ops de4x5_netdev_ops = { | ||
1083 | .ndo_open = de4x5_open, | ||
1084 | .ndo_stop = de4x5_close, | ||
1085 | .ndo_start_xmit = de4x5_queue_pkt, | ||
1086 | .ndo_get_stats = de4x5_get_stats, | ||
1087 | .ndo_set_multicast_list = set_multicast_list, | ||
1088 | .ndo_do_ioctl = de4x5_ioctl, | ||
1089 | .ndo_change_mtu = eth_change_mtu, | ||
1090 | .ndo_set_mac_address= eth_mac_addr, | ||
1091 | .ndo_validate_addr = eth_validate_addr, | ||
1092 | }; | ||
1093 | |||
1094 | |||
1095 | static int __devinit | ||
1096 | de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev) | ||
1097 | { | ||
1098 | char name[DE4X5_NAME_LENGTH + 1]; | ||
1099 | struct de4x5_private *lp = netdev_priv(dev); | ||
1100 | struct pci_dev *pdev = NULL; | ||
1101 | int i, status=0; | ||
1102 | |||
1103 | dev_set_drvdata(gendev, dev); | ||
1104 | |||
1105 | /* Ensure we're not sleeping */ | ||
1106 | if (lp->bus == EISA) { | ||
1107 | outb(WAKEUP, PCI_CFPM); | ||
1108 | } else { | ||
1109 | pdev = to_pci_dev (gendev); | ||
1110 | pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); | ||
1111 | } | ||
1112 | mdelay(10); | ||
1113 | |||
1114 | RESET_DE4X5; | ||
1115 | |||
1116 | if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) { | ||
1117 | return -ENXIO; /* Hardware could not reset */ | ||
1118 | } | ||
1119 | |||
1120 | /* | ||
1121 | ** Now find out what kind of DC21040/DC21041/DC21140 board we have. | ||
1122 | */ | ||
1123 | lp->useSROM = false; | ||
1124 | if (lp->bus == PCI) { | ||
1125 | PCI_signature(name, lp); | ||
1126 | } else { | ||
1127 | EISA_signature(name, gendev); | ||
1128 | } | ||
1129 | |||
1130 | if (*name == '\0') { /* Not found a board signature */ | ||
1131 | return -ENXIO; | ||
1132 | } | ||
1133 | |||
1134 | dev->base_addr = iobase; | ||
1135 | printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase); | ||
1136 | |||
1137 | status = get_hw_addr(dev); | ||
1138 | printk(", h/w address %pM\n", dev->dev_addr); | ||
1139 | |||
1140 | if (status != 0) { | ||
1141 | printk(" which has an Ethernet PROM CRC error.\n"); | ||
1142 | return -ENXIO; | ||
1143 | } else { | ||
1144 | skb_queue_head_init(&lp->cache.queue); | ||
1145 | lp->cache.gepc = GEP_INIT; | ||
1146 | lp->asBit = GEP_SLNK; | ||
1147 | lp->asPolarity = GEP_SLNK; | ||
1148 | lp->asBitValid = ~0; | ||
1149 | lp->timeout = -1; | ||
1150 | lp->gendev = gendev; | ||
1151 | spin_lock_init(&lp->lock); | ||
1152 | init_timer(&lp->timer); | ||
1153 | lp->timer.function = (void (*)(unsigned long))de4x5_ast; | ||
1154 | lp->timer.data = (unsigned long)dev; | ||
1155 | de4x5_parse_params(dev); | ||
1156 | |||
1157 | /* | ||
1158 | ** Choose correct autosensing in case someone messed up | ||
1159 | */ | ||
1160 | lp->autosense = lp->params.autosense; | ||
1161 | if (lp->chipset != DC21140) { | ||
1162 | if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) { | ||
1163 | lp->params.autosense = TP; | ||
1164 | } | ||
1165 | if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) { | ||
1166 | lp->params.autosense = BNC; | ||
1167 | } | ||
1168 | } | ||
1169 | lp->fdx = lp->params.fdx; | ||
1170 | sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev)); | ||
1171 | |||
1172 | lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc); | ||
1173 | #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY) | ||
1174 | lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN; | ||
1175 | #endif | ||
1176 | lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size, | ||
1177 | &lp->dma_rings, GFP_ATOMIC); | ||
1178 | if (lp->rx_ring == NULL) { | ||
1179 | return -ENOMEM; | ||
1180 | } | ||
1181 | |||
1182 | lp->tx_ring = lp->rx_ring + NUM_RX_DESC; | ||
1183 | |||
1184 | /* | ||
1185 | ** Set up the RX descriptor ring (Intels) | ||
1186 | ** Allocate contiguous receive buffers, long word aligned (Alphas) | ||
1187 | */ | ||
1188 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) | ||
1189 | for (i=0; i<NUM_RX_DESC; i++) { | ||
1190 | lp->rx_ring[i].status = 0; | ||
1191 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); | ||
1192 | lp->rx_ring[i].buf = 0; | ||
1193 | lp->rx_ring[i].next = 0; | ||
1194 | lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ | ||
1195 | } | ||
1196 | |||
1197 | #else | ||
1198 | { | ||
1199 | dma_addr_t dma_rx_bufs; | ||
1200 | |||
1201 | dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC) | ||
1202 | * sizeof(struct de4x5_desc); | ||
1203 | dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN; | ||
1204 | lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC | ||
1205 | + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN); | ||
1206 | for (i=0; i<NUM_RX_DESC; i++) { | ||
1207 | lp->rx_ring[i].status = 0; | ||
1208 | lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ); | ||
1209 | lp->rx_ring[i].buf = | ||
1210 | cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ); | ||
1211 | lp->rx_ring[i].next = 0; | ||
1212 | lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */ | ||
1213 | } | ||
1214 | |||
1215 | } | ||
1216 | #endif | ||
1217 | |||
1218 | barrier(); | ||
1219 | |||
1220 | lp->rxRingSize = NUM_RX_DESC; | ||
1221 | lp->txRingSize = NUM_TX_DESC; | ||
1222 | |||
1223 | /* Write the end of list marker to the descriptor lists */ | ||
1224 | lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER); | ||
1225 | lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER); | ||
1226 | |||
1227 | /* Tell the adapter where the TX/RX rings are located. */ | ||
1228 | outl(lp->dma_rings, DE4X5_RRBA); | ||
1229 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | ||
1230 | DE4X5_TRBA); | ||
1231 | |||
1232 | /* Initialise the IRQ mask and Enable/Disable */ | ||
1233 | lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM; | ||
1234 | lp->irq_en = IMR_NIM | IMR_AIM; | ||
1235 | |||
1236 | /* Create a loopback packet frame for later media probing */ | ||
1237 | create_packet(dev, lp->frame, sizeof(lp->frame)); | ||
1238 | |||
1239 | /* Check if the RX overflow bug needs testing for */ | ||
1240 | i = lp->cfrv & 0x000000fe; | ||
1241 | if ((lp->chipset == DC21140) && (i == 0x20)) { | ||
1242 | lp->rx_ovf = 1; | ||
1243 | } | ||
1244 | |||
1245 | /* Initialise the SROM pointers if possible */ | ||
1246 | if (lp->useSROM) { | ||
1247 | lp->state = INITIALISED; | ||
1248 | if (srom_infoleaf_info(dev)) { | ||
1249 | dma_free_coherent (gendev, lp->dma_size, | ||
1250 | lp->rx_ring, lp->dma_rings); | ||
1251 | return -ENXIO; | ||
1252 | } | ||
1253 | srom_init(dev); | ||
1254 | } | ||
1255 | |||
1256 | lp->state = CLOSED; | ||
1257 | |||
1258 | /* | ||
1259 | ** Check for an MII interface | ||
1260 | */ | ||
1261 | if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) { | ||
1262 | mii_get_phy(dev); | ||
1263 | } | ||
1264 | |||
1265 | printk(" and requires IRQ%d (provided by %s).\n", dev->irq, | ||
1266 | ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG")); | ||
1267 | } | ||
1268 | |||
1269 | if (de4x5_debug & DEBUG_VERSION) { | ||
1270 | printk(version); | ||
1271 | } | ||
1272 | |||
1273 | /* The DE4X5-specific entries in the device structure. */ | ||
1274 | SET_NETDEV_DEV(dev, gendev); | ||
1275 | dev->netdev_ops = &de4x5_netdev_ops; | ||
1276 | dev->mem_start = 0; | ||
1277 | |||
1278 | /* Fill in the generic fields of the device structure. */ | ||
1279 | if ((status = register_netdev (dev))) { | ||
1280 | dma_free_coherent (gendev, lp->dma_size, | ||
1281 | lp->rx_ring, lp->dma_rings); | ||
1282 | return status; | ||
1283 | } | ||
1284 | |||
1285 | /* Let the adapter sleep to save power */ | ||
1286 | yawn(dev, SLEEP); | ||
1287 | |||
1288 | return status; | ||
1289 | } | ||
1290 | |||
1291 | |||
1292 | static int | ||
1293 | de4x5_open(struct net_device *dev) | ||
1294 | { | ||
1295 | struct de4x5_private *lp = netdev_priv(dev); | ||
1296 | u_long iobase = dev->base_addr; | ||
1297 | int i, status = 0; | ||
1298 | s32 omr; | ||
1299 | |||
1300 | /* Allocate the RX buffers */ | ||
1301 | for (i=0; i<lp->rxRingSize; i++) { | ||
1302 | if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) { | ||
1303 | de4x5_free_rx_buffs(dev); | ||
1304 | return -EAGAIN; | ||
1305 | } | ||
1306 | } | ||
1307 | |||
1308 | /* | ||
1309 | ** Wake up the adapter | ||
1310 | */ | ||
1311 | yawn(dev, WAKEUP); | ||
1312 | |||
1313 | /* | ||
1314 | ** Re-initialize the DE4X5... | ||
1315 | */ | ||
1316 | status = de4x5_init(dev); | ||
1317 | spin_lock_init(&lp->lock); | ||
1318 | lp->state = OPEN; | ||
1319 | de4x5_dbg_open(dev); | ||
1320 | |||
1321 | if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED, | ||
1322 | lp->adapter_name, dev)) { | ||
1323 | printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq); | ||
1324 | if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED, | ||
1325 | lp->adapter_name, dev)) { | ||
1326 | printk("\n Cannot get IRQ- reconfigure your hardware.\n"); | ||
1327 | disable_ast(dev); | ||
1328 | de4x5_free_rx_buffs(dev); | ||
1329 | de4x5_free_tx_buffs(dev); | ||
1330 | yawn(dev, SLEEP); | ||
1331 | lp->state = CLOSED; | ||
1332 | return -EAGAIN; | ||
1333 | } else { | ||
1334 | printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n"); | ||
1335 | printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n"); | ||
1336 | } | ||
1337 | } | ||
1338 | |||
1339 | lp->interrupt = UNMASK_INTERRUPTS; | ||
1340 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
1341 | |||
1342 | START_DE4X5; | ||
1343 | |||
1344 | de4x5_setup_intr(dev); | ||
1345 | |||
1346 | if (de4x5_debug & DEBUG_OPEN) { | ||
1347 | printk("\tsts: 0x%08x\n", inl(DE4X5_STS)); | ||
1348 | printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR)); | ||
1349 | printk("\timr: 0x%08x\n", inl(DE4X5_IMR)); | ||
1350 | printk("\tomr: 0x%08x\n", inl(DE4X5_OMR)); | ||
1351 | printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR)); | ||
1352 | printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR)); | ||
1353 | printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR)); | ||
1354 | printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR)); | ||
1355 | } | ||
1356 | |||
1357 | return status; | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1361 | ** Initialize the DE4X5 operating conditions. NB: a chip problem with the | ||
1362 | ** DC21140 requires using perfect filtering mode for that chip. Since I can't | ||
1363 | ** see why I'd want > 14 multicast addresses, I have changed all chips to use | ||
1364 | ** the perfect filtering mode. Keep the DMA burst length at 8: there seems | ||
1365 | ** to be data corruption problems if it is larger (UDP errors seen from a | ||
1366 | ** ttcp source). | ||
1367 | */ | ||
1368 | static int | ||
1369 | de4x5_init(struct net_device *dev) | ||
1370 | { | ||
1371 | /* Lock out other processes whilst setting up the hardware */ | ||
1372 | netif_stop_queue(dev); | ||
1373 | |||
1374 | de4x5_sw_reset(dev); | ||
1375 | |||
1376 | /* Autoconfigure the connected port */ | ||
1377 | autoconf_media(dev); | ||
1378 | |||
1379 | return 0; | ||
1380 | } | ||
1381 | |||
1382 | static int | ||
1383 | de4x5_sw_reset(struct net_device *dev) | ||
1384 | { | ||
1385 | struct de4x5_private *lp = netdev_priv(dev); | ||
1386 | u_long iobase = dev->base_addr; | ||
1387 | int i, j, status = 0; | ||
1388 | s32 bmr, omr; | ||
1389 | |||
1390 | /* Select the MII or SRL port now and RESET the MAC */ | ||
1391 | if (!lp->useSROM) { | ||
1392 | if (lp->phy[lp->active].id != 0) { | ||
1393 | lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD; | ||
1394 | } else { | ||
1395 | lp->infoblock_csr6 = OMR_SDP | OMR_TTM; | ||
1396 | } | ||
1397 | de4x5_switch_mac_port(dev); | ||
1398 | } | ||
1399 | |||
1400 | /* | ||
1401 | ** Set the programmable burst length to 8 longwords for all the DC21140 | ||
1402 | ** Fasternet chips and 4 longwords for all others: DMA errors result | ||
1403 | ** without these values. Cache align 16 long. | ||
1404 | */ | ||
1405 | bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN; | ||
1406 | bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0); | ||
1407 | outl(bmr, DE4X5_BMR); | ||
1408 | |||
1409 | omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */ | ||
1410 | if (lp->chipset == DC21140) { | ||
1411 | omr |= (OMR_SDP | OMR_SB); | ||
1412 | } | ||
1413 | lp->setup_f = PERFECT; | ||
1414 | outl(lp->dma_rings, DE4X5_RRBA); | ||
1415 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | ||
1416 | DE4X5_TRBA); | ||
1417 | |||
1418 | lp->rx_new = lp->rx_old = 0; | ||
1419 | lp->tx_new = lp->tx_old = 0; | ||
1420 | |||
1421 | for (i = 0; i < lp->rxRingSize; i++) { | ||
1422 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); | ||
1423 | } | ||
1424 | |||
1425 | for (i = 0; i < lp->txRingSize; i++) { | ||
1426 | lp->tx_ring[i].status = cpu_to_le32(0); | ||
1427 | } | ||
1428 | |||
1429 | barrier(); | ||
1430 | |||
1431 | /* Build the setup frame depending on filtering mode */ | ||
1432 | SetMulticastFilter(dev); | ||
1433 | |||
1434 | load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1); | ||
1435 | outl(omr|OMR_ST, DE4X5_OMR); | ||
1436 | |||
1437 | /* Poll for setup frame completion (adapter interrupts are disabled now) */ | ||
1438 | |||
1439 | for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */ | ||
1440 | mdelay(1); | ||
1441 | if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1; | ||
1442 | } | ||
1443 | outl(omr, DE4X5_OMR); /* Stop everything! */ | ||
1444 | |||
1445 | if (j == 0) { | ||
1446 | printk("%s: Setup frame timed out, status %08x\n", dev->name, | ||
1447 | inl(DE4X5_STS)); | ||
1448 | status = -EIO; | ||
1449 | } | ||
1450 | |||
1451 | lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; | ||
1452 | lp->tx_old = lp->tx_new; | ||
1453 | |||
1454 | return status; | ||
1455 | } | ||
1456 | |||
1457 | /* | ||
1458 | ** Writes a socket buffer address to the next available transmit descriptor. | ||
1459 | */ | ||
1460 | static netdev_tx_t | ||
1461 | de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev) | ||
1462 | { | ||
1463 | struct de4x5_private *lp = netdev_priv(dev); | ||
1464 | u_long iobase = dev->base_addr; | ||
1465 | u_long flags = 0; | ||
1466 | |||
1467 | netif_stop_queue(dev); | ||
1468 | if (!lp->tx_enable) /* Cannot send for now */ | ||
1469 | return NETDEV_TX_LOCKED; | ||
1470 | |||
1471 | /* | ||
1472 | ** Clean out the TX ring asynchronously to interrupts - sometimes the | ||
1473 | ** interrupts are lost by delayed descriptor status updates relative to | ||
1474 | ** the irq assertion, especially with a busy PCI bus. | ||
1475 | */ | ||
1476 | spin_lock_irqsave(&lp->lock, flags); | ||
1477 | de4x5_tx(dev); | ||
1478 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1479 | |||
1480 | /* Test if cache is already locked - requeue skb if so */ | ||
1481 | if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) | ||
1482 | return NETDEV_TX_LOCKED; | ||
1483 | |||
1484 | /* Transmit descriptor ring full or stale skb */ | ||
1485 | if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) { | ||
1486 | if (lp->interrupt) { | ||
1487 | de4x5_putb_cache(dev, skb); /* Requeue the buffer */ | ||
1488 | } else { | ||
1489 | de4x5_put_cache(dev, skb); | ||
1490 | } | ||
1491 | if (de4x5_debug & DEBUG_TX) { | ||
1492 | printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO"); | ||
1493 | } | ||
1494 | } else if (skb->len > 0) { | ||
1495 | /* If we already have stuff queued locally, use that first */ | ||
1496 | if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) { | ||
1497 | de4x5_put_cache(dev, skb); | ||
1498 | skb = de4x5_get_cache(dev); | ||
1499 | } | ||
1500 | |||
1501 | while (skb && !netif_queue_stopped(dev) && | ||
1502 | (u_long) lp->tx_skb[lp->tx_new] <= 1) { | ||
1503 | spin_lock_irqsave(&lp->lock, flags); | ||
1504 | netif_stop_queue(dev); | ||
1505 | load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb); | ||
1506 | lp->stats.tx_bytes += skb->len; | ||
1507 | outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ | ||
1508 | |||
1509 | lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; | ||
1510 | |||
1511 | if (TX_BUFFS_AVAIL) { | ||
1512 | netif_start_queue(dev); /* Another pkt may be queued */ | ||
1513 | } | ||
1514 | skb = de4x5_get_cache(dev); | ||
1515 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1516 | } | ||
1517 | if (skb) de4x5_putb_cache(dev, skb); | ||
1518 | } | ||
1519 | |||
1520 | lp->cache.lock = 0; | ||
1521 | |||
1522 | return NETDEV_TX_OK; | ||
1523 | } | ||
1524 | |||
1525 | /* | ||
1526 | ** The DE4X5 interrupt handler. | ||
1527 | ** | ||
1528 | ** I/O Read/Writes through intermediate PCI bridges are never 'posted', | ||
1529 | ** so that the asserted interrupt always has some real data to work with - | ||
1530 | ** if these I/O accesses are ever changed to memory accesses, ensure the | ||
1531 | ** STS write is read immediately to complete the transaction if the adapter | ||
1532 | ** is not on bus 0. Lost interrupts can still occur when the PCI bus load | ||
1533 | ** is high and descriptor status bits cannot be set before the associated | ||
1534 | ** interrupt is asserted and this routine entered. | ||
1535 | */ | ||
1536 | static irqreturn_t | ||
1537 | de4x5_interrupt(int irq, void *dev_id) | ||
1538 | { | ||
1539 | struct net_device *dev = dev_id; | ||
1540 | struct de4x5_private *lp; | ||
1541 | s32 imr, omr, sts, limit; | ||
1542 | u_long iobase; | ||
1543 | unsigned int handled = 0; | ||
1544 | |||
1545 | lp = netdev_priv(dev); | ||
1546 | spin_lock(&lp->lock); | ||
1547 | iobase = dev->base_addr; | ||
1548 | |||
1549 | DISABLE_IRQs; /* Ensure non re-entrancy */ | ||
1550 | |||
1551 | if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt)) | ||
1552 | printk("%s: Re-entering the interrupt handler.\n", dev->name); | ||
1553 | |||
1554 | synchronize_irq(dev->irq); | ||
1555 | |||
1556 | for (limit=0; limit<8; limit++) { | ||
1557 | sts = inl(DE4X5_STS); /* Read IRQ status */ | ||
1558 | outl(sts, DE4X5_STS); /* Reset the board interrupts */ | ||
1559 | |||
1560 | if (!(sts & lp->irq_mask)) break;/* All done */ | ||
1561 | handled = 1; | ||
1562 | |||
1563 | if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */ | ||
1564 | de4x5_rx(dev); | ||
1565 | |||
1566 | if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */ | ||
1567 | de4x5_tx(dev); | ||
1568 | |||
1569 | if (sts & STS_LNF) { /* TP Link has failed */ | ||
1570 | lp->irq_mask &= ~IMR_LFM; | ||
1571 | } | ||
1572 | |||
1573 | if (sts & STS_UNF) { /* Transmit underrun */ | ||
1574 | de4x5_txur(dev); | ||
1575 | } | ||
1576 | |||
1577 | if (sts & STS_SE) { /* Bus Error */ | ||
1578 | STOP_DE4X5; | ||
1579 | printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n", | ||
1580 | dev->name, sts); | ||
1581 | spin_unlock(&lp->lock); | ||
1582 | return IRQ_HANDLED; | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | /* Load the TX ring with any locally stored packets */ | ||
1587 | if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { | ||
1588 | while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) { | ||
1589 | de4x5_queue_pkt(de4x5_get_cache(dev), dev); | ||
1590 | } | ||
1591 | lp->cache.lock = 0; | ||
1592 | } | ||
1593 | |||
1594 | lp->interrupt = UNMASK_INTERRUPTS; | ||
1595 | ENABLE_IRQs; | ||
1596 | spin_unlock(&lp->lock); | ||
1597 | |||
1598 | return IRQ_RETVAL(handled); | ||
1599 | } | ||
1600 | |||
1601 | static int | ||
1602 | de4x5_rx(struct net_device *dev) | ||
1603 | { | ||
1604 | struct de4x5_private *lp = netdev_priv(dev); | ||
1605 | u_long iobase = dev->base_addr; | ||
1606 | int entry; | ||
1607 | s32 status; | ||
1608 | |||
1609 | for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0; | ||
1610 | entry=lp->rx_new) { | ||
1611 | status = (s32)le32_to_cpu(lp->rx_ring[entry].status); | ||
1612 | |||
1613 | if (lp->rx_ovf) { | ||
1614 | if (inl(DE4X5_MFC) & MFC_FOCM) { | ||
1615 | de4x5_rx_ovfc(dev); | ||
1616 | break; | ||
1617 | } | ||
1618 | } | ||
1619 | |||
1620 | if (status & RD_FS) { /* Remember the start of frame */ | ||
1621 | lp->rx_old = entry; | ||
1622 | } | ||
1623 | |||
1624 | if (status & RD_LS) { /* Valid frame status */ | ||
1625 | if (lp->tx_enable) lp->linkOK++; | ||
1626 | if (status & RD_ES) { /* There was an error. */ | ||
1627 | lp->stats.rx_errors++; /* Update the error stats. */ | ||
1628 | if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++; | ||
1629 | if (status & RD_CE) lp->stats.rx_crc_errors++; | ||
1630 | if (status & RD_OF) lp->stats.rx_fifo_errors++; | ||
1631 | if (status & RD_TL) lp->stats.rx_length_errors++; | ||
1632 | if (status & RD_RF) lp->pktStats.rx_runt_frames++; | ||
1633 | if (status & RD_CS) lp->pktStats.rx_collision++; | ||
1634 | if (status & RD_DB) lp->pktStats.rx_dribble++; | ||
1635 | if (status & RD_OF) lp->pktStats.rx_overflow++; | ||
1636 | } else { /* A valid frame received */ | ||
1637 | struct sk_buff *skb; | ||
1638 | short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status) | ||
1639 | >> 16) - 4; | ||
1640 | |||
1641 | if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) { | ||
1642 | printk("%s: Insufficient memory; nuking packet.\n", | ||
1643 | dev->name); | ||
1644 | lp->stats.rx_dropped++; | ||
1645 | } else { | ||
1646 | de4x5_dbg_rx(skb, pkt_len); | ||
1647 | |||
1648 | /* Push up the protocol stack */ | ||
1649 | skb->protocol=eth_type_trans(skb,dev); | ||
1650 | de4x5_local_stats(dev, skb->data, pkt_len); | ||
1651 | netif_rx(skb); | ||
1652 | |||
1653 | /* Update stats */ | ||
1654 | lp->stats.rx_packets++; | ||
1655 | lp->stats.rx_bytes += pkt_len; | ||
1656 | } | ||
1657 | } | ||
1658 | |||
1659 | /* Change buffer ownership for this frame, back to the adapter */ | ||
1660 | for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) { | ||
1661 | lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN); | ||
1662 | barrier(); | ||
1663 | } | ||
1664 | lp->rx_ring[entry].status = cpu_to_le32(R_OWN); | ||
1665 | barrier(); | ||
1666 | } | ||
1667 | |||
1668 | /* | ||
1669 | ** Update entry information | ||
1670 | */ | ||
1671 | lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; | ||
1672 | } | ||
1673 | |||
1674 | return 0; | ||
1675 | } | ||
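The length of a received frame travels in the upper 16 bits of the descriptor status word and includes the 4-byte frame check sequence, which de4x5_rx() strips before sizing the skb. A standalone illustration; the sample descriptor value is made up.

#include <stdio.h>
#include <stdint.h>

/* Mirror the length extraction in de4x5_rx(): take the upper half of
 * the (CPU-owned) status word and drop the 4-byte FCS.
 */
static int rx_frame_len(uint32_t status)
{
    return (int)(short)(status >> 16) - 4;
}

int main(void)
{
    uint32_t status = 68u << 16;          /* hypothetical: 68-byte frame incl. FCS */

    printf("%d\n", rx_frame_len(status)); /* prints 64 */
    return 0;
}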
1676 | |||
1677 | static inline void | ||
1678 | de4x5_free_tx_buff(struct de4x5_private *lp, int entry) | ||
1679 | { | ||
1680 | dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf), | ||
1681 | le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1, | ||
1682 | DMA_TO_DEVICE); | ||
1683 | if ((u_long) lp->tx_skb[entry] > 1) | ||
1684 | dev_kfree_skb_irq(lp->tx_skb[entry]); | ||
1685 | lp->tx_skb[entry] = NULL; | ||
1686 | } | ||
1687 | |||
1688 | /* | ||
1689 | ** Buffer sent - check for TX buffer errors. | ||
1690 | */ | ||
1691 | static int | ||
1692 | de4x5_tx(struct net_device *dev) | ||
1693 | { | ||
1694 | struct de4x5_private *lp = netdev_priv(dev); | ||
1695 | u_long iobase = dev->base_addr; | ||
1696 | int entry; | ||
1697 | s32 status; | ||
1698 | |||
1699 | for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) { | ||
1700 | status = (s32)le32_to_cpu(lp->tx_ring[entry].status); | ||
1701 | if (status < 0) { /* Buffer not sent yet */ | ||
1702 | break; | ||
1703 | } else if (status != 0x7fffffff) { /* Not setup frame */ | ||
1704 | if (status & TD_ES) { /* An error happened */ | ||
1705 | lp->stats.tx_errors++; | ||
1706 | if (status & TD_NC) lp->stats.tx_carrier_errors++; | ||
1707 | if (status & TD_LC) lp->stats.tx_window_errors++; | ||
1708 | if (status & TD_UF) lp->stats.tx_fifo_errors++; | ||
1709 | if (status & TD_EC) lp->pktStats.excessive_collisions++; | ||
1710 | if (status & TD_DE) lp->stats.tx_aborted_errors++; | ||
1711 | |||
1712 | if (TX_PKT_PENDING) { | ||
1713 | outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */ | ||
1714 | } | ||
1715 | } else { /* Packet sent */ | ||
1716 | lp->stats.tx_packets++; | ||
1717 | if (lp->tx_enable) lp->linkOK++; | ||
1718 | } | ||
1719 | /* Update the collision counter */ | ||
1720 | lp->stats.collisions += ((status & TD_EC) ? 16 : | ||
1721 | ((status & TD_CC) >> 3)); | ||
1722 | |||
1723 | /* Free the buffer. */ | ||
1724 | if (lp->tx_skb[entry] != NULL) | ||
1725 | de4x5_free_tx_buff(lp, entry); | ||
1726 | } | ||
1727 | |||
1728 | /* Update all the pointers */ | ||
1729 | lp->tx_old = (lp->tx_old + 1) % lp->txRingSize; | ||
1730 | } | ||
1731 | |||
1732 | /* Any resources available? */ | ||
1733 | if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) { | ||
1734 | if (lp->interrupt) | ||
1735 | netif_wake_queue(dev); | ||
1736 | else | ||
1737 | netif_start_queue(dev); | ||
1738 | } | ||
1739 | |||
1740 | return 0; | ||
1741 | } | ||
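The collision accounting above packs two cases into one expression: if the excessive-collisions bit is set the chip gave up after 16 attempts, otherwise the collision count field, shifted down by three bits, holds the retry count. A standalone sketch; the TD_EC/TD_CC mask values are assumptions based on the shift used in the code and the usual 21x4x descriptor layout, not taken from de4x5.h.

#include <stdio.h>
#include <stdint.h>

#define TD_EC 0x00000100u   /* assumed: excessive collisions flag       */
#define TD_CC 0x00000078u   /* assumed: collision count field, bits 6:3 */

/* Mirror the collision bookkeeping in de4x5_tx(). */
static int tx_collisions(uint32_t status)
{
    return (status & TD_EC) ? 16 : (int)((status & TD_CC) >> 3);
}

int main(void)
{
    printf("%d\n", tx_collisions(3u << 3));  /* three retries -> 3  */
    printf("%d\n", tx_collisions(TD_EC));    /* gave up       -> 16 */
    return 0;
}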
1742 | |||
1743 | static void | ||
1744 | de4x5_ast(struct net_device *dev) | ||
1745 | { | ||
1746 | struct de4x5_private *lp = netdev_priv(dev); | ||
1747 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
1748 | int dt; | ||
1749 | |||
1750 | if (lp->useSROM) | ||
1751 | next_tick = srom_autoconf(dev); | ||
1752 | else if (lp->chipset == DC21140) | ||
1753 | next_tick = dc21140m_autoconf(dev); | ||
1754 | else if (lp->chipset == DC21041) | ||
1755 | next_tick = dc21041_autoconf(dev); | ||
1756 | else if (lp->chipset == DC21040) | ||
1757 | next_tick = dc21040_autoconf(dev); | ||
1758 | lp->linkOK = 0; | ||
1759 | |||
1760 | dt = (next_tick * HZ) / 1000; | ||
1761 | |||
1762 | if (!dt) | ||
1763 | dt = 1; | ||
1764 | |||
1765 | mod_timer(&lp->timer, jiffies + dt); | ||
1766 | } | ||
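The autosense routines report their next interval in milliseconds, so de4x5_ast() converts it to jiffies and clamps it to at least one tick before re-arming the timer. A standalone sketch of that conversion; the HZ value here is only an example.

#include <stdio.h>

#define HZ 250          /* example tick rate; real kernels vary */

/* Convert a next_tick value in milliseconds to timer ticks, never
 * returning less than one tick, as de4x5_ast() does before mod_timer().
 */
static long ms_to_ticks(long ms)
{
    long dt = (ms * HZ) / 1000;

    return dt ? dt : 1;
}

int main(void)
{
    printf("%ld\n", ms_to_ticks(250));  /* 62 ticks at HZ=250            */
    printf("%ld\n", ms_to_ticks(1));    /* rounds down to 0, clamped to 1 */
    return 0;
}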
1767 | |||
1768 | static int | ||
1769 | de4x5_txur(struct net_device *dev) | ||
1770 | { | ||
1771 | struct de4x5_private *lp = netdev_priv(dev); | ||
1772 | u_long iobase = dev->base_addr; | ||
1773 | int omr; | ||
1774 | |||
1775 | omr = inl(DE4X5_OMR); | ||
1776 | if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) { | ||
1777 | omr &= ~(OMR_ST|OMR_SR); | ||
1778 | outl(omr, DE4X5_OMR); | ||
1779 | while (inl(DE4X5_STS) & STS_TS); | ||
1780 | if ((omr & OMR_TR) < OMR_TR) { | ||
1781 | omr += 0x4000; | ||
1782 | } else { | ||
1783 | omr |= OMR_SF; | ||
1784 | } | ||
1785 | outl(omr | OMR_ST | OMR_SR, DE4X5_OMR); | ||
1786 | } | ||
1787 | |||
1788 | return 0; | ||
1789 | } | ||
1790 | |||
1791 | static int | ||
1792 | de4x5_rx_ovfc(struct net_device *dev) | ||
1793 | { | ||
1794 | struct de4x5_private *lp = netdev_priv(dev); | ||
1795 | u_long iobase = dev->base_addr; | ||
1796 | int omr; | ||
1797 | |||
1798 | omr = inl(DE4X5_OMR); | ||
1799 | outl(omr & ~OMR_SR, DE4X5_OMR); | ||
1800 | while (inl(DE4X5_STS) & STS_RS); | ||
1801 | |||
1802 | for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) { | ||
1803 | lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN); | ||
1804 | lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize; | ||
1805 | } | ||
1806 | |||
1807 | outl(omr, DE4X5_OMR); | ||
1808 | |||
1809 | return 0; | ||
1810 | } | ||
1811 | |||
1812 | static int | ||
1813 | de4x5_close(struct net_device *dev) | ||
1814 | { | ||
1815 | struct de4x5_private *lp = netdev_priv(dev); | ||
1816 | u_long iobase = dev->base_addr; | ||
1817 | s32 imr, omr; | ||
1818 | |||
1819 | disable_ast(dev); | ||
1820 | |||
1821 | netif_stop_queue(dev); | ||
1822 | |||
1823 | if (de4x5_debug & DEBUG_CLOSE) { | ||
1824 | printk("%s: Shutting down ethercard, status was %8.8x.\n", | ||
1825 | dev->name, inl(DE4X5_STS)); | ||
1826 | } | ||
1827 | |||
1828 | /* | ||
1829 | ** We stop the DE4X5 here... mask interrupts and stop TX & RX | ||
1830 | */ | ||
1831 | DISABLE_IRQs; | ||
1832 | STOP_DE4X5; | ||
1833 | |||
1834 | /* Free the associated irq */ | ||
1835 | free_irq(dev->irq, dev); | ||
1836 | lp->state = CLOSED; | ||
1837 | |||
1838 | /* Free any socket buffers */ | ||
1839 | de4x5_free_rx_buffs(dev); | ||
1840 | de4x5_free_tx_buffs(dev); | ||
1841 | |||
1842 | /* Put the adapter to sleep to save power */ | ||
1843 | yawn(dev, SLEEP); | ||
1844 | |||
1845 | return 0; | ||
1846 | } | ||
1847 | |||
1848 | static struct net_device_stats * | ||
1849 | de4x5_get_stats(struct net_device *dev) | ||
1850 | { | ||
1851 | struct de4x5_private *lp = netdev_priv(dev); | ||
1852 | u_long iobase = dev->base_addr; | ||
1853 | |||
1854 | lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR)); | ||
1855 | |||
1856 | return &lp->stats; | ||
1857 | } | ||
1858 | |||
1859 | static void | ||
1860 | de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len) | ||
1861 | { | ||
1862 | struct de4x5_private *lp = netdev_priv(dev); | ||
1863 | int i; | ||
1864 | |||
1865 | for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) { | ||
1866 | if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) { | ||
1867 | lp->pktStats.bins[i]++; | ||
1868 | i = DE4X5_PKT_STAT_SZ; | ||
1869 | } | ||
1870 | } | ||
1871 | if (is_multicast_ether_addr(buf)) { | ||
1872 | if (is_broadcast_ether_addr(buf)) { | ||
1873 | lp->pktStats.broadcast++; | ||
1874 | } else { | ||
1875 | lp->pktStats.multicast++; | ||
1876 | } | ||
1877 | } else if (compare_ether_addr(buf, dev->dev_addr) == 0) { | ||
1878 | lp->pktStats.unicast++; | ||
1879 | } | ||
1880 | |||
1881 | lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */ | ||
1882 | if (lp->pktStats.bins[0] == 0) { /* Reset counters */ | ||
1883 | memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); | ||
1884 | } | ||
1885 | } | ||
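de4x5_local_stats() keeps a small size histogram alongside the unicast/multicast/broadcast counters: bin i collects frames shorter than i * DE4X5_PKT_BIN_SZ bytes, while bin 0 mirrors the total packet count. A standalone sketch of the bin selection, with assumed stand-in values for the two constants.

#include <stdio.h>

#define PKT_STAT_SZ 16      /* assumed stand-in for DE4X5_PKT_STAT_SZ */
#define PKT_BIN_SZ  128     /* assumed stand-in for DE4X5_PKT_BIN_SZ  */

/* Pick the first bin whose upper bound exceeds the packet length,
 * mirroring the loop in de4x5_local_stats(); 0 means "no size bin",
 * in which case only the running total in bins[0] is bumped.
 */
static int pkt_bin(int pkt_len)
{
    int i;

    for (i = 1; i < PKT_STAT_SZ - 1; i++)
        if (pkt_len < i * PKT_BIN_SZ)
            return i;

    return 0;
}

int main(void)
{
    printf("%d %d %d\n", pkt_bin(60), pkt_bin(512), pkt_bin(1514)); /* 1 5 12 */
    return 0;
}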
1886 | |||
1887 | /* | ||
1888 | ** Removes the TD_IC flag from previous descriptor to improve TX performance. | ||
1889 | ** If the flag is changed on a descriptor that is being read by the hardware, | ||
1890 | ** I assume PCI transaction ordering will mean you are either successful or | ||
1891 | ** just miss asserting the change to the hardware. Anyway you're messing with | ||
1892 | ** a descriptor you don't own, but this shouldn't kill the chip provided | ||
1893 | ** the descriptor register is read only to the hardware. | ||
1894 | */ | ||
1895 | static void | ||
1896 | load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb) | ||
1897 | { | ||
1898 | struct de4x5_private *lp = netdev_priv(dev); | ||
1899 | int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1); | ||
1900 | dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE); | ||
1901 | |||
1902 | lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma); | ||
1903 | lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER); | ||
1904 | lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags); | ||
1905 | lp->tx_skb[lp->tx_new] = skb; | ||
1906 | lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC); | ||
1907 | barrier(); | ||
1908 | |||
1909 | lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN); | ||
1910 | barrier(); | ||
1911 | } | ||
1912 | |||
1913 | /* | ||
1914 | ** Set or clear the multicast filter for this adaptor. | ||
1915 | */ | ||
1916 | static void | ||
1917 | set_multicast_list(struct net_device *dev) | ||
1918 | { | ||
1919 | struct de4x5_private *lp = netdev_priv(dev); | ||
1920 | u_long iobase = dev->base_addr; | ||
1921 | |||
1922 | /* First, double check that the adapter is open */ | ||
1923 | if (lp->state == OPEN) { | ||
1924 | if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */ | ||
1925 | u32 omr; | ||
1926 | omr = inl(DE4X5_OMR); | ||
1927 | omr |= OMR_PR; | ||
1928 | outl(omr, DE4X5_OMR); | ||
1929 | } else { | ||
1930 | SetMulticastFilter(dev); | ||
1931 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | | ||
1932 | SETUP_FRAME_LEN, (struct sk_buff *)1); | ||
1933 | |||
1934 | lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; | ||
1935 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ | ||
1936 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
1937 | } | ||
1938 | } | ||
1939 | } | ||
1940 | |||
1941 | /* | ||
1942 | ** Calculate the hash code and update the logical address filter | ||
1943 | ** from a list of ethernet multicast addresses. | ||
1944 | ** Little endian crc one liner from Matt Thomas, DEC. | ||
1945 | */ | ||
1946 | static void | ||
1947 | SetMulticastFilter(struct net_device *dev) | ||
1948 | { | ||
1949 | struct de4x5_private *lp = netdev_priv(dev); | ||
1950 | struct netdev_hw_addr *ha; | ||
1951 | u_long iobase = dev->base_addr; | ||
1952 | int i, bit, byte; | ||
1953 | u16 hashcode; | ||
1954 | u32 omr, crc; | ||
1955 | char *pa; | ||
1956 | unsigned char *addrs; | ||
1957 | |||
1958 | omr = inl(DE4X5_OMR); | ||
1959 | omr &= ~(OMR_PR | OMR_PM); | ||
1960 | pa = build_setup_frame(dev, ALL); /* Build the basic frame */ | ||
1961 | |||
1962 | if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { | ||
1963 | omr |= OMR_PM; /* Pass all multicasts */ | ||
1964 | } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ | ||
1965 | netdev_for_each_mc_addr(ha, dev) { | ||
1966 | crc = ether_crc_le(ETH_ALEN, ha->addr); | ||
1967 | hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ | ||
1968 | |||
1969 | byte = hashcode >> 3; /* bit[3-8] -> byte in filter */ | ||
1970 | bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */ | ||
1971 | |||
1972 | byte <<= 1; /* calc offset into setup frame */ | ||
1973 | if (byte & 0x02) { | ||
1974 | byte -= 1; | ||
1975 | } | ||
1976 | lp->setup_frame[byte] |= bit; | ||
1977 | } | ||
1978 | } else { /* Perfect filtering */ | ||
1979 | netdev_for_each_mc_addr(ha, dev) { | ||
1980 | addrs = ha->addr; | ||
1981 | for (i=0; i<ETH_ALEN; i++) { | ||
1982 | *(pa + (i&1)) = *addrs++; | ||
1983 | if (i & 0x01) pa += 4; | ||
1984 | } | ||
1985 | } | ||
1986 | } | ||
1987 | outl(omr, DE4X5_OMR); | ||
1988 | } | ||
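In hash mode the filter bit comes from a little-endian CRC-32 of the multicast address: the nine least significant CRC bits select one of 512 filter bits, which then has to be folded into the 16-bit words of the setup frame (the odd/even byte shuffle above). A standalone sketch, assuming HASH_BITS masks those nine bits (0x1ff) and that ether_crc_le() is the usual reflected CRC-32 (init 0xffffffff, polynomial 0xedb88320, no final inversion).

#include <stdio.h>

#define ETH_ALEN 6

/* Reflected CRC-32 over the address, matching what ether_crc_le() is
 * assumed to compute: init 0xffffffff, poly 0xedb88320, LSB first,
 * no final XOR.
 */
static unsigned int crc_le(const unsigned char *data, int len)
{
    unsigned int crc = 0xffffffff;
    int bit;

    while (len-- > 0) {
        unsigned char octet = *data++;

        for (bit = 0; bit < 8; bit++, octet >>= 1)
            crc = ((crc ^ octet) & 1) ? (crc >> 1) ^ 0xedb88320
                                      : crc >> 1;
    }
    return crc;
}

int main(void)
{
    /* Sample multicast address (IPv4 all-hosts group), purely illustrative */
    unsigned char mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    unsigned int hashcode = crc_le(mc, ETH_ALEN) & 0x1ff; /* 9 LSBs of the CRC */
    int byte = hashcode >> 3;          /* byte within the 64-byte filter */
    int bit  = 1 << (hashcode & 0x07); /* bit within that byte           */

    /* Same offset adjustment as SetMulticastFilter() uses when the
     * filter bytes are laid out as 16-bit words in the setup frame.
     */
    byte <<= 1;
    if (byte & 0x02)
        byte -= 1;

    printf("hash=%u -> setup_frame[%d] |= 0x%02x\n", hashcode, byte, bit);
    return 0;
}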
1989 | |||
1990 | #ifdef CONFIG_EISA | ||
1991 | |||
1992 | static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST; | ||
1993 | |||
1994 | static int __init de4x5_eisa_probe (struct device *gendev) | ||
1995 | { | ||
1996 | struct eisa_device *edev; | ||
1997 | u_long iobase; | ||
1998 | u_char irq, regval; | ||
1999 | u_short vendor; | ||
2000 | u32 cfid; | ||
2001 | int status, device; | ||
2002 | struct net_device *dev; | ||
2003 | struct de4x5_private *lp; | ||
2004 | |||
2005 | edev = to_eisa_device (gendev); | ||
2006 | iobase = edev->base_addr; | ||
2007 | |||
2008 | if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5")) | ||
2009 | return -EBUSY; | ||
2010 | |||
2011 | if (!request_region (iobase + DE4X5_EISA_IO_PORTS, | ||
2012 | DE4X5_EISA_TOTAL_SIZE, "de4x5")) { | ||
2013 | status = -EBUSY; | ||
2014 | goto release_reg_1; | ||
2015 | } | ||
2016 | |||
2017 | if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { | ||
2018 | status = -ENOMEM; | ||
2019 | goto release_reg_2; | ||
2020 | } | ||
2021 | lp = netdev_priv(dev); | ||
2022 | |||
2023 | cfid = (u32) inl(PCI_CFID); | ||
2024 | lp->cfrv = (u_short) inl(PCI_CFRV); | ||
2025 | device = (cfid >> 8) & 0x00ffff00; | ||
2026 | vendor = (u_short) cfid; | ||
2027 | |||
2028 | /* Read the EISA Configuration Registers */ | ||
2029 | regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT); | ||
2030 | #ifdef CONFIG_ALPHA | ||
2031 | /* Looks like the Jensen firmware (rev 2.2) doesn't really | ||
2032 | * care about the EISA configuration, and thus doesn't | ||
2033 | * configure the PLX bridge properly. Oh well... Simply mimic | ||
2034 | * the EISA config file to sort it out. */ | ||
2035 | |||
2036 | /* EISA REG1: Assert DecChip 21040 HW Reset */ | ||
2037 | outb (ER1_IAM | 1, EISA_REG1); | ||
2038 | mdelay (1); | ||
2039 | |||
2040 | /* EISA REG1: Deassert DecChip 21040 HW Reset */ | ||
2041 | outb (ER1_IAM, EISA_REG1); | ||
2042 | mdelay (1); | ||
2043 | |||
2044 | /* EISA REG3: R/W Burst Transfer Enable */ | ||
2045 | outb (ER3_BWE | ER3_BRE, EISA_REG3); | ||
2046 | |||
2047 | /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */ | ||
2048 | outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0); | ||
2049 | #endif | ||
2050 | irq = de4x5_irq[(regval >> 1) & 0x03]; | ||
2051 | |||
2052 | if (is_DC2114x) { | ||
2053 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); | ||
2054 | } | ||
2055 | lp->chipset = device; | ||
2056 | lp->bus = EISA; | ||
2057 | |||
2058 | /* Write the PCI Configuration Registers */ | ||
2059 | outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS); | ||
2060 | outl(0x00006000, PCI_CFLT); | ||
2061 | outl(iobase, PCI_CBIO); | ||
2062 | |||
2063 | DevicePresent(dev, EISA_APROM); | ||
2064 | |||
2065 | dev->irq = irq; | ||
2066 | |||
2067 | if (!(status = de4x5_hw_init (dev, iobase, gendev))) { | ||
2068 | return 0; | ||
2069 | } | ||
2070 | |||
2071 | free_netdev (dev); | ||
2072 | release_reg_2: | ||
2073 | release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); | ||
2074 | release_reg_1: | ||
2075 | release_region (iobase, DE4X5_EISA_TOTAL_SIZE); | ||
2076 | |||
2077 | return status; | ||
2078 | } | ||
2079 | |||
2080 | static int __devexit de4x5_eisa_remove (struct device *device) | ||
2081 | { | ||
2082 | struct net_device *dev; | ||
2083 | u_long iobase; | ||
2084 | |||
2085 | dev = dev_get_drvdata(device); | ||
2086 | iobase = dev->base_addr; | ||
2087 | |||
2088 | unregister_netdev (dev); | ||
2089 | free_netdev (dev); | ||
2090 | release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE); | ||
2091 | release_region (iobase, DE4X5_EISA_TOTAL_SIZE); | ||
2092 | |||
2093 | return 0; | ||
2094 | } | ||
2095 | |||
2096 | static struct eisa_device_id de4x5_eisa_ids[] = { | ||
2097 | { "DEC4250", 0 }, /* 0 is the board name index... */ | ||
2098 | { "" } | ||
2099 | }; | ||
2100 | MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); | ||
2101 | |||
2102 | static struct eisa_driver de4x5_eisa_driver = { | ||
2103 | .id_table = de4x5_eisa_ids, | ||
2104 | .driver = { | ||
2105 | .name = "de4x5", | ||
2106 | .probe = de4x5_eisa_probe, | ||
2107 | .remove = __devexit_p (de4x5_eisa_remove), | ||
2108 | } | ||
2109 | }; | ||
2110 | MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids); | ||
2111 | #endif | ||
2112 | |||
2113 | #ifdef CONFIG_PCI | ||
2114 | |||
2115 | /* | ||
2116 | ** This function searches the current bus (which is >0) for a DECchip with an | ||
2117 | ** SROM, so that in multiport cards that have one SROM shared between multiple | ||
2118 | ** DECchips, we can find the base SROM irrespective of the BIOS scan direction. | ||
2119 | ** For single port cards this is a time waster... | ||
2120 | */ | ||
2121 | static void __devinit | ||
2122 | srom_search(struct net_device *dev, struct pci_dev *pdev) | ||
2123 | { | ||
2124 | u_char pb; | ||
2125 | u_short vendor, status; | ||
2126 | u_int irq = 0, device; | ||
2127 | u_long iobase = 0; /* Clear upper 32 bits in Alphas */ | ||
2128 | int i, j; | ||
2129 | struct de4x5_private *lp = netdev_priv(dev); | ||
2130 | struct list_head *walk; | ||
2131 | |||
2132 | list_for_each(walk, &pdev->bus_list) { | ||
2133 | struct pci_dev *this_dev = pci_dev_b(walk); | ||
2134 | |||
2135 | /* Skip the pci_bus list entry */ | ||
2136 | if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue; | ||
2137 | |||
2138 | vendor = this_dev->vendor; | ||
2139 | device = this_dev->device << 8; | ||
2140 | if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue; | ||
2141 | |||
2142 | /* Get the chip configuration revision register */ | ||
2143 | pb = this_dev->bus->number; | ||
2144 | |||
2145 | /* Set the device number information */ | ||
2146 | lp->device = PCI_SLOT(this_dev->devfn); | ||
2147 | lp->bus_num = pb; | ||
2148 | |||
2149 | /* Set the chipset information */ | ||
2150 | if (is_DC2114x) { | ||
2151 | device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK | ||
2152 | ? DC21142 : DC21143); | ||
2153 | } | ||
2154 | lp->chipset = device; | ||
2155 | |||
2156 | /* Get the board I/O address (64 bits on sparc64) */ | ||
2157 | iobase = pci_resource_start(this_dev, 0); | ||
2158 | |||
2159 | /* Fetch the IRQ to be used */ | ||
2160 | irq = this_dev->irq; | ||
2161 | if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue; | ||
2162 | |||
2163 | /* Check if I/O accesses are enabled */ | ||
2164 | pci_read_config_word(this_dev, PCI_COMMAND, &status); | ||
2165 | if (!(status & PCI_COMMAND_IO)) continue; | ||
2166 | |||
2167 | /* Search for a valid SROM attached to this DECchip */ | ||
2168 | DevicePresent(dev, DE4X5_APROM); | ||
2169 | for (j=0, i=0; i<ETH_ALEN; i++) { | ||
2170 | j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i); | ||
2171 | } | ||
2172 | if (j != 0 && j != 6 * 0xff) { | ||
2173 | last.chipset = device; | ||
2174 | last.bus = pb; | ||
2175 | last.irq = irq; | ||
2176 | for (i=0; i<ETH_ALEN; i++) { | ||
2177 | last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i); | ||
2178 | } | ||
2179 | return; | ||
2180 | } | ||
2181 | } | ||
2182 | } | ||
2183 | |||
2184 | /* | ||
2185 | ** PCI bus I/O device probe | ||
2186 | ** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not | ||
2187 | ** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be | ||
2188 | ** enabled by the user first in the set up utility. Hence we just check for | ||
2189 | ** enabled features and silently ignore the card if they're not. | ||
2190 | ** | ||
2191 | ** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering | ||
2192 | ** bit. Here, check for I/O accesses and then set BM. If you put the card in | ||
2193 | ** a non BM slot, you're on your own (and complain to the PC vendor that your | ||
2194 | ** PC doesn't conform to the PCI standard)! | ||
2195 | ** | ||
2196 | ** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x | ||
2197 | ** kernels use the V0.535[n] drivers. | ||
2198 | */ | ||
2199 | |||
2200 | static int __devinit de4x5_pci_probe (struct pci_dev *pdev, | ||
2201 | const struct pci_device_id *ent) | ||
2202 | { | ||
2203 | u_char pb, pbus = 0, dev_num, dnum = 0, timer; | ||
2204 | u_short vendor, status; | ||
2205 | u_int irq = 0, device; | ||
2206 | u_long iobase = 0; /* Clear upper 32 bits in Alphas */ | ||
2207 | int error; | ||
2208 | struct net_device *dev; | ||
2209 | struct de4x5_private *lp; | ||
2210 | |||
2211 | dev_num = PCI_SLOT(pdev->devfn); | ||
2212 | pb = pdev->bus->number; | ||
2213 | |||
2214 | if (io) { /* probe a single PCI device */ | ||
2215 | pbus = (u_short)(io >> 8); | ||
2216 | dnum = (u_short)(io & 0xff); | ||
2217 | if ((pbus != pb) || (dnum != dev_num)) | ||
2218 | return -ENODEV; | ||
2219 | } | ||
2220 | |||
2221 | vendor = pdev->vendor; | ||
2222 | device = pdev->device << 8; | ||
2223 | if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) | ||
2224 | return -ENODEV; | ||
2225 | |||
2226 | /* Ok, the device seems to be for us. */ | ||
2227 | if ((error = pci_enable_device (pdev))) | ||
2228 | return error; | ||
2229 | |||
2230 | if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) { | ||
2231 | error = -ENOMEM; | ||
2232 | goto disable_dev; | ||
2233 | } | ||
2234 | |||
2235 | lp = netdev_priv(dev); | ||
2236 | lp->bus = PCI; | ||
2237 | lp->bus_num = 0; | ||
2238 | |||
2239 | /* Search for an SROM on this bus */ | ||
2240 | if (lp->bus_num != pb) { | ||
2241 | lp->bus_num = pb; | ||
2242 | srom_search(dev, pdev); | ||
2243 | } | ||
2244 | |||
2245 | /* Get the chip configuration revision register */ | ||
2246 | lp->cfrv = pdev->revision; | ||
2247 | |||
2248 | /* Set the device number information */ | ||
2249 | lp->device = dev_num; | ||
2250 | lp->bus_num = pb; | ||
2251 | |||
2252 | /* Set the chipset information */ | ||
2253 | if (is_DC2114x) { | ||
2254 | device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143); | ||
2255 | } | ||
2256 | lp->chipset = device; | ||
2257 | |||
2258 | /* Get the board I/O address (64 bits on sparc64) */ | ||
2259 | iobase = pci_resource_start(pdev, 0); | ||
2260 | |||
2261 | /* Fetch the IRQ to be used */ | ||
2262 | irq = pdev->irq; | ||
2263 | if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) { | ||
2264 | error = -ENODEV; | ||
2265 | goto free_dev; | ||
2266 | } | ||
2267 | |||
2268 | /* Check if I/O accesses and Bus Mastering are enabled */ | ||
2269 | pci_read_config_word(pdev, PCI_COMMAND, &status); | ||
2270 | #ifdef __powerpc__ | ||
2271 | if (!(status & PCI_COMMAND_IO)) { | ||
2272 | status |= PCI_COMMAND_IO; | ||
2273 | pci_write_config_word(pdev, PCI_COMMAND, status); | ||
2274 | pci_read_config_word(pdev, PCI_COMMAND, &status); | ||
2275 | } | ||
2276 | #endif /* __powerpc__ */ | ||
2277 | if (!(status & PCI_COMMAND_IO)) { | ||
2278 | error = -ENODEV; | ||
2279 | goto free_dev; | ||
2280 | } | ||
2281 | |||
2282 | if (!(status & PCI_COMMAND_MASTER)) { | ||
2283 | status |= PCI_COMMAND_MASTER; | ||
2284 | pci_write_config_word(pdev, PCI_COMMAND, status); | ||
2285 | pci_read_config_word(pdev, PCI_COMMAND, &status); | ||
2286 | } | ||
2287 | if (!(status & PCI_COMMAND_MASTER)) { | ||
2288 | error = -ENODEV; | ||
2289 | goto free_dev; | ||
2290 | } | ||
2291 | |||
2292 | /* Check the latency timer for values >= 0x60 */ | ||
2293 | pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer); | ||
2294 | if (timer < 0x60) { | ||
2295 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60); | ||
2296 | } | ||
2297 | |||
2298 | DevicePresent(dev, DE4X5_APROM); | ||
2299 | |||
2300 | if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) { | ||
2301 | error = -EBUSY; | ||
2302 | goto free_dev; | ||
2303 | } | ||
2304 | |||
2305 | dev->irq = irq; | ||
2306 | |||
2307 | if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) { | ||
2308 | goto release; | ||
2309 | } | ||
2310 | |||
2311 | return 0; | ||
2312 | |||
2313 | release: | ||
2314 | release_region (iobase, DE4X5_PCI_TOTAL_SIZE); | ||
2315 | free_dev: | ||
2316 | free_netdev (dev); | ||
2317 | disable_dev: | ||
2318 | pci_disable_device (pdev); | ||
2319 | return error; | ||
2320 | } | ||
2321 | |||
2322 | static void __devexit de4x5_pci_remove (struct pci_dev *pdev) | ||
2323 | { | ||
2324 | struct net_device *dev; | ||
2325 | u_long iobase; | ||
2326 | |||
2327 | dev = dev_get_drvdata(&pdev->dev); | ||
2328 | iobase = dev->base_addr; | ||
2329 | |||
2330 | unregister_netdev (dev); | ||
2331 | free_netdev (dev); | ||
2332 | release_region (iobase, DE4X5_PCI_TOTAL_SIZE); | ||
2333 | pci_disable_device (pdev); | ||
2334 | } | ||
2335 | |||
2336 | static struct pci_device_id de4x5_pci_tbl[] = { | ||
2337 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP, | ||
2338 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
2339 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS, | ||
2340 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, | ||
2341 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST, | ||
2342 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, | ||
2343 | { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, | ||
2344 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, | ||
2345 | { }, | ||
2346 | }; | ||
2347 | |||
2348 | static struct pci_driver de4x5_pci_driver = { | ||
2349 | .name = "de4x5", | ||
2350 | .id_table = de4x5_pci_tbl, | ||
2351 | .probe = de4x5_pci_probe, | ||
2352 | .remove = __devexit_p (de4x5_pci_remove), | ||
2353 | }; | ||
2354 | |||
2355 | #endif | ||
2356 | |||
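The probe/remove pair above follows the usual PCI driver shape: resources are acquired in order (enable device, allocate the net_device, claim the I/O region) and, on failure, released in reverse through the goto labels at the end. A minimal stand-alone sketch of that unwind pattern, using made-up acquire/release helpers rather than the kernel API:

    /* Illustrative sketch of the reverse-order goto unwinding used in
     * de4x5_pci_probe(); the acquire/release helpers are stand-ins. */
    #include <stdio.h>

    static int acquire_a(void) { puts("A acquired"); return 0; }
    static int acquire_b(void) { puts("B acquired"); return 0; }
    static int acquire_c(void) { puts("C failed");   return -1; } /* simulate failure */
    static void release_b(void) { puts("B released"); }
    static void release_a(void) { puts("A released"); }

    static int probe(void)
    {
        int error;

        if ((error = acquire_a()))
            return error;
        if ((error = acquire_b()))
            goto undo_a;
        if ((error = acquire_c()))
            goto undo_b;
        return 0;               /* success: everything stays acquired */

    undo_b:
        release_b();
    undo_a:
        release_a();
        return error;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }
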
2357 | /* | ||
2358 | ** Auto configure the media here rather than setting the port at compile | ||
2359 | ** time. This routine is called by de4x5_init() and when a loss of media is | ||
2360 | ** detected (excessive collisions, loss of carrier, no carrier or link fail | ||
2361 | ** [TP] or no recent receive activity) to check whether the user has been | ||
2362 | ** sneaky and changed the port on us. | ||
2363 | */ | ||
2364 | static int | ||
2365 | autoconf_media(struct net_device *dev) | ||
2366 | { | ||
2367 | struct de4x5_private *lp = netdev_priv(dev); | ||
2368 | u_long iobase = dev->base_addr; | ||
2369 | |||
2370 | disable_ast(dev); | ||
2371 | |||
2372 | lp->c_media = AUTO; /* Bogus last media */ | ||
2373 | inl(DE4X5_MFC); /* Zero the lost frames counter */ | ||
2374 | lp->media = INIT; | ||
2375 | lp->tcount = 0; | ||
2376 | |||
2377 | de4x5_ast(dev); | ||
2378 | |||
2379 | return lp->media; | ||
2380 | } | ||
2381 | |||
2382 | /* | ||
2383 | ** Autoconfigure the media when using the DC21040. AUI cannot be distinguished | ||
2384 | ** from BNC as the port has a jumper to set thick or thin wire. When set for | ||
2385 | ** BNC, the BNC port will indicate activity if it's not terminated correctly. | ||
2386 | ** The only way to test for that is to place a loopback packet onto the | ||
2387 | ** network and watch for errors. Since we're messing with the interrupt mask | ||
2388 | ** register, disable the board interrupts and do not allow any more packets to | ||
2389 | ** be queued to the hardware. Re-enable everything only when the media is | ||
2390 | ** found. | ||
2391 | ** I may have to "age out" locally queued packets so that the higher layer | ||
2392 | ** timeouts don't end up duplicating packets on the network. | ||
2393 | */ | ||
2394 | static int | ||
2395 | dc21040_autoconf(struct net_device *dev) | ||
2396 | { | ||
2397 | struct de4x5_private *lp = netdev_priv(dev); | ||
2398 | u_long iobase = dev->base_addr; | ||
2399 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2400 | s32 imr; | ||
2401 | |||
2402 | switch (lp->media) { | ||
2403 | case INIT: | ||
2404 | DISABLE_IRQs; | ||
2405 | lp->tx_enable = false; | ||
2406 | lp->timeout = -1; | ||
2407 | de4x5_save_skbs(dev); | ||
2408 | if ((lp->autosense == AUTO) || (lp->autosense == TP)) { | ||
2409 | lp->media = TP; | ||
2410 | } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) { | ||
2411 | lp->media = BNC_AUI; | ||
2412 | } else if (lp->autosense == EXT_SIA) { | ||
2413 | lp->media = EXT_SIA; | ||
2414 | } else { | ||
2415 | lp->media = NC; | ||
2416 | } | ||
2417 | lp->local_state = 0; | ||
2418 | next_tick = dc21040_autoconf(dev); | ||
2419 | break; | ||
2420 | |||
2421 | case TP: | ||
2422 | next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, | ||
2423 | TP_SUSPECT, test_tp); | ||
2424 | break; | ||
2425 | |||
2426 | case TP_SUSPECT: | ||
2427 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf); | ||
2428 | break; | ||
2429 | |||
2430 | case BNC: | ||
2431 | case AUI: | ||
2432 | case BNC_AUI: | ||
2433 | next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, | ||
2434 | BNC_AUI_SUSPECT, ping_media); | ||
2435 | break; | ||
2436 | |||
2437 | case BNC_AUI_SUSPECT: | ||
2438 | next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf); | ||
2439 | break; | ||
2440 | |||
2441 | case EXT_SIA: | ||
2442 | next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, | ||
2443 | NC, EXT_SIA_SUSPECT, ping_media); | ||
2444 | break; | ||
2445 | |||
2446 | case EXT_SIA_SUSPECT: | ||
2447 | next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf); | ||
2448 | break; | ||
2449 | |||
2450 | case NC: | ||
2451 | /* default to TP for all */ | ||
2452 | reset_init_sia(dev, 0x8f01, 0xffff, 0x0000); | ||
2453 | if (lp->media != lp->c_media) { | ||
2454 | de4x5_dbg_media(dev); | ||
2455 | lp->c_media = lp->media; | ||
2456 | } | ||
2457 | lp->media = INIT; | ||
2458 | lp->tx_enable = false; | ||
2459 | break; | ||
2460 | } | ||
2461 | |||
2462 | return next_tick; | ||
2463 | } | ||
2464 | |||
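Throughout the autosense code the return value doubles as a scheduling hint: a plain non-negative value is the delay in milliseconds before the next autosense pass, while a negative value (a delay OR'd with the TIMER_CB flag) means the current media test is still in progress and should be re-polled after (value & ~TIMER_CB) ms. A small user-space sketch of that convention; the TIMER_CB value and poll_link() below are illustrative assumptions, not the driver's real definitions:

    /* Sketch only: models the "delay | TIMER_CB" return convention of the
     * de4x5 media test helpers.  TIMER_CB here is assumed to be the sign bit;
     * poll_link() is a stand-in for test_media()/test_tp()/ping_media(). */
    #include <limits.h>
    #include <stdio.h>

    #define TIMER_CB  INT_MIN          /* assumption: sign bit marks "call me back" */

    static int polls_left = 3;

    static int poll_link(void)
    {
        if (--polls_left > 0)
            return 100 | TIMER_CB;     /* still sampling: re-poll in 100 ms */
        return 1;                      /* finished: link state decided */
    }

    int main(void)
    {
        for (;;) {
            int sts = poll_link();

            if (sts < 0) {                       /* TIMER_CB set */
                int next_tick = sts & ~TIMER_CB; /* recover the delay in ms */
                printf("media test pending, re-poll in %d ms\n", next_tick);
            } else {
                printf("media test done, status %d\n", sts);
                break;
            }
        }
        return 0;
    }
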
2465 | static int | ||
2466 | dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, | ||
2467 | int next_state, int suspect_state, | ||
2468 | int (*fn)(struct net_device *, int)) | ||
2469 | { | ||
2470 | struct de4x5_private *lp = netdev_priv(dev); | ||
2471 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2472 | int linkBad; | ||
2473 | |||
2474 | switch (lp->local_state) { | ||
2475 | case 0: | ||
2476 | reset_init_sia(dev, csr13, csr14, csr15); | ||
2477 | lp->local_state++; | ||
2478 | next_tick = 500; | ||
2479 | break; | ||
2480 | |||
2481 | case 1: | ||
2482 | if (!lp->tx_enable) { | ||
2483 | linkBad = fn(dev, timeout); | ||
2484 | if (linkBad < 0) { | ||
2485 | next_tick = linkBad & ~TIMER_CB; | ||
2486 | } else { | ||
2487 | if (linkBad && (lp->autosense == AUTO)) { | ||
2488 | lp->local_state = 0; | ||
2489 | lp->media = next_state; | ||
2490 | } else { | ||
2491 | de4x5_init_connection(dev); | ||
2492 | } | ||
2493 | } | ||
2494 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2495 | lp->media = suspect_state; | ||
2496 | next_tick = 3000; | ||
2497 | } | ||
2498 | break; | ||
2499 | } | ||
2500 | |||
2501 | return next_tick; | ||
2502 | } | ||
2503 | |||
2504 | static int | ||
2505 | de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, | ||
2506 | int (*fn)(struct net_device *, int), | ||
2507 | int (*asfn)(struct net_device *)) | ||
2508 | { | ||
2509 | struct de4x5_private *lp = netdev_priv(dev); | ||
2510 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2511 | int linkBad; | ||
2512 | |||
2513 | switch (lp->local_state) { | ||
2514 | case 1: | ||
2515 | if (lp->linkOK) { | ||
2516 | lp->media = prev_state; | ||
2517 | } else { | ||
2518 | lp->local_state++; | ||
2519 | next_tick = asfn(dev); | ||
2520 | } | ||
2521 | break; | ||
2522 | |||
2523 | case 2: | ||
2524 | linkBad = fn(dev, timeout); | ||
2525 | if (linkBad < 0) { | ||
2526 | next_tick = linkBad & ~TIMER_CB; | ||
2527 | } else if (!linkBad) { | ||
2528 | lp->local_state--; | ||
2529 | lp->media = prev_state; | ||
2530 | } else { | ||
2531 | lp->media = INIT; | ||
2532 | lp->tcount++; | ||
2533 | } | ||
2534 | } | ||
2535 | |||
2536 | return next_tick; | ||
2537 | } | ||
2538 | |||
2539 | /* | ||
2540 | ** Autoconfigure the media when using the DC21041. AUI needs to be tested | ||
2541 | ** before BNC, because the BNC port will indicate activity if it's not | ||
2542 | ** terminated correctly. The only way to test for that is to place a loopback | ||
2543 | ** packet onto the network and watch for errors. Since we're messing with | ||
2544 | ** the interrupt mask register, disable the board interrupts and do not allow | ||
2545 | ** any more packets to be queued to the hardware. Re-enable everything only | ||
2546 | ** when the media is found. | ||
2547 | */ | ||
2548 | static int | ||
2549 | dc21041_autoconf(struct net_device *dev) | ||
2550 | { | ||
2551 | struct de4x5_private *lp = netdev_priv(dev); | ||
2552 | u_long iobase = dev->base_addr; | ||
2553 | s32 sts, irqs, irq_mask, imr, omr; | ||
2554 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2555 | |||
2556 | switch (lp->media) { | ||
2557 | case INIT: | ||
2558 | DISABLE_IRQs; | ||
2559 | lp->tx_enable = false; | ||
2560 | lp->timeout = -1; | ||
2561 | de4x5_save_skbs(dev); /* Save non transmitted skb's */ | ||
2562 | if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) { | ||
2563 | lp->media = TP; /* On chip auto negotiation is broken */ | ||
2564 | } else if (lp->autosense == TP) { | ||
2565 | lp->media = TP; | ||
2566 | } else if (lp->autosense == BNC) { | ||
2567 | lp->media = BNC; | ||
2568 | } else if (lp->autosense == AUI) { | ||
2569 | lp->media = AUI; | ||
2570 | } else { | ||
2571 | lp->media = NC; | ||
2572 | } | ||
2573 | lp->local_state = 0; | ||
2574 | next_tick = dc21041_autoconf(dev); | ||
2575 | break; | ||
2576 | |||
2577 | case TP_NW: | ||
2578 | if (lp->timeout < 0) { | ||
2579 | omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */ | ||
2580 | outl(omr | OMR_FDX, DE4X5_OMR); | ||
2581 | } | ||
2582 | irqs = STS_LNF | STS_LNP; | ||
2583 | irq_mask = IMR_LFM | IMR_LPM; | ||
2584 | sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400); | ||
2585 | if (sts < 0) { | ||
2586 | next_tick = sts & ~TIMER_CB; | ||
2587 | } else { | ||
2588 | if (sts & STS_LNP) { | ||
2589 | lp->media = ANS; | ||
2590 | } else { | ||
2591 | lp->media = AUI; | ||
2592 | } | ||
2593 | next_tick = dc21041_autoconf(dev); | ||
2594 | } | ||
2595 | break; | ||
2596 | |||
2597 | case ANS: | ||
2598 | if (!lp->tx_enable) { | ||
2599 | irqs = STS_LNP; | ||
2600 | irq_mask = IMR_LPM; | ||
2601 | sts = test_ans(dev, irqs, irq_mask, 3000); | ||
2602 | if (sts < 0) { | ||
2603 | next_tick = sts & ~TIMER_CB; | ||
2604 | } else { | ||
2605 | if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { | ||
2606 | lp->media = TP; | ||
2607 | next_tick = dc21041_autoconf(dev); | ||
2608 | } else { | ||
2609 | lp->local_state = 1; | ||
2610 | de4x5_init_connection(dev); | ||
2611 | } | ||
2612 | } | ||
2613 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2614 | lp->media = ANS_SUSPECT; | ||
2615 | next_tick = 3000; | ||
2616 | } | ||
2617 | break; | ||
2618 | |||
2619 | case ANS_SUSPECT: | ||
2620 | next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf); | ||
2621 | break; | ||
2622 | |||
2623 | case TP: | ||
2624 | if (!lp->tx_enable) { | ||
2625 | if (lp->timeout < 0) { | ||
2626 | omr = inl(DE4X5_OMR); /* Set up half duplex for TP */ | ||
2627 | outl(omr & ~OMR_FDX, DE4X5_OMR); | ||
2628 | } | ||
2629 | irqs = STS_LNF | STS_LNP; | ||
2630 | irq_mask = IMR_LFM | IMR_LPM; | ||
2631 | sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400); | ||
2632 | if (sts < 0) { | ||
2633 | next_tick = sts & ~TIMER_CB; | ||
2634 | } else { | ||
2635 | if (!(sts & STS_LNP) && (lp->autosense == AUTO)) { | ||
2636 | if (inl(DE4X5_SISR) & SISR_NRA) { | ||
2637 | lp->media = AUI; /* Non selected port activity */ | ||
2638 | } else { | ||
2639 | lp->media = BNC; | ||
2640 | } | ||
2641 | next_tick = dc21041_autoconf(dev); | ||
2642 | } else { | ||
2643 | lp->local_state = 1; | ||
2644 | de4x5_init_connection(dev); | ||
2645 | } | ||
2646 | } | ||
2647 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2648 | lp->media = TP_SUSPECT; | ||
2649 | next_tick = 3000; | ||
2650 | } | ||
2651 | break; | ||
2652 | |||
2653 | case TP_SUSPECT: | ||
2654 | next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf); | ||
2655 | break; | ||
2656 | |||
2657 | case AUI: | ||
2658 | if (!lp->tx_enable) { | ||
2659 | if (lp->timeout < 0) { | ||
2660 | omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ | ||
2661 | outl(omr & ~OMR_FDX, DE4X5_OMR); | ||
2662 | } | ||
2663 | irqs = 0; | ||
2664 | irq_mask = 0; | ||
2665 | sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000); | ||
2666 | if (sts < 0) { | ||
2667 | next_tick = sts & ~TIMER_CB; | ||
2668 | } else { | ||
2669 | if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { | ||
2670 | lp->media = BNC; | ||
2671 | next_tick = dc21041_autoconf(dev); | ||
2672 | } else { | ||
2673 | lp->local_state = 1; | ||
2674 | de4x5_init_connection(dev); | ||
2675 | } | ||
2676 | } | ||
2677 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2678 | lp->media = AUI_SUSPECT; | ||
2679 | next_tick = 3000; | ||
2680 | } | ||
2681 | break; | ||
2682 | |||
2683 | case AUI_SUSPECT: | ||
2684 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf); | ||
2685 | break; | ||
2686 | |||
2687 | case BNC: | ||
2688 | switch (lp->local_state) { | ||
2689 | case 0: | ||
2690 | if (lp->timeout < 0) { | ||
2691 | omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ | ||
2692 | outl(omr & ~OMR_FDX, DE4X5_OMR); | ||
2693 | } | ||
2694 | irqs = 0; | ||
2695 | irq_mask = 0; | ||
2696 | sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000); | ||
2697 | if (sts < 0) { | ||
2698 | next_tick = sts & ~TIMER_CB; | ||
2699 | } else { | ||
2700 | lp->local_state++; /* Ensure media connected */ | ||
2701 | next_tick = dc21041_autoconf(dev); | ||
2702 | } | ||
2703 | break; | ||
2704 | |||
2705 | case 1: | ||
2706 | if (!lp->tx_enable) { | ||
2707 | if ((sts = ping_media(dev, 3000)) < 0) { | ||
2708 | next_tick = sts & ~TIMER_CB; | ||
2709 | } else { | ||
2710 | if (sts) { | ||
2711 | lp->local_state = 0; | ||
2712 | lp->media = NC; | ||
2713 | } else { | ||
2714 | de4x5_init_connection(dev); | ||
2715 | } | ||
2716 | } | ||
2717 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2718 | lp->media = BNC_SUSPECT; | ||
2719 | next_tick = 3000; | ||
2720 | } | ||
2721 | break; | ||
2722 | } | ||
2723 | break; | ||
2724 | |||
2725 | case BNC_SUSPECT: | ||
2726 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf); | ||
2727 | break; | ||
2728 | |||
2729 | case NC: | ||
2730 | omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */ | ||
2731 | outl(omr | OMR_FDX, DE4X5_OMR); | ||
2732 | reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */ | ||
2733 | if (lp->media != lp->c_media) { | ||
2734 | de4x5_dbg_media(dev); | ||
2735 | lp->c_media = lp->media; | ||
2736 | } | ||
2737 | lp->media = INIT; | ||
2738 | lp->tx_enable = false; | ||
2739 | break; | ||
2740 | } | ||
2741 | |||
2742 | return next_tick; | ||
2743 | } | ||
2744 | |||
2745 | /* | ||
2746 | ** Some autonegotiation chips are broken in that they do not return the | ||
2747 | ** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement | ||
2748 | ** register, except at the first power up negotiation. | ||
2749 | */ | ||
2750 | static int | ||
2751 | dc21140m_autoconf(struct net_device *dev) | ||
2752 | { | ||
2753 | struct de4x5_private *lp = netdev_priv(dev); | ||
2754 | int ana, anlpa, cap, cr, slnk, sr; | ||
2755 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2756 | u_long imr, omr, iobase = dev->base_addr; | ||
2757 | |||
2758 | switch(lp->media) { | ||
2759 | case INIT: | ||
2760 | if (lp->timeout < 0) { | ||
2761 | DISABLE_IRQs; | ||
2762 | lp->tx_enable = false; | ||
2763 | lp->linkOK = 0; | ||
2764 | de4x5_save_skbs(dev); /* Save non transmitted skb's */ | ||
2765 | } | ||
2766 | if ((next_tick = de4x5_reset_phy(dev)) < 0) { | ||
2767 | next_tick &= ~TIMER_CB; | ||
2768 | } else { | ||
2769 | if (lp->useSROM) { | ||
2770 | if (srom_map_media(dev) < 0) { | ||
2771 | lp->tcount++; | ||
2772 | return next_tick; | ||
2773 | } | ||
2774 | srom_exec(dev, lp->phy[lp->active].gep); | ||
2775 | if (lp->infoblock_media == ANS) { | ||
2776 | ana = lp->phy[lp->active].ana | MII_ANA_CSMA; | ||
2777 | mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | ||
2778 | } | ||
2779 | } else { | ||
2780 | lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */ | ||
2781 | SET_10Mb; | ||
2782 | if (lp->autosense == _100Mb) { | ||
2783 | lp->media = _100Mb; | ||
2784 | } else if (lp->autosense == _10Mb) { | ||
2785 | lp->media = _10Mb; | ||
2786 | } else if ((lp->autosense == AUTO) && | ||
2787 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { | ||
2788 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); | ||
2789 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); | ||
2790 | mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | ||
2791 | lp->media = ANS; | ||
2792 | } else if (lp->autosense == AUTO) { | ||
2793 | lp->media = SPD_DET; | ||
2794 | } else if (is_spd_100(dev) && is_100_up(dev)) { | ||
2795 | lp->media = _100Mb; | ||
2796 | } else { | ||
2797 | lp->media = NC; | ||
2798 | } | ||
2799 | } | ||
2800 | lp->local_state = 0; | ||
2801 | next_tick = dc21140m_autoconf(dev); | ||
2802 | } | ||
2803 | break; | ||
2804 | |||
2805 | case ANS: | ||
2806 | switch (lp->local_state) { | ||
2807 | case 0: | ||
2808 | if (lp->timeout < 0) { | ||
2809 | mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); | ||
2810 | } | ||
2811 | cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); | ||
2812 | if (cr < 0) { | ||
2813 | next_tick = cr & ~TIMER_CB; | ||
2814 | } else { | ||
2815 | if (cr) { | ||
2816 | lp->local_state = 0; | ||
2817 | lp->media = SPD_DET; | ||
2818 | } else { | ||
2819 | lp->local_state++; | ||
2820 | } | ||
2821 | next_tick = dc21140m_autoconf(dev); | ||
2822 | } | ||
2823 | break; | ||
2824 | |||
2825 | case 1: | ||
2826 | if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) { | ||
2827 | next_tick = sr & ~TIMER_CB; | ||
2828 | } else { | ||
2829 | lp->media = SPD_DET; | ||
2830 | lp->local_state = 0; | ||
2831 | if (sr) { /* Success! */ | ||
2832 | lp->tmp = MII_SR_ASSC; | ||
2833 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); | ||
2834 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | ||
2835 | if (!(anlpa & MII_ANLPA_RF) && | ||
2836 | (cap = anlpa & MII_ANLPA_TAF & ana)) { | ||
2837 | if (cap & MII_ANA_100M) { | ||
2838 | lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; | ||
2839 | lp->media = _100Mb; | ||
2840 | } else if (cap & MII_ANA_10M) { | ||
2841 | lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; | ||
2842 | |||
2843 | lp->media = _10Mb; | ||
2844 | } | ||
2845 | } | ||
2846 | } /* Auto Negotiation failed to finish */ | ||
2847 | next_tick = dc21140m_autoconf(dev); | ||
2848 | } /* Auto Negotiation failed to start */ | ||
2849 | break; | ||
2850 | } | ||
2851 | break; | ||
2852 | |||
2853 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ | ||
2854 | if (lp->timeout < 0) { | ||
2855 | lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : | ||
2856 | (~gep_rd(dev) & GEP_LNP)); | ||
2857 | SET_100Mb_PDET; | ||
2858 | } | ||
2859 | if ((slnk = test_for_100Mb(dev, 6500)) < 0) { | ||
2860 | next_tick = slnk & ~TIMER_CB; | ||
2861 | } else { | ||
2862 | if (is_spd_100(dev) && is_100_up(dev)) { | ||
2863 | lp->media = _100Mb; | ||
2864 | } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) { | ||
2865 | lp->media = _10Mb; | ||
2866 | } else { | ||
2867 | lp->media = NC; | ||
2868 | } | ||
2869 | next_tick = dc21140m_autoconf(dev); | ||
2870 | } | ||
2871 | break; | ||
2872 | |||
2873 | case _100Mb: /* Set 100Mb/s */ | ||
2874 | next_tick = 3000; | ||
2875 | if (!lp->tx_enable) { | ||
2876 | SET_100Mb; | ||
2877 | de4x5_init_connection(dev); | ||
2878 | } else { | ||
2879 | if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2880 | if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) { | ||
2881 | lp->media = INIT; | ||
2882 | lp->tcount++; | ||
2883 | next_tick = DE4X5_AUTOSENSE_MS; | ||
2884 | } | ||
2885 | } | ||
2886 | } | ||
2887 | break; | ||
2888 | |||
2889 | case BNC: | ||
2890 | case AUI: | ||
2891 | case _10Mb: /* Set 10Mb/s */ | ||
2892 | next_tick = 3000; | ||
2893 | if (!lp->tx_enable) { | ||
2894 | SET_10Mb; | ||
2895 | de4x5_init_connection(dev); | ||
2896 | } else { | ||
2897 | if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
2898 | if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { | ||
2899 | lp->media = INIT; | ||
2900 | lp->tcount++; | ||
2901 | next_tick = DE4X5_AUTOSENSE_MS; | ||
2902 | } | ||
2903 | } | ||
2904 | } | ||
2905 | break; | ||
2906 | |||
2907 | case NC: | ||
2908 | if (lp->media != lp->c_media) { | ||
2909 | de4x5_dbg_media(dev); | ||
2910 | lp->c_media = lp->media; | ||
2911 | } | ||
2912 | lp->media = INIT; | ||
2913 | lp->tx_enable = false; | ||
2914 | break; | ||
2915 | } | ||
2916 | |||
2917 | return next_tick; | ||
2918 | } | ||
2919 | |||
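In the ANS states above, the negotiated speed and duplex come from AND-ing our advertisement (MII_ANA) with the link partner's (MII_ANLPA) and taking the best common capability, after checking the remote fault bit. A stand-alone sketch of that resolution; the ADV_* masks are stand-ins loosely modelled on the 802.3 advertisement layout, not the driver's MII_ANA_* constants:

    /* Illustrative sketch of the capability resolution done in the ANS states. */
    #include <stdio.h>

    #define ADV_10M       0x0020
    #define ADV_10M_FDX   0x0040
    #define ADV_100M      0x0080
    #define ADV_100M_FDX  0x0100
    #define ADV_RF        0x2000   /* remote fault */

    static void resolve(unsigned ana, unsigned anlpa)
    {
        unsigned cap = ana & anlpa;              /* what both ends advertise */

        if (anlpa & ADV_RF) {
            printf("remote fault - no link\n");
        } else if (cap & (ADV_100M | ADV_100M_FDX)) {
            printf("100Mb/s, %s duplex\n", (cap & ADV_100M_FDX) ? "full" : "half");
        } else if (cap & (ADV_10M | ADV_10M_FDX)) {
            printf("10Mb/s, %s duplex\n", (cap & ADV_10M_FDX) ? "full" : "half");
        } else {
            printf("no common medium\n");
        }
    }

    int main(void)
    {
        resolve(ADV_10M | ADV_10M_FDX | ADV_100M | ADV_100M_FDX,  /* we advertise all */
                ADV_10M | ADV_100M);                              /* partner: half duplex only */
        return 0;
    }
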
2920 | /* | ||
2921 | ** This routine may be merged into dc21140m_autoconf() sometime as I'm | ||
2922 | ** changing how I figure out the media - but trying to keep it backwards | ||
2923 | ** compatible with the de500-xa and de500-aa. | ||
2924 | ** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock | ||
2925 | ** functions and set during de4x5_mac_port() and/or de4x5_reset_phy(). | ||
2926 | ** This routine just has to figure out whether 10Mb/s or 100Mb/s is | ||
2927 | ** active. | ||
2928 | ** When autonegotiation is working, the ANS part searches the SROM for | ||
2929 | ** the highest common speed (TP) link that both can run and if that can | ||
2930 | ** be full duplex. That infoblock is executed and then the link speed set. | ||
2931 | ** | ||
2932 | ** Only _10Mb and _100Mb are tested here. | ||
2933 | */ | ||
2934 | static int | ||
2935 | dc2114x_autoconf(struct net_device *dev) | ||
2936 | { | ||
2937 | struct de4x5_private *lp = netdev_priv(dev); | ||
2938 | u_long iobase = dev->base_addr; | ||
2939 | s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts; | ||
2940 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
2941 | |||
2942 | switch (lp->media) { | ||
2943 | case INIT: | ||
2944 | if (lp->timeout < 0) { | ||
2945 | DISABLE_IRQs; | ||
2946 | lp->tx_enable = false; | ||
2947 | lp->linkOK = 0; | ||
2948 | lp->timeout = -1; | ||
2949 | de4x5_save_skbs(dev); /* Save non transmitted skb's */ | ||
2950 | if (lp->params.autosense & ~AUTO) { | ||
2951 | srom_map_media(dev); /* Fixed media requested */ | ||
2952 | if (lp->media != lp->params.autosense) { | ||
2953 | lp->tcount++; | ||
2954 | lp->media = INIT; | ||
2955 | return next_tick; | ||
2956 | } | ||
2957 | lp->media = INIT; | ||
2958 | } | ||
2959 | } | ||
2960 | if ((next_tick = de4x5_reset_phy(dev)) < 0) { | ||
2961 | next_tick &= ~TIMER_CB; | ||
2962 | } else { | ||
2963 | if (lp->autosense == _100Mb) { | ||
2964 | lp->media = _100Mb; | ||
2965 | } else if (lp->autosense == _10Mb) { | ||
2966 | lp->media = _10Mb; | ||
2967 | } else if (lp->autosense == TP) { | ||
2968 | lp->media = TP; | ||
2969 | } else if (lp->autosense == BNC) { | ||
2970 | lp->media = BNC; | ||
2971 | } else if (lp->autosense == AUI) { | ||
2972 | lp->media = AUI; | ||
2973 | } else { | ||
2974 | lp->media = SPD_DET; | ||
2975 | if ((lp->infoblock_media == ANS) && | ||
2976 | ((sr=is_anc_capable(dev)) & MII_SR_ANC)) { | ||
2977 | ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA); | ||
2978 | ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM); | ||
2979 | mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | ||
2980 | lp->media = ANS; | ||
2981 | } | ||
2982 | } | ||
2983 | lp->local_state = 0; | ||
2984 | next_tick = dc2114x_autoconf(dev); | ||
2985 | } | ||
2986 | break; | ||
2987 | |||
2988 | case ANS: | ||
2989 | switch (lp->local_state) { | ||
2990 | case 0: | ||
2991 | if (lp->timeout < 0) { | ||
2992 | mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); | ||
2993 | } | ||
2994 | cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500); | ||
2995 | if (cr < 0) { | ||
2996 | next_tick = cr & ~TIMER_CB; | ||
2997 | } else { | ||
2998 | if (cr) { | ||
2999 | lp->local_state = 0; | ||
3000 | lp->media = SPD_DET; | ||
3001 | } else { | ||
3002 | lp->local_state++; | ||
3003 | } | ||
3004 | next_tick = dc2114x_autoconf(dev); | ||
3005 | } | ||
3006 | break; | ||
3007 | |||
3008 | case 1: | ||
3009 | sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000); | ||
3010 | if (sr < 0) { | ||
3011 | next_tick = sr & ~TIMER_CB; | ||
3012 | } else { | ||
3013 | lp->media = SPD_DET; | ||
3014 | lp->local_state = 0; | ||
3015 | if (sr) { /* Success! */ | ||
3016 | lp->tmp = MII_SR_ASSC; | ||
3017 | anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII); | ||
3018 | ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII); | ||
3019 | if (!(anlpa & MII_ANLPA_RF) && | ||
3020 | (cap = anlpa & MII_ANLPA_TAF & ana)) { | ||
3021 | if (cap & MII_ANA_100M) { | ||
3022 | lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0; | ||
3023 | lp->media = _100Mb; | ||
3024 | } else if (cap & MII_ANA_10M) { | ||
3025 | lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0; | ||
3026 | lp->media = _10Mb; | ||
3027 | } | ||
3028 | } | ||
3029 | } /* Auto Negotiation failed to finish */ | ||
3030 | next_tick = dc2114x_autoconf(dev); | ||
3031 | } /* Auto Negotiation failed to start */ | ||
3032 | break; | ||
3033 | } | ||
3034 | break; | ||
3035 | |||
3036 | case AUI: | ||
3037 | if (!lp->tx_enable) { | ||
3038 | if (lp->timeout < 0) { | ||
3039 | omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */ | ||
3040 | outl(omr & ~OMR_FDX, DE4X5_OMR); | ||
3041 | } | ||
3042 | irqs = 0; | ||
3043 | irq_mask = 0; | ||
3044 | sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); | ||
3045 | if (sts < 0) { | ||
3046 | next_tick = sts & ~TIMER_CB; | ||
3047 | } else { | ||
3048 | if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) { | ||
3049 | lp->media = BNC; | ||
3050 | next_tick = dc2114x_autoconf(dev); | ||
3051 | } else { | ||
3052 | lp->local_state = 1; | ||
3053 | de4x5_init_connection(dev); | ||
3054 | } | ||
3055 | } | ||
3056 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
3057 | lp->media = AUI_SUSPECT; | ||
3058 | next_tick = 3000; | ||
3059 | } | ||
3060 | break; | ||
3061 | |||
3062 | case AUI_SUSPECT: | ||
3063 | next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf); | ||
3064 | break; | ||
3065 | |||
3066 | case BNC: | ||
3067 | switch (lp->local_state) { | ||
3068 | case 0: | ||
3069 | if (lp->timeout < 0) { | ||
3070 | omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */ | ||
3071 | outl(omr & ~OMR_FDX, DE4X5_OMR); | ||
3072 | } | ||
3073 | irqs = 0; | ||
3074 | irq_mask = 0; | ||
3075 | sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000); | ||
3076 | if (sts < 0) { | ||
3077 | next_tick = sts & ~TIMER_CB; | ||
3078 | } else { | ||
3079 | lp->local_state++; /* Ensure media connected */ | ||
3080 | next_tick = dc2114x_autoconf(dev); | ||
3081 | } | ||
3082 | break; | ||
3083 | |||
3084 | case 1: | ||
3085 | if (!lp->tx_enable) { | ||
3086 | if ((sts = ping_media(dev, 3000)) < 0) { | ||
3087 | next_tick = sts & ~TIMER_CB; | ||
3088 | } else { | ||
3089 | if (sts) { | ||
3090 | lp->local_state = 0; | ||
3091 | lp->tcount++; | ||
3092 | lp->media = INIT; | ||
3093 | } else { | ||
3094 | de4x5_init_connection(dev); | ||
3095 | } | ||
3096 | } | ||
3097 | } else if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
3098 | lp->media = BNC_SUSPECT; | ||
3099 | next_tick = 3000; | ||
3100 | } | ||
3101 | break; | ||
3102 | } | ||
3103 | break; | ||
3104 | |||
3105 | case BNC_SUSPECT: | ||
3106 | next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf); | ||
3107 | break; | ||
3108 | |||
3109 | case SPD_DET: /* Choose 10Mb/s or 100Mb/s */ | ||
3110 | if (srom_map_media(dev) < 0) { | ||
3111 | lp->tcount++; | ||
3112 | lp->media = INIT; | ||
3113 | return next_tick; | ||
3114 | } | ||
3115 | if (lp->media == _100Mb) { | ||
3116 | if ((slnk = test_for_100Mb(dev, 6500)) < 0) { | ||
3117 | lp->media = SPD_DET; | ||
3118 | return slnk & ~TIMER_CB; | ||
3119 | } | ||
3120 | } else { | ||
3121 | if (wait_for_link(dev) < 0) { | ||
3122 | lp->media = SPD_DET; | ||
3123 | return PDET_LINK_WAIT; | ||
3124 | } | ||
3125 | } | ||
3126 | if (lp->media == ANS) { /* Do MII parallel detection */ | ||
3127 | if (is_spd_100(dev)) { | ||
3128 | lp->media = _100Mb; | ||
3129 | } else { | ||
3130 | lp->media = _10Mb; | ||
3131 | } | ||
3132 | next_tick = dc2114x_autoconf(dev); | ||
3133 | } else if (((lp->media == _100Mb) && is_100_up(dev)) || | ||
3134 | (((lp->media == _10Mb) || (lp->media == TP) || | ||
3135 | (lp->media == BNC) || (lp->media == AUI)) && | ||
3136 | is_10_up(dev))) { | ||
3137 | next_tick = dc2114x_autoconf(dev); | ||
3138 | } else { | ||
3139 | lp->tcount++; | ||
3140 | lp->media = INIT; | ||
3141 | } | ||
3142 | break; | ||
3143 | |||
3144 | case _10Mb: | ||
3145 | next_tick = 3000; | ||
3146 | if (!lp->tx_enable) { | ||
3147 | SET_10Mb; | ||
3148 | de4x5_init_connection(dev); | ||
3149 | } else { | ||
3150 | if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
3151 | if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) { | ||
3152 | lp->media = INIT; | ||
3153 | lp->tcount++; | ||
3154 | next_tick = DE4X5_AUTOSENSE_MS; | ||
3155 | } | ||
3156 | } | ||
3157 | } | ||
3158 | break; | ||
3159 | |||
3160 | case _100Mb: | ||
3161 | next_tick = 3000; | ||
3162 | if (!lp->tx_enable) { | ||
3163 | SET_100Mb; | ||
3164 | de4x5_init_connection(dev); | ||
3165 | } else { | ||
3166 | if (!lp->linkOK && (lp->autosense == AUTO)) { | ||
3167 | if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) { | ||
3168 | lp->media = INIT; | ||
3169 | lp->tcount++; | ||
3170 | next_tick = DE4X5_AUTOSENSE_MS; | ||
3171 | } | ||
3172 | } | ||
3173 | } | ||
3174 | break; | ||
3175 | |||
3176 | default: | ||
3177 | lp->tcount++; | ||
3178 | printk("Huh?: media:%02x\n", lp->media); | ||
3179 | lp->media = INIT; | ||
3180 | break; | ||
3181 | } | ||
3182 | |||
3183 | return next_tick; | ||
3184 | } | ||
3185 | |||
3186 | static int | ||
3187 | srom_autoconf(struct net_device *dev) | ||
3188 | { | ||
3189 | struct de4x5_private *lp = netdev_priv(dev); | ||
3190 | |||
3191 | return lp->infoleaf_fn(dev); | ||
3192 | } | ||
3193 | |||
3194 | /* | ||
3195 | ** This mapping keeps the original media codes and FDX flag unchanged. | ||
3196 | ** While it isn't strictly necessary, it helps me for the moment... | ||
3197 | ** The early return avoids a media state / SROM media space clash. | ||
3198 | */ | ||
3199 | static int | ||
3200 | srom_map_media(struct net_device *dev) | ||
3201 | { | ||
3202 | struct de4x5_private *lp = netdev_priv(dev); | ||
3203 | |||
3204 | lp->fdx = false; | ||
3205 | if (lp->infoblock_media == lp->media) | ||
3206 | return 0; | ||
3207 | |||
3208 | switch(lp->infoblock_media) { | ||
3209 | case SROM_10BASETF: | ||
3210 | if (!lp->params.fdx) return -1; | ||
3211 | lp->fdx = true; | ||
3212 | case SROM_10BASET: | ||
3213 | if (lp->params.fdx && !lp->fdx) return -1; | ||
3214 | if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) { | ||
3215 | lp->media = _10Mb; | ||
3216 | } else { | ||
3217 | lp->media = TP; | ||
3218 | } | ||
3219 | break; | ||
3220 | |||
3221 | case SROM_10BASE2: | ||
3222 | lp->media = BNC; | ||
3223 | break; | ||
3224 | |||
3225 | case SROM_10BASE5: | ||
3226 | lp->media = AUI; | ||
3227 | break; | ||
3228 | |||
3229 | case SROM_100BASETF: | ||
3230 | if (!lp->params.fdx) return -1; | ||
3231 | lp->fdx = true; | ||
3232 | case SROM_100BASET: | ||
3233 | if (lp->params.fdx && !lp->fdx) return -1; | ||
3234 | lp->media = _100Mb; | ||
3235 | break; | ||
3236 | |||
3237 | case SROM_100BASET4: | ||
3238 | lp->media = _100Mb; | ||
3239 | break; | ||
3240 | |||
3241 | case SROM_100BASEFF: | ||
3242 | if (!lp->params.fdx) return -1; | ||
3243 | lp->fdx = true; | ||
3244 | case SROM_100BASEF: | ||
3245 | if (lp->params.fdx && !lp->fdx) return -1; | ||
3246 | lp->media = _100Mb; | ||
3247 | break; | ||
3248 | |||
3249 | case ANS: | ||
3250 | lp->media = ANS; | ||
3251 | lp->fdx = lp->params.fdx; | ||
3252 | break; | ||
3253 | |||
3254 | default: | ||
3255 | printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, | ||
3256 | lp->infoblock_media); | ||
3257 | return -1; | ||
3258 | break; | ||
3259 | } | ||
3260 | |||
3261 | return 0; | ||
3262 | } | ||
3263 | |||
3264 | static void | ||
3265 | de4x5_init_connection(struct net_device *dev) | ||
3266 | { | ||
3267 | struct de4x5_private *lp = netdev_priv(dev); | ||
3268 | u_long iobase = dev->base_addr; | ||
3269 | u_long flags = 0; | ||
3270 | |||
3271 | if (lp->media != lp->c_media) { | ||
3272 | de4x5_dbg_media(dev); | ||
3273 | lp->c_media = lp->media; /* Stop scrolling media messages */ | ||
3274 | } | ||
3275 | |||
3276 | spin_lock_irqsave(&lp->lock, flags); | ||
3277 | de4x5_rst_desc_ring(dev); | ||
3278 | de4x5_setup_intr(dev); | ||
3279 | lp->tx_enable = true; | ||
3280 | spin_unlock_irqrestore(&lp->lock, flags); | ||
3281 | outl(POLL_DEMAND, DE4X5_TPD); | ||
3282 | |||
3283 | netif_wake_queue(dev); | ||
3284 | } | ||
3285 | |||
3286 | /* | ||
3287 | ** General PHY reset function. Some MII devices don't reset correctly | ||
3288 | ** since their MII address pins can float at voltages that are dependent | ||
3289 | ** on the signal pin use. Do a double reset to ensure a reset. | ||
3290 | */ | ||
3291 | static int | ||
3292 | de4x5_reset_phy(struct net_device *dev) | ||
3293 | { | ||
3294 | struct de4x5_private *lp = netdev_priv(dev); | ||
3295 | u_long iobase = dev->base_addr; | ||
3296 | int next_tick = 0; | ||
3297 | |||
3298 | if ((lp->useSROM) || (lp->phy[lp->active].id)) { | ||
3299 | if (lp->timeout < 0) { | ||
3300 | if (lp->useSROM) { | ||
3301 | if (lp->phy[lp->active].rst) { | ||
3302 | srom_exec(dev, lp->phy[lp->active].rst); | ||
3303 | srom_exec(dev, lp->phy[lp->active].rst); | ||
3304 | } else if (lp->rst) { /* Type 5 infoblock reset */ | ||
3305 | srom_exec(dev, lp->rst); | ||
3306 | srom_exec(dev, lp->rst); | ||
3307 | } | ||
3308 | } else { | ||
3309 | PHY_HARD_RESET; | ||
3310 | } | ||
3311 | if (lp->useMII) { | ||
3312 | mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); | ||
3313 | } | ||
3314 | } | ||
3315 | if (lp->useMII) { | ||
3316 | next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500); | ||
3317 | } | ||
3318 | } else if (lp->chipset == DC21140) { | ||
3319 | PHY_HARD_RESET; | ||
3320 | } | ||
3321 | |||
3322 | return next_tick; | ||
3323 | } | ||
3324 | |||
3325 | static int | ||
3326 | test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec) | ||
3327 | { | ||
3328 | struct de4x5_private *lp = netdev_priv(dev); | ||
3329 | u_long iobase = dev->base_addr; | ||
3330 | s32 sts, csr12; | ||
3331 | |||
3332 | if (lp->timeout < 0) { | ||
3333 | lp->timeout = msec/100; | ||
3334 | if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */ | ||
3335 | reset_init_sia(dev, csr13, csr14, csr15); | ||
3336 | } | ||
3337 | |||
3338 | /* set up the interrupt mask */ | ||
3339 | outl(irq_mask, DE4X5_IMR); | ||
3340 | |||
3341 | /* clear all pending interrupts */ | ||
3342 | sts = inl(DE4X5_STS); | ||
3343 | outl(sts, DE4X5_STS); | ||
3344 | |||
3345 | /* clear csr12 NRA and SRA bits */ | ||
3346 | if ((lp->chipset == DC21041) || lp->useSROM) { | ||
3347 | csr12 = inl(DE4X5_SISR); | ||
3348 | outl(csr12, DE4X5_SISR); | ||
3349 | } | ||
3350 | } | ||
3351 | |||
3352 | sts = inl(DE4X5_STS) & ~TIMER_CB; | ||
3353 | |||
3354 | if (!(sts & irqs) && --lp->timeout) { | ||
3355 | sts = 100 | TIMER_CB; | ||
3356 | } else { | ||
3357 | lp->timeout = -1; | ||
3358 | } | ||
3359 | |||
3360 | return sts; | ||
3361 | } | ||
3362 | |||
3363 | static int | ||
3364 | test_tp(struct net_device *dev, s32 msec) | ||
3365 | { | ||
3366 | struct de4x5_private *lp = netdev_priv(dev); | ||
3367 | u_long iobase = dev->base_addr; | ||
3368 | int sisr; | ||
3369 | |||
3370 | if (lp->timeout < 0) { | ||
3371 | lp->timeout = msec/100; | ||
3372 | } | ||
3373 | |||
3374 | sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR); | ||
3375 | |||
3376 | if (sisr && --lp->timeout) { | ||
3377 | sisr = 100 | TIMER_CB; | ||
3378 | } else { | ||
3379 | lp->timeout = -1; | ||
3380 | } | ||
3381 | |||
3382 | return sisr; | ||
3383 | } | ||
3384 | |||
3385 | /* | ||
3386 | ** Samples the 100Mb Link State Signal. The sample interval is important | ||
3387 | ** because too fast a rate can give erroneous results and confuse the | ||
3388 | ** speed sense algorithm. | ||
3389 | */ | ||
3390 | #define SAMPLE_INTERVAL 500 /* ms */ | ||
3391 | #define SAMPLE_DELAY 2000 /* ms */ | ||
3392 | static int | ||
3393 | test_for_100Mb(struct net_device *dev, int msec) | ||
3394 | { | ||
3395 | struct de4x5_private *lp = netdev_priv(dev); | ||
3396 | int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK); | ||
3397 | |||
3398 | if (lp->timeout < 0) { | ||
3399 | if ((msec/SAMPLE_INTERVAL) <= 0) return 0; | ||
3400 | if (msec > SAMPLE_DELAY) { | ||
3401 | lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL; | ||
3402 | gep = SAMPLE_DELAY | TIMER_CB; | ||
3403 | return gep; | ||
3404 | } else { | ||
3405 | lp->timeout = msec/SAMPLE_INTERVAL; | ||
3406 | } | ||
3407 | } | ||
3408 | |||
3409 | if (lp->phy[lp->active].id || lp->useSROM) { | ||
3410 | gep = is_100_up(dev) | is_spd_100(dev); | ||
3411 | } else { | ||
3412 | gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP)); | ||
3413 | } | ||
3414 | if (!(gep & ret) && --lp->timeout) { | ||
3415 | gep = SAMPLE_INTERVAL | TIMER_CB; | ||
3416 | } else { | ||
3417 | lp->timeout = -1; | ||
3418 | } | ||
3419 | |||
3420 | return gep; | ||
3421 | } | ||
3422 | |||
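The arithmetic above spreads a long 100Mb/s probe over timer callbacks: a budget longer than SAMPLE_DELAY first waits SAMPLE_DELAY ms for the link to settle, then samples every SAMPLE_INTERVAL ms until the budget is used up. A tiny sketch of that schedule, reusing the same 500/2000 ms constants purely for illustration:

    /* Sketch: splitting a probe budget into settle time plus periodic samples,
     * mirroring the bookkeeping in test_for_100Mb(). */
    #include <stdio.h>

    #define SAMPLE_INTERVAL 500    /* ms between samples */
    #define SAMPLE_DELAY    2000   /* ms initial settling time */

    int main(void)
    {
        int msec = 6500;   /* probe budget, as passed in from the SPD_DET state */

        if (msec > SAMPLE_DELAY) {
            int samples = (msec - SAMPLE_DELAY) / SAMPLE_INTERVAL;
            printf("settle %d ms, then up to %d samples every %d ms\n",
                   SAMPLE_DELAY, samples, SAMPLE_INTERVAL);
        } else {
            printf("short budget: sample immediately, every %d ms, up to %d times\n",
                   SAMPLE_INTERVAL, msec / SAMPLE_INTERVAL);
        }
        return 0;
    }
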
3423 | static int | ||
3424 | wait_for_link(struct net_device *dev) | ||
3425 | { | ||
3426 | struct de4x5_private *lp = netdev_priv(dev); | ||
3427 | |||
3428 | if (lp->timeout < 0) { | ||
3429 | lp->timeout = 1; | ||
3430 | } | ||
3431 | |||
3432 | if (lp->timeout--) { | ||
3433 | return TIMER_CB; | ||
3434 | } else { | ||
3435 | lp->timeout = -1; | ||
3436 | } | ||
3437 | |||
3438 | return 0; | ||
3439 | } | ||
3440 | |||
3441 | /* | ||
3442 | ** Poll an MII register, waiting for the masked bits to become set (pol == true) | ||
3443 | ** or cleared (pol == false), rescheduling until the msec timeout expires. | ||
3444 | */ | ||
3445 | static int | ||
3446 | test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec) | ||
3447 | { | ||
3448 | struct de4x5_private *lp = netdev_priv(dev); | ||
3449 | int test; | ||
3450 | u_long iobase = dev->base_addr; | ||
3451 | |||
3452 | if (lp->timeout < 0) { | ||
3453 | lp->timeout = msec/100; | ||
3454 | } | ||
3455 | |||
3456 | reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; | ||
3457 | test = (reg ^ (pol ? ~0 : 0)) & mask; | ||
3458 | |||
3459 | if (test && --lp->timeout) { | ||
3460 | reg = 100 | TIMER_CB; | ||
3461 | } else { | ||
3462 | lp->timeout = -1; | ||
3463 | } | ||
3464 | |||
3465 | return reg; | ||
3466 | } | ||
3467 | |||
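The (reg ^ (pol ? ~0 : 0)) & mask expression above is a compact way of polling for either edge: with pol true it keeps waiting until the masked bits become set, with pol false until they clear. A short sketch of just that test, with arbitrary bit values:

    /* Sketch of the polarity-selectable bit test used by test_mii_reg(). */
    #include <stdbool.h>
    #include <stdio.h>

    /* Returns non-zero while the caller should keep waiting. */
    static int still_waiting(unsigned reg, unsigned mask, bool pol)
    {
        return (reg ^ (pol ? ~0u : 0u)) & mask;
    }

    int main(void)
    {
        /* Wait for bit 0x20 to be set (pol = true): */
        printf("%d %d\n", !!still_waiting(0x0000, 0x20, true),    /* 1: keep waiting */
                          !!still_waiting(0x0020, 0x20, true));   /* 0: done */

        /* Wait for bit 0x8000 to clear (pol = false): */
        printf("%d %d\n", !!still_waiting(0x8000, 0x8000, false), /* 1: keep waiting */
                          !!still_waiting(0x0000, 0x8000, false));/* 0: done */
        return 0;
    }
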
3468 | static int | ||
3469 | is_spd_100(struct net_device *dev) | ||
3470 | { | ||
3471 | struct de4x5_private *lp = netdev_priv(dev); | ||
3472 | u_long iobase = dev->base_addr; | ||
3473 | int spd; | ||
3474 | |||
3475 | if (lp->useMII) { | ||
3476 | spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII); | ||
3477 | spd = ~(spd ^ lp->phy[lp->active].spd.value); | ||
3478 | spd &= lp->phy[lp->active].spd.mask; | ||
3479 | } else if (!lp->useSROM) { /* de500-xa */ | ||
3480 | spd = ((~gep_rd(dev)) & GEP_SLNK); | ||
3481 | } else { | ||
3482 | if ((lp->ibn == 2) || !lp->asBitValid) | ||
3483 | return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; | ||
3484 | |||
3485 | spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) | | ||
3486 | (lp->linkOK & ~lp->asBitValid); | ||
3487 | } | ||
3488 | |||
3489 | return spd; | ||
3490 | } | ||
3491 | |||
3492 | static int | ||
3493 | is_100_up(struct net_device *dev) | ||
3494 | { | ||
3495 | struct de4x5_private *lp = netdev_priv(dev); | ||
3496 | u_long iobase = dev->base_addr; | ||
3497 | |||
3498 | if (lp->useMII) { | ||
3499 | /* Double read for sticky bits & temporary drops */ | ||
3500 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); | ||
3501 | return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; | ||
3502 | } else if (!lp->useSROM) { /* de500-xa */ | ||
3503 | return (~gep_rd(dev)) & GEP_SLNK; | ||
3504 | } else { | ||
3505 | if ((lp->ibn == 2) || !lp->asBitValid) | ||
3506 | return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0; | ||
3507 | |||
3508 | return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | | ||
3509 | (lp->linkOK & ~lp->asBitValid); | ||
3510 | } | ||
3511 | } | ||
3512 | |||
3513 | static int | ||
3514 | is_10_up(struct net_device *dev) | ||
3515 | { | ||
3516 | struct de4x5_private *lp = netdev_priv(dev); | ||
3517 | u_long iobase = dev->base_addr; | ||
3518 | |||
3519 | if (lp->useMII) { | ||
3520 | /* Double read for sticky bits & temporary drops */ | ||
3521 | mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); | ||
3522 | return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS; | ||
3523 | } else if (!lp->useSROM) { /* de500-xa */ | ||
3524 | return (~gep_rd(dev)) & GEP_LNP; | ||
3525 | } else { | ||
3526 | if ((lp->ibn == 2) || !lp->asBitValid) | ||
3527 | return ((lp->chipset & ~0x00ff) == DC2114x) ? | ||
3528 | (~inl(DE4X5_SISR)&SISR_LS10): | ||
3529 | 0; | ||
3530 | |||
3531 | return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) | | ||
3532 | (lp->linkOK & ~lp->asBitValid); | ||
3533 | } | ||
3534 | } | ||
3535 | |||
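The SROM-driven branches of is_spd_100(), is_100_up() and is_10_up() all evaluate the same expression: when the info block says the general-purpose port carries a valid link bit, the result is that bit sampled and XOR-corrected for polarity; when it does not, the cached linkOK value is used instead. A sketch of that expression, assuming (as the code suggests) that the validity field acts as an all-ones or all-zeros mask; the bit values and the active-low reading are illustrative:

    /* Sketch of the mask-selected, polarity-corrected link test used when
     * media information comes from the SROM. */
    #include <stdio.h>

    static int link_up(unsigned gep, unsigned as_bit, unsigned as_polarity,
                       unsigned valid_mask, unsigned cached_link_ok)
    {
        return (valid_mask & (as_polarity ^ (gep & as_bit))) |
               (cached_link_ok & ~valid_mask);
    }

    int main(void)
    {
        /* Bit 0x40 is the link signal; polarity == bit reads it as active-low. */
        printf("%d\n", !!link_up(0x00, 0x40, 0x40, ~0u, 0)); /* bit low  -> link up */
        printf("%d\n", !!link_up(0x40, 0x40, 0x40, ~0u, 0)); /* bit high -> link down */

        /* The SROM says the bit is not valid: fall back to the cached state. */
        printf("%d\n", !!link_up(0x40, 0x40, 0x40, 0, 1));   /* cached linkOK -> up */
        return 0;
    }
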
3536 | static int | ||
3537 | is_anc_capable(struct net_device *dev) | ||
3538 | { | ||
3539 | struct de4x5_private *lp = netdev_priv(dev); | ||
3540 | u_long iobase = dev->base_addr; | ||
3541 | |||
3542 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { | ||
3543 | return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII); | ||
3544 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | ||
3545 | return (inl(DE4X5_SISR) & SISR_LPN) >> 12; | ||
3546 | } else { | ||
3547 | return 0; | ||
3548 | } | ||
3549 | } | ||
3550 | |||
3551 | /* | ||
3552 | ** Send a packet onto the media and watch for send errors that indicate the | ||
3553 | ** media is bad or unconnected. | ||
3554 | */ | ||
3555 | static int | ||
3556 | ping_media(struct net_device *dev, int msec) | ||
3557 | { | ||
3558 | struct de4x5_private *lp = netdev_priv(dev); | ||
3559 | u_long iobase = dev->base_addr; | ||
3560 | int sisr; | ||
3561 | |||
3562 | if (lp->timeout < 0) { | ||
3563 | lp->timeout = msec/100; | ||
3564 | |||
3565 | lp->tmp = lp->tx_new; /* Remember the ring position */ | ||
3566 | load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1); | ||
3567 | lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; | ||
3568 | outl(POLL_DEMAND, DE4X5_TPD); | ||
3569 | } | ||
3570 | |||
3571 | sisr = inl(DE4X5_SISR); | ||
3572 | |||
3573 | if ((!(sisr & SISR_NCR)) && | ||
3574 | ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && | ||
3575 | (--lp->timeout)) { | ||
3576 | sisr = 100 | TIMER_CB; | ||
3577 | } else { | ||
3578 | if ((!(sisr & SISR_NCR)) && | ||
3579 | !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) && | ||
3580 | lp->timeout) { | ||
3581 | sisr = 0; | ||
3582 | } else { | ||
3583 | sisr = 1; | ||
3584 | } | ||
3585 | lp->timeout = -1; | ||
3586 | } | ||
3587 | |||
3588 | return sisr; | ||
3589 | } | ||
3590 | |||
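The descriptor status test above relies on the ownership bit being the top bit of the 32-bit status word: while the chip still owns the loopback descriptor the signed value is negative, and once it is handed back the error bits decide whether the media passed. A sketch of that interpretation with assumed bit positions (the real T_OWN/TD_ES values live in de4x5.h):

    /* Sketch: reading a transmit descriptor status word the way ping_media()
     * does.  OWN and ERR_SUMMARY are assumed bit positions, not the driver's. */
    #include <stdint.h>
    #include <stdio.h>

    #define OWN          0x80000000u   /* assumption: chip still owns descriptor */
    #define ERR_SUMMARY  0x00008000u   /* assumption: a transmit error occurred */

    static const char *loopback_verdict(uint32_t status)
    {
        if (status & OWN)              /* the driver tests this as (s32)status < 0 */
            return "still pending";
        if (status & ERR_SUMMARY)      /* finished, but with errors */
            return "media bad or unterminated";
        return "media OK";
    }

    int main(void)
    {
        printf("%s\n", loopback_verdict(OWN));            /* still pending */
        printf("%s\n", loopback_verdict(ERR_SUMMARY));    /* media bad */
        printf("%s\n", loopback_verdict(0));              /* media OK */
        return 0;
    }
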
3591 | /* | ||
3592 | ** This function does 2 things: on Intels it kmalloc's another buffer to | ||
3593 | ** replace the one about to be passed up. On Alpha's it kmallocs a buffer | ||
3594 | ** into which the packet is copied. | ||
3595 | */ | ||
3596 | static struct sk_buff * | ||
3597 | de4x5_alloc_rx_buff(struct net_device *dev, int index, int len) | ||
3598 | { | ||
3599 | struct de4x5_private *lp = netdev_priv(dev); | ||
3600 | struct sk_buff *p; | ||
3601 | |||
3602 | #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY) | ||
3603 | struct sk_buff *ret; | ||
3604 | u_long i=0, tmp; | ||
3605 | |||
3606 | p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2); | ||
3607 | if (!p) return NULL; | ||
3608 | |||
3609 | tmp = virt_to_bus(p->data); | ||
3610 | i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp; | ||
3611 | skb_reserve(p, i); | ||
3612 | lp->rx_ring[index].buf = cpu_to_le32(tmp + i); | ||
3613 | |||
3614 | ret = lp->rx_skb[index]; | ||
3615 | lp->rx_skb[index] = p; | ||
3616 | |||
3617 | if ((u_long) ret > 1) { | ||
3618 | skb_put(ret, len); | ||
3619 | } | ||
3620 | |||
3621 | return ret; | ||
3622 | |||
3623 | #else | ||
3624 | if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */ | ||
3625 | |||
3626 | p = dev_alloc_skb(len + 2); | ||
3627 | if (!p) return NULL; | ||
3628 | |||
3629 | skb_reserve(p, 2); /* Align */ | ||
3630 | if (index < lp->rx_old) { /* Wrapped buffer */ | ||
3631 | short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ; | ||
3632 | memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen); | ||
3633 | memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen); | ||
3634 | } else { /* Linear buffer */ | ||
3635 | memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len); | ||
3636 | } | ||
3637 | |||
3638 | return p; | ||
3639 | #endif | ||
3640 | } | ||
3641 | |||
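Two details of the receive path above are worth spelling out: the skb data pointer is aligned by reserving ((addr + ALIGN) & ~ALIGN) - addr bytes, with ALIGN acting as an alignment-minus-one mask, and on the copy path a frame that wraps around the end of the fixed receive buffer area is copied out in two pieces. A compact user-space sketch of both, with made-up sizes and an illustrative wrap condition:

    /* Sketch of the alignment and wrap-around copy logic in de4x5_alloc_rx_buff().
     * ALIGN_MASK, RING_SLOTS and SLOT_SZ are illustrative values. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define ALIGN_MASK  0x3f            /* align to 64 bytes: mask = alignment - 1 */
    #define RING_SLOTS  4
    #define SLOT_SZ     8               /* tiny slots so the example stays short */

    int main(void)
    {
        /* 1: bytes to reserve so that (addr + skip) is 64-byte aligned. */
        uintptr_t addr = 0x1002a;
        uintptr_t skip = ((addr + ALIGN_MASK) & ~(uintptr_t)ALIGN_MASK) - addr;
        printf("reserve %lu bytes, data starts at 0x%lx\n",
               (unsigned long)skip, (unsigned long)(addr + skip));

        /* 2: copy a frame that wraps past the end of the receive buffer area. */
        char rx_bufs[RING_SLOTS * SLOT_SZ];
        char frame[12];
        int rx_old = 3, len = 12;       /* frame starts in the last slot */

        memset(rx_bufs, 'A', sizeof(rx_bufs));
        if (rx_old * SLOT_SZ + len > RING_SLOTS * SLOT_SZ) {   /* frame wraps */
            int tlen = (RING_SLOTS - rx_old) * SLOT_SZ;
            memcpy(frame, rx_bufs + rx_old * SLOT_SZ, tlen);
            memcpy(frame + tlen, rx_bufs, len - tlen);
            printf("copied %d bytes: %d from the tail, %d from the head\n",
                   len, tlen, len - tlen);
        } else {
            memcpy(frame, rx_bufs + rx_old * SLOT_SZ, len);
            printf("copied %d contiguous bytes\n", len);
        }
        return 0;
    }
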
3642 | static void | ||
3643 | de4x5_free_rx_buffs(struct net_device *dev) | ||
3644 | { | ||
3645 | struct de4x5_private *lp = netdev_priv(dev); | ||
3646 | int i; | ||
3647 | |||
3648 | for (i=0; i<lp->rxRingSize; i++) { | ||
3649 | if ((u_long) lp->rx_skb[i] > 1) { | ||
3650 | dev_kfree_skb(lp->rx_skb[i]); | ||
3651 | } | ||
3652 | lp->rx_ring[i].status = 0; | ||
3653 | lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */ | ||
3654 | } | ||
3655 | } | ||
3656 | |||
3657 | static void | ||
3658 | de4x5_free_tx_buffs(struct net_device *dev) | ||
3659 | { | ||
3660 | struct de4x5_private *lp = netdev_priv(dev); | ||
3661 | int i; | ||
3662 | |||
3663 | for (i=0; i<lp->txRingSize; i++) { | ||
3664 | if (lp->tx_skb[i]) | ||
3665 | de4x5_free_tx_buff(lp, i); | ||
3666 | lp->tx_ring[i].status = 0; | ||
3667 | } | ||
3668 | |||
3669 | /* Unload the locally queued packets */ | ||
3670 | __skb_queue_purge(&lp->cache.queue); | ||
3671 | } | ||
3672 | |||
3673 | /* | ||
3674 | ** When a user pulls a connection, the DECchip can end up in a | ||
3675 | ** 'running - waiting for end of transmission' state. This means that we | ||
3676 | ** have to perform a chip soft reset to ensure that we can synchronize | ||
3677 | ** the hardware and software and make any media probes using a loopback | ||
3678 | ** packet meaningful. | ||
3679 | */ | ||
3680 | static void | ||
3681 | de4x5_save_skbs(struct net_device *dev) | ||
3682 | { | ||
3683 | struct de4x5_private *lp = netdev_priv(dev); | ||
3684 | u_long iobase = dev->base_addr; | ||
3685 | s32 omr; | ||
3686 | |||
3687 | if (!lp->cache.save_cnt) { | ||
3688 | STOP_DE4X5; | ||
3689 | de4x5_tx(dev); /* Flush any sent skb's */ | ||
3690 | de4x5_free_tx_buffs(dev); | ||
3691 | de4x5_cache_state(dev, DE4X5_SAVE_STATE); | ||
3692 | de4x5_sw_reset(dev); | ||
3693 | de4x5_cache_state(dev, DE4X5_RESTORE_STATE); | ||
3694 | lp->cache.save_cnt++; | ||
3695 | START_DE4X5; | ||
3696 | } | ||
3697 | } | ||
3698 | |||
3699 | static void | ||
3700 | de4x5_rst_desc_ring(struct net_device *dev) | ||
3701 | { | ||
3702 | struct de4x5_private *lp = netdev_priv(dev); | ||
3703 | u_long iobase = dev->base_addr; | ||
3704 | int i; | ||
3705 | s32 omr; | ||
3706 | |||
3707 | if (lp->cache.save_cnt) { | ||
3708 | STOP_DE4X5; | ||
3709 | outl(lp->dma_rings, DE4X5_RRBA); | ||
3710 | outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc), | ||
3711 | DE4X5_TRBA); | ||
3712 | |||
3713 | lp->rx_new = lp->rx_old = 0; | ||
3714 | lp->tx_new = lp->tx_old = 0; | ||
3715 | |||
3716 | for (i = 0; i < lp->rxRingSize; i++) { | ||
3717 | lp->rx_ring[i].status = cpu_to_le32(R_OWN); | ||
3718 | } | ||
3719 | |||
3720 | for (i = 0; i < lp->txRingSize; i++) { | ||
3721 | lp->tx_ring[i].status = cpu_to_le32(0); | ||
3722 | } | ||
3723 | |||
3724 | barrier(); | ||
3725 | lp->cache.save_cnt--; | ||
3726 | START_DE4X5; | ||
3727 | } | ||
3728 | } | ||
3729 | |||
3730 | static void | ||
3731 | de4x5_cache_state(struct net_device *dev, int flag) | ||
3732 | { | ||
3733 | struct de4x5_private *lp = netdev_priv(dev); | ||
3734 | u_long iobase = dev->base_addr; | ||
3735 | |||
3736 | switch(flag) { | ||
3737 | case DE4X5_SAVE_STATE: | ||
3738 | lp->cache.csr0 = inl(DE4X5_BMR); | ||
3739 | lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR)); | ||
3740 | lp->cache.csr7 = inl(DE4X5_IMR); | ||
3741 | break; | ||
3742 | |||
3743 | case DE4X5_RESTORE_STATE: | ||
3744 | outl(lp->cache.csr0, DE4X5_BMR); | ||
3745 | outl(lp->cache.csr6, DE4X5_OMR); | ||
3746 | outl(lp->cache.csr7, DE4X5_IMR); | ||
3747 | if (lp->chipset == DC21140) { | ||
3748 | gep_wr(lp->cache.gepc, dev); | ||
3749 | gep_wr(lp->cache.gep, dev); | ||
3750 | } else { | ||
3751 | reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, | ||
3752 | lp->cache.csr15); | ||
3753 | } | ||
3754 | break; | ||
3755 | } | ||
3756 | } | ||
3757 | |||
3758 | static void | ||
3759 | de4x5_put_cache(struct net_device *dev, struct sk_buff *skb) | ||
3760 | { | ||
3761 | struct de4x5_private *lp = netdev_priv(dev); | ||
3762 | |||
3763 | __skb_queue_tail(&lp->cache.queue, skb); | ||
3764 | } | ||
3765 | |||
3766 | static void | ||
3767 | de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb) | ||
3768 | { | ||
3769 | struct de4x5_private *lp = netdev_priv(dev); | ||
3770 | |||
3771 | __skb_queue_head(&lp->cache.queue, skb); | ||
3772 | } | ||
3773 | |||
3774 | static struct sk_buff * | ||
3775 | de4x5_get_cache(struct net_device *dev) | ||
3776 | { | ||
3777 | struct de4x5_private *lp = netdev_priv(dev); | ||
3778 | |||
3779 | return __skb_dequeue(&lp->cache.queue); | ||
3780 | } | ||
3781 | |||
3782 | /* | ||
3783 | ** Check the Auto Negotiation State. Return OK when a link pass interrupt | ||
3784 | ** is received and the auto-negotiation status is NWAY OK. | ||
3785 | */ | ||
3786 | static int | ||
3787 | test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec) | ||
3788 | { | ||
3789 | struct de4x5_private *lp = netdev_priv(dev); | ||
3790 | u_long iobase = dev->base_addr; | ||
3791 | s32 sts, ans; | ||
3792 | |||
3793 | if (lp->timeout < 0) { | ||
3794 | lp->timeout = msec/100; | ||
3795 | outl(irq_mask, DE4X5_IMR); | ||
3796 | |||
3797 | /* clear all pending interrupts */ | ||
3798 | sts = inl(DE4X5_STS); | ||
3799 | outl(sts, DE4X5_STS); | ||
3800 | } | ||
3801 | |||
3802 | ans = inl(DE4X5_SISR) & SISR_ANS; | ||
3803 | sts = inl(DE4X5_STS) & ~TIMER_CB; | ||
3804 | |||
3805 | if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) { | ||
3806 | sts = 100 | TIMER_CB; | ||
3807 | } else { | ||
3808 | lp->timeout = -1; | ||
3809 | } | ||
3810 | |||
3811 | return sts; | ||
3812 | } | ||
3813 | |||
3814 | static void | ||
3815 | de4x5_setup_intr(struct net_device *dev) | ||
3816 | { | ||
3817 | struct de4x5_private *lp = netdev_priv(dev); | ||
3818 | u_long iobase = dev->base_addr; | ||
3819 | s32 imr, sts; | ||
3820 | |||
3821 | if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */ | ||
3822 | imr = 0; | ||
3823 | UNMASK_IRQs; | ||
3824 | sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */ | ||
3825 | outl(sts, DE4X5_STS); | ||
3826 | ENABLE_IRQs; | ||
3827 | } | ||
3828 | } | ||
3829 | |||
3830 | /* | ||
3831 | ** Reset the SIA and reinitialise CSR13-15, from the cached SROM values when an info block is in use, otherwise from the values passed in. | ||
3832 | */ | ||
3833 | static void | ||
3834 | reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15) | ||
3835 | { | ||
3836 | struct de4x5_private *lp = netdev_priv(dev); | ||
3837 | u_long iobase = dev->base_addr; | ||
3838 | |||
3839 | RESET_SIA; | ||
3840 | if (lp->useSROM) { | ||
3841 | if (lp->ibn == 3) { | ||
3842 | srom_exec(dev, lp->phy[lp->active].rst); | ||
3843 | srom_exec(dev, lp->phy[lp->active].gep); | ||
3844 | outl(1, DE4X5_SICR); | ||
3845 | return; | ||
3846 | } else { | ||
3847 | csr15 = lp->cache.csr15; | ||
3848 | csr14 = lp->cache.csr14; | ||
3849 | csr13 = lp->cache.csr13; | ||
3850 | outl(csr15 | lp->cache.gepc, DE4X5_SIGR); | ||
3851 | outl(csr15 | lp->cache.gep, DE4X5_SIGR); | ||
3852 | } | ||
3853 | } else { | ||
3854 | outl(csr15, DE4X5_SIGR); | ||
3855 | } | ||
3856 | outl(csr14, DE4X5_STRR); | ||
3857 | outl(csr13, DE4X5_SICR); | ||
3858 | |||
3859 | mdelay(10); | ||
3860 | } | ||
3861 | |||
3862 | /* | ||
3863 | ** Create a loopback ethernet packet | ||
3864 | */ | ||
3865 | static void | ||
3866 | create_packet(struct net_device *dev, char *frame, int len) | ||
3867 | { | ||
3868 | int i; | ||
3869 | char *buf = frame; | ||
3870 | |||
3871 | for (i=0; i<ETH_ALEN; i++) { /* Use this source address */ | ||
3872 | *buf++ = dev->dev_addr[i]; | ||
3873 | } | ||
3874 | for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */ | ||
3875 | *buf++ = dev->dev_addr[i]; | ||
3876 | } | ||
3877 | |||
3878 | *buf++ = 0; /* Packet length (2 bytes) */ | ||
3879 | *buf++ = 1; | ||
3880 | } | ||
3881 | |||
3882 | /* | ||
3883 | ** Look for a particular board name in the EISA configuration space | ||
3884 | */ | ||
3885 | static int | ||
3886 | EISA_signature(char *name, struct device *device) | ||
3887 | { | ||
3888 | int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); | ||
3889 | struct eisa_device *edev; | ||
3890 | |||
3891 | *name = '\0'; | ||
3892 | edev = to_eisa_device (device); | ||
3893 | i = edev->id.driver_data; | ||
3894 | |||
3895 | if (i >= 0 && i < siglen) { | ||
3896 | strcpy (name, de4x5_signatures[i]); | ||
3897 | status = 1; | ||
3898 | } | ||
3899 | |||
3900 | return status; /* return the device name string */ | ||
3901 | } | ||
3902 | |||
3903 | /* | ||
3904 | ** Look for a particular board name in the PCI configuration space | ||
3905 | */ | ||
3906 | static int | ||
3907 | PCI_signature(char *name, struct de4x5_private *lp) | ||
3908 | { | ||
3909 | int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures); | ||
3910 | |||
3911 | if (lp->chipset == DC21040) { | ||
3912 | strcpy(name, "DE434/5"); | ||
3913 | return status; | ||
3914 | } else { /* Search for a DEC name in the SROM */ | ||
3915 | int tmp = *((char *)&lp->srom + 19) * 3; | ||
3916 | strncpy(name, (char *)&lp->srom + 26 + tmp, 8); | ||
3917 | } | ||
3918 | name[8] = '\0'; | ||
3919 | for (i=0; i<siglen; i++) { | ||
3920 | if (strstr(name,de4x5_signatures[i])!=NULL) break; | ||
3921 | } | ||
3922 | if (i == siglen) { | ||
3923 | if (dec_only) { | ||
3924 | *name = '\0'; | ||
3925 | } else { /* Use chip name to avoid confusion */ | ||
3926 | strcpy(name, (((lp->chipset == DC21040) ? "DC21040" : | ||
3927 | ((lp->chipset == DC21041) ? "DC21041" : | ||
3928 | ((lp->chipset == DC21140) ? "DC21140" : | ||
3929 | ((lp->chipset == DC21142) ? "DC21142" : | ||
3930 | ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN" | ||
3931 | ))))))); | ||
3932 | } | ||
3933 | if (lp->chipset != DC21041) { | ||
3934 | lp->useSROM = true; /* card is not recognisably DEC */ | ||
3935 | } | ||
3936 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | ||
3937 | lp->useSROM = true; | ||
3938 | } | ||
3939 | |||
3940 | return status; | ||
3941 | } | ||
3942 | |||
3943 | /* | ||
3944 | ** Set up the Ethernet PROM counter to the start of the Ethernet address on | ||
3945 | ** the DC21040, else read the SROM for the other chips. | ||
3946 | ** The SROM may not be present in a multi-MAC card, so first read the | ||
3947 | ** MAC address and check for a bad address. If there is a bad one then exit | ||
3948 | ** immediately with the prior srom contents intact (the h/w address will | ||
3949 | ** be fixed up later). | ||
3950 | */ | ||
3951 | static void | ||
3952 | DevicePresent(struct net_device *dev, u_long aprom_addr) | ||
3953 | { | ||
3954 | int i, j=0; | ||
3955 | struct de4x5_private *lp = netdev_priv(dev); | ||
3956 | |||
3957 | if (lp->chipset == DC21040) { | ||
3958 | if (lp->bus == EISA) { | ||
3959 | enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */ | ||
3960 | } else { | ||
3961 | outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */ | ||
3962 | } | ||
3963 | } else { /* Read new srom */ | ||
3964 | u_short tmp; | ||
3965 | __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD); | ||
3966 | for (i=0; i<(ETH_ALEN>>1); i++) { | ||
3967 | tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i); | ||
3968 | j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */ | ||
3969 | *p = cpu_to_le16(tmp); | ||
3970 | } | ||
3971 | if (j == 0 || j == 3 * 0xffff) { | ||
3972 | /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */ | ||
3973 | return; | ||
3974 | } | ||
3975 | |||
3976 | p = (__le16 *)&lp->srom; | ||
3977 | for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) { | ||
3978 | tmp = srom_rd(aprom_addr, i); | ||
3979 | *p++ = cpu_to_le16(tmp); | ||
3980 | } | ||
3981 | de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); | ||
3982 | } | ||
3983 | } | ||
3984 | |||
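The running sum j above is only a cheap sanity filter: once the three 16-bit address words have been read, a sum of 0 can only come from 00:00:00:00:00:00 and a sum of 3 * 0xffff only from ff:ff:ff:ff:ff:ff, so either value means the SROM is absent or blank and the rest of it should not be trusted. A sketch of that check on its own, with an arbitrary example address:

    /* Sketch of the all-zeros / all-ones MAC filter used by DevicePresent(). */
    #include <stdbool.h>
    #include <stdio.h>

    static bool srom_addr_plausible(const unsigned short w[3])
    {
        unsigned int sum = w[0] + w[1] + w[2];

        /* 0 only if every word is 0x0000; 3 * 0xffff only if every word is 0xffff. */
        return sum != 0 && sum != 3 * 0xffff;
    }

    int main(void)
    {
        unsigned short blank[3]  = { 0x0000, 0x0000, 0x0000 };
        unsigned short erased[3] = { 0xffff, 0xffff, 0xffff };
        unsigned short real[3]   = { 0x0000, 0xf800, 0x1234 };  /* arbitrary example */

        printf("%d %d %d\n", srom_addr_plausible(blank),
                             srom_addr_plausible(erased),
                             srom_addr_plausible(real));
        return 0;
    }
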
3985 | /* | ||
3986 | ** Since the write on the Enet PROM register doesn't seem to reset the PROM | ||
3987 | ** pointer correctly (at least on my DE425 EISA card), this routine should do | ||
3988 | ** it...from depca.c. | ||
3989 | */ | ||
3990 | static void | ||
3991 | enet_addr_rst(u_long aprom_addr) | ||
3992 | { | ||
3993 | union { | ||
3994 | struct { | ||
3995 | u32 a; | ||
3996 | u32 b; | ||
3997 | } llsig; | ||
3998 | char Sig[sizeof(u32) << 1]; | ||
3999 | } dev; | ||
4000 | short sigLength=0; | ||
4001 | s8 data; | ||
4002 | int i, j; | ||
4003 | |||
4004 | dev.llsig.a = ETH_PROM_SIG; | ||
4005 | dev.llsig.b = ETH_PROM_SIG; | ||
4006 | sigLength = sizeof(u32) << 1; | ||
4007 | |||
4008 | for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) { | ||
4009 | data = inb(aprom_addr); | ||
4010 | if (dev.Sig[j] == data) { /* track signature */ | ||
4011 | j++; | ||
4012 | } else { /* lost signature; begin search again */ | ||
4013 | if (data == dev.Sig[0]) { /* rare case.... */ | ||
4014 | j=1; | ||
4015 | } else { | ||
4016 | j=0; | ||
4017 | } | ||
4018 | } | ||
4019 | } | ||
4020 | } | ||
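enet_addr_rst() is a small substring matcher over a serial byte stream: the match index advances while incoming bytes follow the signature and restarts otherwise, with a special case for a mismatching byte that itself begins a new signature. The same tracking loop over an in-memory buffer, as a stand-alone sketch (names and test data are illustrative):

    #include <stdio.h>

    /* Return the buffer index just past the first full match of sig[0..siglen-1],
     * or -1 if the signature never completes - the same tracking logic used to
     * re-synchronise the Ethernet address PROM pointer. */
    static int track_signature(const unsigned char *buf, int len,
                               const unsigned char *sig, int siglen)
    {
        int i, j = 0;

        for (i = 0; i < len && j < siglen; i++) {
            if (buf[i] == sig[j])
                j++;                      /* still tracking the signature */
            else
                j = (buf[i] == sig[0]);   /* lost it; maybe a new start   */
        }
        return j == siglen ? i : -1;
    }

    int main(void)
    {
        unsigned char sig[] = { 0xff, 0x00, 0x55, 0xaa };
        unsigned char buf[] = { 0x12, 0xff, 0x00, 0xff, 0x00, 0x55, 0xaa, 0x77 };

        printf("%d\n", track_signature(buf, sizeof(buf), sig, sizeof(sig))); /* 7 */
        return 0;
    }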
4021 | |||
4022 | /* | ||
4023 | ** For the bad status case with no SROM, add one to the previous | ||
4024 | ** address. However, the increment must be carried backwards in case one | ||
4025 | ** or more of the bytes is 0xff. Only the last 3 bytes need be checked, | ||
4026 | ** since the first three are invariant - assigned to an organisation. | ||
4027 | */ | ||
4028 | static int | ||
4029 | get_hw_addr(struct net_device *dev) | ||
4030 | { | ||
4031 | u_long iobase = dev->base_addr; | ||
4032 | int broken, i, k, tmp, status = 0; | ||
4033 | u_short j,chksum; | ||
4034 | struct de4x5_private *lp = netdev_priv(dev); | ||
4035 | |||
4036 | broken = de4x5_bad_srom(lp); | ||
4037 | |||
4038 | for (i=0,k=0,j=0;j<3;j++) { | ||
4039 | k <<= 1; | ||
4040 | if (k > 0xffff) k-=0xffff; | ||
4041 | |||
4042 | if (lp->bus == PCI) { | ||
4043 | if (lp->chipset == DC21040) { | ||
4044 | while ((tmp = inl(DE4X5_APROM)) < 0); | ||
4045 | k += (u_char) tmp; | ||
4046 | dev->dev_addr[i++] = (u_char) tmp; | ||
4047 | while ((tmp = inl(DE4X5_APROM)) < 0); | ||
4048 | k += (u_short) (tmp << 8); | ||
4049 | dev->dev_addr[i++] = (u_char) tmp; | ||
4050 | } else if (!broken) { | ||
4051 | dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; | ||
4052 | dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++; | ||
4053 | } else if ((broken == SMC) || (broken == ACCTON)) { | ||
4054 | dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; | ||
4055 | dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++; | ||
4056 | } | ||
4057 | } else { | ||
4058 | k += (u_char) (tmp = inb(EISA_APROM)); | ||
4059 | dev->dev_addr[i++] = (u_char) tmp; | ||
4060 | k += (u_short) ((tmp = inb(EISA_APROM)) << 8); | ||
4061 | dev->dev_addr[i++] = (u_char) tmp; | ||
4062 | } | ||
4063 | |||
4064 | if (k > 0xffff) k-=0xffff; | ||
4065 | } | ||
4066 | if (k == 0xffff) k=0; | ||
4067 | |||
4068 | if (lp->bus == PCI) { | ||
4069 | if (lp->chipset == DC21040) { | ||
4070 | while ((tmp = inl(DE4X5_APROM)) < 0); | ||
4071 | chksum = (u_char) tmp; | ||
4072 | while ((tmp = inl(DE4X5_APROM)) < 0); | ||
4073 | chksum |= (u_short) (tmp << 8); | ||
4074 | if ((k != chksum) && (dec_only)) status = -1; | ||
4075 | } | ||
4076 | } else { | ||
4077 | chksum = (u_char) inb(EISA_APROM); | ||
4078 | chksum |= (u_short) (inb(EISA_APROM) << 8); | ||
4079 | if ((k != chksum) && (dec_only)) status = -1; | ||
4080 | } | ||
4081 | |||
4082 | /* If possible, try to fix a broken card - SMC only so far */ | ||
4083 | srom_repair(dev, broken); | ||
4084 | |||
4085 | #ifdef CONFIG_PPC_PMAC | ||
4086 | /* | ||
4087 | ** If the address starts with 00 a0, we have to bit-reverse | ||
4088 | ** each byte of the address. | ||
4089 | */ | ||
4090 | if ( machine_is(powermac) && | ||
4091 | (dev->dev_addr[0] == 0) && | ||
4092 | (dev->dev_addr[1] == 0xa0) ) | ||
4093 | { | ||
4094 | for (i = 0; i < ETH_ALEN; ++i) | ||
4095 | { | ||
4096 | int x = dev->dev_addr[i]; | ||
4097 | x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4); | ||
4098 | x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2); | ||
4099 | dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1); | ||
4100 | } | ||
4101 | } | ||
4102 | #endif /* CONFIG_PPC_PMAC */ | ||
4103 | |||
4104 | /* Test for a bad enet address */ | ||
4105 | status = test_bad_enet(dev, status); | ||
4106 | |||
4107 | return status; | ||
4108 | } | ||
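The CONFIG_PPC_PMAC block in get_hw_addr() bit-reverses each address byte in three swap stages: nibbles, bit pairs, then single bits. The same three steps as a stand-alone helper with a quick check against a known value (the helper name is illustrative):

    #include <stdio.h>

    /* Reverse the bit order of one byte: nibbles, then pairs, then single bits. */
    static unsigned char bit_reverse(unsigned char x)
    {
        x = ((x & 0x0f) << 4) | ((x & 0xf0) >> 4);
        x = ((x & 0x33) << 2) | ((x & 0xcc) >> 2);
        x = ((x & 0x55) << 1) | ((x & 0xaa) >> 1);
        return x;
    }

    int main(void)
    {
        printf("%02x\n", bit_reverse(0xa0));   /* prints 05 */
        return 0;
    }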
4109 | |||
4110 | /* | ||
4111 | ** Test for enet addresses in the first 32 bytes. The built-in strncmp | ||
4112 | ** didn't seem to work here...? | ||
4113 | */ | ||
4114 | static int | ||
4115 | de4x5_bad_srom(struct de4x5_private *lp) | ||
4116 | { | ||
4117 | int i, status = 0; | ||
4118 | |||
4119 | for (i = 0; i < ARRAY_SIZE(enet_det); i++) { | ||
4120 | if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) && | ||
4121 | !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) { | ||
4122 | if (i == 0) { | ||
4123 | status = SMC; | ||
4124 | } else if (i == 1) { | ||
4125 | status = ACCTON; | ||
4126 | } | ||
4127 | break; | ||
4128 | } | ||
4129 | } | ||
4130 | |||
4131 | return status; | ||
4132 | } | ||
4133 | |||
4134 | static int | ||
4135 | de4x5_strncmp(char *a, char *b, int n) | ||
4136 | { | ||
4137 | int ret=0; | ||
4138 | |||
4139 | for (;n && !ret; n--) { | ||
4140 | ret = *a++ - *b++; | ||
4141 | } | ||
4142 | |||
4143 | return ret; | ||
4144 | } | ||
4145 | |||
4146 | static void | ||
4147 | srom_repair(struct net_device *dev, int card) | ||
4148 | { | ||
4149 | struct de4x5_private *lp = netdev_priv(dev); | ||
4150 | |||
4151 | switch(card) { | ||
4152 | case SMC: | ||
4153 | memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom)); | ||
4154 | memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN); | ||
4155 | memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100); | ||
4156 | lp->useSROM = true; | ||
4157 | break; | ||
4158 | } | ||
4159 | } | ||
4160 | |||
4161 | /* | ||
4162 | ** Assume that the IRQs do not follow the PCI spec - this seems | ||
4163 | ** to be true so far (2 for 2). | ||
4164 | */ | ||
4165 | static int | ||
4166 | test_bad_enet(struct net_device *dev, int status) | ||
4167 | { | ||
4168 | struct de4x5_private *lp = netdev_priv(dev); | ||
4169 | int i, tmp; | ||
4170 | |||
4171 | for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i]; | ||
4172 | if ((tmp == 0) || (tmp == 0x5fa)) { | ||
4173 | if ((lp->chipset == last.chipset) && | ||
4174 | (lp->bus_num == last.bus) && (lp->bus_num > 0)) { | ||
4175 | for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i]; | ||
4176 | for (i=ETH_ALEN-1; i>2; --i) { | ||
4177 | dev->dev_addr[i] += 1; | ||
4178 | if (dev->dev_addr[i] != 0) break; | ||
4179 | } | ||
4180 | for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i]; | ||
4181 | if (!an_exception(lp)) { | ||
4182 | dev->irq = last.irq; | ||
4183 | } | ||
4184 | |||
4185 | status = 0; | ||
4186 | } | ||
4187 | } else if (!status) { | ||
4188 | last.chipset = lp->chipset; | ||
4189 | last.bus = lp->bus_num; | ||
4190 | last.irq = dev->irq; | ||
4191 | for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i]; | ||
4192 | } | ||
4193 | |||
4194 | return status; | ||
4195 | } | ||
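As described above get_hw_addr(), a multi-port board without a usable SROM derives the next port's address by adding one to the previous MAC, carrying backwards from the last byte so a 0xff byte rolls over without touching the OUI. A stand-alone sketch of just that increment (name and test address are illustrative):

    #include <stdio.h>

    /* Add one to the last three (NIC-specific) bytes of a MAC address,
     * propagating the carry backwards; the OUI in bytes 0-2 is untouched. */
    static void next_mac(unsigned char addr[6])
    {
        int i;

        for (i = 5; i > 2; --i) {
            addr[i] += 1;
            if (addr[i] != 0)       /* no wrap, so no carry to propagate */
                break;
        }
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x00, 0xf8, 0x12, 0x34, 0xff };
        int i;

        next_mac(mac);              /* 00:00:f8:12:35:00 */
        for (i = 0; i < 6; i++)
            printf("%02x%c", mac[i], i == 5 ? '\n' : ':');
        return 0;
    }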
4196 | |||
4197 | /* | ||
4198 | ** List of board exceptions with correctly wired IRQs | ||
4199 | */ | ||
4200 | static int | ||
4201 | an_exception(struct de4x5_private *lp) | ||
4202 | { | ||
4203 | if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && | ||
4204 | (*(u_short *)lp->srom.sub_system_id == 0x95e0)) { | ||
4205 | return -1; | ||
4206 | } | ||
4207 | |||
4208 | return 0; | ||
4209 | } | ||
4210 | |||
4211 | /* | ||
4212 | ** SROM Read | ||
4213 | */ | ||
4214 | static short | ||
4215 | srom_rd(u_long addr, u_char offset) | ||
4216 | { | ||
4217 | sendto_srom(SROM_RD | SROM_SR, addr); | ||
4218 | |||
4219 | srom_latch(SROM_RD | SROM_SR | DT_CS, addr); | ||
4220 | srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr); | ||
4221 | srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset); | ||
4222 | |||
4223 | return srom_data(SROM_RD | SROM_SR | DT_CS, addr); | ||
4224 | } | ||
4225 | |||
4226 | static void | ||
4227 | srom_latch(u_int command, u_long addr) | ||
4228 | { | ||
4229 | sendto_srom(command, addr); | ||
4230 | sendto_srom(command | DT_CLK, addr); | ||
4231 | sendto_srom(command, addr); | ||
4232 | } | ||
4233 | |||
4234 | static void | ||
4235 | srom_command(u_int command, u_long addr) | ||
4236 | { | ||
4237 | srom_latch(command, addr); | ||
4238 | srom_latch(command, addr); | ||
4239 | srom_latch((command & 0x0000ff00) | DT_CS, addr); | ||
4240 | } | ||
4241 | |||
4242 | static void | ||
4243 | srom_address(u_int command, u_long addr, u_char offset) | ||
4244 | { | ||
4245 | int i, a; | ||
4246 | |||
4247 | a = offset << 2; | ||
4248 | for (i=0; i<6; i++, a <<= 1) { | ||
4249 | srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr); | ||
4250 | } | ||
4251 | udelay(1); | ||
4252 | |||
4253 | i = (getfrom_srom(addr) >> 3) & 0x01; | ||
4254 | } | ||
4255 | |||
4256 | static short | ||
4257 | srom_data(u_int command, u_long addr) | ||
4258 | { | ||
4259 | int i; | ||
4260 | short word = 0; | ||
4261 | s32 tmp; | ||
4262 | |||
4263 | for (i=0; i<16; i++) { | ||
4264 | sendto_srom(command | DT_CLK, addr); | ||
4265 | tmp = getfrom_srom(addr); | ||
4266 | sendto_srom(command, addr); | ||
4267 | |||
4268 | word = (word << 1) | ((tmp >> 3) & 0x01); | ||
4269 | } | ||
4270 | |||
4271 | sendto_srom(command & 0x0000ff00, addr); | ||
4272 | |||
4273 | return word; | ||
4274 | } | ||
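srom_data() clocks the EEPROM sixteen times and shifts each sampled data bit into the result MSB-first. The accumulation pattern on its own, fed from a canned bit array instead of the DT_CLK/inl() cycle (purely illustrative):

    #include <stdio.h>

    /* Assemble a 16-bit word from bits delivered MSB-first, one per "clock". */
    static unsigned short assemble_word(const int bits[16])
    {
        unsigned short word = 0;
        int i;

        for (i = 0; i < 16; i++)
            word = (word << 1) | (bits[i] & 1);

        return word;
    }

    int main(void)
    {
        /* 1010 0101 0000 1111 -> 0xa50f */
        int bits[16] = { 1,0,1,0, 0,1,0,1, 0,0,0,0, 1,1,1,1 };

        printf("%04x\n", assemble_word(bits));
        return 0;
    }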
4275 | |||
4276 | /* | ||
4277 | static void | ||
4278 | srom_busy(u_int command, u_long addr) | ||
4279 | { | ||
4280 | sendto_srom((command & 0x0000ff00) | DT_CS, addr); | ||
4281 | |||
4282 | while (!((getfrom_srom(addr) >> 3) & 0x01)) { | ||
4283 | mdelay(1); | ||
4284 | } | ||
4285 | |||
4286 | sendto_srom(command & 0x0000ff00, addr); | ||
4287 | } | ||
4288 | */ | ||
4289 | |||
4290 | static void | ||
4291 | sendto_srom(u_int command, u_long addr) | ||
4292 | { | ||
4293 | outl(command, addr); | ||
4294 | udelay(1); | ||
4295 | } | ||
4296 | |||
4297 | static int | ||
4298 | getfrom_srom(u_long addr) | ||
4299 | { | ||
4300 | s32 tmp; | ||
4301 | |||
4302 | tmp = inl(addr); | ||
4303 | udelay(1); | ||
4304 | |||
4305 | return tmp; | ||
4306 | } | ||
4307 | |||
4308 | static int | ||
4309 | srom_infoleaf_info(struct net_device *dev) | ||
4310 | { | ||
4311 | struct de4x5_private *lp = netdev_priv(dev); | ||
4312 | int i, count; | ||
4313 | u_char *p; | ||
4314 | |||
4315 | /* Find the infoleaf decoder function that matches this chipset */ | ||
4316 | for (i=0; i<INFOLEAF_SIZE; i++) { | ||
4317 | if (lp->chipset == infoleaf_array[i].chipset) break; | ||
4318 | } | ||
4319 | if (i == INFOLEAF_SIZE) { | ||
4320 | lp->useSROM = false; | ||
4321 | printk("%s: Cannot find correct chipset for SROM decoding!\n", | ||
4322 | dev->name); | ||
4323 | return -ENXIO; | ||
4324 | } | ||
4325 | |||
4326 | lp->infoleaf_fn = infoleaf_array[i].fn; | ||
4327 | |||
4328 | /* Find the information offset that this function should use */ | ||
4329 | count = *((u_char *)&lp->srom + 19); | ||
4330 | p = (u_char *)&lp->srom + 26; | ||
4331 | |||
4332 | if (count > 1) { | ||
4333 | for (i=count; i; --i, p+=3) { | ||
4334 | if (lp->device == *p) break; | ||
4335 | } | ||
4336 | if (i == 0) { | ||
4337 | lp->useSROM = false; | ||
4338 | printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", | ||
4339 | dev->name, lp->device); | ||
4340 | return -ENXIO; | ||
4341 | } | ||
4342 | } | ||
4343 | |||
4344 | lp->infoleaf_offset = get_unaligned_le16(p + 1); | ||
4345 | |||
4346 | return 0; | ||
4347 | } | ||
4348 | |||
4349 | /* | ||
4350 | ** This routine loads any type 1 or 3 MII info into the mii device | ||
4351 | ** struct and executes any type 5 code to reset PHY devices for this | ||
4352 | ** controller. | ||
4353 | ** The info for the MII devices will be valid since the index used | ||
4354 | ** will follow the discovery process from MII address 1-31 then 0. | ||
4355 | */ | ||
4356 | static void | ||
4357 | srom_init(struct net_device *dev) | ||
4358 | { | ||
4359 | struct de4x5_private *lp = netdev_priv(dev); | ||
4360 | u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; | ||
4361 | u_char count; | ||
4362 | |||
4363 | p+=2; | ||
4364 | if (lp->chipset == DC21140) { | ||
4365 | lp->cache.gepc = (*p++ | GEP_CTRL); | ||
4366 | gep_wr(lp->cache.gepc, dev); | ||
4367 | } | ||
4368 | |||
4369 | /* Block count */ | ||
4370 | count = *p++; | ||
4371 | |||
4372 | /* Jump the infoblocks to find types */ | ||
4373 | for (;count; --count) { | ||
4374 | if (*p < 128) { | ||
4375 | p += COMPACT_LEN; | ||
4376 | } else if (*(p+1) == 5) { | ||
4377 | type5_infoblock(dev, 1, p); | ||
4378 | p += ((*p & BLOCK_LEN) + 1); | ||
4379 | } else if (*(p+1) == 4) { | ||
4380 | p += ((*p & BLOCK_LEN) + 1); | ||
4381 | } else if (*(p+1) == 3) { | ||
4382 | type3_infoblock(dev, 1, p); | ||
4383 | p += ((*p & BLOCK_LEN) + 1); | ||
4384 | } else if (*(p+1) == 2) { | ||
4385 | p += ((*p & BLOCK_LEN) + 1); | ||
4386 | } else if (*(p+1) == 1) { | ||
4387 | type1_infoblock(dev, 1, p); | ||
4388 | p += ((*p & BLOCK_LEN) + 1); | ||
4389 | } else { | ||
4390 | p += ((*p & BLOCK_LEN) + 1); | ||
4391 | } | ||
4392 | } | ||
4393 | } | ||
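The loop in srom_init() steps over SROM info blocks: a leading byte below 128 marks a fixed-size compact block, anything else is an extended block whose low bits give its length and whose second byte gives its type. A stand-alone walker over a byte buffer using the same rules (the constants and demo data here are assumptions for illustration, not real SROM contents):

    #include <stdio.h>

    #define COMPACT_LEN 4       /* assumed fixed size of a compact block */
    #define BLOCK_LEN   0x7f    /* low bits of the leading byte = length */

    /* Print the type of each info block and return a pointer past the last one. */
    static const unsigned char *walk_blocks(const unsigned char *p, int count)
    {
        for (; count; --count) {
            if (*p < 128) {
                printf("compact block\n");
                p += COMPACT_LEN;
            } else {
                printf("type %d block, %d bytes\n", p[1], (*p & BLOCK_LEN) + 1);
                p += (*p & BLOCK_LEN) + 1;
            }
        }
        return p;
    }

    int main(void)
    {
        /* one compact block, then a 3-byte type 5 block (made-up payload) */
        unsigned char blocks[] = { 0x01, 0x00, 0x00, 0x00,
                                   0x82, 0x05, 0x00 };

        walk_blocks(blocks, 2);
        return 0;
    }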
4394 | |||
4395 | /* | ||
4396 | ** A generic routine that writes GEP control, data and reset information | ||
4397 | ** to the GEP register (21140) or csr15 GEP portion (2114[23]). | ||
4398 | */ | ||
4399 | static void | ||
4400 | srom_exec(struct net_device *dev, u_char *p) | ||
4401 | { | ||
4402 | struct de4x5_private *lp = netdev_priv(dev); | ||
4403 | u_long iobase = dev->base_addr; | ||
4404 | u_char count = (p ? *p++ : 0); | ||
4405 | u_short *w = (u_short *)p; | ||
4406 | |||
4407 | if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return; | ||
4408 | |||
4409 | if (lp->chipset != DC21140) RESET_SIA; | ||
4410 | |||
4411 | while (count--) { | ||
4412 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? | ||
4413 | *p++ : get_unaligned_le16(w++)), dev); | ||
4414 | mdelay(2); /* 2ms per action */ | ||
4415 | } | ||
4416 | |||
4417 | if (lp->chipset != DC21140) { | ||
4418 | outl(lp->cache.csr14, DE4X5_STRR); | ||
4419 | outl(lp->cache.csr13, DE4X5_SICR); | ||
4420 | } | ||
4421 | } | ||
4422 | |||
4423 | /* | ||
4424 | ** Basically this function is a NOP since it will never be called, | ||
4425 | ** unless I implement the DC21041 SROM functions. There's no need | ||
4426 | ** since the existing code will be satisfactory for all boards. | ||
4427 | */ | ||
4428 | static int | ||
4429 | dc21041_infoleaf(struct net_device *dev) | ||
4430 | { | ||
4431 | return DE4X5_AUTOSENSE_MS; | ||
4432 | } | ||
4433 | |||
4434 | static int | ||
4435 | dc21140_infoleaf(struct net_device *dev) | ||
4436 | { | ||
4437 | struct de4x5_private *lp = netdev_priv(dev); | ||
4438 | u_char count = 0; | ||
4439 | u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; | ||
4440 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
4441 | |||
4442 | /* Read the connection type */ | ||
4443 | p+=2; | ||
4444 | |||
4445 | /* GEP control */ | ||
4446 | lp->cache.gepc = (*p++ | GEP_CTRL); | ||
4447 | |||
4448 | /* Block count */ | ||
4449 | count = *p++; | ||
4450 | |||
4451 | /* Recursively figure out the info blocks */ | ||
4452 | if (*p < 128) { | ||
4453 | next_tick = dc_infoblock[COMPACT](dev, count, p); | ||
4454 | } else { | ||
4455 | next_tick = dc_infoblock[*(p+1)](dev, count, p); | ||
4456 | } | ||
4457 | |||
4458 | if (lp->tcount == count) { | ||
4459 | lp->media = NC; | ||
4460 | if (lp->media != lp->c_media) { | ||
4461 | de4x5_dbg_media(dev); | ||
4462 | lp->c_media = lp->media; | ||
4463 | } | ||
4464 | lp->media = INIT; | ||
4465 | lp->tcount = 0; | ||
4466 | lp->tx_enable = false; | ||
4467 | } | ||
4468 | |||
4469 | return next_tick & ~TIMER_CB; | ||
4470 | } | ||
4471 | |||
4472 | static int | ||
4473 | dc21142_infoleaf(struct net_device *dev) | ||
4474 | { | ||
4475 | struct de4x5_private *lp = netdev_priv(dev); | ||
4476 | u_char count = 0; | ||
4477 | u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; | ||
4478 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
4479 | |||
4480 | /* Read the connection type */ | ||
4481 | p+=2; | ||
4482 | |||
4483 | /* Block count */ | ||
4484 | count = *p++; | ||
4485 | |||
4486 | /* Recursively figure out the info blocks */ | ||
4487 | if (*p < 128) { | ||
4488 | next_tick = dc_infoblock[COMPACT](dev, count, p); | ||
4489 | } else { | ||
4490 | next_tick = dc_infoblock[*(p+1)](dev, count, p); | ||
4491 | } | ||
4492 | |||
4493 | if (lp->tcount == count) { | ||
4494 | lp->media = NC; | ||
4495 | if (lp->media != lp->c_media) { | ||
4496 | de4x5_dbg_media(dev); | ||
4497 | lp->c_media = lp->media; | ||
4498 | } | ||
4499 | lp->media = INIT; | ||
4500 | lp->tcount = 0; | ||
4501 | lp->tx_enable = false; | ||
4502 | } | ||
4503 | |||
4504 | return next_tick & ~TIMER_CB; | ||
4505 | } | ||
4506 | |||
4507 | static int | ||
4508 | dc21143_infoleaf(struct net_device *dev) | ||
4509 | { | ||
4510 | struct de4x5_private *lp = netdev_priv(dev); | ||
4511 | u_char count = 0; | ||
4512 | u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset; | ||
4513 | int next_tick = DE4X5_AUTOSENSE_MS; | ||
4514 | |||
4515 | /* Read the connection type */ | ||
4516 | p+=2; | ||
4517 | |||
4518 | /* Block count */ | ||
4519 | count = *p++; | ||
4520 | |||
4521 | /* Recursively figure out the info blocks */ | ||
4522 | if (*p < 128) { | ||
4523 | next_tick = dc_infoblock[COMPACT](dev, count, p); | ||
4524 | } else { | ||
4525 | next_tick = dc_infoblock[*(p+1)](dev, count, p); | ||
4526 | } | ||
4527 | if (lp->tcount == count) { | ||
4528 | lp->media = NC; | ||
4529 | if (lp->media != lp->c_media) { | ||
4530 | de4x5_dbg_media(dev); | ||
4531 | lp->c_media = lp->media; | ||
4532 | } | ||
4533 | lp->media = INIT; | ||
4534 | lp->tcount = 0; | ||
4535 | lp->tx_enable = false; | ||
4536 | } | ||
4537 | |||
4538 | return next_tick & ~TIMER_CB; | ||
4539 | } | ||
4540 | |||
4541 | /* | ||
4542 | ** The compact infoblock is only designed for DC21140[A] chips, so | ||
4543 | ** we'll reuse the dc21140m_autoconf function. Non MII media only. | ||
4544 | */ | ||
4545 | static int | ||
4546 | compact_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4547 | { | ||
4548 | struct de4x5_private *lp = netdev_priv(dev); | ||
4549 | u_char flags, csr6; | ||
4550 | |||
4551 | /* Recursively figure out the info blocks */ | ||
4552 | if (--count > lp->tcount) { | ||
4553 | if (*(p+COMPACT_LEN) < 128) { | ||
4554 | return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN); | ||
4555 | } else { | ||
4556 | return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN); | ||
4557 | } | ||
4558 | } | ||
4559 | |||
4560 | if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4561 | lp->ibn = COMPACT; | ||
4562 | lp->active = 0; | ||
4563 | gep_wr(lp->cache.gepc, dev); | ||
4564 | lp->infoblock_media = (*p++) & COMPACT_MC; | ||
4565 | lp->cache.gep = *p++; | ||
4566 | csr6 = *p++; | ||
4567 | flags = *p++; | ||
4568 | |||
4569 | lp->asBitValid = (flags & 0x80) ? 0 : -1; | ||
4570 | lp->defMedium = (flags & 0x40) ? -1 : 0; | ||
4571 | lp->asBit = 1 << ((csr6 >> 1) & 0x07); | ||
4572 | lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; | ||
4573 | lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); | ||
4574 | lp->useMII = false; | ||
4575 | |||
4576 | de4x5_switch_mac_port(dev); | ||
4577 | } | ||
4578 | |||
4579 | return dc21140m_autoconf(dev); | ||
4580 | } | ||
4581 | |||
4582 | /* | ||
4583 | ** This block describes non MII media for the DC21140[A] only. | ||
4584 | */ | ||
4585 | static int | ||
4586 | type0_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4587 | { | ||
4588 | struct de4x5_private *lp = netdev_priv(dev); | ||
4589 | u_char flags, csr6, len = (*p & BLOCK_LEN)+1; | ||
4590 | |||
4591 | /* Recursively figure out the info blocks */ | ||
4592 | if (--count > lp->tcount) { | ||
4593 | if (*(p+len) < 128) { | ||
4594 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4595 | } else { | ||
4596 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4597 | } | ||
4598 | } | ||
4599 | |||
4600 | if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4601 | lp->ibn = 0; | ||
4602 | lp->active = 0; | ||
4603 | gep_wr(lp->cache.gepc, dev); | ||
4604 | p+=2; | ||
4605 | lp->infoblock_media = (*p++) & BLOCK0_MC; | ||
4606 | lp->cache.gep = *p++; | ||
4607 | csr6 = *p++; | ||
4608 | flags = *p++; | ||
4609 | |||
4610 | lp->asBitValid = (flags & 0x80) ? 0 : -1; | ||
4611 | lp->defMedium = (flags & 0x40) ? -1 : 0; | ||
4612 | lp->asBit = 1 << ((csr6 >> 1) & 0x07); | ||
4613 | lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; | ||
4614 | lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); | ||
4615 | lp->useMII = false; | ||
4616 | |||
4617 | de4x5_switch_mac_port(dev); | ||
4618 | } | ||
4619 | |||
4620 | return dc21140m_autoconf(dev); | ||
4621 | } | ||
4622 | |||
4623 | /* These functions are under construction! */ | ||
4624 | |||
4625 | static int | ||
4626 | type1_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4627 | { | ||
4628 | struct de4x5_private *lp = netdev_priv(dev); | ||
4629 | u_char len = (*p & BLOCK_LEN)+1; | ||
4630 | |||
4631 | /* Recursively figure out the info blocks */ | ||
4632 | if (--count > lp->tcount) { | ||
4633 | if (*(p+len) < 128) { | ||
4634 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4635 | } else { | ||
4636 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4637 | } | ||
4638 | } | ||
4639 | |||
4640 | p += 2; | ||
4641 | if (lp->state == INITIALISED) { | ||
4642 | lp->ibn = 1; | ||
4643 | lp->active = *p++; | ||
4644 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); | ||
4645 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); | ||
4646 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; | ||
4647 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; | ||
4648 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; | ||
4649 | lp->phy[lp->active].ttm = get_unaligned_le16(p); | ||
4650 | return 0; | ||
4651 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4652 | lp->ibn = 1; | ||
4653 | lp->active = *p; | ||
4654 | lp->infoblock_csr6 = OMR_MII_100; | ||
4655 | lp->useMII = true; | ||
4656 | lp->infoblock_media = ANS; | ||
4657 | |||
4658 | de4x5_switch_mac_port(dev); | ||
4659 | } | ||
4660 | |||
4661 | return dc21140m_autoconf(dev); | ||
4662 | } | ||
4663 | |||
4664 | static int | ||
4665 | type2_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4666 | { | ||
4667 | struct de4x5_private *lp = netdev_priv(dev); | ||
4668 | u_char len = (*p & BLOCK_LEN)+1; | ||
4669 | |||
4670 | /* Recursively figure out the info blocks */ | ||
4671 | if (--count > lp->tcount) { | ||
4672 | if (*(p+len) < 128) { | ||
4673 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4674 | } else { | ||
4675 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4676 | } | ||
4677 | } | ||
4678 | |||
4679 | if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4680 | lp->ibn = 2; | ||
4681 | lp->active = 0; | ||
4682 | p += 2; | ||
4683 | lp->infoblock_media = (*p) & MEDIA_CODE; | ||
4684 | |||
4685 | if ((*p++) & EXT_FIELD) { | ||
4686 | lp->cache.csr13 = get_unaligned_le16(p); p += 2; | ||
4687 | lp->cache.csr14 = get_unaligned_le16(p); p += 2; | ||
4688 | lp->cache.csr15 = get_unaligned_le16(p); p += 2; | ||
4689 | } else { | ||
4690 | lp->cache.csr13 = CSR13; | ||
4691 | lp->cache.csr14 = CSR14; | ||
4692 | lp->cache.csr15 = CSR15; | ||
4693 | } | ||
4694 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; | ||
4695 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); | ||
4696 | lp->infoblock_csr6 = OMR_SIA; | ||
4697 | lp->useMII = false; | ||
4698 | |||
4699 | de4x5_switch_mac_port(dev); | ||
4700 | } | ||
4701 | |||
4702 | return dc2114x_autoconf(dev); | ||
4703 | } | ||
4704 | |||
4705 | static int | ||
4706 | type3_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4707 | { | ||
4708 | struct de4x5_private *lp = netdev_priv(dev); | ||
4709 | u_char len = (*p & BLOCK_LEN)+1; | ||
4710 | |||
4711 | /* Recursively figure out the info blocks */ | ||
4712 | if (--count > lp->tcount) { | ||
4713 | if (*(p+len) < 128) { | ||
4714 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4715 | } else { | ||
4716 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4717 | } | ||
4718 | } | ||
4719 | |||
4720 | p += 2; | ||
4721 | if (lp->state == INITIALISED) { | ||
4722 | lp->ibn = 3; | ||
4723 | lp->active = *p++; | ||
4724 | if (MOTO_SROM_BUG) lp->active = 0; | ||
4725 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); | ||
4726 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); | ||
4727 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; | ||
4728 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; | ||
4729 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; | ||
4730 | lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; | ||
4731 | lp->phy[lp->active].mci = *p; | ||
4732 | return 0; | ||
4733 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4734 | lp->ibn = 3; | ||
4735 | lp->active = *p; | ||
4736 | if (MOTO_SROM_BUG) lp->active = 0; | ||
4737 | lp->infoblock_csr6 = OMR_MII_100; | ||
4738 | lp->useMII = true; | ||
4739 | lp->infoblock_media = ANS; | ||
4740 | |||
4741 | de4x5_switch_mac_port(dev); | ||
4742 | } | ||
4743 | |||
4744 | return dc2114x_autoconf(dev); | ||
4745 | } | ||
4746 | |||
4747 | static int | ||
4748 | type4_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4749 | { | ||
4750 | struct de4x5_private *lp = netdev_priv(dev); | ||
4751 | u_char flags, csr6, len = (*p & BLOCK_LEN)+1; | ||
4752 | |||
4753 | /* Recursively figure out the info blocks */ | ||
4754 | if (--count > lp->tcount) { | ||
4755 | if (*(p+len) < 128) { | ||
4756 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4757 | } else { | ||
4758 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4759 | } | ||
4760 | } | ||
4761 | |||
4762 | if ((lp->media == INIT) && (lp->timeout < 0)) { | ||
4763 | lp->ibn = 4; | ||
4764 | lp->active = 0; | ||
4765 | p+=2; | ||
4766 | lp->infoblock_media = (*p++) & MEDIA_CODE; | ||
4767 | lp->cache.csr13 = CSR13; /* Hard coded defaults */ | ||
4768 | lp->cache.csr14 = CSR14; | ||
4769 | lp->cache.csr15 = CSR15; | ||
4770 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; | ||
4771 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; | ||
4772 | csr6 = *p++; | ||
4773 | flags = *p++; | ||
4774 | |||
4775 | lp->asBitValid = (flags & 0x80) ? 0 : -1; | ||
4776 | lp->defMedium = (flags & 0x40) ? -1 : 0; | ||
4777 | lp->asBit = 1 << ((csr6 >> 1) & 0x07); | ||
4778 | lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; | ||
4779 | lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); | ||
4780 | lp->useMII = false; | ||
4781 | |||
4782 | de4x5_switch_mac_port(dev); | ||
4783 | } | ||
4784 | |||
4785 | return dc2114x_autoconf(dev); | ||
4786 | } | ||
4787 | |||
4788 | /* | ||
4789 | ** This block type provides information for resetting external devices | ||
4790 | ** (chips) through the General Purpose Register. | ||
4791 | */ | ||
4792 | static int | ||
4793 | type5_infoblock(struct net_device *dev, u_char count, u_char *p) | ||
4794 | { | ||
4795 | struct de4x5_private *lp = netdev_priv(dev); | ||
4796 | u_char len = (*p & BLOCK_LEN)+1; | ||
4797 | |||
4798 | /* Recursively figure out the info blocks */ | ||
4799 | if (--count > lp->tcount) { | ||
4800 | if (*(p+len) < 128) { | ||
4801 | return dc_infoblock[COMPACT](dev, count, p+len); | ||
4802 | } else { | ||
4803 | return dc_infoblock[*(p+len+1)](dev, count, p+len); | ||
4804 | } | ||
4805 | } | ||
4806 | |||
4807 | /* Must be initializing to run this code */ | ||
4808 | if ((lp->state == INITIALISED) || (lp->media == INIT)) { | ||
4809 | p+=2; | ||
4810 | lp->rst = p; | ||
4811 | srom_exec(dev, lp->rst); | ||
4812 | } | ||
4813 | |||
4814 | return DE4X5_AUTOSENSE_MS; | ||
4815 | } | ||
4816 | |||
4817 | /* | ||
4818 | ** MII Read/Write | ||
4819 | */ | ||
4820 | |||
4821 | static int | ||
4822 | mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr) | ||
4823 | { | ||
4824 | mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */ | ||
4825 | mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */ | ||
4826 | mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */ | ||
4827 | mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ | ||
4828 | mii_address(phyreg, ioaddr); /* PHY Register to read */ | ||
4829 | mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */ | ||
4830 | |||
4831 | return mii_rdata(ioaddr); /* Read data */ | ||
4832 | } | ||
4833 | |||
4834 | static void | ||
4835 | mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr) | ||
4836 | { | ||
4837 | mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */ | ||
4838 | mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */ | ||
4839 | mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */ | ||
4840 | mii_address(phyaddr, ioaddr); /* PHY address to be accessed */ | ||
4841 | mii_address(phyreg, ioaddr); /* PHY Register to write */ | ||
4842 | mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ | ||
4843 | data = mii_swap(data, 16); /* Swap data bit ordering */ | ||
4844 | mii_wdata(data, 16, ioaddr); /* Write data */ | ||
4845 | } | ||
4846 | |||
4847 | static int | ||
4848 | mii_rdata(u_long ioaddr) | ||
4849 | { | ||
4850 | int i; | ||
4851 | s32 tmp = 0; | ||
4852 | |||
4853 | for (i=0; i<16; i++) { | ||
4854 | tmp <<= 1; | ||
4855 | tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr); | ||
4856 | } | ||
4857 | |||
4858 | return tmp; | ||
4859 | } | ||
4860 | |||
4861 | static void | ||
4862 | mii_wdata(int data, int len, u_long ioaddr) | ||
4863 | { | ||
4864 | int i; | ||
4865 | |||
4866 | for (i=0; i<len; i++) { | ||
4867 | sendto_mii(MII_MWR | MII_WR, data, ioaddr); | ||
4868 | data >>= 1; | ||
4869 | } | ||
4870 | } | ||
4871 | |||
4872 | static void | ||
4873 | mii_address(u_char addr, u_long ioaddr) | ||
4874 | { | ||
4875 | int i; | ||
4876 | |||
4877 | addr = mii_swap(addr, 5); | ||
4878 | for (i=0; i<5; i++) { | ||
4879 | sendto_mii(MII_MWR | MII_WR, addr, ioaddr); | ||
4880 | addr >>= 1; | ||
4881 | } | ||
4882 | } | ||
4883 | |||
4884 | static void | ||
4885 | mii_ta(u_long rw, u_long ioaddr) | ||
4886 | { | ||
4887 | if (rw == MII_STWR) { | ||
4888 | sendto_mii(MII_MWR | MII_WR, 1, ioaddr); | ||
4889 | sendto_mii(MII_MWR | MII_WR, 0, ioaddr); | ||
4890 | } else { | ||
4891 | getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ | ||
4892 | } | ||
4893 | } | ||
4894 | |||
4895 | static int | ||
4896 | mii_swap(int data, int len) | ||
4897 | { | ||
4898 | int i, tmp = 0; | ||
4899 | |||
4900 | for (i=0; i<len; i++) { | ||
4901 | tmp <<= 1; | ||
4902 | tmp |= (data & 1); | ||
4903 | data >>= 1; | ||
4904 | } | ||
4905 | |||
4906 | return tmp; | ||
4907 | } | ||
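mii_swap() reflects the low len bits of a value so that register contents written LSB-first by mii_wdata() appear on the wire MSB-first. The same reflection as a stand-alone helper (name illustrative):

    #include <stdio.h>

    /* Reflect the low "len" bits of a value (bit 0 becomes bit len-1, ...). */
    static unsigned int reflect_bits(unsigned int data, int len)
    {
        unsigned int out = 0;
        int i;

        for (i = 0; i < len; i++) {
            out = (out << 1) | (data & 1);
            data >>= 1;
        }
        return out;
    }

    int main(void)
    {
        printf("%04x\n", reflect_bits(0x0001, 16));  /* prints 8000 */
        return 0;
    }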
4908 | |||
4909 | static void | ||
4910 | sendto_mii(u32 command, int data, u_long ioaddr) | ||
4911 | { | ||
4912 | u32 j; | ||
4913 | |||
4914 | j = (data & 1) << 17; | ||
4915 | outl(command | j, ioaddr); | ||
4916 | udelay(1); | ||
4917 | outl(command | MII_MDC | j, ioaddr); | ||
4918 | udelay(1); | ||
4919 | } | ||
4920 | |||
4921 | static int | ||
4922 | getfrom_mii(u32 command, u_long ioaddr) | ||
4923 | { | ||
4924 | outl(command, ioaddr); | ||
4925 | udelay(1); | ||
4926 | outl(command | MII_MDC, ioaddr); | ||
4927 | udelay(1); | ||
4928 | |||
4929 | return (inl(ioaddr) >> 19) & 1; | ||
4930 | } | ||
4931 | |||
4932 | /* | ||
4933 | ** Here are 3 ways to calculate the OUI from the ID registers. | ||
4934 | */ | ||
4935 | static int | ||
4936 | mii_get_oui(u_char phyaddr, u_long ioaddr) | ||
4937 | { | ||
4938 | /* | ||
4939 | union { | ||
4940 | u_short reg; | ||
4941 | u_char breg[2]; | ||
4942 | } a; | ||
4943 | int i, r2, r3, ret=0;*/ | ||
4944 | int r2, r3; | ||
4945 | |||
4946 | /* Read r2 and r3 */ | ||
4947 | r2 = mii_rd(MII_ID0, phyaddr, ioaddr); | ||
4948 | r3 = mii_rd(MII_ID1, phyaddr, ioaddr); | ||
4949 | /* SEEQ and Cypress way * / | ||
4950 | / * Shuffle r2 and r3 * / | ||
4951 | a.reg=0; | ||
4952 | r3 = ((r3>>10)|(r2<<6))&0x0ff; | ||
4953 | r2 = ((r2>>2)&0x3fff); | ||
4954 | |||
4955 | / * Bit reverse r3 * / | ||
4956 | for (i=0;i<8;i++) { | ||
4957 | ret<<=1; | ||
4958 | ret |= (r3&1); | ||
4959 | r3>>=1; | ||
4960 | } | ||
4961 | |||
4962 | / * Bit reverse r2 * / | ||
4963 | for (i=0;i<16;i++) { | ||
4964 | a.reg<<=1; | ||
4965 | a.reg |= (r2&1); | ||
4966 | r2>>=1; | ||
4967 | } | ||
4968 | |||
4969 | / * Swap r2 bytes * / | ||
4970 | i=a.breg[0]; | ||
4971 | a.breg[0]=a.breg[1]; | ||
4972 | a.breg[1]=i; | ||
4973 | |||
4974 | return (a.reg<<8)|ret; */ /* SEEQ and Cypress way */ | ||
4975 | /* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */ | ||
4976 | return r2; /* (I did it) My way */ | ||
4977 | } | ||
4978 | |||
4979 | /* | ||
4980 | ** The SROM spec forces us to search addresses [1-31 0]. Bummer. | ||
4981 | */ | ||
4982 | static int | ||
4983 | mii_get_phy(struct net_device *dev) | ||
4984 | { | ||
4985 | struct de4x5_private *lp = netdev_priv(dev); | ||
4986 | u_long iobase = dev->base_addr; | ||
4987 | int i, j, k, n, limit=ARRAY_SIZE(phy_info); | ||
4988 | int id; | ||
4989 | |||
4990 | lp->active = 0; | ||
4991 | lp->useMII = true; | ||
4992 | |||
4993 | /* Search the MII address space for possible PHY devices */ | ||
4994 | for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) { | ||
4995 | lp->phy[lp->active].addr = i; | ||
4996 | if (i==0) n++; /* Count cycles */ | ||
4997 | while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */ | ||
4998 | id = mii_get_oui(i, DE4X5_MII); | ||
4999 | if ((id == 0) || (id == 65535)) continue; /* Valid ID? */ | ||
5000 | for (j=0; j<limit; j++) { /* Search PHY table */ | ||
5001 | if (id != phy_info[j].id) continue; /* ID match? */ | ||
5002 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); | ||
5003 | if (k < DE4X5_MAX_PHY) { | ||
5004 | memcpy((char *)&lp->phy[k], | ||
5005 | (char *)&phy_info[j], sizeof(struct phy_table)); | ||
5006 | lp->phy[k].addr = i; | ||
5007 | lp->mii_cnt++; | ||
5008 | lp->active++; | ||
5009 | } else { | ||
5010 | goto purgatory; /* Stop the search */ | ||
5011 | } | ||
5012 | break; | ||
5013 | } | ||
5014 | if ((j == limit) && (i < DE4X5_MAX_MII)) { | ||
5015 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++); | ||
5016 | lp->phy[k].addr = i; | ||
5017 | lp->phy[k].id = id; | ||
5018 | lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */ | ||
5019 | lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */ | ||
5020 | lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ | ||
5021 | lp->mii_cnt++; | ||
5022 | lp->active++; | ||
5023 | printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name); | ||
5024 | j = de4x5_debug; | ||
5025 | de4x5_debug |= DEBUG_MII; | ||
5026 | de4x5_dbg_mii(dev, k); | ||
5027 | de4x5_debug = j; | ||
5028 | printk("\n"); | ||
5029 | } | ||
5030 | } | ||
5031 | purgatory: | ||
5032 | lp->active = 0; | ||
5033 | if (lp->phy[0].id) { /* Reset the PHY devices */ | ||
5034 | for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/ | ||
5035 | mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII); | ||
5036 | while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST); | ||
5037 | |||
5038 | de4x5_dbg_mii(dev, k); | ||
5039 | } | ||
5040 | } | ||
5041 | if (!lp->mii_cnt) lp->useMII = false; | ||
5042 | |||
5043 | return lp->mii_cnt; | ||
5044 | } | ||
5045 | |||
5046 | static char * | ||
5047 | build_setup_frame(struct net_device *dev, int mode) | ||
5048 | { | ||
5049 | struct de4x5_private *lp = netdev_priv(dev); | ||
5050 | int i; | ||
5051 | char *pa = lp->setup_frame; | ||
5052 | |||
5053 | /* Initialise the setup frame */ | ||
5054 | if (mode == ALL) { | ||
5055 | memset(lp->setup_frame, 0, SETUP_FRAME_LEN); | ||
5056 | } | ||
5057 | |||
5058 | if (lp->setup_f == HASH_PERF) { | ||
5059 | for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) { | ||
5060 | *(pa + i) = dev->dev_addr[i]; /* Host address */ | ||
5061 | if (i & 0x01) pa += 2; | ||
5062 | } | ||
5063 | *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80; | ||
5064 | } else { | ||
5065 | for (i=0; i<ETH_ALEN; i++) { /* Host address */ | ||
5066 | *(pa + (i&1)) = dev->dev_addr[i]; | ||
5067 | if (i & 0x01) pa += 4; | ||
5068 | } | ||
5069 | for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */ | ||
5070 | *(pa + (i&1)) = (char) 0xff; | ||
5071 | if (i & 0x01) pa += 4; | ||
5072 | } | ||
5073 | } | ||
5074 | |||
5075 | return pa; /* Points to the next entry */ | ||
5076 | } | ||
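In the perfect-filtering branch of build_setup_frame() each address is spread over three 32-bit words of the setup frame, two bytes per word, which is why pa advances by 4 after every second byte. A sketch of that packing for one address (everything outside the layout shown above is illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Pack one MAC address into a perfect-filtering setup frame slot:
     * bytes 2i and 2i+1 land in the first two bytes of 32-bit word i. */
    static void pack_perfect(unsigned char *slot, const unsigned char mac[6])
    {
        int i;

        memset(slot, 0, 12);                /* three 32-bit words */
        for (i = 0; i < 3; i++) {
            slot[4 * i]     = mac[2 * i];
            slot[4 * i + 1] = mac[2 * i + 1];
        }
    }

    int main(void)
    {
        unsigned char slot[12], mac[6] = { 0x00, 0x00, 0xf8, 0x11, 0x22, 0x33 };
        int i;

        pack_perfect(slot, mac);
        for (i = 0; i < 12; i++)
            printf("%02x ", slot[i]);
        printf("\n");
        return 0;
    }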
5077 | |||
5078 | static void | ||
5079 | disable_ast(struct net_device *dev) | ||
5080 | { | ||
5081 | struct de4x5_private *lp = netdev_priv(dev); | ||
5082 | del_timer_sync(&lp->timer); | ||
5083 | } | ||
5084 | |||
5085 | static long | ||
5086 | de4x5_switch_mac_port(struct net_device *dev) | ||
5087 | { | ||
5088 | struct de4x5_private *lp = netdev_priv(dev); | ||
5089 | u_long iobase = dev->base_addr; | ||
5090 | s32 omr; | ||
5091 | |||
5092 | STOP_DE4X5; | ||
5093 | |||
5094 | /* Assert the OMR_PS bit in CSR6 */ | ||
5095 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | | ||
5096 | OMR_FDX)); | ||
5097 | omr |= lp->infoblock_csr6; | ||
5098 | if (omr & OMR_PS) omr |= OMR_HBD; | ||
5099 | outl(omr, DE4X5_OMR); | ||
5100 | |||
5101 | /* Soft Reset */ | ||
5102 | RESET_DE4X5; | ||
5103 | |||
5104 | /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */ | ||
5105 | if (lp->chipset == DC21140) { | ||
5106 | gep_wr(lp->cache.gepc, dev); | ||
5107 | gep_wr(lp->cache.gep, dev); | ||
5108 | } else if ((lp->chipset & ~0x0ff) == DC2114x) { | ||
5109 | reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15); | ||
5110 | } | ||
5111 | |||
5112 | /* Restore CSR6 */ | ||
5113 | outl(omr, DE4X5_OMR); | ||
5114 | |||
5115 | /* Reset CSR8 */ | ||
5116 | inl(DE4X5_MFC); | ||
5117 | |||
5118 | return omr; | ||
5119 | } | ||
5120 | |||
5121 | static void | ||
5122 | gep_wr(s32 data, struct net_device *dev) | ||
5123 | { | ||
5124 | struct de4x5_private *lp = netdev_priv(dev); | ||
5125 | u_long iobase = dev->base_addr; | ||
5126 | |||
5127 | if (lp->chipset == DC21140) { | ||
5128 | outl(data, DE4X5_GEP); | ||
5129 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | ||
5130 | outl((data<<16) | lp->cache.csr15, DE4X5_SIGR); | ||
5131 | } | ||
5132 | } | ||
5133 | |||
5134 | static int | ||
5135 | gep_rd(struct net_device *dev) | ||
5136 | { | ||
5137 | struct de4x5_private *lp = netdev_priv(dev); | ||
5138 | u_long iobase = dev->base_addr; | ||
5139 | |||
5140 | if (lp->chipset == DC21140) { | ||
5141 | return inl(DE4X5_GEP); | ||
5142 | } else if ((lp->chipset & ~0x00ff) == DC2114x) { | ||
5143 | return inl(DE4X5_SIGR) & 0x000fffff; | ||
5144 | } | ||
5145 | |||
5146 | return 0; | ||
5147 | } | ||
5148 | |||
5149 | static void | ||
5150 | yawn(struct net_device *dev, int state) | ||
5151 | { | ||
5152 | struct de4x5_private *lp = netdev_priv(dev); | ||
5153 | u_long iobase = dev->base_addr; | ||
5154 | |||
5155 | if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return; | ||
5156 | |||
5157 | if(lp->bus == EISA) { | ||
5158 | switch(state) { | ||
5159 | case WAKEUP: | ||
5160 | outb(WAKEUP, PCI_CFPM); | ||
5161 | mdelay(10); | ||
5162 | break; | ||
5163 | |||
5164 | case SNOOZE: | ||
5165 | outb(SNOOZE, PCI_CFPM); | ||
5166 | break; | ||
5167 | |||
5168 | case SLEEP: | ||
5169 | outl(0, DE4X5_SICR); | ||
5170 | outb(SLEEP, PCI_CFPM); | ||
5171 | break; | ||
5172 | } | ||
5173 | } else { | ||
5174 | struct pci_dev *pdev = to_pci_dev (lp->gendev); | ||
5175 | switch(state) { | ||
5176 | case WAKEUP: | ||
5177 | pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP); | ||
5178 | mdelay(10); | ||
5179 | break; | ||
5180 | |||
5181 | case SNOOZE: | ||
5182 | pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE); | ||
5183 | break; | ||
5184 | |||
5185 | case SLEEP: | ||
5186 | outl(0, DE4X5_SICR); | ||
5187 | pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP); | ||
5188 | break; | ||
5189 | } | ||
5190 | } | ||
5191 | } | ||
5192 | |||
5193 | static void | ||
5194 | de4x5_parse_params(struct net_device *dev) | ||
5195 | { | ||
5196 | struct de4x5_private *lp = netdev_priv(dev); | ||
5197 | char *p, *q, t; | ||
5198 | |||
5199 | lp->params.fdx = 0; | ||
5200 | lp->params.autosense = AUTO; | ||
5201 | |||
5202 | if (args == NULL) return; | ||
5203 | |||
5204 | if ((p = strstr(args, dev->name))) { | ||
5205 | if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p); | ||
5206 | t = *q; | ||
5207 | *q = '\0'; | ||
5208 | |||
5209 | if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1; | ||
5210 | |||
5211 | if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) { | ||
5212 | if (strstr(p, "TP_NW")) { | ||
5213 | lp->params.autosense = TP_NW; | ||
5214 | } else if (strstr(p, "TP")) { | ||
5215 | lp->params.autosense = TP; | ||
5216 | } else if (strstr(p, "BNC_AUI")) { | ||
5217 | lp->params.autosense = BNC; | ||
5218 | } else if (strstr(p, "BNC")) { | ||
5219 | lp->params.autosense = BNC; | ||
5220 | } else if (strstr(p, "AUI")) { | ||
5221 | lp->params.autosense = AUI; | ||
5222 | } else if (strstr(p, "10Mb")) { | ||
5223 | lp->params.autosense = _10Mb; | ||
5224 | } else if (strstr(p, "100Mb")) { | ||
5225 | lp->params.autosense = _100Mb; | ||
5226 | } else if (strstr(p, "AUTO")) { | ||
5227 | lp->params.autosense = AUTO; | ||
5228 | } | ||
5229 | } | ||
5230 | *q = t; | ||
5231 | } | ||
5232 | } | ||
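de4x5_parse_params() isolates this interface's options by temporarily NUL-terminating the args string at the next "eth" token and then matching keywords with strstr(); longer keywords such as "TP_NW" must be tested before their prefixes or they can never win. A stand-alone sketch of that keyword matching (the option table and default are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* Map an option string to a media keyword, checking longer names before
     * their prefixes so "TP_NW" is not swallowed by "TP". */
    static const char *match_autosense(const char *opts)
    {
        static const char *keys[] = { "TP_NW", "TP", "BNC_AUI", "BNC",
                                      "AUI", "100Mb", "10Mb", "AUTO" };
        size_t i;

        for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
            if (strstr(opts, keys[i]))
                return keys[i];

        return "AUTO";              /* default when nothing matches */
    }

    int main(void)
    {
        printf("%s\n", match_autosense("eth0:fdx autosense=TP_NW"));  /* TP_NW */
        return 0;
    }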
5233 | |||
5234 | static void | ||
5235 | de4x5_dbg_open(struct net_device *dev) | ||
5236 | { | ||
5237 | struct de4x5_private *lp = netdev_priv(dev); | ||
5238 | int i; | ||
5239 | |||
5240 | if (de4x5_debug & DEBUG_OPEN) { | ||
5241 | printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq); | ||
5242 | printk("\tphysical address: "); | ||
5243 | for (i=0;i<6;i++) { | ||
5244 | printk("%2.2x:",(short)dev->dev_addr[i]); | ||
5245 | } | ||
5246 | printk("\n"); | ||
5247 | printk("Descriptor head addresses:\n"); | ||
5248 | printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring); | ||
5249 | printk("Descriptor addresses:\nRX: "); | ||
5250 | for (i=0;i<lp->rxRingSize-1;i++){ | ||
5251 | if (i < 3) { | ||
5252 | printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status); | ||
5253 | } | ||
5254 | } | ||
5255 | printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status); | ||
5256 | printk("TX: "); | ||
5257 | for (i=0;i<lp->txRingSize-1;i++){ | ||
5258 | if (i < 3) { | ||
5259 | printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status); | ||
5260 | } | ||
5261 | } | ||
5262 | printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status); | ||
5263 | printk("Descriptor buffers:\nRX: "); | ||
5264 | for (i=0;i<lp->rxRingSize-1;i++){ | ||
5265 | if (i < 3) { | ||
5266 | printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf)); | ||
5267 | } | ||
5268 | } | ||
5269 | printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf)); | ||
5270 | printk("TX: "); | ||
5271 | for (i=0;i<lp->txRingSize-1;i++){ | ||
5272 | if (i < 3) { | ||
5273 | printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf)); | ||
5274 | } | ||
5275 | } | ||
5276 | printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); | ||
5277 | printk("Ring size:\nRX: %d\nTX: %d\n", | ||
5278 | (short)lp->rxRingSize, | ||
5279 | (short)lp->txRingSize); | ||
5280 | } | ||
5281 | } | ||
5282 | |||
5283 | static void | ||
5284 | de4x5_dbg_mii(struct net_device *dev, int k) | ||
5285 | { | ||
5286 | struct de4x5_private *lp = netdev_priv(dev); | ||
5287 | u_long iobase = dev->base_addr; | ||
5288 | |||
5289 | if (de4x5_debug & DEBUG_MII) { | ||
5290 | printk("\nMII device address: %d\n", lp->phy[k].addr); | ||
5291 | printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII)); | ||
5292 | printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII)); | ||
5293 | printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII)); | ||
5294 | printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII)); | ||
5295 | if (lp->phy[k].id != BROADCOM_T4) { | ||
5296 | printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII)); | ||
5297 | printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII)); | ||
5298 | } | ||
5299 | printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII)); | ||
5300 | if (lp->phy[k].id != BROADCOM_T4) { | ||
5301 | printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII)); | ||
5302 | printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII)); | ||
5303 | } else { | ||
5304 | printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); | ||
5305 | } | ||
5306 | } | ||
5307 | } | ||
5308 | |||
5309 | static void | ||
5310 | de4x5_dbg_media(struct net_device *dev) | ||
5311 | { | ||
5312 | struct de4x5_private *lp = netdev_priv(dev); | ||
5313 | |||
5314 | if (lp->media != lp->c_media) { | ||
5315 | if (de4x5_debug & DEBUG_MEDIA) { | ||
5316 | printk("%s: media is %s%s\n", dev->name, | ||
5317 | (lp->media == NC ? "unconnected, link down or incompatible connection" : | ||
5318 | (lp->media == TP ? "TP" : | ||
5319 | (lp->media == ANS ? "TP/Nway" : | ||
5320 | (lp->media == BNC ? "BNC" : | ||
5321 | (lp->media == AUI ? "AUI" : | ||
5322 | (lp->media == BNC_AUI ? "BNC/AUI" : | ||
5323 | (lp->media == EXT_SIA ? "EXT SIA" : | ||
5324 | (lp->media == _100Mb ? "100Mb/s" : | ||
5325 | (lp->media == _10Mb ? "10Mb/s" : | ||
5326 | "???" | ||
5327 | ))))))))), (lp->fdx?" full duplex.":".")); | ||
5328 | } | ||
5329 | lp->c_media = lp->media; | ||
5330 | } | ||
5331 | } | ||
5332 | |||
5333 | static void | ||
5334 | de4x5_dbg_srom(struct de4x5_srom *p) | ||
5335 | { | ||
5336 | int i; | ||
5337 | |||
5338 | if (de4x5_debug & DEBUG_SROM) { | ||
5339 | printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id)); | ||
5340 | printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id)); | ||
5341 | printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc)); | ||
5342 | printk("SROM version: %02x\n", (u_char)(p->version)); | ||
5343 | printk("# controllers: %02x\n", (u_char)(p->num_controllers)); | ||
5344 | |||
5345 | printk("Hardware Address: %pM\n", p->ieee_addr); | ||
5346 | printk("CRC checksum: %04x\n", (u_short)(p->chksum)); | ||
5347 | for (i=0; i<64; i++) { | ||
5348 | printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); | ||
5349 | } | ||
5350 | } | ||
5351 | } | ||
5352 | |||
5353 | static void | ||
5354 | de4x5_dbg_rx(struct sk_buff *skb, int len) | ||
5355 | { | ||
5356 | int i, j; | ||
5357 | |||
5358 | if (de4x5_debug & DEBUG_RX) { | ||
5359 | printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n", | ||
5360 | skb->data, &skb->data[6], | ||
5361 | (u_char)skb->data[12], | ||
5362 | (u_char)skb->data[13], | ||
5363 | len); | ||
5364 | for (j=0; len>0;j+=16, len-=16) { | ||
5365 | printk(" %03x: ",j); | ||
5366 | for (i=0; i<16 && i<len; i++) { | ||
5367 | printk("%02x ",(u_char)skb->data[i+j]); | ||
5368 | } | ||
5369 | printk("\n"); | ||
5370 | } | ||
5371 | } | ||
5372 | } | ||
5373 | |||
5374 | /* | ||
5375 | ** Perform IOCTL call functions here. Some are privileged operations and the | ||
5376 | ** effective uid is checked in those cases. In the normal course of events | ||
5377 | ** this function is only used for my testing. | ||
5378 | */ | ||
5379 | static int | ||
5380 | de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
5381 | { | ||
5382 | struct de4x5_private *lp = netdev_priv(dev); | ||
5383 | struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru; | ||
5384 | u_long iobase = dev->base_addr; | ||
5385 | int i, j, status = 0; | ||
5386 | s32 omr; | ||
5387 | union { | ||
5388 | u8 addr[144]; | ||
5389 | u16 sval[72]; | ||
5390 | u32 lval[36]; | ||
5391 | } tmp; | ||
5392 | u_long flags = 0; | ||
5393 | |||
5394 | switch(ioc->cmd) { | ||
5395 | case DE4X5_GET_HWADDR: /* Get the hardware address */ | ||
5396 | ioc->len = ETH_ALEN; | ||
5397 | for (i=0; i<ETH_ALEN; i++) { | ||
5398 | tmp.addr[i] = dev->dev_addr[i]; | ||
5399 | } | ||
5400 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; | ||
5401 | break; | ||
5402 | |||
5403 | case DE4X5_SET_HWADDR: /* Set the hardware address */ | ||
5404 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5405 | if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT; | ||
5406 | if (netif_queue_stopped(dev)) | ||
5407 | return -EBUSY; | ||
5408 | netif_stop_queue(dev); | ||
5409 | for (i=0; i<ETH_ALEN; i++) { | ||
5410 | dev->dev_addr[i] = tmp.addr[i]; | ||
5411 | } | ||
5412 | build_setup_frame(dev, PHYS_ADDR_ONLY); | ||
5413 | /* Set up the descriptor and give ownership to the card */ | ||
5414 | load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | | ||
5415 | SETUP_FRAME_LEN, (struct sk_buff *)1); | ||
5416 | lp->tx_new = (lp->tx_new + 1) % lp->txRingSize; | ||
5417 | outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ | ||
5418 | netif_wake_queue(dev); /* Unlock the TX ring */ | ||
5419 | break; | ||
5420 | |||
5421 | case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */ | ||
5422 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5423 | printk("%s: Boo!\n", dev->name); | ||
5424 | break; | ||
5425 | |||
5426 | case DE4X5_MCA_EN: /* Enable pass all multicast addressing */ | ||
5427 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5428 | omr = inl(DE4X5_OMR); | ||
5429 | omr |= OMR_PM; | ||
5430 | outl(omr, DE4X5_OMR); | ||
5431 | break; | ||
5432 | |||
5433 | case DE4X5_GET_STATS: /* Get the driver statistics */ | ||
5434 | { | ||
5435 | struct pkt_stats statbuf; | ||
5436 | ioc->len = sizeof(statbuf); | ||
5437 | spin_lock_irqsave(&lp->lock, flags); | ||
5438 | memcpy(&statbuf, &lp->pktStats, ioc->len); | ||
5439 | spin_unlock_irqrestore(&lp->lock, flags); | ||
5440 | if (copy_to_user(ioc->data, &statbuf, ioc->len)) | ||
5441 | return -EFAULT; | ||
5442 | break; | ||
5443 | } | ||
5444 | case DE4X5_CLR_STATS: /* Zero out the driver statistics */ | ||
5445 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5446 | spin_lock_irqsave(&lp->lock, flags); | ||
5447 | memset(&lp->pktStats, 0, sizeof(lp->pktStats)); | ||
5448 | spin_unlock_irqrestore(&lp->lock, flags); | ||
5449 | break; | ||
5450 | |||
5451 | case DE4X5_GET_OMR: /* Get the OMR Register contents */ | ||
5452 | tmp.addr[0] = inl(DE4X5_OMR); | ||
5453 | if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT; | ||
5454 | break; | ||
5455 | |||
5456 | case DE4X5_SET_OMR: /* Set the OMR Register contents */ | ||
5457 | if (!capable(CAP_NET_ADMIN)) return -EPERM; | ||
5458 | if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT; | ||
5459 | outl(tmp.addr[0], DE4X5_OMR); | ||
5460 | break; | ||
5461 | |||
5462 | case DE4X5_GET_REG: /* Get the DE4X5 Registers */ | ||
5463 | j = 0; | ||
5464 | tmp.lval[0] = inl(DE4X5_STS); j+=4; | ||
5465 | tmp.lval[1] = inl(DE4X5_BMR); j+=4; | ||
5466 | tmp.lval[2] = inl(DE4X5_IMR); j+=4; | ||
5467 | tmp.lval[3] = inl(DE4X5_OMR); j+=4; | ||
5468 | tmp.lval[4] = inl(DE4X5_SISR); j+=4; | ||
5469 | tmp.lval[5] = inl(DE4X5_SICR); j+=4; | ||
5470 | tmp.lval[6] = inl(DE4X5_STRR); j+=4; | ||
5471 | tmp.lval[7] = inl(DE4X5_SIGR); j+=4; | ||
5472 | ioc->len = j; | ||
5473 | if (copy_to_user(ioc->data, tmp.lval, ioc->len)) | ||
5474 | return -EFAULT; | ||
5475 | break; | ||
5476 | |||
5477 | #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ | ||
5478 | /* | ||
5479 | case DE4X5_DUMP: | ||
5480 | j = 0; | ||
5481 | tmp.addr[j++] = dev->irq; | ||
5482 | for (i=0; i<ETH_ALEN; i++) { | ||
5483 | tmp.addr[j++] = dev->dev_addr[i]; | ||
5484 | } | ||
5485 | tmp.addr[j++] = lp->rxRingSize; | ||
5486 | tmp.lval[j>>2] = (long)lp->rx_ring; j+=4; | ||
5487 | tmp.lval[j>>2] = (long)lp->tx_ring; j+=4; | ||
5488 | |||
5489 | for (i=0;i<lp->rxRingSize-1;i++){ | ||
5490 | if (i < 3) { | ||
5491 | tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; | ||
5492 | } | ||
5493 | } | ||
5494 | tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4; | ||
5495 | for (i=0;i<lp->txRingSize-1;i++){ | ||
5496 | if (i < 3) { | ||
5497 | tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; | ||
5498 | } | ||
5499 | } | ||
5500 | tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4; | ||
5501 | |||
5502 | for (i=0;i<lp->rxRingSize-1;i++){ | ||
5503 | if (i < 3) { | ||
5504 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; | ||
5505 | } | ||
5506 | } | ||
5507 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4; | ||
5508 | for (i=0;i<lp->txRingSize-1;i++){ | ||
5509 | if (i < 3) { | ||
5510 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; | ||
5511 | } | ||
5512 | } | ||
5513 | tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4; | ||
5514 | |||
5515 | for (i=0;i<lp->rxRingSize;i++){ | ||
5516 | tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4; | ||
5517 | } | ||
5518 | for (i=0;i<lp->txRingSize;i++){ | ||
5519 | tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4; | ||
5520 | } | ||
5521 | |||
5522 | tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4; | ||
5523 | tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4; | ||
5524 | tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4; | ||
5525 | tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4; | ||
5526 | tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4; | ||
5527 | tmp.lval[j>>2] = inl(DE4X5_STS); j+=4; | ||
5528 | tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4; | ||
5529 | tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4; | ||
5530 | tmp.lval[j>>2] = lp->chipset; j+=4; | ||
5531 | if (lp->chipset == DC21140) { | ||
5532 | tmp.lval[j>>2] = gep_rd(dev); j+=4; | ||
5533 | } else { | ||
5534 | tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4; | ||
5535 | tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4; | ||
5536 | tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4; | ||
5537 | tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; | ||
5538 | } | ||
5539 | tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; | ||
5540 | if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) { | ||
5541 | tmp.lval[j>>2] = lp->active; j+=4; | ||
5542 | tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5543 | tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5544 | tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5545 | tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5546 | if (lp->phy[lp->active].id != BROADCOM_T4) { | ||
5547 | tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5548 | tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5549 | } | ||
5550 | tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5551 | if (lp->phy[lp->active].id != BROADCOM_T4) { | ||
5552 | tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5553 | tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5554 | } else { | ||
5555 | tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4; | ||
5556 | } | ||
5557 | } | ||
5558 | |||
5559 | tmp.addr[j++] = lp->txRingSize; | ||
5560 | tmp.addr[j++] = netif_queue_stopped(dev); | ||
5561 | |||
5562 | ioc->len = j; | ||
5563 | if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; | ||
5564 | break; | ||
5565 | |||
5566 | */ | ||
5567 | default: | ||
5568 | return -EOPNOTSUPP; | ||
5569 | } | ||
5570 | |||
5571 | return status; | ||
5572 | } | ||
5573 | |||
5574 | static int __init de4x5_module_init (void) | ||
5575 | { | ||
5576 | int err = 0; | ||
5577 | |||
5578 | #ifdef CONFIG_PCI | ||
5579 | err = pci_register_driver(&de4x5_pci_driver); | ||
5580 | #endif | ||
5581 | #ifdef CONFIG_EISA | ||
5582 | err |= eisa_driver_register (&de4x5_eisa_driver); | ||
5583 | #endif | ||
5584 | |||
5585 | return err; | ||
5586 | } | ||
5587 | |||
5588 | static void __exit de4x5_module_exit (void) | ||
5589 | { | ||
5590 | #ifdef CONFIG_PCI | ||
5591 | pci_unregister_driver (&de4x5_pci_driver); | ||
5592 | #endif | ||
5593 | #ifdef CONFIG_EISA | ||
5594 | eisa_driver_unregister (&de4x5_eisa_driver); | ||
5595 | #endif | ||
5596 | } | ||
5597 | |||
5598 | module_init (de4x5_module_init); | ||
5599 | module_exit (de4x5_module_exit); | ||
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h new file mode 100644 index 000000000000..9f2877438fb0 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/de4x5.h | |||
@@ -0,0 +1,1019 @@ | |||
1 | /* | ||
2 | Copyright 1994 Digital Equipment Corporation. | ||
3 | |||
4 | This software may be used and distributed according to the terms of the | ||
5 | GNU General Public License, incorporated herein by reference. | ||
6 | |||
7 | The author may be reached as davies@wanton.lkg.dec.com or Digital | ||
8 | Equipment Corporation, 550 King Street, Littleton MA 01460. | ||
9 | |||
10 | ========================================================================= | ||
11 | */ | ||
12 | |||
13 | /* | ||
14 | ** DC21040 CSR<1..15> Register Address Map | ||
15 | */ | ||
16 | #define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */ | ||
17 | #define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */ | ||
18 | #define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */ | ||
19 | #define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */ | ||
20 | #define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */ | ||
21 | #define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */ | ||
22 | #define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */ | ||
23 | #define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */ | ||
24 | #define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */ | ||
25 | #define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */ | ||
26 | #define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */ | ||
27 | #define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */ | ||
28 | #define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */ | ||
29 | #define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */ | ||
30 | #define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */ | ||
31 | #define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/ | ||
32 | #define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */ | ||
33 | #define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */ | ||
34 | #define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */ | ||
35 | #define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */ | ||
36 | #define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */ | ||
37 | |||
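The (offset << lp->bus) factor above scales the CSR stride for the host bus: assuming lp->bus holds the PCI (0) or EISA (1) value defined under "Miscellaneous" later in this header, the registers sit 8 bytes apart on PCI and 16 bytes apart on EISA. A minimal sketch with a hypothetical helper name, not part of the driver:

    /* Illustration only: CSR n is at iobase + n*8 on PCI (bus == 0) and
     * iobase + n*16 on EISA (bus == 1), matching the (offset << lp->bus)
     * pattern used by the macros above. */
    static inline u_long de4x5_csr_addr(u_long iobase, int bus, int csr)
    {
            return iobase + ((csr * 0x008) << bus);
    }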
38 | /* | ||
39 | ** EISA Register Address Map | ||
40 | */ | ||
41 | #define EISA_ID iobase+0x0c80 /* EISA ID Registers */ | ||
42 | #define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */ | ||
43 | #define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */ | ||
44 | #define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */ | ||
45 | #define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */ | ||
46 | #define EISA_CR iobase+0x0c84 /* EISA Control Register */ | ||
47 | #define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */ | ||
48 | #define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */ | ||
49 | #define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */ | ||
50 | #define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */ | ||
51 | #define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */ | ||
52 | |||
53 | /* | ||
54 | ** PCI/EISA Configuration Registers Address Map | ||
55 | */ | ||
56 | #define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */ | ||
57 | #define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */ | ||
58 | #define PCI_CFRV iobase+0x0018 /* PCI Revision Register */ | ||
59 | #define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */ | ||
60 | #define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */ | ||
61 | #define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */ | ||
62 | #define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */ | ||
63 | #define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */ | ||
64 | #define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */ | ||
65 | #define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */ | ||
66 | #define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */ | ||
67 | |||
68 | /* | ||
69 | ** EISA Configuration Register 0 bit definitions | ||
70 | */ | ||
71 | #define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */ | ||
72 | #define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */ | ||
73 | #define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */ | ||
74 | #define ER0_ISTS 0x10 /* Interrupt Status (X) */ | ||
75 | #define ER0_LI 0x08 /* Latch Interrupts */ | ||
76 | #define ER0_INTL 0x06 /* INTerrupt Level */ | ||
77 | #define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */ | ||
78 | |||
79 | /* | ||
80 | ** EISA Configuration Register 1 bit definitions | ||
81 | */ | ||
82 | #define ER1_IAM 0xe0 /* ISA Address Mode */ | ||
83 | #define ER1_IAE 0x10 /* ISA Addressing Enable */ | ||
84 | #define ER1_UPIN 0x0f /* User Pins */ | ||
85 | |||
86 | /* | ||
87 | ** EISA Configuration Register 2 bit definitions | ||
88 | */ | ||
89 | #define ER2_BRS 0xc0 /* Boot ROM Size */ | ||
90 | #define ER2_BRA 0x3c /* Boot ROM Address <16:13> */ | ||
91 | |||
92 | /* | ||
93 | ** EISA Configuration Register 3 bit definitions | ||
94 | */ | ||
95 | #define ER3_BWE 0x40 /* Burst Write Enable */ | ||
96 | #define ER3_BRE 0x04 /* Burst Read Enable */ | ||
97 | #define ER3_LSR 0x02 /* Local Software Reset */ | ||
98 | |||
99 | /* | ||
100 | ** PCI Configuration ID Register (PCI_CFID). The Device IDs are left | ||
101 | ** shifted 8 bits to allow detection of DC21142 and DC21143 variants with | ||
102 | ** the configuration revision register step number. | ||
103 | */ | ||
104 | #define CFID_DID 0xff00 /* Device ID */ | ||
105 | #define CFID_VID 0x00ff /* Vendor ID */ | ||
106 | #define DC21040_DID 0x0200 /* Unique Device ID # */ | ||
107 | #define DC21040_VID 0x1011 /* DC21040 Manufacturer */ | ||
108 | #define DC21041_DID 0x1400 /* Unique Device ID # */ | ||
109 | #define DC21041_VID 0x1011 /* DC21041 Manufacturer */ | ||
110 | #define DC21140_DID 0x0900 /* Unique Device ID # */ | ||
111 | #define DC21140_VID 0x1011 /* DC21140 Manufacturer */ | ||
112 | #define DC2114x_DID 0x1900 /* Unique Device ID # */ | ||
113 | #define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */ | ||
114 | |||
115 | /* | ||
116 | ** Chipset defines | ||
117 | */ | ||
118 | #define DC21040 DC21040_DID | ||
119 | #define DC21041 DC21041_DID | ||
120 | #define DC21140 DC21140_DID | ||
121 | #define DC2114x DC2114x_DID | ||
122 | #define DC21142 (DC2114x_DID | 0x0010) | ||
123 | #define DC21143 (DC2114x_DID | 0x0030) | ||
124 | #define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */ | ||
125 | |||
126 | #define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID)) | ||
127 | #define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID)) | ||
128 | #define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID)) | ||
129 | #define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID)) | ||
130 | #define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142)) | ||
131 | #define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143)) | ||
132 | |||
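For a part that matches is_DC2114x, the DC2114x_BRK boundary above separates the two variants by the CFRV revision-number field. A minimal sketch of that classification (illustration only, not copied from the driver), using only constants defined in this header:

    /* Sketch: pick DC21142 or DC21143 from the configuration revision
     * register value. */
    static inline u16 dc2114x_variant(u32 cfrv)
    {
            return ((cfrv & CFRV_RN) < DC2114x_BRK) ? DC21142 : DC21143;
    }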
133 | /* | ||
134 | ** PCI Configuration Command/Status Register (PCI_CFCS) | ||
135 | */ | ||
136 | #define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */ | ||
137 | #define CFCS_SSE 0x40000000 /* Signal System Error (S) */ | ||
138 | #define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */ | ||
139 | #define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */ | ||
140 | #define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */ | ||
141 | #define CFCS_DPR 0x01000000 /* Data Parity Report (S) */ | ||
142 | #define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */ | ||
143 | #define CFCS_SEE 0x00000100 /* System Error Enable (C) */ | ||
144 | #define CFCS_PER 0x00000040 /* Parity Error Response (C) */ | ||
145 | #define CFCS_MO 0x00000004 /* Master Operation (C) */ | ||
146 | #define CFCS_MSA 0x00000002 /* Memory Space Access (C) */ | ||
147 | #define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */ | ||
148 | |||
149 | /* | ||
150 | ** PCI Configuration Revision Register (PCI_CFRV) | ||
151 | */ | ||
152 | #define CFRV_BC 0xff000000 /* Base Class */ | ||
153 | #define CFRV_SC 0x00ff0000 /* Subclass */ | ||
154 | #define CFRV_RN 0x000000f0 /* Revision Number */ | ||
155 | #define CFRV_SN 0x0000000f /* Step Number */ | ||
156 | #define BASE_CLASS 0x02000000 /* Indicates Network Controller */ | ||
157 | #define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */ | ||
158 | #define STEP_NUMBER 0x00000020 /* Increments for future chips */ | ||
159 | #define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */ | ||
160 | #define CFRV_MASK 0xffff0000 /* Register mask */ | ||
161 | |||
162 | /* | ||
163 | ** PCI Configuration Latency Timer Register (PCI_CFLT) | ||
164 | */ | ||
165 | #define CFLT_BC 0x0000ff00 /* Latency Timer bits */ | ||
166 | |||
167 | /* | ||
168 | ** PCI Configuration Base I/O Address Register (PCI_CBIO) | ||
169 | */ | ||
170 | #define CBIO_MASK -128 /* Base I/O Address Mask */ | ||
171 | #define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */ | ||
172 | |||
173 | /* | ||
174 | ** PCI Configuration Card Information Structure Register (PCI_CCIS) | ||
175 | */ | ||
176 | #define CCIS_ROMI 0xf0000000 /* ROM Image */ | ||
177 | #define CCIS_ASO 0x0ffffff8 /* Address Space Offset */ | ||
178 | #define CCIS_ASI 0x00000007 /* Address Space Indicator */ | ||
179 | |||
180 | /* | ||
181 | ** PCI Configuration Subsystem ID Register (PCI_SSID) | ||
182 | */ | ||
183 | #define SSID_SSID 0xffff0000 /* Subsystem ID */ | ||
184 | #define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */ | ||
185 | |||
186 | /* | ||
187 | ** PCI Configuration Expansion ROM Base Address Register (PCI_CBER) | ||
188 | */ | ||
189 | #define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */ | ||
190 | #define CBER_ROME 0x00000001 /* ROM Enable */ | ||
191 | |||
192 | /* | ||
193 | ** PCI Configuration Interrupt Register (PCI_CFIT) | ||
194 | */ | ||
195 | #define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */ | ||
196 | #define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */ | ||
197 | #define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */ | ||
198 | #define CFIT_IRQL 0x000000ff /* Interrupt Line */ | ||
199 | |||
200 | /* | ||
201 | ** PCI Configuration Power Management Area Register (PCI_CFPM) | ||
202 | */ | ||
203 | #define SLEEP 0x80 /* Power Saving Sleep Mode */ | ||
204 | #define SNOOZE 0x40 /* Power Saving Snooze Mode */ | ||
205 | #define WAKEUP 0x00 /* Power Saving Wakeup */ | ||
206 | |||
207 | #define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */ | ||
208 | #define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */ | ||
209 | |||
210 | /* | ||
211 | ** DC21040 Bus Mode Register (DE4X5_BMR) | ||
212 | */ | ||
213 | #define BMR_RML 0x00200000 /* [Memory] Read Multiple */ | ||
214 | #define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */ | ||
215 | #define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */ | ||
216 | #define BMR_DAS 0x00010000 /* Diagnostic Address Space */ | ||
217 | #define BMR_CAL 0x0000c000 /* Cache Alignment */ | ||
218 | #define BMR_PBL 0x00003f00 /* Programmable Burst Length */ | ||
219 | #define BMR_BLE 0x00000080 /* Big/Little Endian */ | ||
220 | #define BMR_DSL 0x0000007c /* Descriptor Skip Length */ | ||
221 | #define BMR_BAR 0x00000002 /* Bus ARbitration */ | ||
222 | #define BMR_SWR 0x00000001 /* Software Reset */ | ||
223 | |||
224 | /* Timings here are for 10BASE-T/AUI only*/ | ||
225 | #define TAP_NOPOLL 0x00000000 /* No automatic polling */ | ||
226 | #define TAP_200US 0x00020000 /* TX automatic polling every 200us */ | ||
227 | #define TAP_800US 0x00040000 /* TX automatic polling every 800us */ | ||
228 | #define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */ | ||
229 | #define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */ | ||
230 | #define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */ | ||
231 | #define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */ | ||
232 | #define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */ | ||
233 | |||
234 | #define CAL_NOUSE 0x00000000 /* Not used */ | ||
235 | #define CAL_8LONG 0x00004000 /* 8-longword alignment */ | ||
236 | #define CAL_16LONG 0x00008000 /* 16-longword alignment */ | ||
237 | #define CAL_32LONG 0x0000c000 /* 32-longword alignment */ | ||
238 | |||
239 | #define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */ | ||
240 | #define PBL_1 0x00000100 /* 1 longword DMA burst length */ | ||
241 | #define PBL_2 0x00000200 /* 2 longwords DMA burst length */ | ||
242 | #define PBL_4 0x00000400 /* 4 longwords DMA burst length */ | ||
243 | #define PBL_8 0x00000800 /* 8 longwords DMA burst length */ | ||
244 | #define PBL_16 0x00001000 /* 16 longwords DMA burst length */ | ||
245 | #define PBL_32 0x00002000 /* 32 longwords DMA burst length */ | ||
246 | |||
247 | #define DSL_0 0x00000000 /* 0 longword / descriptor */ | ||
248 | #define DSL_1 0x00000004 /* 1 longword / descriptor */ | ||
249 | #define DSL_2 0x00000008 /* 2 longwords / descriptor */ | ||
250 | #define DSL_4 0x00000010 /* 4 longwords / descriptor */ | ||
251 | #define DSL_8 0x00000020 /* 8 longwords / descriptor */ | ||
252 | #define DSL_16 0x00000040 /* 16 longwords / descriptor */ | ||
253 | #define DSL_32 0x00000080 /* 32 longwords / descriptor */ | ||
254 | |||
255 | /* | ||
256 | ** DC21040 Transmit Poll Demand Register (DE4X5_TPD) | ||
257 | */ | ||
258 | #define TPD 0x00000001 /* Transmit Poll Demand */ | ||
259 | |||
260 | /* | ||
261 | ** DC21040 Receive Poll Demand Register (DE4X5_RPD) | ||
262 | */ | ||
263 | #define RPD 0x00000001 /* Receive Poll Demand */ | ||
264 | |||
265 | /* | ||
266 | ** DC21040 Receive Ring Base Address Register (DE4X5_RRBA) | ||
267 | */ | ||
268 | #define RRBA 0xfffffffc /* RX Descriptor List Start Address */ | ||
269 | |||
270 | /* | ||
271 | ** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA) | ||
272 | */ | ||
273 | #define TRBA 0xfffffffc /* TX Descriptor List Start Address */ | ||
274 | |||
275 | /* | ||
276 | ** Status Register (DE4X5_STS) | ||
277 | */ | ||
278 | #define STS_GPI 0x04000000 /* General Purpose Port Interrupt */ | ||
279 | #define STS_BE 0x03800000 /* Bus Error Bits */ | ||
280 | #define STS_TS 0x00700000 /* Transmit Process State */ | ||
281 | #define STS_RS 0x000e0000 /* Receive Process State */ | ||
282 | #define STS_NIS 0x00010000 /* Normal Interrupt Summary */ | ||
283 | #define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */ | ||
284 | #define STS_ER 0x00004000 /* Early Receive */ | ||
285 | #define STS_FBE 0x00002000 /* Fatal Bus Error */ | ||
286 | #define STS_SE 0x00002000 /* System Error */ | ||
287 | #define STS_LNF 0x00001000 /* Link Fail */ | ||
288 | #define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */ | ||
289 | #define STS_TM 0x00000800 /* Timer Expired (DC21041) */ | ||
290 | #define STS_ETI 0x00000400 /* Early Transmit Interrupt */ | ||
291 | #define STS_AT 0x00000400 /* AUI/TP Pin */ | ||
292 | #define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */ | ||
293 | #define STS_RPS 0x00000100 /* Receive Process Stopped */ | ||
294 | #define STS_RU 0x00000080 /* Receive Buffer Unavailable */ | ||
295 | #define STS_RI 0x00000040 /* Receive Interrupt */ | ||
296 | #define STS_UNF 0x00000020 /* Transmit Underflow */ | ||
297 | #define STS_LNP 0x00000010 /* Link Pass */ | ||
298 | #define STS_ANC 0x00000010 /* Autonegotiation Complete */ | ||
299 | #define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */ | ||
300 | #define STS_TU 0x00000004 /* Transmit Buffer Unavailable */ | ||
301 | #define STS_TPS 0x00000002 /* Transmit Process Stopped */ | ||
302 | #define STS_TI 0x00000001 /* Transmit Interrupt */ | ||
303 | |||
304 | #define EB_PAR 0x00000000 /* Parity Error */ | ||
305 | #define EB_MA 0x00800000 /* Master Abort */ | ||
306 | #define EB_TA 0x01000000 /* Target Abort */ | ||
307 | #define EB_RES0 0x01800000 /* Reserved */ | ||
308 | #define EB_RES1 0x02000000 /* Reserved */ | ||
309 | |||
310 | #define TS_STOP 0x00000000 /* Stopped */ | ||
311 | #define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */ | ||
312 | #define TS_WEOT 0x00200000 /* Wait for End Of Transmission */ | ||
313 | #define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */ | ||
314 | #define TS_RES 0x00400000 /* Reserved */ | ||
315 | #define TS_SPKT 0x00500000 /* Setup Packet */ | ||
316 | #define TS_SUSP 0x00600000 /* Suspended */ | ||
317 | #define TS_CLTD 0x00700000 /* Close Transmit Descriptor */ | ||
318 | |||
319 | #define RS_STOP 0x00000000 /* Stopped */ | ||
320 | #define RS_FRD 0x00020000 /* Fetch Receive Descriptor */ | ||
321 | #define RS_CEOR 0x00040000 /* Check for End of Receive Packet */ | ||
322 | #define RS_WFRP 0x00060000 /* Wait for Receive Packet */ | ||
323 | #define RS_SUSP 0x00080000 /* Suspended */ | ||
324 | #define RS_CLRD 0x000a0000 /* Close Receive Descriptor */ | ||
325 | #define RS_FLUSH 0x000c0000 /* Flush RX FIFO */ | ||
326 | #define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */ | ||
327 | |||
328 | #define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */ | ||
329 | |||
330 | /* | ||
331 | ** Operation Mode Register (DE4X5_OMR) | ||
332 | */ | ||
333 | #define OMR_SC 0x80000000 /* Special Capture Effect Enable */ | ||
334 | #define OMR_RA 0x40000000 /* Receive All */ | ||
335 | #define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */ | ||
336 | #define OMR_SCR 0x01000000 /* Scrambler Mode */ | ||
337 | #define OMR_PCS 0x00800000 /* PCS Function */ | ||
338 | #define OMR_TTM 0x00400000 /* Transmit Threshold Mode */ | ||
339 | #define OMR_SF 0x00200000 /* Store and Forward */ | ||
340 | #define OMR_HBD 0x00080000 /* HeartBeat Disable */ | ||
341 | #define OMR_PS 0x00040000 /* Port Select */ | ||
342 | #define OMR_CA 0x00020000 /* Capture Effect Enable */ | ||
343 | #define OMR_BP 0x00010000 /* Back Pressure */ | ||
344 | #define OMR_TR 0x0000c000 /* Threshold Control Bits */ | ||
345 | #define OMR_ST 0x00002000 /* Start/Stop Transmission Command */ | ||
346 | #define OMR_FC 0x00001000 /* Force Collision Mode */ | ||
347 | #define OMR_OM 0x00000c00 /* Operating Mode */ | ||
348 | #define OMR_FDX 0x00000200 /* Full Duplex Mode */ | ||
349 | #define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */ | ||
350 | #define OMR_PM 0x00000080 /* Pass All Multicast */ | ||
351 | #define OMR_PR 0x00000040 /* Promiscuous Mode */ | ||
352 | #define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */ | ||
353 | #define OMR_IF 0x00000010 /* Inverse Filtering */ | ||
354 | #define OMR_PB 0x00000008 /* Pass Bad Frames */ | ||
355 | #define OMR_HO 0x00000004 /* Hash Only Filtering Mode */ | ||
356 | #define OMR_SR 0x00000002 /* Start/Stop Receive */ | ||
357 | #define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */ | ||
358 | |||
359 | #define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */ | ||
360 | #define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */ | ||
361 | #define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */ | ||
362 | #define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */ | ||
363 | |||
364 | #define OMR_DEF (OMR_SDP) | ||
365 | #define OMR_SIA (OMR_SDP | OMR_TTM) | ||
366 | #define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS) | ||
367 | #define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS) | ||
368 | #define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS) | ||
369 | |||
370 | /* | ||
371 | ** DC21040 Interrupt Mask Register (DE4X5_IMR) | ||
372 | */ | ||
373 | #define IMR_GPM 0x04000000 /* General Purpose Port Mask */ | ||
374 | #define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */ | ||
375 | #define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */ | ||
376 | #define IMR_ERM 0x00004000 /* Early Receive Mask */ | ||
377 | #define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */ | ||
378 | #define IMR_SEM 0x00002000 /* System Error Mask */ | ||
379 | #define IMR_LFM 0x00001000 /* Link Fail Mask */ | ||
380 | #define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */ | ||
381 | #define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */ | ||
382 | #define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */ | ||
383 | #define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */ | ||
384 | #define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */ | ||
385 | #define IMR_RSM 0x00000100 /* Receive Stopped Mask */ | ||
386 | #define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */ | ||
387 | #define IMR_RIM 0x00000040 /* Receive Interrupt Mask */ | ||
388 | #define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */ | ||
389 | #define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */ | ||
390 | #define IMR_LPM 0x00000010 /* Link Pass */ | ||
391 | #define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */ | ||
392 | #define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */ | ||
393 | #define IMR_TSM 0x00000002 /* Transmission Stopped Mask */ | ||
394 | #define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */ | ||
395 | |||
396 | /* | ||
397 | ** Missed Frames and FIFO Overflow Counters (DE4X5_MFC) | ||
398 | */ | ||
399 | #define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */ | ||
400 | #define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */ | ||
401 | #define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */ | ||
402 | #define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */ | ||
403 | #define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */ | ||
404 | |||
405 | /* | ||
406 | ** DC21040 Ethernet Address PROM (DE4X5_APROM) | ||
407 | */ | ||
408 | #define APROM_DN 0x80000000 /* Data Not Valid */ | ||
409 | #define APROM_DT 0x000000ff /* Address Byte */ | ||
410 | |||
411 | /* | ||
412 | ** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM) | ||
413 | */ | ||
414 | #define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ | ||
415 | #define BROM_RD 0x00004000 /* Read from Boot ROM */ | ||
416 | #define BROM_WR 0x00002000 /* Write to Boot ROM */ | ||
417 | #define BROM_BR 0x00001000 /* Select Boot ROM when set */ | ||
418 | #define BROM_SR 0x00000800 /* Select Serial ROM when set */ | ||
419 | #define BROM_REG 0x00000400 /* External Register Select */ | ||
420 | #define BROM_DT 0x000000ff /* Data Byte */ | ||
421 | |||
422 | /* | ||
423 | ** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII) | ||
424 | */ | ||
425 | #define MII_MDI 0x00080000 /* MII Management Data In */ | ||
426 | #define MII_MDO 0x00060000 /* MII Management Mode/Data Out */ | ||
427 | #define MII_MRD 0x00040000 /* MII Management Define Read Mode */ | ||
428 | #define MII_MWR 0x00000000 /* MII Management Define Write Mode */ | ||
429 | #define MII_MDT 0x00020000 /* MII Management Data Out */ | ||
430 | #define MII_MDC 0x00010000 /* MII Management Clock */ | ||
431 | #define MII_RD 0x00004000 /* Read from MII */ | ||
432 | #define MII_WR 0x00002000 /* Write to MII */ | ||
433 | #define MII_SEL 0x00000800 /* Select MII when RESET */ | ||
434 | |||
435 | #define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */ | ||
436 | #define SROM_RD 0x00004000 /* Read from Boot ROM */ | ||
437 | #define SROM_WR 0x00002000 /* Write to Boot ROM */ | ||
438 | #define SROM_BR 0x00001000 /* Select Boot ROM when set */ | ||
439 | #define SROM_SR 0x00000800 /* Select Serial ROM when set */ | ||
440 | #define SROM_REG 0x00000400 /* External Register Select */ | ||
441 | #define SROM_DT 0x000000ff /* Data Byte */ | ||
442 | |||
443 | #define DT_OUT 0x00000008 /* Serial Data Out */ | ||
444 | #define DT_IN 0x00000004 /* Serial Data In */ | ||
445 | #define DT_CLK 0x00000002 /* Serial ROM Clock */ | ||
446 | #define DT_CS 0x00000001 /* Serial ROM Chip Select */ | ||
447 | |||
448 | #define MII_PREAMBLE 0xffffffff /* MII Management Preamble */ | ||
449 | #define MII_TEST 0xaaaaaaaa /* MII Test Signal */ | ||
450 | #define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */ | ||
451 | #define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */ | ||
452 | |||
453 | #define MII_CR 0x00 /* MII Management Control Register */ | ||
454 | #define MII_SR 0x01 /* MII Management Status Register */ | ||
455 | #define MII_ID0 0x02 /* PHY Identifier Register 0 */ | ||
456 | #define MII_ID1 0x03 /* PHY Identifier Register 1 */ | ||
457 | #define MII_ANA 0x04 /* Auto Negotiation Advertisement */ | ||
458 | #define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */ | ||
459 | #define MII_ANE 0x06 /* Auto Negotiation Expansion */ | ||
460 | #define MII_ANP 0x07 /* Auto Negotiation Next Page TX */ | ||
461 | |||
462 | #define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */ | ||
463 | |||
464 | /* | ||
465 | ** MII Management Control Register | ||
466 | */ | ||
467 | #define MII_CR_RST 0x8000 /* RESET the PHY chip */ | ||
468 | #define MII_CR_LPBK 0x4000 /* Loopback enable */ | ||
469 | #define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */ | ||
470 | #define MII_CR_10 0x0000 /* Set 10Mb/s */ | ||
471 | #define MII_CR_100 0x2000 /* Set 100Mb/s */ | ||
472 | #define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */ | ||
473 | #define MII_CR_PD 0x0800 /* Power Down */ | ||
474 | #define MII_CR_ISOL 0x0400 /* Isolate Mode */ | ||
475 | #define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */ | ||
476 | #define MII_CR_FDM 0x0100 /* Full Duplex Mode */ | ||
477 | #define MII_CR_CTE 0x0080 /* Collision Test Enable */ | ||
478 | |||
479 | /* | ||
480 | ** MII Management Status Register | ||
481 | */ | ||
482 | #define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */ | ||
483 | #define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */ | ||
484 | #define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */ | ||
485 | #define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */ | ||
486 | #define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */ | ||
487 | #define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/ | ||
488 | #define MII_SR_RFD 0x0010 /* Remote Fault Detected */ | ||
489 | #define MII_SR_ANC 0x0008 /* Auto Negotiation capable */ | ||
490 | #define MII_SR_LKS 0x0004 /* Link Status */ | ||
491 | #define MII_SR_JABD 0x0002 /* Jabber Detect */ | ||
492 | #define MII_SR_XC 0x0001 /* Extended Capabilities */ | ||
493 | |||
494 | /* | ||
495 | ** MII Management Auto Negotiation Advertisement Register | ||
496 | */ | ||
497 | #define MII_ANA_TAF 0x03e0 /* Technology Ability Field */ | ||
498 | #define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */ | ||
499 | #define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */ | ||
500 | #define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ | ||
501 | #define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ | ||
502 | #define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */ | ||
503 | #define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */ | ||
504 | #define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */ | ||
505 | |||
506 | /* | ||
507 | ** MII Management Auto Negotiation Remote End Register | ||
508 | */ | ||
509 | #define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */ | ||
510 | #define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */ | ||
511 | #define MII_ANLPA_RF 0x2000 /* Remote Fault */ | ||
512 | #define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */ | ||
513 | #define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */ | ||
514 | #define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */ | ||
515 | #define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */ | ||
516 | #define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */ | ||
517 | #define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */ | ||
518 | #define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */ | ||
519 | #define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */ | ||
520 | |||
521 | /* | ||
522 | ** SROM Media Definitions (ABG SROM Section) | ||
523 | */ | ||
524 | #define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */ | ||
525 | #define MEDIA_MII 0x0040 /* MII Present on the adapter */ | ||
526 | #define MEDIA_FIBRE 0x0008 /* Fibre Media present */ | ||
527 | #define MEDIA_AUI 0x0004 /* AUI Media present */ | ||
528 | #define MEDIA_TP 0x0002 /* TP Media present */ | ||
529 | #define MEDIA_BNC 0x0001 /* BNC Media present */ | ||
530 | |||
531 | /* | ||
532 | ** SROM Definitions (Digital Semiconductor Format) | ||
533 | */ | ||
534 | #define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */ | ||
535 | #define SROM_SSID 0x0002 /* Sub-system ID offset */ | ||
536 | #define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */ | ||
537 | #define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */ | ||
538 | #define SROM_IDCRC 0x0010 /* ID Block CRC offset*/ | ||
539 | #define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */ | ||
540 | #define SROM_SFV 0x0012 /* SROM Format Version offset */ | ||
541 | #define SROM_CCNT 0x0013 /* Controller Count offset */ | ||
542 | #define SROM_HWADD 0x0014 /* Hardware Address offset */ | ||
543 | #define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/ | ||
544 | #define SROM_CRC 0x007e /* SROM CRC offset */ | ||
545 | |||
546 | /* | ||
547 | ** SROM Media Connection Definitions | ||
548 | */ | ||
549 | #define SROM_10BT 0x0000 /* 10BASE-T half duplex */ | ||
550 | #define SROM_10BTN 0x0100 /* 10BASE-T with Nway */ | ||
551 | #define SROM_10BTF 0x0204 /* 10BASE-T full duplex */ | ||
552 | #define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */ | ||
553 | #define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */ | ||
554 | #define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */ | ||
555 | #define SROM_100BTH 0x0003 /* 100BASE-T half duplex */ | ||
556 | #define SROM_100BTF 0x0205 /* 100BASE-T full duplex */ | ||
557 | #define SROM_100BT4 0x0006 /* 100BASE-T4 */ | ||
558 | #define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */ | ||
559 | #define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */ | ||
560 | #define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */ | ||
561 | #define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */ | ||
562 | #define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */ | ||
563 | #define SROM_M100BT4 0x000f /* MII 100BASE-T4 */ | ||
564 | #define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */ | ||
565 | #define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */ | ||
566 | #define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */ | ||
567 | #define SROM_PAO 0x8800 /* Powerup Autosense Only */ | ||
568 | #define SROM_NSMI 0xffff /* No Selected Media Information */ | ||
569 | |||
570 | /* | ||
571 | ** SROM Media Definitions | ||
572 | */ | ||
573 | #define SROM_10BASET 0x0000 /* 10BASE-T half duplex */ | ||
574 | #define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */ | ||
575 | #define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */ | ||
576 | #define SROM_100BASET 0x0003 /* 100BASE-T half duplex */ | ||
577 | #define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */ | ||
578 | #define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */ | ||
579 | #define SROM_100BASET4 0x0006 /* 100BASE-T4 */ | ||
580 | #define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */ | ||
581 | #define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */ | ||
582 | |||
583 | #define BLOCK_LEN 0x7f /* Extended blocks length mask */ | ||
584 | #define EXT_FIELD 0x40 /* Extended blocks extension field bit */ | ||
585 | #define MEDIA_CODE 0x3f /* Extended blocks media code mask */ | ||
586 | |||
587 | /* | ||
588 | ** SROM Compact Format Block Masks | ||
589 | */ | ||
590 | #define COMPACT_FI 0x80 /* Format Indicator */ | ||
591 | #define COMPACT_LEN 0x04 /* Length */ | ||
592 | #define COMPACT_MC 0x3f /* Media Code */ | ||
593 | |||
594 | /* | ||
595 | ** SROM Extended Format Block Type 0 Masks | ||
596 | */ | ||
597 | #define BLOCK0_FI 0x80 /* Format Indicator */ | ||
598 | #define BLOCK0_MCS 0x80 /* Media Code byte Sign */ | ||
599 | #define BLOCK0_MC 0x3f /* Media Code */ | ||
600 | |||
601 | /* | ||
602 | ** DC21040 Full Duplex Register (DE4X5_FDR) | ||
603 | */ | ||
604 | #define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */ | ||
605 | |||
606 | /* | ||
607 | ** DC21041 General Purpose Timer Register (DE4X5_GPT) | ||
608 | */ | ||
609 | #define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */ | ||
610 | #define GPT_VAL 0x0000ffff /* Timer Value */ | ||
611 | |||
612 | /* | ||
613 | ** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits) | ||
614 | */ | ||
615 | /* Valid ONLY for DE500 hardware */ | ||
616 | #define GEP_LNP 0x00000080 /* Link Pass (input) */ | ||
617 | #define GEP_SLNK 0x00000040 /* SYM LINK (input) */ | ||
618 | #define GEP_SDET 0x00000020 /* Signal Detect (input) */ | ||
619 | #define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */ | ||
620 | #define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */ | ||
621 | #define GEP_PHYL 0x00000004 /* PHY Loopback (output) */ | ||
622 | #define GEP_FLED 0x00000002 /* Force Activity LED on (output) */ | ||
623 | #define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */ | ||
624 | #define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */ | ||
625 | #define GEP_CTRL 0x00000100 /* GEP control bit */ | ||
626 | |||
627 | /* | ||
628 | ** SIA Register Defaults | ||
629 | */ | ||
630 | #define CSR13 0x00000001 | ||
631 | #define CSR14 0x0003ff7f /* Autonegotiation disabled */ | ||
632 | #define CSR15 0x00000008 | ||
633 | |||
634 | /* | ||
635 | ** SIA Status Register (DE4X5_SISR) | ||
636 | */ | ||
637 | #define SISR_LPC 0xffff0000 /* Link Partner's Code Word */ | ||
638 | #define SISR_LPN 0x00008000 /* Link Partner Negotiable */ | ||
639 | #define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */ | ||
640 | #define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */ | ||
641 | #define SISR_TRF 0x00000800 /* Transmit Remote Fault */ | ||
642 | #define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */ | ||
643 | #define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/ | ||
644 | #define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */ | ||
645 | #define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */ | ||
646 | #define SISR_ARA 0x00000100 /* AUI Receive Port Activity */ | ||
647 | #define SISR_SRA 0x00000100 /* Selected Port Receive Activity */ | ||
648 | #define SISR_DAO 0x00000080 /* PLL All One */ | ||
649 | #define SISR_DAZ 0x00000040 /* PLL All Zero */ | ||
650 | #define SISR_DSP 0x00000020 /* PLL Self-Test Pass */ | ||
651 | #define SISR_DSD 0x00000010 /* PLL Self-Test Done */ | ||
652 | #define SISR_APS 0x00000008 /* Auto Polarity State */ | ||
653 | #define SISR_LKF 0x00000004 /* Link Fail Status */ | ||
654 | #define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */ | ||
655 | #define SISR_NCR 0x00000002 /* Network Connection Error */ | ||
656 | #define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */ | ||
657 | #define SISR_PAUI 0x00000001 /* AUI_TP Indication */ | ||
658 | #define SISR_MRA 0x00000001 /* MII Receive Port Activity */ | ||
659 | |||
660 | #define ANS_NDIS 0x00000000 /* Nway disable */ | ||
661 | #define ANS_TDIS 0x00001000 /* Transmit Disable */ | ||
662 | #define ANS_ADET 0x00002000 /* Ability Detect */ | ||
663 | #define ANS_ACK 0x00003000 /* Acknowledge */ | ||
664 | #define ANS_CACK 0x00004000 /* Complete Acknowledge */ | ||
665 | #define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */ | ||
666 | #define ANS_LCHK 0x00006000 /* Link Check */ | ||
667 | |||
668 | #define SISR_RST 0x00000301 /* CSR12 reset */ | ||
669 | #define SISR_ANR 0x00001301 /* Autonegotiation restart */ | ||
670 | |||
671 | /* | ||
672 | ** SIA Connectivity Register (DE4X5_SICR) | ||
673 | */ | ||
674 | #define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */ | ||
675 | #define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */ | ||
676 | #define SICR_OE24 0x00004000 /* Output Enable 2 4 */ | ||
677 | #define SICR_OE13 0x00002000 /* Output Enable 1 3 */ | ||
678 | #define SICR_IE 0x00001000 /* Input Enable */ | ||
679 | #define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */ | ||
680 | #define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */ | ||
681 | #define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/ | ||
682 | #define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/ | ||
683 | #define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */ | ||
684 | #define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */ | ||
685 | #define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/ | ||
686 | #define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */ | ||
687 | #define SICR_ASE 0x00000080 /* APLL Start Enable*/ | ||
688 | #define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */ | ||
689 | #define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */ | ||
690 | #define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */ | ||
691 | #define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */ | ||
692 | #define SICR_CAC 0x00000004 /* CSR Auto Configuration */ | ||
693 | #define SICR_PS 0x00000002 /* Pin AUI/TP Selection */ | ||
694 | #define SICR_SRL 0x00000001 /* SIA Reset */ | ||
695 | #define SIA_RESET 0x00000000 /* SIA Reset Value */ | ||
696 | |||
697 | /* | ||
698 | ** SIA Transmit and Receive Register (DE4X5_STRR) | ||
699 | */ | ||
700 | #define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */ | ||
701 | #define STRR_SPP 0x00004000 /* Set Polarity Plus */ | ||
702 | #define STRR_APE 0x00002000 /* Auto Polarity Enable */ | ||
703 | #define STRR_LTE 0x00001000 /* Link Test Enable */ | ||
704 | #define STRR_SQE 0x00000800 /* Signal Quality Enable */ | ||
705 | #define STRR_CLD 0x00000400 /* Collision Detect Enable */ | ||
706 | #define STRR_CSQ 0x00000200 /* Collision Squelch Enable */ | ||
707 | #define STRR_RSQ 0x00000100 /* Receive Squelch Enable */ | ||
708 | #define STRR_ANE 0x00000080 /* Auto Negotiate Enable */ | ||
709 | #define STRR_HDE 0x00000040 /* Half Duplex Enable */ | ||
710 | #define STRR_CPEN 0x00000030 /* Compensation Enable */ | ||
711 | #define STRR_LSE 0x00000008 /* Link Pulse Send Enable */ | ||
712 | #define STRR_DREN 0x00000004 /* Driver Enable */ | ||
713 | #define STRR_LBK 0x00000002 /* Loopback Enable */ | ||
714 | #define STRR_ECEN 0x00000001 /* Encoder Enable */ | ||
715 | #define STRR_RESET 0xffffffff /* Reset value for STRR */ | ||
716 | |||
717 | /* | ||
718 | ** SIA General Register (DE4X5_SIGR) | ||
719 | */ | ||
720 | #define SIGR_RMI 0x40000000 /* Receive Match Interrupt */ | ||
721 | #define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */ | ||
722 | #define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */ | ||
723 | #define SIGR_CWE 0x08000000 /* Control Write Enable */ | ||
724 | #define SIGR_RME 0x04000000 /* Receive Match Enable */ | ||
725 | #define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */ | ||
726 | #define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */ | ||
727 | #define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */ | ||
728 | #define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */ | ||
729 | #define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */ | ||
730 | #define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */ | ||
731 | #define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */ | ||
732 | #define SIGR_LV2 0x00008000 /* General Purpose LED2 value */ | ||
733 | #define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */ | ||
734 | #define SIGR_FRL 0x00002000 /* Force Receiver Low */ | ||
735 | #define SIGR_DPST 0x00001000 /* PLL Self Test Start */ | ||
736 | #define SIGR_LSD 0x00000800 /* LED Stretch Disable */ | ||
737 | #define SIGR_FLF 0x00000400 /* Force Link Fail */ | ||
738 | #define SIGR_FUSQ 0x00000200 /* Force Unsquelch */ | ||
739 | #define SIGR_TSCK 0x00000100 /* Test Clock */ | ||
740 | #define SIGR_LV1 0x00000080 /* General Purpose LED1 value */ | ||
741 | #define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */ | ||
742 | #define SIGR_RWR 0x00000020 /* Receive Watchdog Release */ | ||
743 | #define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */ | ||
744 | #define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */ | ||
745 | #define SIGR_JCK 0x00000004 /* Jabber Clock */ | ||
746 | #define SIGR_HUJ 0x00000002 /* Host Unjab */ | ||
747 | #define SIGR_JBD 0x00000001 /* Jabber Disable */ | ||
748 | #define SIGR_RESET 0xffff0000 /* Reset value for SIGR */ | ||
749 | |||
750 | /* | ||
751 | ** Receive Descriptor Bit Summary | ||
752 | */ | ||
753 | #define R_OWN 0x80000000 /* Own Bit */ | ||
754 | #define RD_FF 0x40000000 /* Filtering Fail */ | ||
755 | #define RD_FL 0x3fff0000 /* Frame Length */ | ||
756 | #define RD_ES 0x00008000 /* Error Summary */ | ||
757 | #define RD_LE 0x00004000 /* Length Error */ | ||
758 | #define RD_DT 0x00003000 /* Data Type */ | ||
759 | #define RD_RF 0x00000800 /* Runt Frame */ | ||
760 | #define RD_MF 0x00000400 /* Multicast Frame */ | ||
761 | #define RD_FS 0x00000200 /* First Descriptor */ | ||
762 | #define RD_LS 0x00000100 /* Last Descriptor */ | ||
763 | #define RD_TL 0x00000080 /* Frame Too Long */ | ||
764 | #define RD_CS 0x00000040 /* Collision Seen */ | ||
765 | #define RD_FT 0x00000020 /* Frame Type */ | ||
766 | #define RD_RJ 0x00000010 /* Receive Watchdog */ | ||
767 | #define RD_RE 0x00000008 /* Report on MII Error */ | ||
768 | #define RD_DB 0x00000004 /* Dribbling Bit */ | ||
769 | #define RD_CE 0x00000002 /* CRC Error */ | ||
770 | #define RD_OF 0x00000001 /* Overflow */ | ||
771 | |||
772 | #define RD_RER 0x02000000 /* Receive End Of Ring */ | ||
773 | #define RD_RCH 0x01000000 /* Second Address Chained */ | ||
774 | #define RD_RBS2 0x003ff800 /* Buffer 2 Size */ | ||
775 | #define RD_RBS1 0x000007ff /* Buffer 1 Size */ | ||
776 | |||
777 | /* | ||
778 | ** Transmit Descriptor Bit Summary | ||
779 | */ | ||
780 | #define T_OWN 0x80000000 /* Own Bit */ | ||
781 | #define TD_ES 0x00008000 /* Error Summary */ | ||
782 | #define TD_TO 0x00004000 /* Transmit Jabber Time-Out */ | ||
783 | #define TD_LO 0x00000800 /* Loss Of Carrier */ | ||
784 | #define TD_NC 0x00000400 /* No Carrier */ | ||
785 | #define TD_LC 0x00000200 /* Late Collision */ | ||
786 | #define TD_EC 0x00000100 /* Excessive Collisions */ | ||
787 | #define TD_HF 0x00000080 /* Heartbeat Fail */ | ||
788 | #define TD_CC 0x00000078 /* Collision Counter */ | ||
789 | #define TD_LF 0x00000004 /* Link Fail */ | ||
790 | #define TD_UF 0x00000002 /* Underflow Error */ | ||
791 | #define TD_DE 0x00000001 /* Deferred */ | ||
792 | |||
793 | #define TD_IC 0x80000000 /* Interrupt On Completion */ | ||
794 | #define TD_LS 0x40000000 /* Last Segment */ | ||
795 | #define TD_FS 0x20000000 /* First Segment */ | ||
796 | #define TD_FT1 0x10000000 /* Filtering Type */ | ||
797 | #define TD_SET 0x08000000 /* Setup Packet */ | ||
798 | #define TD_AC 0x04000000 /* Add CRC Disable */ | ||
799 | #define TD_TER 0x02000000 /* Transmit End Of Ring */ | ||
800 | #define TD_TCH 0x01000000 /* Second Address Chained */ | ||
801 | #define TD_DPD 0x00800000 /* Disabled Padding */ | ||
802 | #define TD_FT0 0x00400000 /* Filtering Type */ | ||
803 | #define TD_TBS2 0x003ff800 /* Buffer 2 Size */ | ||
804 | #define TD_TBS1 0x000007ff /* Buffer 1 Size */ | ||
805 | |||
806 | #define PERFECT_F 0x00000000 | ||
807 | #define HASH_F TD_FT0 | ||
808 | #define INVERSE_F TD_FT1 | ||
809 | #define HASH_O_F (TD_FT1 | TD_FT0) | ||
810 | |||
811 | /* | ||
812 | ** Media / mode state machine definitions | ||
813 | ** User selectable: | ||
814 | */ | ||
815 | #define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */ | ||
816 | #define TP_NW 0x0002 /* 10Base-T with Nway */ | ||
817 | #define BNC 0x0004 /* Thinwire */ | ||
818 | #define AUI 0x0008 /* Thickwire */ | ||
819 | #define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */ | ||
820 | #define _10Mb 0x0040 /* 10Mb/s Ethernet */ | ||
821 | #define _100Mb 0x0080 /* 100Mb/s Ethernet */ | ||
822 | #define AUTO 0x4000 /* Auto sense the media or speed */ | ||
823 | |||
824 | /* | ||
825 | ** Internal states | ||
826 | */ | ||
827 | #define NC 0x0000 /* No Connection */ | ||
828 | #define ANS 0x0020 /* Intermediate AutoNegotiation State */ | ||
829 | #define SPD_DET 0x0100 /* Parallel speed detection */ | ||
830 | #define INIT 0x0200 /* Initial state */ | ||
831 | #define EXT_SIA 0x0400 /* External SIA for motherboard chip */ | ||
832 | #define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */ | ||
833 | #define TP_SUSPECT 0x0803 /* Suspect the TP port is down */ | ||
834 | #define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */ | ||
835 | #define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */ | ||
836 | #define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */ | ||
837 | #define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */ | ||
838 | #define MII 0x1000 /* MII on the 21143 */ | ||
839 | |||
840 | #define TIMER_CB 0x80000000 /* Timer callback detection */ | ||
841 | |||
842 | /* | ||
843 | ** DE4X5 DEBUG Options | ||
844 | */ | ||
845 | #define DEBUG_NONE 0x0000 /* No DEBUG messages */ | ||
846 | #define DEBUG_VERSION 0x0001 /* Print version message */ | ||
847 | #define DEBUG_MEDIA 0x0002 /* Print media messages */ | ||
848 | #define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */ | ||
849 | #define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */ | ||
850 | #define DEBUG_SROM 0x0010 /* Print SROM messages */ | ||
851 | #define DEBUG_MII 0x0020 /* Print MII messages */ | ||
852 | #define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */ | ||
853 | #define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */ | ||
854 | #define DEBUG_PCICFG 0x0100 | ||
855 | #define DEBUG_ALL 0x01ff | ||
856 | |||
857 | /* | ||
858 | ** Miscellaneous | ||
859 | */ | ||
860 | #define PCI 0 | ||
861 | #define EISA 1 | ||
862 | |||
863 | #define HASH_TABLE_LEN 512 /* Bits */ | ||
864 | #define HASH_BITS 0x01ff /* 9 LS bits */ | ||
865 | |||
866 | #define SETUP_FRAME_LEN 192 /* Bytes */ | ||
867 | #define IMPERF_PA_OFFSET 156 /* Bytes */ | ||
868 | |||
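HASH_TABLE_LEN and HASH_BITS describe a 512-bit multicast filter indexed by the 9 least-significant bits of the little-endian Ethernet CRC. A hedged sketch of that indexing, using a flat 64-byte bitmap for clarity (an assumption made here; the driver's setup frame packs the bits into its own byte layout):

    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    /* Illustration only: set the filter bit selected by the 9 LS bits
     * of the CRC over the 6-byte multicast address. */
    static void hash_filter_set(u8 *bitmap, const u8 *mc_addr)
    {
            u16 hashcode = ether_crc_le(ETH_ALEN, mc_addr) & HASH_BITS;

            bitmap[hashcode >> 3] |= 1 << (hashcode & 0x07);
    }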
869 | #define POLL_DEMAND 1 | ||
870 | |||
871 | #define LOST_MEDIA_THRESHOLD 3 | ||
872 | |||
873 | #define MASK_INTERRUPTS 1 | ||
874 | #define UNMASK_INTERRUPTS 0 | ||
875 | |||
876 | #define DE4X5_STRLEN 8 | ||
877 | |||
878 | #define DE4X5_INIT 0 /* Initialisation time */ | ||
879 | #define DE4X5_RUN 1 /* Run time */ | ||
880 | |||
881 | #define DE4X5_SAVE_STATE 0 | ||
882 | #define DE4X5_RESTORE_STATE 1 | ||
883 | |||
884 | /* | ||
885 | ** Address Filtering Modes | ||
886 | */ | ||
887 | #define PERFECT 0 /* 16 perfect physical addresses */ | ||
888 | #define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */ | ||
889 | #define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */ | ||
890 | #define ALL_HASH 3 /* Hashes all physical & multicast addrs */ | ||
891 | |||
892 | #define ALL 0 /* Clear out all the setup frame */ | ||
893 | #define PHYS_ADDR_ONLY 1 /* Update the physical address only */ | ||
894 | |||
895 | /* | ||
896 | ** Adapter state | ||
897 | */ | ||
898 | #define INITIALISED 0 /* After h/w initialised and mem alloc'd */ | ||
899 | #define CLOSED 1 /* Ready for opening */ | ||
900 | #define OPEN 2 /* Running */ | ||
901 | |||
902 | /* | ||
903 | ** Various wait times | ||
904 | */ | ||
905 | #define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */ | ||
906 | #define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */ | ||
907 | |||
908 | /* | ||
909 | ** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since | ||
910 | ** the vendors seem split 50-50 on how to calculate the OUI register values | ||
911 | ** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()]. | ||
912 | */ | ||
913 | #define NATIONAL_TX 0x2000 | ||
914 | #define BROADCOM_T4 0x03e0 | ||
915 | #define SEEQ_T4 0x0016 | ||
916 | #define CYPRESS_T4 0x0014 | ||
917 | |||
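As the comment above notes, only Reg 2 (MII_ID0) is used to recognise a PHY. A minimal sketch, assuming the mii_rd(reg, phyaddr, ioaddr) signature seen in the SET_* macros further down; the helper name is made up:

    /* Illustration only: classify a PHY from its raw Reg 2 value. */
    static int phy_is_broadcom_t4(u_char phyaddr, u_long mii_ioaddr)
    {
            return mii_rd(MII_ID0, phyaddr, mii_ioaddr) == BROADCOM_T4;
    }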
918 | /* | ||
919 | ** Speed Selection stuff | ||
920 | */ | ||
921 | #define SET_10Mb {\ | ||
922 | if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ | ||
923 | omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ | ||
924 | if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ | ||
925 | mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ | ||
926 | }\ | ||
927 | omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\ | ||
928 | outl(omr, DE4X5_OMR);\ | ||
929 | if (!lp->useSROM) lp->cache.gep = 0;\ | ||
930 | } else if (lp->useSROM && !lp->useMII) {\ | ||
931 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
932 | omr |= (lp->fdx ? OMR_FDX : 0);\ | ||
933 | outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\ | ||
934 | } else {\ | ||
935 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
936 | omr |= (lp->fdx ? OMR_FDX : 0);\ | ||
937 | outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\ | ||
938 | lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\ | ||
939 | gep_wr(lp->cache.gep, dev);\ | ||
940 | }\ | ||
941 | } | ||
942 | |||
943 | #define SET_100Mb {\ | ||
944 | if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ | ||
945 | int fdx=0;\ | ||
946 | if (lp->phy[lp->active].id == NATIONAL_TX) {\ | ||
947 | mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\ | ||
948 | 0x18, lp->phy[lp->active].addr, DE4X5_MII);\ | ||
949 | }\ | ||
950 | omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\ | ||
951 | sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\ | ||
952 | if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\ | ||
953 | if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\ | ||
954 | mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ | ||
955 | }\ | ||
956 | if (fdx) omr |= OMR_FDX;\ | ||
957 | outl(omr, DE4X5_OMR);\ | ||
958 | if (!lp->useSROM) lp->cache.gep = 0;\ | ||
959 | } else if (lp->useSROM && !lp->useMII) {\ | ||
960 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
961 | omr |= (lp->fdx ? OMR_FDX : 0);\ | ||
962 | outl(omr | lp->infoblock_csr6, DE4X5_OMR);\ | ||
963 | } else {\ | ||
964 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
965 | omr |= (lp->fdx ? OMR_FDX : 0);\ | ||
966 | outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\ | ||
967 | lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\ | ||
968 | gep_wr(lp->cache.gep, dev);\ | ||
969 | }\ | ||
970 | } | ||
971 | |||
972 | /* FIX ME so I don't jam 10Mb networks */ | ||
973 | #define SET_100Mb_PDET {\ | ||
974 | if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\ | ||
975 | mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\ | ||
976 | omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
977 | outl(omr, DE4X5_OMR);\ | ||
978 | } else if (lp->useSROM && !lp->useMII) {\ | ||
979 | omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
980 | outl(omr, DE4X5_OMR);\ | ||
981 | } else {\ | ||
982 | omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\ | ||
983 | outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\ | ||
984 | lp->cache.gep = (GEP_FDXD | GEP_MODE);\ | ||
985 | gep_wr(lp->cache.gep, dev);\ | ||
986 | }\ | ||
987 | } | ||
988 | |||
989 | /* | ||
990 | ** Include the IOCTL stuff | ||
991 | */ | ||
992 | #include <linux/sockios.h> | ||
993 | |||
994 | #define DE4X5IOCTL SIOCDEVPRIVATE | ||
995 | |||
996 | struct de4x5_ioctl { | ||
997 | unsigned short cmd; /* Command to run */ | ||
998 | unsigned short len; /* Length of the data buffer */ | ||
999 | unsigned char __user *data; /* Pointer to the data buffer */ | ||
1000 | }; | ||
1001 | |||
1002 | /* | ||
1003 | ** Recognised commands for the driver | ||
1004 | */ | ||
1005 | #define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */ | ||
1006 | #define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */ | ||
1007 | /* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */ | ||
1008 | #define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */ | ||
1009 | #define DE4X5_GET_MCA 0x06 /* Get a multicast address */ | ||
1010 | #define DE4X5_SET_MCA 0x07 /* Set a multicast address */ | ||
1011 | #define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */ | ||
1012 | #define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */ | ||
1013 | #define DE4X5_GET_STATS 0x0a /* Get the driver statistics */ | ||
1014 | #define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */ | ||
1015 | #define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */ | ||
1016 | #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ | ||
1017 | #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ | ||
1018 | |||
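These commands reach the driver through the private ioctl number above (DE4X5IOCTL, i.e. SIOCDEVPRIVATE). A hedged userspace sketch follows; it assumes the request structure travels inside the ifr_ifru union of struct ifreq, uses a local mirror of struct de4x5_ioctl (the kernel header's __user annotation is not usable in userspace), and "eth0" is only an example interface name:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/sockios.h>

    struct de4x5_req {              /* userspace mirror of struct de4x5_ioctl */
            unsigned short cmd;     /* e.g. DE4X5_GET_HWADDR (0x01) */
            unsigned short len;     /* length of the data buffer */
            unsigned char *data;    /* pointer to the data buffer */
    };

    int main(void)
    {
            unsigned char hwaddr[6] = { 0 };
            struct de4x5_req req = { .cmd = 0x01, .len = sizeof(hwaddr), .data = hwaddr };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            memcpy(&ifr.ifr_ifru, &req, sizeof(req));   /* request rides in the union */

            if (fd >= 0 && ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
                    printf("hw addr %02x:%02x:%02x:%02x:%02x:%02x\n",
                           hwaddr[0], hwaddr[1], hwaddr[2],
                           hwaddr[3], hwaddr[4], hwaddr[5]);
            return 0;
    }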
1019 | #define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008) | ||
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c new file mode 100644 index 000000000000..9a21ca3873fc --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/dmfe.c | |||
@@ -0,0 +1,2253 @@ | |||
1 | /* | ||
2 | A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast | ||
3 | ethernet driver for Linux. | ||
4 | Copyright (C) 1997 Sten Wang | ||
5 | |||
6 | This program is free software; you can redistribute it and/or | ||
7 | modify it under the terms of the GNU General Public License | ||
8 | as published by the Free Software Foundation; either version 2 | ||
9 | of the License, or (at your option) any later version. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, | ||
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | GNU General Public License for more details. | ||
15 | |||
16 | DAVICOM Web-Site: www.davicom.com.tw | ||
17 | |||
18 | Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw | ||
19 | Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu> | ||
20 | |||
21 | (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. | ||
22 | |||
23 | Marcelo Tosatti <marcelo@conectiva.com.br> : | ||
24 | Made it compile in 2.3 (device to net_device) | ||
25 | |||
26 | Alan Cox <alan@lxorguk.ukuu.org.uk> : | ||
27 | Cleaned up for kernel merge. | ||
28 | Removed the back compatibility support | ||
29 | Reformatted, fixing spelling etc as I went | ||
30 | Removed IRQ 0-15 assumption | ||
31 | |||
32 | Jeff Garzik <jgarzik@pobox.com> : | ||
33 | Updated to use new PCI driver API. | ||
34 | Resource usage cleanups. | ||
35 | Report driver version to user. | ||
36 | |||
37 | Tobias Ringstrom <tori@unhappy.mine.nu> : | ||
38 | Cleaned up and added SMP safety. Thanks go to Jeff Garzik, | ||
39 | Andrew Morton and Frank Davis for the SMP safety fixes. | ||
40 | |||
41 | Vojtech Pavlik <vojtech@suse.cz> : | ||
42 | Cleaned up pointer arithmetic. | ||
43 | Fixed a lot of 64-bit issues. | ||
44 | Cleaned up printk()s a bit. | ||
45 | Fixed some obvious big endian problems. | ||
46 | |||
47 | Tobias Ringstrom <tori@unhappy.mine.nu> : | ||
48 | Use time_after for jiffies calculation. Added ethtool | ||
49 | support. Updated PCI resource allocation. Do not | ||
50 | forget to unmap PCI mapped skbs. | ||
51 | |||
52 | Alan Cox <alan@lxorguk.ukuu.org.uk> | ||
53 | Added new PCI identifiers provided by Clear Zhang at ALi | ||
54 | for their 1563 ethernet device. | ||
55 | |||
56 | TODO | ||
57 | |||
58 | Check on 64 bit boxes. | ||
59 | Check and fix on big endian boxes. | ||
60 | |||
61 | Test and make sure PCI latency is now correct for all cases. | ||
62 | */ | ||
63 | |||
64 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
65 | |||
66 | #define DRV_NAME "dmfe" | ||
67 | #define DRV_VERSION "1.36.4" | ||
68 | #define DRV_RELDATE "2002-01-17" | ||
69 | |||
70 | #include <linux/module.h> | ||
71 | #include <linux/kernel.h> | ||
72 | #include <linux/string.h> | ||
73 | #include <linux/timer.h> | ||
74 | #include <linux/ptrace.h> | ||
75 | #include <linux/errno.h> | ||
76 | #include <linux/ioport.h> | ||
77 | #include <linux/interrupt.h> | ||
78 | #include <linux/pci.h> | ||
79 | #include <linux/dma-mapping.h> | ||
80 | #include <linux/init.h> | ||
81 | #include <linux/netdevice.h> | ||
82 | #include <linux/etherdevice.h> | ||
83 | #include <linux/ethtool.h> | ||
84 | #include <linux/skbuff.h> | ||
85 | #include <linux/delay.h> | ||
86 | #include <linux/spinlock.h> | ||
87 | #include <linux/crc32.h> | ||
88 | #include <linux/bitops.h> | ||
89 | |||
90 | #include <asm/processor.h> | ||
91 | #include <asm/io.h> | ||
92 | #include <asm/dma.h> | ||
93 | #include <asm/uaccess.h> | ||
94 | #include <asm/irq.h> | ||
95 | |||
96 | #ifdef CONFIG_TULIP_DM910X | ||
97 | #include <linux/of.h> | ||
98 | #endif | ||
99 | |||
100 | |||
101 | /* Board/System/Debug information/definition ---------------- */ | ||
102 | #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ | ||
103 | #define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */ | ||
104 | #define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */ | ||
105 | #define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */ | ||
106 | |||
107 | #define DM9102_IO_SIZE 0x80 | ||
108 | #define DM9102A_IO_SIZE 0x100 | ||
109 | #define TX_MAX_SEND_CNT 0x1 /* Maximum tx packets in flight at a time */ | ||
110 | #define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */ | ||
111 | #define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */ | ||
112 | #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ | ||
113 | #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ | ||
114 | #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) | ||
115 | #define TX_BUF_ALLOC 0x600 | ||
116 | #define RX_ALLOC_SIZE 0x620 | ||
117 | #define DM910X_RESET 1 | ||
118 | #define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */ | ||
119 | #define CR6_DEFAULT 0x00080000 /* HD */ | ||
120 | #define CR7_DEFAULT 0x180c1 | ||
121 | #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ | ||
122 | #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ | ||
123 | #define MAX_PACKET_SIZE 1514 | ||
124 | #define DMFE_MAX_MULTICAST 14 | ||
125 | #define RX_COPY_SIZE 100 | ||
126 | #define MAX_CHECK_PACKET 0x8000 | ||
127 | #define DM9801_NOISE_FLOOR 8 | ||
128 | #define DM9802_NOISE_FLOOR 5 | ||
129 | |||
130 | #define DMFE_WOL_LINKCHANGE 0x20000000 | ||
131 | #define DMFE_WOL_SAMPLEPACKET 0x10000000 | ||
132 | #define DMFE_WOL_MAGICPACKET 0x08000000 | ||
133 | |||
134 | |||
135 | #define DMFE_10MHF 0 | ||
136 | #define DMFE_100MHF 1 | ||
137 | #define DMFE_10MFD 4 | ||
138 | #define DMFE_100MFD 5 | ||
139 | #define DMFE_AUTO 8 | ||
140 | #define DMFE_1M_HPNA 0x10 | ||
141 | |||
142 | #define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */ | ||
143 | #define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */ | ||
144 | #define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */ | ||
145 | #define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */ | ||
146 | #define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */ | ||
147 | #define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */ | ||
148 | |||
149 | #define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ | ||
150 | #define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */ | ||
151 | #define DMFE_TX_KICK (HZ/2) /* tx packet kick-out time 0.5 s */ | ||
152 | |||
153 | #define DMFE_DBUG(dbug_now, msg, value) \ | ||
154 | do { \ | ||
155 | if (dmfe_debug || (dbug_now)) \ | ||
156 | pr_err("%s %lx\n", \ | ||
157 | (msg), (long) (value)); \ | ||
158 | } while (0) | ||
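/*
 * DMFE_DBUG() logs either when the module-wide dmfe_debug flag is set or
 * when the caller forces output with a non-zero dbug_now argument, e.g.
 * the fatal path in dmfe_interrupt() below:
 *
 *	DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
 */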
159 | |||
160 | #define SHOW_MEDIA_TYPE(mode) \ | ||
161 | pr_info("Change Speed to %sMhz %s duplex\n" , \ | ||
162 | (mode & 1) ? "100":"10", \ | ||
163 | (mode & 4) ? "full":"half"); | ||
164 | |||
165 | |||
166 | /* CR9 definition: SROM/MII */ | ||
167 | #define CR9_SROM_READ 0x4800 | ||
168 | #define CR9_SRCS 0x1 | ||
169 | #define CR9_SRCLK 0x2 | ||
170 | #define CR9_CRDOUT 0x8 | ||
171 | #define SROM_DATA_0 0x0 | ||
172 | #define SROM_DATA_1 0x4 | ||
173 | #define PHY_DATA_1 0x20000 | ||
174 | #define PHY_DATA_0 0x00000 | ||
175 | #define MDCLKH 0x10000 | ||
176 | |||
177 | #define PHY_POWER_DOWN 0x800 | ||
178 | |||
179 | #define SROM_V41_CODE 0x14 | ||
180 | |||
181 | #define SROM_CLK_WRITE(data, ioaddr) \ | ||
182 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
183 | udelay(5); \ | ||
184 | outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ | ||
185 | udelay(5); \ | ||
186 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
187 | udelay(5); | ||
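/*
 * SROM_CLK_WRITE() bit-bangs one EEPROM clock cycle through CR9: the data
 * bit is driven with chip select asserted, the clock line is raised and
 * then dropped again, with a 5 us settle time around each edge.  A rough
 * sketch of shifting out one address bit, mirroring the loop in
 * read_srom_word() below:
 *
 *	srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
 *	SROM_CLK_WRITE(srom_data, cr9_ioaddr);
 */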
188 | |||
189 | #define __CHK_IO_SIZE(pci_id, dev_rev) \ | ||
190 | (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ | ||
191 | DM9102A_IO_SIZE: DM9102_IO_SIZE) | ||
192 | |||
193 | #define CHK_IO_SIZE(pci_dev) \ | ||
194 | (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \ | ||
195 | (pci_dev)->revision)) | ||
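/*
 * CHK_IO_SIZE() packs the PCI device ID into the upper 16 bits and the
 * vendor ID into the lower 16 bits, matching the PCI_DM91xx_ID constants
 * above (e.g. 0x91321282 is device 0x9132, vendor 0x1282).  The DM9132,
 * and any part with revision >= 0x30, is expected to have the larger
 * 0x100 byte I/O window; everything else gets 0x80 bytes.
 */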
196 | |||
197 | /* Sten Check */ | ||
198 | #define DEVICE net_device | ||
199 | |||
200 | /* Structure/enum declaration ------------------------------- */ | ||
201 | struct tx_desc { | ||
202 | __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ | ||
203 | char *tx_buf_ptr; /* Data for us */ | ||
204 | struct tx_desc *next_tx_desc; | ||
205 | } __attribute__(( aligned(32) )); | ||
206 | |||
207 | struct rx_desc { | ||
208 | __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ | ||
209 | struct sk_buff *rx_skb_ptr; /* Data for us */ | ||
210 | struct rx_desc *next_rx_desc; | ||
211 | } __attribute__(( aligned(32) )); | ||
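/*
 * In both descriptor types the four leading little-endian words are the
 * part the DM910X DMA engine reads and writes ("Data for the card"); the
 * trailing pointers are driver-side bookkeeping only and are never handed
 * to the hardware.
 */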
212 | |||
213 | struct dmfe_board_info { | ||
214 | u32 chip_id; /* Chip vendor/Device ID */ | ||
215 | u8 chip_revision; /* Chip revision */ | ||
216 | struct DEVICE *next_dev; /* next device */ | ||
217 | struct pci_dev *pdev; /* PCI device */ | ||
218 | spinlock_t lock; | ||
219 | |||
220 | long ioaddr; /* I/O base address */ | ||
221 | u32 cr0_data; | ||
222 | u32 cr5_data; | ||
223 | u32 cr6_data; | ||
224 | u32 cr7_data; | ||
225 | u32 cr15_data; | ||
226 | |||
227 | /* pointer for memory physical address */ | ||
228 | dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ | ||
229 | dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ | ||
230 | dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ | ||
231 | dma_addr_t first_tx_desc_dma; | ||
232 | dma_addr_t first_rx_desc_dma; | ||
233 | |||
234 | /* descriptor pointer */ | ||
235 | unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ | ||
236 | unsigned char *buf_pool_start; /* Tx buffer pool align dword */ | ||
237 | unsigned char *desc_pool_ptr; /* descriptor pool memory */ | ||
238 | struct tx_desc *first_tx_desc; | ||
239 | struct tx_desc *tx_insert_ptr; | ||
240 | struct tx_desc *tx_remove_ptr; | ||
241 | struct rx_desc *first_rx_desc; | ||
242 | struct rx_desc *rx_insert_ptr; | ||
243 | struct rx_desc *rx_ready_ptr; /* next incoming packet pointer */ | ||
244 | unsigned long tx_packet_cnt; /* transmitted packet count */ | ||
245 | unsigned long tx_queue_cnt; /* wait to send packet count */ | ||
246 | unsigned long rx_avail_cnt; /* available rx descriptor count */ | ||
247 | unsigned long interval_rx_cnt; /* rx packets received per timer callback */ | ||
248 | |||
249 | u16 HPNA_command; /* For HPNA register 16 */ | ||
250 | u16 HPNA_timer; /* For HPNA remote device check */ | ||
251 | u16 dbug_cnt; | ||
252 | u16 NIC_capability; /* NIC media capability */ | ||
253 | u16 PHY_reg4; /* Saved Phyxcer register 4 value */ | ||
254 | |||
255 | u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */ | ||
256 | u8 chip_type; /* Keep DM9102A chip type */ | ||
257 | u8 media_mode; /* user-specified media mode */ | ||
258 | u8 op_mode; /* actual working media mode */ | ||
259 | u8 phy_addr; | ||
260 | u8 wait_reset; /* Hardware failed, need to reset */ | ||
261 | u8 dm910x_chk_mode; /* Operating mode check */ | ||
262 | u8 first_in_callback; /* Flag to record state */ | ||
263 | u8 wol_mode; /* user WOL settings */ | ||
264 | struct timer_list timer; | ||
265 | |||
266 | /* Driver defined statistic counter */ | ||
267 | unsigned long tx_fifo_underrun; | ||
268 | unsigned long tx_loss_carrier; | ||
269 | unsigned long tx_no_carrier; | ||
270 | unsigned long tx_late_collision; | ||
271 | unsigned long tx_excessive_collision; | ||
272 | unsigned long tx_jabber_timeout; | ||
273 | unsigned long reset_count; | ||
274 | unsigned long reset_cr8; | ||
275 | unsigned long reset_fatal; | ||
276 | unsigned long reset_TXtimeout; | ||
277 | |||
278 | /* NIC SROM data */ | ||
279 | unsigned char srom[128]; | ||
280 | }; | ||
281 | |||
282 | enum dmfe_offsets { | ||
283 | DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, | ||
284 | DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, | ||
285 | DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, | ||
286 | DCR15 = 0x78 | ||
287 | }; | ||
288 | |||
289 | enum dmfe_CR6_bits { | ||
290 | CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, | ||
291 | CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, | ||
292 | CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 | ||
293 | }; | ||
294 | |||
295 | /* Global variable declaration ----------------------------- */ | ||
296 | static int __devinitdata printed_version; | ||
297 | static const char version[] __devinitconst = | ||
298 | "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; | ||
299 | |||
300 | static int dmfe_debug; | ||
301 | static unsigned char dmfe_media_mode = DMFE_AUTO; | ||
302 | static u32 dmfe_cr6_user_set; | ||
303 | |||
304 | /* For module input parameter */ | ||
305 | static int debug; | ||
306 | static u32 cr6set; | ||
307 | static unsigned char mode = 8; | ||
308 | static u8 chkmode = 1; | ||
309 | static u8 HPNA_mode; /* Default: Low Power/High Speed */ | ||
310 | static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */ | ||
311 | static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */ | ||
312 | static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */ | ||
313 | static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control | ||
314 | 4: TX pause packet */ | ||
315 | |||
316 | |||
317 | /* function declaration ------------------------------------- */ | ||
318 | static int dmfe_open(struct DEVICE *); | ||
319 | static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *); | ||
320 | static int dmfe_stop(struct DEVICE *); | ||
321 | static void dmfe_set_filter_mode(struct DEVICE *); | ||
322 | static const struct ethtool_ops netdev_ethtool_ops; | ||
323 | static u16 read_srom_word(long ,int); | ||
324 | static irqreturn_t dmfe_interrupt(int , void *); | ||
325 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
326 | static void poll_dmfe (struct net_device *dev); | ||
327 | #endif | ||
328 | static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long); | ||
329 | static void allocate_rx_buffer(struct dmfe_board_info *); | ||
330 | static void update_cr6(u32, unsigned long); | ||
331 | static void send_filter_frame(struct DEVICE *); | ||
332 | static void dm9132_id_table(struct DEVICE *); | ||
333 | static u16 phy_read(unsigned long, u8, u8, u32); | ||
334 | static void phy_write(unsigned long, u8, u8, u16, u32); | ||
335 | static void phy_write_1bit(unsigned long, u32); | ||
336 | static u16 phy_read_1bit(unsigned long); | ||
337 | static u8 dmfe_sense_speed(struct dmfe_board_info *); | ||
338 | static void dmfe_process_mode(struct dmfe_board_info *); | ||
339 | static void dmfe_timer(unsigned long); | ||
340 | static inline u32 cal_CRC(unsigned char *, unsigned int, u8); | ||
341 | static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *); | ||
342 | static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *); | ||
343 | static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *); | ||
344 | static void dmfe_dynamic_reset(struct DEVICE *); | ||
345 | static void dmfe_free_rxbuffer(struct dmfe_board_info *); | ||
346 | static void dmfe_init_dm910x(struct DEVICE *); | ||
347 | static void dmfe_parse_srom(struct dmfe_board_info *); | ||
348 | static void dmfe_program_DM9801(struct dmfe_board_info *, int); | ||
349 | static void dmfe_program_DM9802(struct dmfe_board_info *); | ||
350 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * ); | ||
351 | static void dmfe_set_phyxcer(struct dmfe_board_info *); | ||
352 | |||
353 | /* DM910X network board routine ---------------------------- */ | ||
354 | |||
355 | static const struct net_device_ops netdev_ops = { | ||
356 | .ndo_open = dmfe_open, | ||
357 | .ndo_stop = dmfe_stop, | ||
358 | .ndo_start_xmit = dmfe_start_xmit, | ||
359 | .ndo_set_multicast_list = dmfe_set_filter_mode, | ||
360 | .ndo_change_mtu = eth_change_mtu, | ||
361 | .ndo_set_mac_address = eth_mac_addr, | ||
362 | .ndo_validate_addr = eth_validate_addr, | ||
363 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
364 | .ndo_poll_controller = poll_dmfe, | ||
365 | #endif | ||
366 | }; | ||
367 | |||
368 | /* | ||
369 | * Search for a DM910X board, allocate space and register it | ||
370 | */ | ||
371 | |||
372 | static int __devinit dmfe_init_one (struct pci_dev *pdev, | ||
373 | const struct pci_device_id *ent) | ||
374 | { | ||
375 | struct dmfe_board_info *db; /* board information structure */ | ||
376 | struct net_device *dev; | ||
377 | u32 pci_pmr; | ||
378 | int i, err; | ||
379 | |||
380 | DMFE_DBUG(0, "dmfe_init_one()", 0); | ||
381 | |||
382 | if (!printed_version++) | ||
383 | pr_info("%s\n", version); | ||
384 | |||
385 | /* | ||
386 | * SPARC on-board DM910x chips should be handled by the main | ||
387 | * tulip driver, except for early DM9100s. | ||
388 | */ | ||
389 | #ifdef CONFIG_TULIP_DM910X | ||
390 | if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) || | ||
391 | ent->driver_data == PCI_DM9102_ID) { | ||
392 | struct device_node *dp = pci_device_to_OF_node(pdev); | ||
393 | |||
394 | if (dp && of_get_property(dp, "local-mac-address", NULL)) { | ||
395 | pr_info("skipping on-board DM910x (use tulip)\n"); | ||
396 | return -ENODEV; | ||
397 | } | ||
398 | } | ||
399 | #endif | ||
400 | |||
401 | /* Init network device */ | ||
402 | dev = alloc_etherdev(sizeof(*db)); | ||
403 | if (dev == NULL) | ||
404 | return -ENOMEM; | ||
405 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
406 | |||
407 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
408 | pr_warn("32-bit PCI DMA not available\n"); | ||
409 | err = -ENODEV; | ||
410 | goto err_out_free; | ||
411 | } | ||
412 | |||
413 | /* Enable Master/IO access, Disable memory access */ | ||
414 | err = pci_enable_device(pdev); | ||
415 | if (err) | ||
416 | goto err_out_free; | ||
417 | |||
418 | if (!pci_resource_start(pdev, 0)) { | ||
419 | pr_err("I/O base is zero\n"); | ||
420 | err = -ENODEV; | ||
421 | goto err_out_disable; | ||
422 | } | ||
423 | |||
424 | if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) { | ||
425 | pr_err("Allocated I/O size too small\n"); | ||
426 | err = -ENODEV; | ||
427 | goto err_out_disable; | ||
428 | } | ||
429 | |||
430 | #if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */ | ||
431 | |||
432 | /* Set Latency Timer 80h */ | ||
433 | /* FIXME: setting values > 32 breaks some SiS 559x stuff. | ||
434 | Need a PCI quirk.. */ | ||
435 | |||
436 | pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80); | ||
437 | #endif | ||
438 | |||
439 | if (pci_request_regions(pdev, DRV_NAME)) { | ||
440 | pr_err("Failed to request PCI regions\n"); | ||
441 | err = -ENODEV; | ||
442 | goto err_out_disable; | ||
443 | } | ||
444 | |||
445 | /* Init system & device */ | ||
446 | db = netdev_priv(dev); | ||
447 | |||
448 | /* Allocate Tx/Rx descriptor memory */ | ||
449 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * | ||
450 | DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | ||
451 | if (!db->desc_pool_ptr) | ||
452 | goto err_out_res; | ||
453 | |||
454 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * | ||
455 | TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | ||
456 | if (!db->buf_pool_ptr) | ||
457 | goto err_out_free_desc; | ||
458 | |||
459 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | ||
460 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | ||
461 | db->buf_pool_start = db->buf_pool_ptr; | ||
462 | db->buf_pool_dma_start = db->buf_pool_dma_ptr; | ||
463 | |||
464 | db->chip_id = ent->driver_data; | ||
465 | db->ioaddr = pci_resource_start(pdev, 0); | ||
466 | db->chip_revision = pdev->revision; | ||
467 | db->wol_mode = 0; | ||
468 | |||
469 | db->pdev = pdev; | ||
470 | |||
471 | dev->base_addr = db->ioaddr; | ||
472 | dev->irq = pdev->irq; | ||
473 | pci_set_drvdata(pdev, dev); | ||
474 | dev->netdev_ops = &netdev_ops; | ||
475 | dev->ethtool_ops = &netdev_ethtool_ops; | ||
476 | netif_carrier_off(dev); | ||
477 | spin_lock_init(&db->lock); | ||
478 | |||
479 | pci_read_config_dword(pdev, 0x50, &pci_pmr); | ||
480 | pci_pmr &= 0x70000; | ||
481 | if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) ) | ||
482 | db->chip_type = 1; /* DM9102A E3 */ | ||
483 | else | ||
484 | db->chip_type = 0; | ||
485 | |||
486 | /* read 64 word srom data */ | ||
487 | for (i = 0; i < 64; i++) | ||
488 | ((__le16 *) db->srom)[i] = | ||
489 | cpu_to_le16(read_srom_word(db->ioaddr, i)); | ||
490 | |||
491 | /* Set Node address */ | ||
492 | for (i = 0; i < 6; i++) | ||
493 | dev->dev_addr[i] = db->srom[20 + i]; | ||
494 | |||
495 | err = register_netdev (dev); | ||
496 | if (err) | ||
497 | goto err_out_free_buf; | ||
498 | |||
499 | dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", | ||
500 | ent->driver_data >> 16, | ||
501 | pci_name(pdev), dev->dev_addr, dev->irq); | ||
502 | |||
503 | pci_set_master(pdev); | ||
504 | |||
505 | return 0; | ||
506 | |||
507 | err_out_free_buf: | ||
508 | pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
509 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
510 | err_out_free_desc: | ||
511 | pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, | ||
512 | db->desc_pool_ptr, db->desc_pool_dma_ptr); | ||
513 | err_out_res: | ||
514 | pci_release_regions(pdev); | ||
515 | err_out_disable: | ||
516 | pci_disable_device(pdev); | ||
517 | err_out_free: | ||
518 | pci_set_drvdata(pdev, NULL); | ||
519 | free_netdev(dev); | ||
520 | |||
521 | return err; | ||
522 | } | ||
523 | |||
524 | |||
525 | static void __devexit dmfe_remove_one (struct pci_dev *pdev) | ||
526 | { | ||
527 | struct net_device *dev = pci_get_drvdata(pdev); | ||
528 | struct dmfe_board_info *db = netdev_priv(dev); | ||
529 | |||
530 | DMFE_DBUG(0, "dmfe_remove_one()", 0); | ||
531 | |||
532 | if (dev) { | ||
533 | |||
534 | unregister_netdev(dev); | ||
535 | |||
536 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * | ||
537 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, | ||
538 | db->desc_pool_dma_ptr); | ||
539 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
540 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
541 | pci_release_regions(pdev); | ||
542 | free_netdev(dev); /* free board information */ | ||
543 | |||
544 | pci_set_drvdata(pdev, NULL); | ||
545 | } | ||
546 | |||
547 | DMFE_DBUG(0, "dmfe_remove_one() exit", 0); | ||
548 | } | ||
549 | |||
550 | |||
551 | /* | ||
552 | * Open the interface. | ||
553 | * The interface is opened whenever "ifconfig" activates it. | ||
554 | */ | ||
555 | |||
556 | static int dmfe_open(struct DEVICE *dev) | ||
557 | { | ||
558 | int ret; | ||
559 | struct dmfe_board_info *db = netdev_priv(dev); | ||
560 | |||
561 | DMFE_DBUG(0, "dmfe_open", 0); | ||
562 | |||
563 | ret = request_irq(dev->irq, dmfe_interrupt, | ||
564 | IRQF_SHARED, dev->name, dev); | ||
565 | if (ret) | ||
566 | return ret; | ||
567 | |||
568 | /* system variable init */ | ||
569 | db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set; | ||
570 | db->tx_packet_cnt = 0; | ||
571 | db->tx_queue_cnt = 0; | ||
572 | db->rx_avail_cnt = 0; | ||
573 | db->wait_reset = 0; | ||
574 | |||
575 | db->first_in_callback = 0; | ||
576 | db->NIC_capability = 0xf; /* All capability*/ | ||
577 | db->PHY_reg4 = 0x1e0; | ||
578 | |||
579 | /* CR6 operation mode decision */ | ||
580 | if ( !chkmode || (db->chip_id == PCI_DM9132_ID) || | ||
581 | (db->chip_revision >= 0x30) ) { | ||
582 | db->cr6_data |= DMFE_TXTH_256; | ||
583 | db->cr0_data = CR0_DEFAULT; | ||
584 | db->dm910x_chk_mode=4; /* Enter the normal mode */ | ||
585 | } else { | ||
586 | db->cr6_data |= CR6_SFT; /* Store & Forward mode */ | ||
587 | db->cr0_data = 0; | ||
588 | db->dm910x_chk_mode = 1; /* Enter the check mode */ | ||
589 | } | ||
590 | |||
591 | /* Initialize DM910X board */ | ||
592 | dmfe_init_dm910x(dev); | ||
593 | |||
594 | /* Activate the system interface */ | ||
595 | netif_wake_queue(dev); | ||
596 | |||
597 | /* set and activate a timer process */ | ||
598 | init_timer(&db->timer); | ||
599 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; | ||
600 | db->timer.data = (unsigned long)dev; | ||
601 | db->timer.function = dmfe_timer; | ||
602 | add_timer(&db->timer); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | |||
608 | /* Initialize DM910X board | ||
609 | * Reset DM910X board | ||
610 | * Initialize TX/Rx descriptor chain structure | ||
611 | * Send the set-up frame | ||
612 | * Enable Tx/Rx machine | ||
613 | */ | ||
614 | |||
615 | static void dmfe_init_dm910x(struct DEVICE *dev) | ||
616 | { | ||
617 | struct dmfe_board_info *db = netdev_priv(dev); | ||
618 | unsigned long ioaddr = db->ioaddr; | ||
619 | |||
620 | DMFE_DBUG(0, "dmfe_init_dm910x()", 0); | ||
621 | |||
622 | /* Reset DM910x MAC controller */ | ||
623 | outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */ | ||
624 | udelay(100); | ||
625 | outl(db->cr0_data, ioaddr + DCR0); | ||
626 | udelay(5); | ||
627 | |||
628 | /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ | ||
629 | db->phy_addr = 1; | ||
630 | |||
631 | /* Parse SROM and media mode */ | ||
632 | dmfe_parse_srom(db); | ||
633 | db->media_mode = dmfe_media_mode; | ||
634 | |||
635 | /* RESET Phyxcer Chip by GPR port bit 7 */ | ||
636 | outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */ | ||
637 | if (db->chip_id == PCI_DM9009_ID) { | ||
638 | outl(0x80, ioaddr + DCR12); /* Issue RESET signal */ | ||
639 | mdelay(300); /* Delay 300 ms */ | ||
640 | } | ||
641 | outl(0x0, ioaddr + DCR12); /* Clear RESET signal */ | ||
642 | |||
643 | /* Process Phyxcer Media Mode */ | ||
644 | if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ | ||
645 | dmfe_set_phyxcer(db); | ||
646 | |||
647 | /* Media Mode Process */ | ||
648 | if ( !(db->media_mode & DMFE_AUTO) ) | ||
649 | db->op_mode = db->media_mode; /* Force Mode */ | ||
650 | |||
651 | /* Initialize Transmit/Receive descriptors and CR3/4 */ | ||
652 | dmfe_descriptor_init(db, ioaddr); | ||
653 | |||
654 | /* Init CR6 to program DM910x operation */ | ||
655 | update_cr6(db->cr6_data, ioaddr); | ||
656 | |||
657 | /* Send setup frame */ | ||
658 | if (db->chip_id == PCI_DM9132_ID) | ||
659 | dm9132_id_table(dev); /* DM9132 */ | ||
660 | else | ||
661 | send_filter_frame(dev); /* DM9102/DM9102A */ | ||
662 | |||
663 | /* Init CR7, interrupt active bit */ | ||
664 | db->cr7_data = CR7_DEFAULT; | ||
665 | outl(db->cr7_data, ioaddr + DCR7); | ||
666 | |||
667 | /* Init CR15, Tx jabber and Rx watchdog timer */ | ||
668 | outl(db->cr15_data, ioaddr + DCR15); | ||
669 | |||
670 | /* Enable DM910X Tx/Rx function */ | ||
671 | db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; | ||
672 | update_cr6(db->cr6_data, ioaddr); | ||
673 | } | ||
674 | |||
675 | |||
676 | /* | ||
677 | * Hardware start transmission. | ||
678 | * Send a packet to media from the upper layer. | ||
679 | */ | ||
680 | |||
681 | static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb, | ||
682 | struct DEVICE *dev) | ||
683 | { | ||
684 | struct dmfe_board_info *db = netdev_priv(dev); | ||
685 | struct tx_desc *txptr; | ||
686 | unsigned long flags; | ||
687 | |||
688 | DMFE_DBUG(0, "dmfe_start_xmit", 0); | ||
689 | |||
690 | /* Too large packet check */ | ||
691 | if (skb->len > MAX_PACKET_SIZE) { | ||
692 | pr_err("big packet = %d\n", (u16)skb->len); | ||
693 | dev_kfree_skb(skb); | ||
694 | return NETDEV_TX_OK; | ||
695 | } | ||
696 | |||
697 | /* Resource flag check */ | ||
698 | netif_stop_queue(dev); | ||
699 | |||
700 | spin_lock_irqsave(&db->lock, flags); | ||
701 | |||
702 | /* No Tx resource check; this should never happen normally */ | ||
703 | if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) { | ||
704 | spin_unlock_irqrestore(&db->lock, flags); | ||
705 | pr_err("No Tx resource %ld\n", db->tx_queue_cnt); | ||
706 | return NETDEV_TX_BUSY; | ||
707 | } | ||
708 | |||
709 | /* Disable NIC interrupt */ | ||
710 | outl(0, dev->base_addr + DCR7); | ||
711 | |||
712 | /* transmit this packet */ | ||
713 | txptr = db->tx_insert_ptr; | ||
714 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); | ||
715 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | ||
716 | |||
717 | /* Point to next transmit free descriptor */ | ||
718 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
719 | |||
720 | /* Transmit Packet Process */ | ||
721 | if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { | ||
722 | txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ | ||
723 | db->tx_packet_cnt++; /* Ready to send */ | ||
724 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
725 | dev->trans_start = jiffies; /* saved time stamp */ | ||
726 | } else { | ||
727 | db->tx_queue_cnt++; /* queue TX packet */ | ||
728 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
729 | } | ||
730 | |||
731 | /* Tx resource check */ | ||
732 | if ( db->tx_queue_cnt < TX_FREE_DESC_CNT ) | ||
733 | netif_wake_queue(dev); | ||
734 | |||
735 | /* Restore CR7 to enable interrupt */ | ||
736 | spin_unlock_irqrestore(&db->lock, flags); | ||
737 | outl(db->cr7_data, dev->base_addr + DCR7); | ||
738 | |||
739 | /* free this SKB */ | ||
740 | dev_kfree_skb(skb); | ||
741 | |||
742 | return NETDEV_TX_OK; | ||
743 | } | ||
744 | |||
745 | |||
746 | /* | ||
747 | * Stop the interface. | ||
748 | * The interface is stopped when it is brought down. | ||
749 | */ | ||
750 | |||
751 | static int dmfe_stop(struct DEVICE *dev) | ||
752 | { | ||
753 | struct dmfe_board_info *db = netdev_priv(dev); | ||
754 | unsigned long ioaddr = dev->base_addr; | ||
755 | |||
756 | DMFE_DBUG(0, "dmfe_stop", 0); | ||
757 | |||
758 | /* disable system */ | ||
759 | netif_stop_queue(dev); | ||
760 | |||
761 | /* delete the timer */ | ||
762 | del_timer_sync(&db->timer); | ||
763 | |||
764 | /* Reset & stop DM910X board */ | ||
765 | outl(DM910X_RESET, ioaddr + DCR0); | ||
766 | udelay(5); | ||
767 | phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); | ||
768 | |||
769 | /* free interrupt */ | ||
770 | free_irq(dev->irq, dev); | ||
771 | |||
772 | /* free allocated rx buffer */ | ||
773 | dmfe_free_rxbuffer(db); | ||
774 | |||
775 | #if 0 | ||
776 | /* show statistic counter */ | ||
777 | printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n", | ||
778 | db->tx_fifo_underrun, db->tx_excessive_collision, | ||
779 | db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier, | ||
780 | db->tx_jabber_timeout, db->reset_count, db->reset_cr8, | ||
781 | db->reset_fatal, db->reset_TXtimeout); | ||
782 | #endif | ||
783 | |||
784 | return 0; | ||
785 | } | ||
786 | |||
787 | |||
788 | /* | ||
789 | * DM9102 interrupt handler | ||
790 | * pass received packets to the upper layer, free transmitted packets | ||
791 | */ | ||
792 | |||
793 | static irqreturn_t dmfe_interrupt(int irq, void *dev_id) | ||
794 | { | ||
795 | struct DEVICE *dev = dev_id; | ||
796 | struct dmfe_board_info *db = netdev_priv(dev); | ||
797 | unsigned long ioaddr = dev->base_addr; | ||
798 | unsigned long flags; | ||
799 | |||
800 | DMFE_DBUG(0, "dmfe_interrupt()", 0); | ||
801 | |||
802 | spin_lock_irqsave(&db->lock, flags); | ||
803 | |||
804 | /* Got DM910X status */ | ||
805 | db->cr5_data = inl(ioaddr + DCR5); | ||
806 | outl(db->cr5_data, ioaddr + DCR5); | ||
807 | if ( !(db->cr5_data & 0xc1) ) { | ||
808 | spin_unlock_irqrestore(&db->lock, flags); | ||
809 | return IRQ_HANDLED; | ||
810 | } | ||
811 | |||
812 | /* Disable all interrupt in CR7 to solve the interrupt edge problem */ | ||
813 | outl(0, ioaddr + DCR7); | ||
814 | |||
815 | /* Check system status */ | ||
816 | if (db->cr5_data & 0x2000) { | ||
817 | /* system bus error happen */ | ||
818 | DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data); | ||
819 | db->reset_fatal++; | ||
820 | db->wait_reset = 1; /* Need to RESET */ | ||
821 | spin_unlock_irqrestore(&db->lock, flags); | ||
822 | return IRQ_HANDLED; | ||
823 | } | ||
824 | |||
825 | /* Received the coming packet */ | ||
826 | if ( (db->cr5_data & 0x40) && db->rx_avail_cnt ) | ||
827 | dmfe_rx_packet(dev, db); | ||
828 | |||
829 | /* reallocate rx descriptor buffer */ | ||
830 | if (db->rx_avail_cnt<RX_DESC_CNT) | ||
831 | allocate_rx_buffer(db); | ||
832 | |||
833 | /* Free the transmitted descriptor */ | ||
834 | if ( db->cr5_data & 0x01) | ||
835 | dmfe_free_tx_pkt(dev, db); | ||
836 | |||
837 | /* Mode Check */ | ||
838 | if (db->dm910x_chk_mode & 0x2) { | ||
839 | db->dm910x_chk_mode = 0x4; | ||
840 | db->cr6_data |= 0x100; | ||
841 | update_cr6(db->cr6_data, db->ioaddr); | ||
842 | } | ||
843 | |||
844 | /* Restore CR7 to enable interrupt mask */ | ||
845 | outl(db->cr7_data, ioaddr + DCR7); | ||
846 | |||
847 | spin_unlock_irqrestore(&db->lock, flags); | ||
848 | return IRQ_HANDLED; | ||
849 | } | ||
850 | |||
851 | |||
852 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
853 | /* | ||
854 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
855 | * without having to re-enable interrupts. It's not called while | ||
856 | * the interrupt routine is executing. | ||
857 | */ | ||
858 | |||
859 | static void poll_dmfe (struct net_device *dev) | ||
860 | { | ||
861 | /* disable_irq here is not very nice, but with the lockless | ||
862 | interrupt handler we have no other choice. */ | ||
863 | disable_irq(dev->irq); | ||
864 | dmfe_interrupt (dev->irq, dev); | ||
865 | enable_irq(dev->irq); | ||
866 | } | ||
867 | #endif | ||
868 | |||
869 | /* | ||
870 | * Free TX resource after TX complete | ||
871 | */ | ||
872 | |||
873 | static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) | ||
874 | { | ||
875 | struct tx_desc *txptr; | ||
876 | unsigned long ioaddr = dev->base_addr; | ||
877 | u32 tdes0; | ||
878 | |||
879 | txptr = db->tx_remove_ptr; | ||
880 | while(db->tx_packet_cnt) { | ||
881 | tdes0 = le32_to_cpu(txptr->tdes0); | ||
882 | if (tdes0 & 0x80000000) | ||
883 | break; | ||
884 | |||
885 | /* A packet transmission completed */ | ||
886 | db->tx_packet_cnt--; | ||
887 | dev->stats.tx_packets++; | ||
888 | |||
889 | /* Transmit statistic counter */ | ||
890 | if ( tdes0 != 0x7fffffff ) { | ||
891 | dev->stats.collisions += (tdes0 >> 3) & 0xf; | ||
892 | dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; | ||
893 | if (tdes0 & TDES0_ERR_MASK) { | ||
894 | dev->stats.tx_errors++; | ||
895 | |||
896 | if (tdes0 & 0x0002) { /* UnderRun */ | ||
897 | db->tx_fifo_underrun++; | ||
898 | if ( !(db->cr6_data & CR6_SFT) ) { | ||
899 | db->cr6_data = db->cr6_data | CR6_SFT; | ||
900 | update_cr6(db->cr6_data, db->ioaddr); | ||
901 | } | ||
902 | } | ||
903 | if (tdes0 & 0x0100) | ||
904 | db->tx_excessive_collision++; | ||
905 | if (tdes0 & 0x0200) | ||
906 | db->tx_late_collision++; | ||
907 | if (tdes0 & 0x0400) | ||
908 | db->tx_no_carrier++; | ||
909 | if (tdes0 & 0x0800) | ||
910 | db->tx_loss_carrier++; | ||
911 | if (tdes0 & 0x4000) | ||
912 | db->tx_jabber_timeout++; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | txptr = txptr->next_tx_desc; | ||
917 | }/* End of while */ | ||
918 | |||
919 | /* Update TX remove pointer to next */ | ||
920 | db->tx_remove_ptr = txptr; | ||
921 | |||
922 | /* Send the Tx packet in queue */ | ||
923 | if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) { | ||
924 | txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ | ||
925 | db->tx_packet_cnt++; /* Ready to send */ | ||
926 | db->tx_queue_cnt--; | ||
927 | outl(0x1, ioaddr + DCR1); /* Issue Tx polling */ | ||
928 | dev->trans_start = jiffies; /* saved time stamp */ | ||
929 | } | ||
930 | |||
931 | /* Resource available check */ | ||
932 | if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT ) | ||
933 | netif_wake_queue(dev); /* Activate upper layer, send again */ | ||
934 | } | ||
935 | |||
936 | |||
937 | /* | ||
938 | * Calculate the CRC value of the Rx packet | ||
939 | * flag = 1 : return the reverse CRC (for the received packet CRC) | ||
940 | * 0 : return the normal CRC (for Hash Table index) | ||
941 | */ | ||
942 | |||
943 | static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag) | ||
944 | { | ||
945 | u32 crc = crc32(~0, Data, Len); | ||
946 | if (flag) crc = ~crc; | ||
947 | return crc; | ||
948 | } | ||
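/*
 * Both callers of cal_CRC() live in this file: dmfe_rx_packet() passes
 * flag = 1 when checking the CRC appended to a received frame, while
 * dm9132_id_table() passes flag = 0 and keeps only the low six bits as an
 * index into the 64-bit multicast hash table, roughly:
 *
 *	hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
 *	hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
 */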
949 | |||
950 | |||
951 | /* | ||
952 | * Receive the incoming packet and pass it to the upper layer | ||
953 | */ | ||
954 | |||
955 | static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db) | ||
956 | { | ||
957 | struct rx_desc *rxptr; | ||
958 | struct sk_buff *skb, *newskb; | ||
959 | int rxlen; | ||
960 | u32 rdes0; | ||
961 | |||
962 | rxptr = db->rx_ready_ptr; | ||
963 | |||
964 | while(db->rx_avail_cnt) { | ||
965 | rdes0 = le32_to_cpu(rxptr->rdes0); | ||
966 | if (rdes0 & 0x80000000) /* packet owner check */ | ||
967 | break; | ||
968 | |||
969 | db->rx_avail_cnt--; | ||
970 | db->interval_rx_cnt++; | ||
971 | |||
972 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), | ||
973 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | ||
974 | |||
975 | if ( (rdes0 & 0x300) != 0x300) { | ||
976 | /* A packet without First/Last flag */ | ||
977 | /* reuse this SKB */ | ||
978 | DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
979 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | ||
980 | } else { | ||
981 | /* A packet with First/Last flag */ | ||
982 | rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; | ||
983 | |||
984 | /* error summary bit check */ | ||
985 | if (rdes0 & 0x8000) { | ||
986 | /* This is an error packet */ | ||
987 | dev->stats.rx_errors++; | ||
988 | if (rdes0 & 1) | ||
989 | dev->stats.rx_fifo_errors++; | ||
990 | if (rdes0 & 2) | ||
991 | dev->stats.rx_crc_errors++; | ||
992 | if (rdes0 & 0x80) | ||
993 | dev->stats.rx_length_errors++; | ||
994 | } | ||
995 | |||
996 | if ( !(rdes0 & 0x8000) || | ||
997 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { | ||
998 | skb = rxptr->rx_skb_ptr; | ||
999 | |||
1000 | /* Check the received packet CRC if required */ | ||
1001 | if ( (db->dm910x_chk_mode & 1) && | ||
1002 | (cal_CRC(skb->data, rxlen, 1) != | ||
1003 | (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */ | ||
1004 | /* Found an error in the received packet */ | ||
1005 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | ||
1006 | db->dm910x_chk_mode = 3; | ||
1007 | } else { | ||
1008 | /* Good packet, send to upper layer */ | ||
1009 | /* Short packets use a new SKB */ | ||
1010 | if ((rxlen < RX_COPY_SIZE) && | ||
1011 | ((newskb = dev_alloc_skb(rxlen + 2)) | ||
1012 | != NULL)) { | ||
1013 | |||
1014 | skb = newskb; | ||
1015 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | ||
1016 | skb_reserve(skb, 2); /* 16byte align */ | ||
1017 | skb_copy_from_linear_data(rxptr->rx_skb_ptr, | ||
1018 | skb_put(skb, rxlen), | ||
1019 | rxlen); | ||
1020 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | ||
1021 | } else | ||
1022 | skb_put(skb, rxlen); | ||
1023 | |||
1024 | skb->protocol = eth_type_trans(skb, dev); | ||
1025 | netif_rx(skb); | ||
1026 | dev->stats.rx_packets++; | ||
1027 | dev->stats.rx_bytes += rxlen; | ||
1028 | } | ||
1029 | } else { | ||
1030 | /* Reuse the SKB buffer when the packet has an error */ | ||
1031 | DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
1032 | dmfe_reuse_skb(db, rxptr->rx_skb_ptr); | ||
1033 | } | ||
1034 | } | ||
1035 | |||
1036 | rxptr = rxptr->next_rx_desc; | ||
1037 | } | ||
1038 | |||
1039 | db->rx_ready_ptr = rxptr; | ||
1040 | } | ||
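/*
 * Copy-break note: frames shorter than RX_COPY_SIZE (100 bytes) are copied
 * into a freshly allocated skb so the original full-sized receive buffer
 * can be recycled immediately via dmfe_reuse_skb(); longer frames hand the
 * original skb up the stack and a replacement buffer is attached later in
 * allocate_rx_buffer().
 */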
1041 | |||
1042 | /* | ||
1043 | * Set DM910X multicast address | ||
1044 | */ | ||
1045 | |||
1046 | static void dmfe_set_filter_mode(struct DEVICE * dev) | ||
1047 | { | ||
1048 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1049 | unsigned long flags; | ||
1050 | int mc_count = netdev_mc_count(dev); | ||
1051 | |||
1052 | DMFE_DBUG(0, "dmfe_set_filter_mode()", 0); | ||
1053 | spin_lock_irqsave(&db->lock, flags); | ||
1054 | |||
1055 | if (dev->flags & IFF_PROMISC) { | ||
1056 | DMFE_DBUG(0, "Enable PROM Mode", 0); | ||
1057 | db->cr6_data |= CR6_PM | CR6_PBF; | ||
1058 | update_cr6(db->cr6_data, db->ioaddr); | ||
1059 | spin_unlock_irqrestore(&db->lock, flags); | ||
1060 | return; | ||
1061 | } | ||
1062 | |||
1063 | if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) { | ||
1064 | DMFE_DBUG(0, "Pass all multicast address", mc_count); | ||
1065 | db->cr6_data &= ~(CR6_PM | CR6_PBF); | ||
1066 | db->cr6_data |= CR6_PAM; | ||
1067 | spin_unlock_irqrestore(&db->lock, flags); | ||
1068 | return; | ||
1069 | } | ||
1070 | |||
1071 | DMFE_DBUG(0, "Set multicast address", mc_count); | ||
1072 | if (db->chip_id == PCI_DM9132_ID) | ||
1073 | dm9132_id_table(dev); /* DM9132 */ | ||
1074 | else | ||
1075 | send_filter_frame(dev); /* DM9102/DM9102A */ | ||
1076 | spin_unlock_irqrestore(&db->lock, flags); | ||
1077 | } | ||
1078 | |||
1079 | /* | ||
1080 | * Ethtool interface | ||
1081 | */ | ||
1082 | |||
1083 | static void dmfe_ethtool_get_drvinfo(struct net_device *dev, | ||
1084 | struct ethtool_drvinfo *info) | ||
1085 | { | ||
1086 | struct dmfe_board_info *np = netdev_priv(dev); | ||
1087 | |||
1088 | strcpy(info->driver, DRV_NAME); | ||
1089 | strcpy(info->version, DRV_VERSION); | ||
1090 | if (np->pdev) | ||
1091 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
1092 | else | ||
1093 | sprintf(info->bus_info, "EISA 0x%lx %d", | ||
1094 | dev->base_addr, dev->irq); | ||
1095 | } | ||
1096 | |||
1097 | static int dmfe_ethtool_set_wol(struct net_device *dev, | ||
1098 | struct ethtool_wolinfo *wolinfo) | ||
1099 | { | ||
1100 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1101 | |||
1102 | if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | | ||
1103 | WAKE_ARP | WAKE_MAGICSECURE)) | ||
1104 | return -EOPNOTSUPP; | ||
1105 | |||
1106 | db->wol_mode = wolinfo->wolopts; | ||
1107 | return 0; | ||
1108 | } | ||
1109 | |||
1110 | static void dmfe_ethtool_get_wol(struct net_device *dev, | ||
1111 | struct ethtool_wolinfo *wolinfo) | ||
1112 | { | ||
1113 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1114 | |||
1115 | wolinfo->supported = WAKE_PHY | WAKE_MAGIC; | ||
1116 | wolinfo->wolopts = db->wol_mode; | ||
1117 | } | ||
1118 | |||
1119 | |||
1120 | static const struct ethtool_ops netdev_ethtool_ops = { | ||
1121 | .get_drvinfo = dmfe_ethtool_get_drvinfo, | ||
1122 | .get_link = ethtool_op_get_link, | ||
1123 | .set_wol = dmfe_ethtool_set_wol, | ||
1124 | .get_wol = dmfe_ethtool_get_wol, | ||
1125 | }; | ||
1126 | |||
1127 | /* | ||
1128 | * A periodic timer routine | ||
1129 | * Dynamic media sense, allocate Rx buffer... | ||
1130 | */ | ||
1131 | |||
1132 | static void dmfe_timer(unsigned long data) | ||
1133 | { | ||
1134 | u32 tmp_cr8; | ||
1135 | unsigned char tmp_cr12; | ||
1136 | struct DEVICE *dev = (struct DEVICE *) data; | ||
1137 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1138 | unsigned long flags; | ||
1139 | |||
1140 | int link_ok, link_ok_phy; | ||
1141 | |||
1142 | DMFE_DBUG(0, "dmfe_timer()", 0); | ||
1143 | spin_lock_irqsave(&db->lock, flags); | ||
1144 | |||
1145 | /* Media mode processing when the link was OK before entering this routine */ | ||
1146 | if (db->first_in_callback == 0) { | ||
1147 | db->first_in_callback = 1; | ||
1148 | if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { | ||
1149 | db->cr6_data &= ~0x40000; | ||
1150 | update_cr6(db->cr6_data, db->ioaddr); | ||
1151 | phy_write(db->ioaddr, | ||
1152 | db->phy_addr, 0, 0x1000, db->chip_id); | ||
1153 | db->cr6_data |= 0x40000; | ||
1154 | update_cr6(db->cr6_data, db->ioaddr); | ||
1155 | db->timer.expires = DMFE_TIMER_WUT + HZ * 2; | ||
1156 | add_timer(&db->timer); | ||
1157 | spin_unlock_irqrestore(&db->lock, flags); | ||
1158 | return; | ||
1159 | } | ||
1160 | } | ||
1161 | |||
1162 | |||
1163 | /* Operating Mode Check */ | ||
1164 | if ( (db->dm910x_chk_mode & 0x1) && | ||
1165 | (dev->stats.rx_packets > MAX_CHECK_PACKET) ) | ||
1166 | db->dm910x_chk_mode = 0x4; | ||
1167 | |||
1168 | /* Dynamic reset DM910X : system error or transmit time-out */ | ||
1169 | tmp_cr8 = inl(db->ioaddr + DCR8); | ||
1170 | if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { | ||
1171 | db->reset_cr8++; | ||
1172 | db->wait_reset = 1; | ||
1173 | } | ||
1174 | db->interval_rx_cnt = 0; | ||
1175 | |||
1176 | /* TX polling kick monitor */ | ||
1177 | if ( db->tx_packet_cnt && | ||
1178 | time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { | ||
1179 | outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ | ||
1180 | |||
1181 | /* TX Timeout */ | ||
1182 | if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { | ||
1183 | db->reset_TXtimeout++; | ||
1184 | db->wait_reset = 1; | ||
1185 | dev_warn(&dev->dev, "Tx timeout - resetting\n"); | ||
1186 | } | ||
1187 | } | ||
1188 | |||
1189 | if (db->wait_reset) { | ||
1190 | DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); | ||
1191 | db->reset_count++; | ||
1192 | dmfe_dynamic_reset(dev); | ||
1193 | db->first_in_callback = 0; | ||
1194 | db->timer.expires = DMFE_TIMER_WUT; | ||
1195 | add_timer(&db->timer); | ||
1196 | spin_unlock_irqrestore(&db->lock, flags); | ||
1197 | return; | ||
1198 | } | ||
1199 | |||
1200 | /* Link status check, Dynamic media type change */ | ||
1201 | if (db->chip_id == PCI_DM9132_ID) | ||
1202 | tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */ | ||
1203 | else | ||
1204 | tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ | ||
1205 | |||
1206 | if ( ((db->chip_id == PCI_DM9102_ID) && | ||
1207 | (db->chip_revision == 0x30)) || | ||
1208 | ((db->chip_id == PCI_DM9132_ID) && | ||
1209 | (db->chip_revision == 0x10)) ) { | ||
1210 | /* DM9102A Chip */ | ||
1211 | if (tmp_cr12 & 2) | ||
1212 | link_ok = 0; | ||
1213 | else | ||
1214 | link_ok = 1; | ||
1215 | } | ||
1216 | else | ||
1217 | /*0x43 is used instead of 0x3 because bit 6 should represent | ||
1218 | link status of external PHY */ | ||
1219 | link_ok = (tmp_cr12 & 0x43) ? 1 : 0; | ||
1220 | |||
1221 | |||
1222 | /* If the chip reports that the link has failed, it could be because the | ||
1223 | external PHY link status pin is not connected correctly to the chip. | ||
1224 | To be sure, ask the PHY too. | ||
1225 | */ | ||
1226 | |||
1227 | /* need a dummy read because of the PHY's register latch */ | ||
1228 | phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1229 | link_ok_phy = (phy_read (db->ioaddr, | ||
1230 | db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0; | ||
1231 | |||
1232 | if (link_ok_phy != link_ok) { | ||
1233 | DMFE_DBUG (0, "PHY and chip report different link status", 0); | ||
1234 | link_ok = link_ok | link_ok_phy; | ||
1235 | } | ||
1236 | |||
1237 | if ( !link_ok && netif_carrier_ok(dev)) { | ||
1238 | /* Link Failed */ | ||
1239 | DMFE_DBUG(0, "Link Failed", tmp_cr12); | ||
1240 | netif_carrier_off(dev); | ||
1241 | |||
1242 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | ||
1243 | /* AUTO mode or forced 1M Homerun/Longrun doesn't need this */ | ||
1244 | if ( !(db->media_mode & 0x38) ) | ||
1245 | phy_write(db->ioaddr, db->phy_addr, | ||
1246 | 0, 0x1000, db->chip_id); | ||
1247 | |||
1248 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ | ||
1249 | if (db->media_mode & DMFE_AUTO) { | ||
1250 | /* 10/100M link failed, use 1M Home-Net */ | ||
1251 | db->cr6_data|=0x00040000; /* bit18=1, MII */ | ||
1252 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ | ||
1253 | update_cr6(db->cr6_data, db->ioaddr); | ||
1254 | } | ||
1255 | } else if (!netif_carrier_ok(dev)) { | ||
1256 | |||
1257 | DMFE_DBUG(0, "Link link OK", tmp_cr12); | ||
1258 | |||
1259 | /* Auto Sense Speed */ | ||
1260 | if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) { | ||
1261 | netif_carrier_on(dev); | ||
1262 | SHOW_MEDIA_TYPE(db->op_mode); | ||
1263 | } | ||
1264 | |||
1265 | dmfe_process_mode(db); | ||
1266 | } | ||
1267 | |||
1268 | /* HPNA remote command check */ | ||
1269 | if (db->HPNA_command & 0xf00) { | ||
1270 | db->HPNA_timer--; | ||
1271 | if (!db->HPNA_timer) | ||
1272 | dmfe_HPNA_remote_cmd_chk(db); | ||
1273 | } | ||
1274 | |||
1275 | /* Timer active again */ | ||
1276 | db->timer.expires = DMFE_TIMER_WUT; | ||
1277 | add_timer(&db->timer); | ||
1278 | spin_unlock_irqrestore(&db->lock, flags); | ||
1279 | } | ||
1280 | |||
1281 | |||
1282 | /* | ||
1283 | * Dynamic reset the DM910X board | ||
1284 | * Stop DM910X board | ||
1285 | * Free Tx/Rx allocated memory | ||
1286 | * Reset DM910X board | ||
1287 | * Re-initialize DM910X board | ||
1288 | */ | ||
1289 | |||
1290 | static void dmfe_dynamic_reset(struct DEVICE *dev) | ||
1291 | { | ||
1292 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1293 | |||
1294 | DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); | ||
1295 | |||
1296 | /* Stop the MAC controller */ | ||
1297 | db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ | ||
1298 | update_cr6(db->cr6_data, dev->base_addr); | ||
1299 | outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ | ||
1300 | outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); | ||
1301 | |||
1302 | /* Disable upper layer interface */ | ||
1303 | netif_stop_queue(dev); | ||
1304 | |||
1305 | /* Free allocated Rx buffers */ | ||
1306 | dmfe_free_rxbuffer(db); | ||
1307 | |||
1308 | /* system variable init */ | ||
1309 | db->tx_packet_cnt = 0; | ||
1310 | db->tx_queue_cnt = 0; | ||
1311 | db->rx_avail_cnt = 0; | ||
1312 | netif_carrier_off(dev); | ||
1313 | db->wait_reset = 0; | ||
1314 | |||
1315 | /* Re-initialize DM910X board */ | ||
1316 | dmfe_init_dm910x(dev); | ||
1317 | |||
1318 | /* Restart upper layer interface */ | ||
1319 | netif_wake_queue(dev); | ||
1320 | } | ||
1321 | |||
1322 | |||
1323 | /* | ||
1324 | * free all allocated rx buffer | ||
1325 | */ | ||
1326 | |||
1327 | static void dmfe_free_rxbuffer(struct dmfe_board_info * db) | ||
1328 | { | ||
1329 | DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0); | ||
1330 | |||
1331 | /* free allocated rx buffer */ | ||
1332 | while (db->rx_avail_cnt) { | ||
1333 | dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); | ||
1334 | db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; | ||
1335 | db->rx_avail_cnt--; | ||
1336 | } | ||
1337 | } | ||
1338 | |||
1339 | |||
1340 | /* | ||
1341 | * Reuse the SK buffer | ||
1342 | */ | ||
1343 | |||
1344 | static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb) | ||
1345 | { | ||
1346 | struct rx_desc *rxptr = db->rx_insert_ptr; | ||
1347 | |||
1348 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | ||
1349 | rxptr->rx_skb_ptr = skb; | ||
1350 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, | ||
1351 | skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1352 | wmb(); | ||
1353 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1354 | db->rx_avail_cnt++; | ||
1355 | db->rx_insert_ptr = rxptr->next_rx_desc; | ||
1356 | } else | ||
1357 | DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); | ||
1358 | } | ||
1359 | |||
1360 | |||
1361 | /* | ||
1362 | * Initialize Transmit/Receive descriptors | ||
1363 | * using a chain structure, and allocate Tx/Rx buffers | ||
1364 | */ | ||
1365 | |||
1366 | static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr) | ||
1367 | { | ||
1368 | struct tx_desc *tmp_tx; | ||
1369 | struct rx_desc *tmp_rx; | ||
1370 | unsigned char *tmp_buf; | ||
1371 | dma_addr_t tmp_tx_dma, tmp_rx_dma; | ||
1372 | dma_addr_t tmp_buf_dma; | ||
1373 | int i; | ||
1374 | |||
1375 | DMFE_DBUG(0, "dmfe_descriptor_init()", 0); | ||
1376 | |||
1377 | /* tx descriptor start pointer */ | ||
1378 | db->tx_insert_ptr = db->first_tx_desc; | ||
1379 | db->tx_remove_ptr = db->first_tx_desc; | ||
1380 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ | ||
1381 | |||
1382 | /* rx descriptor start pointer */ | ||
1383 | db->first_rx_desc = (void *)db->first_tx_desc + | ||
1384 | sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1385 | |||
1386 | db->first_rx_desc_dma = db->first_tx_desc_dma + | ||
1387 | sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1388 | db->rx_insert_ptr = db->first_rx_desc; | ||
1389 | db->rx_ready_ptr = db->first_rx_desc; | ||
1390 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ | ||
1391 | |||
1392 | /* Init Transmit chain */ | ||
1393 | tmp_buf = db->buf_pool_start; | ||
1394 | tmp_buf_dma = db->buf_pool_dma_start; | ||
1395 | tmp_tx_dma = db->first_tx_desc_dma; | ||
1396 | for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { | ||
1397 | tmp_tx->tx_buf_ptr = tmp_buf; | ||
1398 | tmp_tx->tdes0 = cpu_to_le32(0); | ||
1399 | tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ | ||
1400 | tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); | ||
1401 | tmp_tx_dma += sizeof(struct tx_desc); | ||
1402 | tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); | ||
1403 | tmp_tx->next_tx_desc = tmp_tx + 1; | ||
1404 | tmp_buf = tmp_buf + TX_BUF_ALLOC; | ||
1405 | tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; | ||
1406 | } | ||
1407 | (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); | ||
1408 | tmp_tx->next_tx_desc = db->first_tx_desc; | ||
1409 | |||
1410 | /* Init Receive descriptor chain */ | ||
1411 | tmp_rx_dma=db->first_rx_desc_dma; | ||
1412 | for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { | ||
1413 | tmp_rx->rdes0 = cpu_to_le32(0); | ||
1414 | tmp_rx->rdes1 = cpu_to_le32(0x01000600); | ||
1415 | tmp_rx_dma += sizeof(struct rx_desc); | ||
1416 | tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma); | ||
1417 | tmp_rx->next_rx_desc = tmp_rx + 1; | ||
1418 | } | ||
1419 | (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); | ||
1420 | tmp_rx->next_rx_desc = db->first_rx_desc; | ||
1421 | |||
1422 | /* pre-allocate Rx buffer */ | ||
1423 | allocate_rx_buffer(db); | ||
1424 | } | ||
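/*
 * Each tdes3/rdes3 word holds the bus address of the next descriptor, so
 * the chip follows a chained layout rather than a fixed stride, and the
 * last entry is pointed back at the first one above to close the ring.
 * The next_tx_desc/next_rx_desc pointers mirror the same rings on the
 * CPU side.
 */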
1425 | |||
1426 | |||
1427 | /* | ||
1428 | * Update CR6 value | ||
1429 | * First stop the DM910X, then write the value and restart | ||
1430 | */ | ||
1431 | |||
1432 | static void update_cr6(u32 cr6_data, unsigned long ioaddr) | ||
1433 | { | ||
1434 | u32 cr6_tmp; | ||
1435 | |||
1436 | cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ | ||
1437 | outl(cr6_tmp, ioaddr + DCR6); | ||
1438 | udelay(5); | ||
1439 | outl(cr6_data, ioaddr + DCR6); | ||
1440 | udelay(5); | ||
1441 | } | ||
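/*
 * The ~0x2002 mask clears CR6_RXSC (bit 1) and CR6_TXSC (bit 13), so the
 * receiver and transmitter are stopped for a moment before the caller's
 * full CR6 value, start bits included, is written back.
 */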
1442 | |||
1443 | |||
1444 | /* | ||
1445 | * Send a setup frame for DM9132 | ||
1446 | * This setup frame initializes the DM910X address filter mode | ||
1447 | */ | ||
1448 | |||
1449 | static void dm9132_id_table(struct DEVICE *dev) | ||
1450 | { | ||
1451 | struct netdev_hw_addr *ha; | ||
1452 | u16 * addrptr; | ||
1453 | unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ | ||
1454 | u32 hash_val; | ||
1455 | u16 i, hash_table[4]; | ||
1456 | |||
1457 | DMFE_DBUG(0, "dm9132_id_table()", 0); | ||
1458 | |||
1459 | /* Node address */ | ||
1460 | addrptr = (u16 *) dev->dev_addr; | ||
1461 | outw(addrptr[0], ioaddr); | ||
1462 | ioaddr += 4; | ||
1463 | outw(addrptr[1], ioaddr); | ||
1464 | ioaddr += 4; | ||
1465 | outw(addrptr[2], ioaddr); | ||
1466 | ioaddr += 4; | ||
1467 | |||
1468 | /* Clear Hash Table */ | ||
1469 | memset(hash_table, 0, sizeof(hash_table)); | ||
1470 | |||
1471 | /* broadcast address */ | ||
1472 | hash_table[3] = 0x8000; | ||
1473 | |||
1474 | /* the multicast address in Hash Table : 64 bits */ | ||
1475 | netdev_for_each_mc_addr(ha, dev) { | ||
1476 | hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; | ||
1477 | hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); | ||
1478 | } | ||
1479 | |||
1480 | /* Write the hash table to MAC MD table */ | ||
1481 | for (i = 0; i < 4; i++, ioaddr += 4) | ||
1482 | outw(hash_table[i], ioaddr); | ||
1483 | } | ||
1484 | |||
1485 | |||
1486 | /* | ||
1487 | * Send a setup frame for DM9102/DM9102A | ||
1488 | * This setup frame initializes the DM910X address filter mode | ||
1489 | */ | ||
1490 | |||
1491 | static void send_filter_frame(struct DEVICE *dev) | ||
1492 | { | ||
1493 | struct dmfe_board_info *db = netdev_priv(dev); | ||
1494 | struct netdev_hw_addr *ha; | ||
1495 | struct tx_desc *txptr; | ||
1496 | u16 * addrptr; | ||
1497 | u32 * suptr; | ||
1498 | int i; | ||
1499 | |||
1500 | DMFE_DBUG(0, "send_filter_frame()", 0); | ||
1501 | |||
1502 | txptr = db->tx_insert_ptr; | ||
1503 | suptr = (u32 *) txptr->tx_buf_ptr; | ||
1504 | |||
1505 | /* Node address */ | ||
1506 | addrptr = (u16 *) dev->dev_addr; | ||
1507 | *suptr++ = addrptr[0]; | ||
1508 | *suptr++ = addrptr[1]; | ||
1509 | *suptr++ = addrptr[2]; | ||
1510 | |||
1511 | /* broadcast address */ | ||
1512 | *suptr++ = 0xffff; | ||
1513 | *suptr++ = 0xffff; | ||
1514 | *suptr++ = 0xffff; | ||
1515 | |||
1516 | /* fill in the multicast addresses */ | ||
1517 | netdev_for_each_mc_addr(ha, dev) { | ||
1518 | addrptr = (u16 *) ha->addr; | ||
1519 | *suptr++ = addrptr[0]; | ||
1520 | *suptr++ = addrptr[1]; | ||
1521 | *suptr++ = addrptr[2]; | ||
1522 | } | ||
1523 | |||
1524 | for (i = netdev_mc_count(dev); i < 14; i++) { | ||
1525 | *suptr++ = 0xffff; | ||
1526 | *suptr++ = 0xffff; | ||
1527 | *suptr++ = 0xffff; | ||
1528 | } | ||
1529 | |||
1530 | /* prepare the setup frame */ | ||
1531 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
1532 | txptr->tdes1 = cpu_to_le32(0x890000c0); | ||
1533 | |||
1534 | /* Resource Check and Send the setup packet */ | ||
1535 | if (!db->tx_packet_cnt) { | ||
1536 | /* Resource Empty */ | ||
1537 | db->tx_packet_cnt++; | ||
1538 | txptr->tdes0 = cpu_to_le32(0x80000000); | ||
1539 | update_cr6(db->cr6_data | 0x2000, dev->base_addr); | ||
1540 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
1541 | update_cr6(db->cr6_data, dev->base_addr); | ||
1542 | dev->trans_start = jiffies; | ||
1543 | } else | ||
1544 | db->tx_queue_cnt++; /* Put in TX queue */ | ||
1545 | } | ||
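/*
 * The setup buffer built above holds 16 entries of three 16-bit words
 * each (stored one word per 32-bit slot): the node address, the broadcast
 * address and up to 14 multicast addresses padded with 0xffff.  The 0xc0
 * in tdes1 (0x890000c0) appears to be the matching 192-byte buffer
 * length (16 entries x 12 bytes).
 */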
1546 | |||
1547 | |||
1548 | /* | ||
1549 | * Allocate rx buffers, | ||
1550 | * filling as many Rx descriptors as possible | ||
1551 | */ | ||
1552 | |||
1553 | static void allocate_rx_buffer(struct dmfe_board_info *db) | ||
1554 | { | ||
1555 | struct rx_desc *rxptr; | ||
1556 | struct sk_buff *skb; | ||
1557 | |||
1558 | rxptr = db->rx_insert_ptr; | ||
1559 | |||
1560 | while(db->rx_avail_cnt < RX_DESC_CNT) { | ||
1561 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | ||
1562 | break; | ||
1563 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | ||
1564 | rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, | ||
1565 | RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); | ||
1566 | wmb(); | ||
1567 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1568 | rxptr = rxptr->next_rx_desc; | ||
1569 | db->rx_avail_cnt++; | ||
1570 | } | ||
1571 | |||
1572 | db->rx_insert_ptr = rxptr; | ||
1573 | } | ||
1574 | |||
1575 | |||
1576 | /* | ||
1577 | * Read one word data from the serial ROM | ||
1578 | */ | ||
1579 | |||
1580 | static u16 read_srom_word(long ioaddr, int offset) | ||
1581 | { | ||
1582 | int i; | ||
1583 | u16 srom_data = 0; | ||
1584 | long cr9_ioaddr = ioaddr + DCR9; | ||
1585 | |||
1586 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1587 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1588 | |||
1589 | /* Send the Read Command 110b */ | ||
1590 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1591 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1592 | SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); | ||
1593 | |||
1594 | /* Send the offset */ | ||
1595 | for (i = 5; i >= 0; i--) { | ||
1596 | srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; | ||
1597 | SROM_CLK_WRITE(srom_data, cr9_ioaddr); | ||
1598 | } | ||
1599 | |||
1600 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1601 | |||
1602 | for (i = 16; i > 0; i--) { | ||
1603 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); | ||
1604 | udelay(5); | ||
1605 | srom_data = (srom_data << 1) | | ||
1606 | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | ||
1607 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1608 | udelay(5); | ||
1609 | } | ||
1610 | |||
1611 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1612 | return srom_data; | ||
1613 | } | ||
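/*
 * The sequence above looks like a 93C46-style serial EEPROM read (an
 * inference from the bit pattern, not from a quoted datasheet): assert
 * chip select, clock out the 110b READ opcode, clock out a 6-bit word
 * address MSB first, then clock in 16 data bits MSB first via CR9_CRDOUT.
 */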
1614 | |||
1615 | |||
1616 | /* | ||
1617 | * Auto sense the media mode | ||
1618 | */ | ||
1619 | |||
1620 | static u8 dmfe_sense_speed(struct dmfe_board_info * db) | ||
1621 | { | ||
1622 | u8 ErrFlag = 0; | ||
1623 | u16 phy_mode; | ||
1624 | |||
1625 | /* CR6 bit18=0, select 10/100M */ | ||
1626 | update_cr6( (db->cr6_data & ~0x40000), db->ioaddr); | ||
1627 | |||
1628 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1629 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1630 | |||
1631 | if ( (phy_mode & 0x24) == 0x24 ) { | ||
1632 | if (db->chip_id == PCI_DM9132_ID) /* DM9132 */ | ||
1633 | phy_mode = phy_read(db->ioaddr, | ||
1634 | db->phy_addr, 7, db->chip_id) & 0xf000; | ||
1635 | else /* DM9102/DM9102A */ | ||
1636 | phy_mode = phy_read(db->ioaddr, | ||
1637 | db->phy_addr, 17, db->chip_id) & 0xf000; | ||
1638 | switch (phy_mode) { | ||
1639 | case 0x1000: db->op_mode = DMFE_10MHF; break; | ||
1640 | case 0x2000: db->op_mode = DMFE_10MFD; break; | ||
1641 | case 0x4000: db->op_mode = DMFE_100MHF; break; | ||
1642 | case 0x8000: db->op_mode = DMFE_100MFD; break; | ||
1643 | default: db->op_mode = DMFE_10MHF; | ||
1644 | ErrFlag = 1; | ||
1645 | break; | ||
1646 | } | ||
1647 | } else { | ||
1648 | db->op_mode = DMFE_10MHF; | ||
1649 | DMFE_DBUG(0, "Link Failed :", phy_mode); | ||
1650 | ErrFlag = 1; | ||
1651 | } | ||
1652 | |||
1653 | return ErrFlag; | ||
1654 | } | ||
1655 | |||
1656 | |||
1657 | /* | ||
1658 | * Set 10/100 phyxcer capability | ||
1659 | * AUTO mode : phyxcer register4 is NIC capability | ||
1660 | * Force mode: phyxcer register4 is the forced media | ||
1661 | */ | ||
1662 | |||
1663 | static void dmfe_set_phyxcer(struct dmfe_board_info *db) | ||
1664 | { | ||
1665 | u16 phy_reg; | ||
1666 | |||
1667 | /* Select 10/100M phyxcer */ | ||
1668 | db->cr6_data &= ~0x40000; | ||
1669 | update_cr6(db->cr6_data, db->ioaddr); | ||
1670 | |||
1671 | /* DM9009 Chip: Phyxcer reg18 bit12=0 */ | ||
1672 | if (db->chip_id == PCI_DM9009_ID) { | ||
1673 | phy_reg = phy_read(db->ioaddr, | ||
1674 | db->phy_addr, 18, db->chip_id) & ~0x1000; | ||
1675 | |||
1676 | phy_write(db->ioaddr, | ||
1677 | db->phy_addr, 18, phy_reg, db->chip_id); | ||
1678 | } | ||
1679 | |||
1680 | /* Phyxcer capability setting */ | ||
1681 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; | ||
1682 | |||
1683 | if (db->media_mode & DMFE_AUTO) { | ||
1684 | /* AUTO Mode */ | ||
1685 | phy_reg |= db->PHY_reg4; | ||
1686 | } else { | ||
1687 | /* Force Mode */ | ||
1688 | switch(db->media_mode) { | ||
1689 | case DMFE_10MHF: phy_reg |= 0x20; break; | ||
1690 | case DMFE_10MFD: phy_reg |= 0x40; break; | ||
1691 | case DMFE_100MHF: phy_reg |= 0x80; break; | ||
1692 | case DMFE_100MFD: phy_reg |= 0x100; break; | ||
1693 | } | ||
1694 | if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61; | ||
1695 | } | ||
1696 | |||
1697 | /* Write new capability to Phyxcer Reg4 */ | ||
1698 | if ( !(phy_reg & 0x01e0)) { | ||
1699 | phy_reg|=db->PHY_reg4; | ||
1700 | db->media_mode|=DMFE_AUTO; | ||
1701 | } | ||
1702 | phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); | ||
1703 | |||
1704 | /* Restart Auto-Negotiation */ | ||
1705 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) | ||
1706 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id); | ||
1707 | if ( !db->chip_type ) | ||
1708 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); | ||
1709 | } | ||
1710 | |||
1711 | |||
1712 | /* | ||
1713 | * Process op-mode | ||
1714 | * AUTO mode : PHY controller in Auto-negotiation Mode | ||
1715 | * Force mode: PHY controller in force mode with HUB | ||
1716 | * N-way force capability with SWITCH | ||
1717 | */ | ||
1718 | |||
1719 | static void dmfe_process_mode(struct dmfe_board_info *db) | ||
1720 | { | ||
1721 | u16 phy_reg; | ||
1722 | |||
1723 | /* Full Duplex Mode Check */ | ||
1724 | if (db->op_mode & 0x4) | ||
1725 | db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ | ||
1726 | else | ||
1727 | db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ | ||
1728 | |||
1729 | /* Transceiver Selection */ | ||
1730 | if (db->op_mode & 0x10) /* 1M HomePNA */ | ||
1731 | db->cr6_data |= 0x40000;/* External MII select */ | ||
1732 | else | ||
1733 | db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */ | ||
1734 | |||
1735 | update_cr6(db->cr6_data, db->ioaddr); | ||
1736 | |||
1737 | /* Force-mode handling needed for the 10/100M phyxcer */ | ||
1738 | if ( !(db->media_mode & 0x18)) { | ||
1739 | /* Force Mode */ | ||
1740 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); | ||
1741 | if ( !(phy_reg & 0x1) ) { | ||
1742 | /* Partner without N-Way capability */ | ||
1743 | phy_reg = 0x0; | ||
1744 | switch(db->op_mode) { | ||
1745 | case DMFE_10MHF: phy_reg = 0x0; break; | ||
1746 | case DMFE_10MFD: phy_reg = 0x100; break; | ||
1747 | case DMFE_100MHF: phy_reg = 0x2000; break; | ||
1748 | case DMFE_100MFD: phy_reg = 0x2100; break; | ||
1749 | } | ||
1750 | phy_write(db->ioaddr, | ||
1751 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1752 | if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) ) | ||
1753 | mdelay(20); | ||
1754 | phy_write(db->ioaddr, | ||
1755 | db->phy_addr, 0, phy_reg, db->chip_id); | ||
1756 | } | ||
1757 | } | ||
1758 | } | ||
1759 | |||
1760 | |||
1761 | /* | ||
1762 | * Write a word to Phy register | ||
1763 | */ | ||
1764 | |||
1765 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, | ||
1766 | u16 phy_data, u32 chip_id) | ||
1767 | { | ||
1768 | u16 i; | ||
1769 | unsigned long ioaddr; | ||
1770 | |||
1771 | if (chip_id == PCI_DM9132_ID) { | ||
1772 | ioaddr = iobase + 0x80 + offset * 4; | ||
1773 | outw(phy_data, ioaddr); | ||
1774 | } else { | ||
1775 | /* DM9102/DM9102A Chip */ | ||
1776 | ioaddr = iobase + DCR9; | ||
1777 | |||
1778 | /* Send 35 synchronization clock pulses to the PHY controller */ | ||
1779 | for (i = 0; i < 35; i++) | ||
1780 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1781 | |||
1782 | /* Send start command(01) to Phy */ | ||
1783 | phy_write_1bit(ioaddr, PHY_DATA_0); | ||
1784 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1785 | |||
1786 | /* Send write command(01) to Phy */ | ||
1787 | phy_write_1bit(ioaddr, PHY_DATA_0); | ||
1788 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1789 | |||
1790 | /* Send Phy address */ | ||
1791 | for (i = 0x10; i > 0; i = i >> 1) | ||
1792 | phy_write_1bit(ioaddr, | ||
1793 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1794 | |||
1795 | /* Send register address */ | ||
1796 | for (i = 0x10; i > 0; i = i >> 1) | ||
1797 | phy_write_1bit(ioaddr, | ||
1798 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1799 | |||
1800 | /* Write turnaround bits */ | ||
1801 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1802 | phy_write_1bit(ioaddr, PHY_DATA_0); | ||
1803 | |||
1804 | /* Write a word data to PHY controller */ | ||
1805 | for ( i = 0x8000; i > 0; i >>= 1) | ||
1806 | phy_write_1bit(ioaddr, | ||
1807 | phy_data & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1808 | } | ||
1809 | } | ||
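Editorial aside (not part of the patch): on the DM9102/DM9102A path, phy_write() above bit-bangs a standard MII-management (MDIO) write frame — preamble, start (01), write opcode (01), 5-bit PHY address, 5-bit register address, turnaround (10), then 16 data bits. A minimal host-side sketch that prints the frame produced by those loops, using hypothetical address and data values:

#include <stdio.h>

/* Illustrative only: print the MDIO write frame that the bit-bang
 * loops in phy_write() generate, for hypothetical values. */
static void print_bits(unsigned int value, unsigned int msb_mask)
{
	unsigned int mask;

	for (mask = msb_mask; mask > 0; mask >>= 1)
		putchar((value & mask) ? '1' : '0');
}

int main(void)
{
	unsigned int phy_addr = 1, reg = 0, data = 0x1200;	/* hypothetical */
	int i;

	for (i = 0; i < 35; i++)	/* synchronization/preamble bits */
		putchar('1');
	printf("01");			/* start */
	printf("01");			/* write opcode */
	print_bits(phy_addr, 0x10);	/* 5-bit PHY address */
	print_bits(reg, 0x10);		/* 5-bit register address */
	printf("10");			/* turnaround */
	print_bits(data, 0x8000);	/* 16 data bits */
	putchar('\n');
	return 0;
}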
1810 | |||
1811 | |||
1812 | /* | ||
1813 | * Read a word data from phy register | ||
1814 | */ | ||
1815 | |||
1816 | static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | ||
1817 | { | ||
1818 | int i; | ||
1819 | u16 phy_data; | ||
1820 | unsigned long ioaddr; | ||
1821 | |||
1822 | if (chip_id == PCI_DM9132_ID) { | ||
1823 | /* DM9132 Chip */ | ||
1824 | ioaddr = iobase + 0x80 + offset * 4; | ||
1825 | phy_data = inw(ioaddr); | ||
1826 | } else { | ||
1827 | /* DM9102/DM9102A Chip */ | ||
1828 | ioaddr = iobase + DCR9; | ||
1829 | |||
1830 | /* Send 35 synchronization clock pulses to the PHY controller */ | ||
1831 | for (i = 0; i < 35; i++) | ||
1832 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1833 | |||
1834 | /* Send start command(01) to Phy */ | ||
1835 | phy_write_1bit(ioaddr, PHY_DATA_0); | ||
1836 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1837 | |||
1838 | /* Send read command(10) to Phy */ | ||
1839 | phy_write_1bit(ioaddr, PHY_DATA_1); | ||
1840 | phy_write_1bit(ioaddr, PHY_DATA_0); | ||
1841 | |||
1842 | /* Send Phy address */ | ||
1843 | for (i = 0x10; i > 0; i = i >> 1) | ||
1844 | phy_write_1bit(ioaddr, | ||
1845 | phy_addr & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1846 | |||
1847 | /* Send register address */ | ||
1848 | for (i = 0x10; i > 0; i = i >> 1) | ||
1849 | phy_write_1bit(ioaddr, | ||
1850 | offset & i ? PHY_DATA_1 : PHY_DATA_0); | ||
1851 | |||
1852 | /* Skip transition state */ | ||
1853 | phy_read_1bit(ioaddr); | ||
1854 | |||
1855 | /* read 16bit data */ | ||
1856 | for (phy_data = 0, i = 0; i < 16; i++) { | ||
1857 | phy_data <<= 1; | ||
1858 | phy_data |= phy_read_1bit(ioaddr); | ||
1859 | } | ||
1860 | } | ||
1861 | |||
1862 | return phy_data; | ||
1863 | } | ||
1864 | |||
1865 | |||
1866 | /* | ||
1867 | * Write one bit data to Phy Controller | ||
1868 | */ | ||
1869 | |||
1870 | static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) | ||
1871 | { | ||
1872 | outl(phy_data, ioaddr); /* MII Clock Low */ | ||
1873 | udelay(1); | ||
1874 | outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ | ||
1875 | udelay(1); | ||
1876 | outl(phy_data, ioaddr); /* MII Clock Low */ | ||
1877 | udelay(1); | ||
1878 | } | ||
1879 | |||
1880 | |||
1881 | /* | ||
1882 | * Read one bit phy data from PHY controller | ||
1883 | */ | ||
1884 | |||
1885 | static u16 phy_read_1bit(unsigned long ioaddr) | ||
1886 | { | ||
1887 | u16 phy_data; | ||
1888 | |||
1889 | outl(0x50000, ioaddr); | ||
1890 | udelay(1); | ||
1891 | phy_data = ( inl(ioaddr) >> 19 ) & 0x1; | ||
1892 | outl(0x40000, ioaddr); | ||
1893 | udelay(1); | ||
1894 | |||
1895 | return phy_data; | ||
1896 | } | ||
1897 | |||
1898 | |||
1899 | /* | ||
1900 | * Parse the SROM and set the media mode | ||
1901 | */ | ||
1902 | |||
1903 | static void dmfe_parse_srom(struct dmfe_board_info * db) | ||
1904 | { | ||
1905 | char * srom = db->srom; | ||
1906 | int dmfe_mode, tmp_reg; | ||
1907 | |||
1908 | DMFE_DBUG(0, "dmfe_parse_srom() ", 0); | ||
1909 | |||
1910 | /* Init CR15 */ | ||
1911 | db->cr15_data = CR15_DEFAULT; | ||
1912 | |||
1913 | /* Check SROM Version */ | ||
1914 | if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) { | ||
1915 | /* SROM V4.01 */ | ||
1916 | /* Get NIC support media mode */ | ||
1917 | db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34)); | ||
1918 | db->PHY_reg4 = 0; | ||
1919 | for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) { | ||
1920 | switch( db->NIC_capability & tmp_reg ) { | ||
1921 | case 0x1: db->PHY_reg4 |= 0x0020; break; | ||
1922 | case 0x2: db->PHY_reg4 |= 0x0040; break; | ||
1923 | case 0x4: db->PHY_reg4 |= 0x0080; break; | ||
1924 | case 0x8: db->PHY_reg4 |= 0x0100; break; | ||
1925 | } | ||
1926 | } | ||
1927 | |||
1928 | /* Media Mode Force or not check */ | ||
1929 | dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) & | ||
1930 | le32_to_cpup((__le32 *) (srom + 36))); | ||
1931 | switch(dmfe_mode) { | ||
1932 | case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */ | ||
1933 | case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */ | ||
1934 | case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */ | ||
1935 | case 0x100: | ||
1936 | case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */ | ||
1937 | } | ||
1938 | |||
1939 | /* Special Function setting */ | ||
1940 | /* VLAN function */ | ||
1941 | if ( (SF_mode & 0x1) || (srom[43] & 0x80) ) | ||
1942 | db->cr15_data |= 0x40; | ||
1943 | |||
1944 | /* Flow Control */ | ||
1945 | if ( (SF_mode & 0x2) || (srom[40] & 0x1) ) | ||
1946 | db->cr15_data |= 0x400; | ||
1947 | |||
1948 | /* TX pause packet */ | ||
1949 | if ( (SF_mode & 0x4) || (srom[40] & 0xe) ) | ||
1950 | db->cr15_data |= 0x9800; | ||
1951 | } | ||
1952 | |||
1953 | /* Parse HPNA parameter */ | ||
1954 | db->HPNA_command = 1; | ||
1955 | |||
1956 | /* Accept remote command or not */ | ||
1957 | if (HPNA_rx_cmd == 0) | ||
1958 | db->HPNA_command |= 0x8000; | ||
1959 | |||
1960 | /* Issue remote command & operation mode */ | ||
1961 | if (HPNA_tx_cmd == 1) | ||
1962 | switch(HPNA_mode) { /* Issue Remote Command */ | ||
1963 | case 0: db->HPNA_command |= 0x0904; break; | ||
1964 | case 1: db->HPNA_command |= 0x0a00; break; | ||
1965 | case 2: db->HPNA_command |= 0x0506; break; | ||
1966 | case 3: db->HPNA_command |= 0x0602; break; | ||
1967 | } | ||
1968 | else | ||
1969 | switch(HPNA_mode) { /* Don't Issue */ | ||
1970 | case 0: db->HPNA_command |= 0x0004; break; | ||
1971 | case 1: db->HPNA_command |= 0x0000; break; | ||
1972 | case 2: db->HPNA_command |= 0x0006; break; | ||
1973 | case 3: db->HPNA_command |= 0x0002; break; | ||
1974 | } | ||
1975 | |||
1976 | /* Check whether a DM9801 or DM9802 is present */ | ||
1977 | db->HPNA_present = 0; | ||
1978 | update_cr6(db->cr6_data|0x40000, db->ioaddr); | ||
1979 | tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); | ||
1980 | if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { | ||
1981 | /* DM9801 or DM9802 present */ | ||
1982 | db->HPNA_timer = 8; | ||
1983 | if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) { | ||
1984 | /* DM9801 HomeRun */ | ||
1985 | db->HPNA_present = 1; | ||
1986 | dmfe_program_DM9801(db, tmp_reg); | ||
1987 | } else { | ||
1988 | /* DM9802 LongRun */ | ||
1989 | db->HPNA_present = 2; | ||
1990 | dmfe_program_DM9802(db); | ||
1991 | } | ||
1992 | } | ||
1993 | |||
1994 | } | ||
1995 | |||
1996 | |||
1997 | /* | ||
1998 | * Init HomeRun DM9801 | ||
1999 | */ | ||
2000 | |||
2001 | static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev) | ||
2002 | { | ||
2003 | uint reg17, reg25; | ||
2004 | |||
2005 | if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR; | ||
2006 | switch(HPNA_rev) { | ||
2007 | case 0xb900: /* DM9801 E3 */ | ||
2008 | db->HPNA_command |= 0x1000; | ||
2009 | reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id); | ||
2010 | reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000; | ||
2011 | reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); | ||
2012 | break; | ||
2013 | case 0xb901: /* DM9801 E4 */ | ||
2014 | reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); | ||
2015 | reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor; | ||
2016 | reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); | ||
2017 | reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3; | ||
2018 | break; | ||
2019 | case 0xb902: /* DM9801 E5 */ | ||
2020 | case 0xb903: /* DM9801 E6 */ | ||
2021 | default: | ||
2022 | db->HPNA_command |= 0x1000; | ||
2023 | reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); | ||
2024 | reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5; | ||
2025 | reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id); | ||
2026 | reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor; | ||
2027 | break; | ||
2028 | } | ||
2029 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); | ||
2030 | phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id); | ||
2031 | phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id); | ||
2032 | } | ||
2033 | |||
2034 | |||
2035 | /* | ||
2036 | * Init HomeRun DM9802 | ||
2037 | */ | ||
2038 | |||
2039 | static void dmfe_program_DM9802(struct dmfe_board_info * db) | ||
2040 | { | ||
2041 | uint phy_reg; | ||
2042 | |||
2043 | if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR; | ||
2044 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id); | ||
2045 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id); | ||
2046 | phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor; | ||
2047 | phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id); | ||
2048 | } | ||
2049 | |||
2050 | |||
2051 | /* | ||
2052 | * Check remote HPNA power and speed status. If not correct, | ||
2053 | * issue command again. | ||
2054 | */ | ||
2055 | |||
2056 | static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db) | ||
2057 | { | ||
2058 | uint phy_reg; | ||
2059 | |||
2060 | /* Get remote device status */ | ||
2061 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60; | ||
2062 | switch(phy_reg) { | ||
2063 | case 0x00: phy_reg = 0x0a00;break; /* LP/LS */ | ||
2064 | case 0x20: phy_reg = 0x0900;break; /* LP/HS */ | ||
2065 | case 0x40: phy_reg = 0x0600;break; /* HP/LS */ | ||
2066 | case 0x60: phy_reg = 0x0500;break; /* HP/HS */ | ||
2067 | } | ||
2068 | |||
2069 | /* Check whether the remote device status matches our setting */ | ||
2070 | if ( phy_reg != (db->HPNA_command & 0x0f00) ) { | ||
2071 | phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, | ||
2072 | db->chip_id); | ||
2073 | db->HPNA_timer=8; | ||
2074 | } else | ||
2075 | db->HPNA_timer=600; /* Match, every 10 minutes, check */ | ||
2076 | } | ||
2077 | |||
2078 | |||
2079 | |||
2080 | static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = { | ||
2081 | { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID }, | ||
2082 | { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID }, | ||
2083 | { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID }, | ||
2084 | { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID }, | ||
2085 | { 0, } | ||
2086 | }; | ||
2087 | MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl); | ||
2088 | |||
2089 | |||
2090 | #ifdef CONFIG_PM | ||
2091 | static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state) | ||
2092 | { | ||
2093 | struct net_device *dev = pci_get_drvdata(pci_dev); | ||
2094 | struct dmfe_board_info *db = netdev_priv(dev); | ||
2095 | u32 tmp; | ||
2096 | |||
2097 | /* Disable upper layer interface */ | ||
2098 | netif_device_detach(dev); | ||
2099 | |||
2100 | /* Disable Tx/Rx */ | ||
2101 | db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); | ||
2102 | update_cr6(db->cr6_data, dev->base_addr); | ||
2103 | |||
2104 | /* Disable Interrupt */ | ||
2105 | outl(0, dev->base_addr + DCR7); | ||
2106 | outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5); | ||
2107 | |||
2108 | /* Free Rx buffers */ | ||
2109 | dmfe_free_rxbuffer(db); | ||
2110 | |||
2111 | /* Enable WOL */ | ||
2112 | pci_read_config_dword(pci_dev, 0x40, &tmp); | ||
2113 | tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET); | ||
2114 | |||
2115 | if (db->wol_mode & WAKE_PHY) | ||
2116 | tmp |= DMFE_WOL_LINKCHANGE; | ||
2117 | if (db->wol_mode & WAKE_MAGIC) | ||
2118 | tmp |= DMFE_WOL_MAGICPACKET; | ||
2119 | |||
2120 | pci_write_config_dword(pci_dev, 0x40, tmp); | ||
2121 | |||
2122 | pci_enable_wake(pci_dev, PCI_D3hot, 1); | ||
2123 | pci_enable_wake(pci_dev, PCI_D3cold, 1); | ||
2124 | |||
2125 | /* Power down device*/ | ||
2126 | pci_save_state(pci_dev); | ||
2127 | pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state)); | ||
2128 | |||
2129 | return 0; | ||
2130 | } | ||
2131 | |||
2132 | static int dmfe_resume(struct pci_dev *pci_dev) | ||
2133 | { | ||
2134 | struct net_device *dev = pci_get_drvdata(pci_dev); | ||
2135 | u32 tmp; | ||
2136 | |||
2137 | pci_set_power_state(pci_dev, PCI_D0); | ||
2138 | pci_restore_state(pci_dev); | ||
2139 | |||
2140 | /* Re-initialize DM910X board */ | ||
2141 | dmfe_init_dm910x(dev); | ||
2142 | |||
2143 | /* Disable WOL */ | ||
2144 | pci_read_config_dword(pci_dev, 0x40, &tmp); | ||
2145 | |||
2146 | tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET); | ||
2147 | pci_write_config_dword(pci_dev, 0x40, tmp); | ||
2148 | |||
2149 | pci_enable_wake(pci_dev, PCI_D3hot, 0); | ||
2150 | pci_enable_wake(pci_dev, PCI_D3cold, 0); | ||
2151 | |||
2152 | /* Restart upper layer interface */ | ||
2153 | netif_device_attach(dev); | ||
2154 | |||
2155 | return 0; | ||
2156 | } | ||
2157 | #else | ||
2158 | #define dmfe_suspend NULL | ||
2159 | #define dmfe_resume NULL | ||
2160 | #endif | ||
2161 | |||
2162 | static struct pci_driver dmfe_driver = { | ||
2163 | .name = "dmfe", | ||
2164 | .id_table = dmfe_pci_tbl, | ||
2165 | .probe = dmfe_init_one, | ||
2166 | .remove = __devexit_p(dmfe_remove_one), | ||
2167 | .suspend = dmfe_suspend, | ||
2168 | .resume = dmfe_resume | ||
2169 | }; | ||
2170 | |||
2171 | MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw"); | ||
2172 | MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver"); | ||
2173 | MODULE_LICENSE("GPL"); | ||
2174 | MODULE_VERSION(DRV_VERSION); | ||
2175 | |||
2176 | module_param(debug, int, 0); | ||
2177 | module_param(mode, byte, 0); | ||
2178 | module_param(cr6set, int, 0); | ||
2179 | module_param(chkmode, byte, 0); | ||
2180 | module_param(HPNA_mode, byte, 0); | ||
2181 | module_param(HPNA_rx_cmd, byte, 0); | ||
2182 | module_param(HPNA_tx_cmd, byte, 0); | ||
2183 | module_param(HPNA_NoiseFloor, byte, 0); | ||
2184 | module_param(SF_mode, byte, 0); | ||
2185 | MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)"); | ||
2186 | MODULE_PARM_DESC(mode, "Davicom DM9xxx: " | ||
2187 | "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); | ||
2188 | |||
2189 | MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function " | ||
2190 | "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)"); | ||
2191 | |||
2192 | /* Description: | ||
2193 | * When the module is loaded with insmod, the kernel invokes this | ||
2194 | * routine to initialize the driver and register it. | ||
2195 | */ | ||
2196 | |||
2197 | static int __init dmfe_init_module(void) | ||
2198 | { | ||
2199 | int rc; | ||
2200 | |||
2201 | pr_info("%s\n", version); | ||
2202 | printed_version = 1; | ||
2203 | |||
2204 | DMFE_DBUG(0, "init_module() ", debug); | ||
2205 | |||
2206 | if (debug) | ||
2207 | dmfe_debug = debug; /* set debug flag */ | ||
2208 | if (cr6set) | ||
2209 | dmfe_cr6_user_set = cr6set; | ||
2210 | |||
2211 | switch(mode) { | ||
2212 | case DMFE_10MHF: | ||
2213 | case DMFE_100MHF: | ||
2214 | case DMFE_10MFD: | ||
2215 | case DMFE_100MFD: | ||
2216 | case DMFE_1M_HPNA: | ||
2217 | dmfe_media_mode = mode; | ||
2218 | break; | ||
2219 | default:dmfe_media_mode = DMFE_AUTO; | ||
2220 | break; | ||
2221 | } | ||
2222 | |||
2223 | if (HPNA_mode > 4) | ||
2224 | HPNA_mode = 0; /* Default: LP/HS */ | ||
2225 | if (HPNA_rx_cmd > 1) | ||
2226 | HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */ | ||
2227 | if (HPNA_tx_cmd > 1) | ||
2228 | HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */ | ||
2229 | if (HPNA_NoiseFloor > 15) | ||
2230 | HPNA_NoiseFloor = 0; | ||
2231 | |||
2232 | rc = pci_register_driver(&dmfe_driver); | ||
2233 | if (rc < 0) | ||
2234 | return rc; | ||
2235 | |||
2236 | return 0; | ||
2237 | } | ||
2238 | |||
2239 | |||
2240 | /* | ||
2241 | * Description: | ||
2242 | * When the module is removed with rmmod, the kernel invokes this | ||
2243 | * routine to unregister all registered services. | ||
2244 | */ | ||
2245 | |||
2246 | static void __exit dmfe_cleanup_module(void) | ||
2247 | { | ||
2248 | DMFE_DBUG(0, "dmfe_clean_module() ", debug); | ||
2249 | pci_unregister_driver(&dmfe_driver); | ||
2250 | } | ||
2251 | |||
2252 | module_init(dmfe_init_module); | ||
2253 | module_exit(dmfe_cleanup_module); | ||
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c new file mode 100644 index 000000000000..fa5eee925f25 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/eeprom.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/eeprom.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | Please submit bug reports to http://bugzilla.kernel.org/. | ||
13 | */ | ||
14 | |||
15 | #include <linux/pci.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include "tulip.h" | ||
18 | #include <linux/init.h> | ||
19 | #include <asm/unaligned.h> | ||
20 | |||
21 | |||
22 | |||
23 | /* Serial EEPROM section. */ | ||
24 | /* The main routine to parse the very complicated SROM structure. | ||
25 | Search www.digital.com for "21X4 SROM" to get details. | ||
26 | This code is very complex, and will require changes to support | ||
27 | additional cards, so I'll be verbose about what is going on. | ||
28 | */ | ||
29 | |||
30 | /* Known cards that have old-style EEPROMs. */ | ||
31 | static struct eeprom_fixup eeprom_fixups[] __devinitdata = { | ||
32 | {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c, | ||
33 | 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }}, | ||
34 | {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f, | ||
35 | 0x0000, 0x009E, /* 10baseT */ | ||
36 | 0x0004, 0x009E, /* 10baseT-FD */ | ||
37 | 0x0903, 0x006D, /* 100baseTx */ | ||
38 | 0x0905, 0x006D, /* 100baseTx-FD */ }}, | ||
39 | {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f, | ||
40 | 0x0107, 0x8021, /* 100baseFx */ | ||
41 | 0x0108, 0x8021, /* 100baseFx-FD */ | ||
42 | 0x0100, 0x009E, /* 10baseT */ | ||
43 | 0x0104, 0x009E, /* 10baseT-FD */ | ||
44 | 0x0103, 0x006D, /* 100baseTx */ | ||
45 | 0x0105, 0x006D, /* 100baseTx-FD */ }}, | ||
46 | {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513, | ||
47 | 0x1001, 0x009E, /* 10base2, CSR12 0x10*/ | ||
48 | 0x0000, 0x009E, /* 10baseT */ | ||
49 | 0x0004, 0x009E, /* 10baseT-FD */ | ||
50 | 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */ | ||
51 | 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}}, | ||
52 | {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F, | ||
53 | 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */ | ||
54 | 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */ | ||
55 | 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */ | ||
56 | 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */ | ||
57 | 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */ | ||
58 | }}, | ||
59 | {"NetWinder", 0x00, 0x10, 0x57, | ||
60 | /* Default media = MII | ||
61 | * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1 | ||
62 | */ | ||
63 | { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 } | ||
64 | }, | ||
65 | {"Cobalt Microserver", 0, 0x10, 0xE0, {0x1e00, /* 0 == controller #, 1e == offset */ | ||
66 | 0x0000, /* 0 == high offset, 0 == gap */ | ||
67 | 0x0800, /* Default Autoselect */ | ||
68 | 0x8001, /* 1 leaf, extended type, bogus len */ | ||
69 | 0x0003, /* Type 3 (MII), PHY #0 */ | ||
70 | 0x0400, /* 0 init instr, 4 reset instr */ | ||
71 | 0x0801, /* Set control mode, GP0 output */ | ||
72 | 0x0000, /* Drive GP0 Low (RST is active low) */ | ||
73 | 0x0800, /* control mode, GP0 input (undriven) */ | ||
74 | 0x0000, /* clear control mode */ | ||
75 | 0x7800, /* 100TX FDX + HDX, 10bT FDX + HDX */ | ||
76 | 0x01e0, /* Advertise all above */ | ||
77 | 0x5000, /* FDX all above */ | ||
78 | 0x1800, /* Set fast TTM in 100bt modes */ | ||
79 | 0x0000, /* PHY cannot be unplugged */ | ||
80 | }}, | ||
81 | {NULL}}; | ||
82 | |||
83 | |||
84 | static const char *block_name[] __devinitdata = { | ||
85 | "21140 non-MII", | ||
86 | "21140 MII PHY", | ||
87 | "21142 Serial PHY", | ||
88 | "21142 MII PHY", | ||
89 | "21143 SYM PHY", | ||
90 | "21143 reset method" | ||
91 | }; | ||
92 | |||
93 | |||
94 | /** | ||
95 | * tulip_build_fake_mediatable - Build a fake mediatable entry. | ||
96 | * @tp: Ptr to the tulip private data. | ||
97 | * | ||
98 | * Some cards like the 3x5 HSC cards (J3514A) do not have a standard | ||
99 | * srom and can not be handled under the fixup routine. These cards | ||
100 | * still need a valid mediatable entry for correct csr12 setup and | ||
101 | * mii handling. | ||
102 | * | ||
103 | * Since this is currently a parisc-linux specific function, the | ||
104 | * #ifdef __hppa__ should completely optimize this function away for | ||
105 | * non-parisc hardware. | ||
106 | */ | ||
107 | static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp) | ||
108 | { | ||
109 | #ifdef CONFIG_GSC | ||
110 | if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) { | ||
111 | static unsigned char leafdata[] = | ||
112 | { 0x01, /* phy number */ | ||
113 | 0x02, /* gpr setup sequence length */ | ||
114 | 0x02, 0x00, /* gpr setup sequence */ | ||
115 | 0x02, /* phy reset sequence length */ | ||
116 | 0x01, 0x00, /* phy reset sequence */ | ||
117 | 0x00, 0x78, /* media capabilities */ | ||
118 | 0x00, 0xe0, /* nway advertisement */ | ||
119 | 0x00, 0x05, /* fdx bit map */ | ||
120 | 0x00, 0x06 /* ttm bit map */ | ||
121 | }; | ||
122 | |||
123 | tp->mtable = kmalloc(sizeof(struct mediatable) + | ||
124 | sizeof(struct medialeaf), GFP_KERNEL); | ||
125 | |||
126 | if (tp->mtable == NULL) | ||
127 | return; /* Horrible, impossible failure. */ | ||
128 | |||
129 | tp->mtable->defaultmedia = 0x800; | ||
130 | tp->mtable->leafcount = 1; | ||
131 | tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */ | ||
132 | tp->mtable->has_nonmii = 0; | ||
133 | tp->mtable->has_reset = 0; | ||
134 | tp->mtable->has_mii = 1; | ||
135 | tp->mtable->csr15dir = tp->mtable->csr15val = 0; | ||
136 | tp->mtable->mleaf[0].type = 1; | ||
137 | tp->mtable->mleaf[0].media = 11; | ||
138 | tp->mtable->mleaf[0].leafdata = &leafdata[0]; | ||
139 | tp->flags |= HAS_PHY_IRQ; | ||
140 | tp->csr12_shadow = -1; | ||
141 | } | ||
142 | #endif | ||
143 | } | ||
144 | |||
145 | void __devinit tulip_parse_eeprom(struct net_device *dev) | ||
146 | { | ||
147 | /* | ||
148 | dev is not registered at this point, so logging messages can't | ||
149 | use dev_<level> or netdev_<level> but dev->name is good via a | ||
150 | hack in the caller | ||
151 | */ | ||
152 | |||
153 | /* The last media info list parsed, for multiport boards. */ | ||
154 | static struct mediatable *last_mediatable; | ||
155 | static unsigned char *last_ee_data; | ||
156 | static int controller_index; | ||
157 | struct tulip_private *tp = netdev_priv(dev); | ||
158 | unsigned char *ee_data = tp->eeprom; | ||
159 | int i; | ||
160 | |||
161 | tp->mtable = NULL; | ||
162 | /* Detect an old-style (SA only) EEPROM layout: | ||
163 | memcmp(eedata, eedata+16, 8). */ | ||
164 | for (i = 0; i < 8; i ++) | ||
165 | if (ee_data[i] != ee_data[16+i]) | ||
166 | break; | ||
167 | if (i >= 8) { | ||
168 | if (ee_data[0] == 0xff) { | ||
169 | if (last_mediatable) { | ||
170 | controller_index++; | ||
171 | pr_info("%s: Controller %d of multiport board\n", | ||
172 | dev->name, controller_index); | ||
173 | tp->mtable = last_mediatable; | ||
174 | ee_data = last_ee_data; | ||
175 | goto subsequent_board; | ||
176 | } else | ||
177 | pr_info("%s: Missing EEPROM, this interface may not work correctly!\n", | ||
178 | dev->name); | ||
179 | return; | ||
180 | } | ||
181 | /* Do a fix-up based on the vendor half of the station address prefix. */ | ||
182 | for (i = 0; eeprom_fixups[i].name; i++) { | ||
183 | if (dev->dev_addr[0] == eeprom_fixups[i].addr0 && | ||
184 | dev->dev_addr[1] == eeprom_fixups[i].addr1 && | ||
185 | dev->dev_addr[2] == eeprom_fixups[i].addr2) { | ||
186 | if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55) | ||
187 | i++; /* An Accton EN1207, not an outlaw Maxtech. */ | ||
188 | memcpy(ee_data + 26, eeprom_fixups[i].newtable, | ||
189 | sizeof(eeprom_fixups[i].newtable)); | ||
190 | pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n", | ||
191 | dev->name, eeprom_fixups[i].name); | ||
192 | break; | ||
193 | } | ||
194 | } | ||
195 | if (eeprom_fixups[i].name == NULL) { /* No fixup found. */ | ||
196 | pr_info("%s: Old style EEPROM with no media selection information\n", | ||
197 | dev->name); | ||
198 | return; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | controller_index = 0; | ||
203 | if (ee_data[19] > 1) { /* Multiport board. */ | ||
204 | last_ee_data = ee_data; | ||
205 | } | ||
206 | subsequent_board: | ||
207 | |||
208 | if (ee_data[27] == 0) { /* No valid media table. */ | ||
209 | tulip_build_fake_mediatable(tp); | ||
210 | } else { | ||
211 | unsigned char *p = (void *)ee_data + ee_data[27]; | ||
212 | unsigned char csr12dir = 0; | ||
213 | int count, new_advertise = 0; | ||
214 | struct mediatable *mtable; | ||
215 | u16 media = get_u16(p); | ||
216 | |||
217 | p += 2; | ||
218 | if (tp->flags & CSR12_IN_SROM) | ||
219 | csr12dir = *p++; | ||
220 | count = *p++; | ||
221 | |||
222 | /* there is no phy information, don't even try to build mtable */ | ||
223 | if (count == 0) { | ||
224 | if (tulip_debug > 0) | ||
225 | pr_warn("%s: no phy info, aborting mtable build\n", | ||
226 | dev->name); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | mtable = kmalloc(sizeof(struct mediatable) + | ||
231 | count * sizeof(struct medialeaf), | ||
232 | GFP_KERNEL); | ||
233 | if (mtable == NULL) | ||
234 | return; /* Horrible, impossible failure. */ | ||
235 | last_mediatable = tp->mtable = mtable; | ||
236 | mtable->defaultmedia = media; | ||
237 | mtable->leafcount = count; | ||
238 | mtable->csr12dir = csr12dir; | ||
239 | mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0; | ||
240 | mtable->csr15dir = mtable->csr15val = 0; | ||
241 | |||
242 | pr_info("%s: EEPROM default media type %s\n", | ||
243 | dev->name, | ||
244 | media & 0x0800 ? "Autosense" | ||
245 | : medianame[media & MEDIA_MASK]); | ||
246 | for (i = 0; i < count; i++) { | ||
247 | struct medialeaf *leaf = &mtable->mleaf[i]; | ||
248 | |||
249 | if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */ | ||
250 | leaf->type = 0; | ||
251 | leaf->media = p[0] & 0x3f; | ||
252 | leaf->leafdata = p; | ||
253 | if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */ | ||
254 | mtable->has_mii = 1; | ||
255 | p += 4; | ||
256 | } else { | ||
257 | leaf->type = p[1]; | ||
258 | if (p[1] == 0x05) { | ||
259 | mtable->has_reset = i; | ||
260 | leaf->media = p[2] & 0x0f; | ||
261 | } else if (tp->chip_id == DM910X && p[1] == 0x80) { | ||
262 | /* Hack to ignore Davicom delay period block */ | ||
263 | mtable->leafcount--; | ||
264 | count--; | ||
265 | i--; | ||
266 | leaf->leafdata = p + 2; | ||
267 | p += (p[0] & 0x3f) + 1; | ||
268 | continue; | ||
269 | } else if (p[1] & 1) { | ||
270 | int gpr_len, reset_len; | ||
271 | |||
272 | mtable->has_mii = 1; | ||
273 | leaf->media = 11; | ||
274 | gpr_len=p[3]*2; | ||
275 | reset_len=p[4+gpr_len]*2; | ||
276 | new_advertise |= get_u16(&p[7+gpr_len+reset_len]); | ||
277 | } else { | ||
278 | mtable->has_nonmii = 1; | ||
279 | leaf->media = p[2] & MEDIA_MASK; | ||
280 | /* Davicom's media number for 100BaseTX is strange */ | ||
281 | if (tp->chip_id == DM910X && leaf->media == 1) | ||
282 | leaf->media = 3; | ||
283 | switch (leaf->media) { | ||
284 | case 0: new_advertise |= 0x0020; break; | ||
285 | case 4: new_advertise |= 0x0040; break; | ||
286 | case 3: new_advertise |= 0x0080; break; | ||
287 | case 5: new_advertise |= 0x0100; break; | ||
288 | case 6: new_advertise |= 0x0200; break; | ||
289 | } | ||
290 | if (p[1] == 2 && leaf->media == 0) { | ||
291 | if (p[2] & 0x40) { | ||
292 | u32 base15 = get_unaligned((u16*)&p[7]); | ||
293 | mtable->csr15dir = | ||
294 | (get_unaligned((u16*)&p[9])<<16) + base15; | ||
295 | mtable->csr15val = | ||
296 | (get_unaligned((u16*)&p[11])<<16) + base15; | ||
297 | } else { | ||
298 | mtable->csr15dir = get_unaligned((u16*)&p[3])<<16; | ||
299 | mtable->csr15val = get_unaligned((u16*)&p[5])<<16; | ||
300 | } | ||
301 | } | ||
302 | } | ||
303 | leaf->leafdata = p + 2; | ||
304 | p += (p[0] & 0x3f) + 1; | ||
305 | } | ||
306 | if (tulip_debug > 1 && leaf->media == 11) { | ||
307 | unsigned char *bp = leaf->leafdata; | ||
308 | pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n", | ||
309 | dev->name, | ||
310 | bp[0], bp[1], bp[2 + bp[1]*2], | ||
311 | bp[5 + bp[2 + bp[1]*2]*2], | ||
312 | bp[4 + bp[2 + bp[1]*2]*2]); | ||
313 | } | ||
314 | pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n", | ||
315 | dev->name, | ||
316 | i, medianame[leaf->media & 15], leaf->media, | ||
317 | leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>", | ||
318 | leaf->type); | ||
319 | } | ||
320 | if (new_advertise) | ||
321 | tp->sym_advertise = new_advertise; | ||
322 | } | ||
323 | } | ||
324 | /* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/ | ||
325 | |||
326 | /* EEPROM_Ctrl bits. */ | ||
327 | #define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */ | ||
328 | #define EE_CS 0x01 /* EEPROM chip select. */ | ||
329 | #define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */ | ||
330 | #define EE_WRITE_0 0x01 | ||
331 | #define EE_WRITE_1 0x05 | ||
332 | #define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */ | ||
333 | #define EE_ENB (0x4800 | EE_CS) | ||
334 | |||
335 | /* Delay between EEPROM clock transitions. | ||
336 | Even at 33 MHz current PCI implementations don't overrun the EEPROM clock. | ||
337 | We add a bus turn-around to ensure that this remains true. */ | ||
338 | #define eeprom_delay() ioread32(ee_addr) | ||
339 | |||
340 | /* The EEPROM commands include the always-set leading bit. */ | ||
341 | #define EE_READ_CMD (6) | ||
342 | |||
343 | /* Note: this routine returns extra data bits for size detection. */ | ||
344 | int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len) | ||
345 | { | ||
346 | int i; | ||
347 | unsigned retval = 0; | ||
348 | struct tulip_private *tp = netdev_priv(dev); | ||
349 | void __iomem *ee_addr = tp->base_addr + CSR9; | ||
350 | int read_cmd = location | (EE_READ_CMD << addr_len); | ||
351 | |||
352 | /* If location is past the end of what we can address, don't | ||
353 | * read some other location (ie truncate). Just return zero. | ||
354 | */ | ||
355 | if (location > (1 << addr_len) - 1) | ||
356 | return 0; | ||
357 | |||
358 | iowrite32(EE_ENB & ~EE_CS, ee_addr); | ||
359 | iowrite32(EE_ENB, ee_addr); | ||
360 | |||
361 | /* Shift the read command bits out. */ | ||
362 | for (i = 4 + addr_len; i >= 0; i--) { | ||
363 | short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0; | ||
364 | iowrite32(EE_ENB | dataval, ee_addr); | ||
365 | eeprom_delay(); | ||
366 | iowrite32(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); | ||
367 | eeprom_delay(); | ||
368 | retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0); | ||
369 | } | ||
370 | iowrite32(EE_ENB, ee_addr); | ||
371 | eeprom_delay(); | ||
372 | |||
373 | for (i = 16; i > 0; i--) { | ||
374 | iowrite32(EE_ENB | EE_SHIFT_CLK, ee_addr); | ||
375 | eeprom_delay(); | ||
376 | retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0); | ||
377 | iowrite32(EE_ENB, ee_addr); | ||
378 | eeprom_delay(); | ||
379 | } | ||
380 | |||
381 | /* Terminate the EEPROM access. */ | ||
382 | iowrite32(EE_ENB & ~EE_CS, ee_addr); | ||
383 | return (tp->flags & HAS_SWAPPED_SEEPROM) ? swab16(retval) : retval; | ||
384 | } | ||
385 | |||
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c new file mode 100644 index 000000000000..5350d753e0ff --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/interrupt.c | |||
@@ -0,0 +1,811 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/interrupt.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
13 | |||
14 | */ | ||
15 | |||
16 | #include <linux/pci.h> | ||
17 | #include "tulip.h" | ||
18 | #include <linux/etherdevice.h> | ||
19 | |||
20 | int tulip_rx_copybreak; | ||
21 | unsigned int tulip_max_interrupt_work; | ||
22 | |||
23 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
24 | #define MIT_SIZE 15 | ||
25 | #define MIT_TABLE 15 /* We use 0 or max */ | ||
26 | |||
27 | static unsigned int mit_table[MIT_SIZE+1] = | ||
28 | { | ||
29 | /* CSR11 21143 hardware Mitigation Control Interrupt | ||
30 | We use only RX mitigation here; other techniques are used | ||
31 | for TX interrupt mitigation. | ||
32 | |||
33 | 31 Cycle Size (timer control) | ||
34 | 30:27 TX timer in 16 * Cycle size | ||
35 | 26:24 TX No pkts before Int. | ||
36 | 23:20 RX timer in Cycle size | ||
37 | 19:17 RX No pkts before Int. | ||
38 | 16 Continuous Mode (CM) | ||
39 | */ | ||
40 | |||
41 | 0x0, /* IM disabled */ | ||
42 | 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */ | ||
43 | 0x80150000, | ||
44 | 0x80270000, | ||
45 | 0x80370000, | ||
46 | 0x80490000, | ||
47 | 0x80590000, | ||
48 | 0x80690000, | ||
49 | 0x807B0000, | ||
50 | 0x808B0000, | ||
51 | 0x809D0000, | ||
52 | 0x80AD0000, | ||
53 | 0x80BD0000, | ||
54 | 0x80CF0000, | ||
55 | 0x80DF0000, | ||
56 | // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */ | ||
57 | 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */ | ||
58 | }; | ||
59 | #endif | ||
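Editorial aside (not part of the patch): the CSR11 field layout documented in the comment above can be sanity-checked with a small host-side decoder. For example, the table entry 0x80150000 decodes to cycle-size bit set, RX timer 1, RX packets 2, continuous mode on, matching the in-line comment.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: decode a CSR11 mitigation value using the bit
 * layout documented in the comment above. */
static void decode_csr11(uint32_t v)
{
	printf("cycle size : %u\n", (v >> 31) & 0x1);
	printf("TX timer   : %u\n", (v >> 27) & 0xf);
	printf("TX pkts    : %u\n", (v >> 24) & 0x7);
	printf("RX timer   : %u\n", (v >> 20) & 0xf);
	printf("RX pkts    : %u\n", (v >> 17) & 0x7);
	printf("CM         : %u\n", (v >> 16) & 0x1);
}

int main(void)
{
	decode_csr11(0x80150000);	/* RX time = 1, RX pkts = 2, CM = 1 */
	return 0;
}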
60 | |||
61 | |||
62 | int tulip_refill_rx(struct net_device *dev) | ||
63 | { | ||
64 | struct tulip_private *tp = netdev_priv(dev); | ||
65 | int entry; | ||
66 | int refilled = 0; | ||
67 | |||
68 | /* Refill the Rx ring buffers. */ | ||
69 | for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) { | ||
70 | entry = tp->dirty_rx % RX_RING_SIZE; | ||
71 | if (tp->rx_buffers[entry].skb == NULL) { | ||
72 | struct sk_buff *skb; | ||
73 | dma_addr_t mapping; | ||
74 | |||
75 | skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ); | ||
76 | if (skb == NULL) | ||
77 | break; | ||
78 | |||
79 | mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, | ||
80 | PCI_DMA_FROMDEVICE); | ||
81 | tp->rx_buffers[entry].mapping = mapping; | ||
82 | |||
83 | skb->dev = dev; /* Mark as being used by this device. */ | ||
84 | tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); | ||
85 | refilled++; | ||
86 | } | ||
87 | tp->rx_ring[entry].status = cpu_to_le32(DescOwned); | ||
88 | } | ||
89 | if(tp->chip_id == LC82C168) { | ||
90 | if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) { | ||
91 | /* Rx stopped due to out of buffers, | ||
92 | * restart it | ||
93 | */ | ||
94 | iowrite32(0x01, tp->base_addr + CSR2); | ||
95 | } | ||
96 | } | ||
97 | return refilled; | ||
98 | } | ||
99 | |||
100 | #ifdef CONFIG_TULIP_NAPI | ||
101 | |||
102 | void oom_timer(unsigned long data) | ||
103 | { | ||
104 | struct net_device *dev = (struct net_device *)data; | ||
105 | struct tulip_private *tp = netdev_priv(dev); | ||
106 | napi_schedule(&tp->napi); | ||
107 | } | ||
108 | |||
109 | int tulip_poll(struct napi_struct *napi, int budget) | ||
110 | { | ||
111 | struct tulip_private *tp = container_of(napi, struct tulip_private, napi); | ||
112 | struct net_device *dev = tp->dev; | ||
113 | int entry = tp->cur_rx % RX_RING_SIZE; | ||
114 | int work_done = 0; | ||
115 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
116 | int received = 0; | ||
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
120 | |||
121 | /* that one buffer is needed for mit activation; or might be a | ||
122 | bug in the ring buffer code; check later -- JHS*/ | ||
123 | |||
124 | if (budget >=RX_RING_SIZE) budget--; | ||
125 | #endif | ||
126 | |||
127 | if (tulip_debug > 4) | ||
128 | netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n", | ||
129 | entry, tp->rx_ring[entry].status); | ||
130 | |||
131 | do { | ||
132 | if (ioread32(tp->base_addr + CSR5) == 0xffffffff) { | ||
133 | netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n"); | ||
134 | break; | ||
135 | } | ||
136 | /* Acknowledge current RX interrupt sources. */ | ||
137 | iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5); | ||
138 | |||
139 | |||
140 | /* If we own the next entry, it is a new packet. Send it up. */ | ||
141 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | ||
142 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | ||
143 | short pkt_len; | ||
144 | |||
145 | if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx) | ||
146 | break; | ||
147 | |||
148 | if (tulip_debug > 5) | ||
149 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", | ||
150 | entry, status); | ||
151 | |||
152 | if (++work_done >= budget) | ||
153 | goto not_done; | ||
154 | |||
155 | /* | ||
156 | * Omit the four octet CRC from the length. | ||
157 | * (May not be considered valid until we have | ||
158 | * checked status for RxLengthOver2047 bits) | ||
159 | */ | ||
160 | pkt_len = ((status >> 16) & 0x7ff) - 4; | ||
161 | |||
162 | /* | ||
163 | * Maximum pkt_len is 1518 (1514 + vlan header) | ||
164 | * Anything higher than this is always invalid | ||
165 | * regardless of RxLengthOver2047 bits | ||
166 | */ | ||
167 | |||
168 | if ((status & (RxLengthOver2047 | | ||
169 | RxDescCRCError | | ||
170 | RxDescCollisionSeen | | ||
171 | RxDescRunt | | ||
172 | RxDescDescErr | | ||
173 | RxWholePkt)) != RxWholePkt || | ||
174 | pkt_len > 1518) { | ||
175 | if ((status & (RxLengthOver2047 | | ||
176 | RxWholePkt)) != RxWholePkt) { | ||
177 | /* Ignore earlier buffers. */ | ||
178 | if ((status & 0xffff) != 0x7fff) { | ||
179 | if (tulip_debug > 1) | ||
180 | dev_warn(&dev->dev, | ||
181 | "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", | ||
182 | status); | ||
183 | dev->stats.rx_length_errors++; | ||
184 | } | ||
185 | } else { | ||
186 | /* There was a fatal error. */ | ||
187 | if (tulip_debug > 2) | ||
188 | netdev_dbg(dev, "Receive error, Rx status %08x\n", | ||
189 | status); | ||
190 | dev->stats.rx_errors++; /* end of a packet.*/ | ||
191 | if (pkt_len > 1518 || | ||
192 | (status & RxDescRunt)) | ||
193 | dev->stats.rx_length_errors++; | ||
194 | |||
195 | if (status & 0x0004) | ||
196 | dev->stats.rx_frame_errors++; | ||
197 | if (status & 0x0002) | ||
198 | dev->stats.rx_crc_errors++; | ||
199 | if (status & 0x0001) | ||
200 | dev->stats.rx_fifo_errors++; | ||
201 | } | ||
202 | } else { | ||
203 | struct sk_buff *skb; | ||
204 | |||
205 | /* Check if the packet is long enough to accept without copying | ||
206 | to a minimally-sized skbuff. */ | ||
207 | if (pkt_len < tulip_rx_copybreak && | ||
208 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | ||
209 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | ||
210 | pci_dma_sync_single_for_cpu(tp->pdev, | ||
211 | tp->rx_buffers[entry].mapping, | ||
212 | pkt_len, PCI_DMA_FROMDEVICE); | ||
213 | #if ! defined(__alpha__) | ||
214 | skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, | ||
215 | pkt_len); | ||
216 | skb_put(skb, pkt_len); | ||
217 | #else | ||
218 | memcpy(skb_put(skb, pkt_len), | ||
219 | tp->rx_buffers[entry].skb->data, | ||
220 | pkt_len); | ||
221 | #endif | ||
222 | pci_dma_sync_single_for_device(tp->pdev, | ||
223 | tp->rx_buffers[entry].mapping, | ||
224 | pkt_len, PCI_DMA_FROMDEVICE); | ||
225 | } else { /* Pass up the skb already on the Rx ring. */ | ||
226 | char *temp = skb_put(skb = tp->rx_buffers[entry].skb, | ||
227 | pkt_len); | ||
228 | |||
229 | #ifndef final_version | ||
230 | if (tp->rx_buffers[entry].mapping != | ||
231 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { | ||
232 | dev_err(&dev->dev, | ||
233 | "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n", | ||
234 | le32_to_cpu(tp->rx_ring[entry].buffer1), | ||
235 | (unsigned long long)tp->rx_buffers[entry].mapping, | ||
236 | skb->head, temp); | ||
237 | } | ||
238 | #endif | ||
239 | |||
240 | pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, | ||
241 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | ||
242 | |||
243 | tp->rx_buffers[entry].skb = NULL; | ||
244 | tp->rx_buffers[entry].mapping = 0; | ||
245 | } | ||
246 | skb->protocol = eth_type_trans(skb, dev); | ||
247 | |||
248 | netif_receive_skb(skb); | ||
249 | |||
250 | dev->stats.rx_packets++; | ||
251 | dev->stats.rx_bytes += pkt_len; | ||
252 | } | ||
253 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
254 | received++; | ||
255 | #endif | ||
256 | |||
257 | entry = (++tp->cur_rx) % RX_RING_SIZE; | ||
258 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4) | ||
259 | tulip_refill_rx(dev); | ||
260 | |||
261 | } | ||
262 | |||
263 | /* New ack strategy... irq does not ack Rx any longer | ||
264 | hopefully this helps */ | ||
265 | |||
266 | /* Really bad things can happen here... If a new packet arrives | ||
267 | * and an irq arrives (tx or just due to an occasionally unset | ||
268 | * mask), it will be acked by the irq handler, but the new thread | ||
269 | * is not scheduled. It is a major hole in the design. | ||
270 | * No idea how to fix this if "playing with fire" fails | ||
271 | * tomorrow (night 011029). If it does not fail, we have finally | ||
272 | * won: the amount of IO did not increase at all. */ | ||
273 | } while ((ioread32(tp->base_addr + CSR5) & RxIntr)); | ||
274 | |||
275 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
276 | |||
277 | /* We use this simplistic scheme for IM. It's proven by | ||
278 | real-life installations. We can have IM enabled | ||
279 | continuously, but this would cause unnecessary latency. | ||
280 | Unfortunately we can't use all the NET_RX_* feedback here. | ||
281 | This would turn on IM for devices that are not contributing | ||
282 | to backlog congestion, adding unnecessary latency. | ||
283 | |||
284 | We monitor the device RX-ring and have: | ||
285 | |||
286 | HW Interrupt Mitigation either ON or OFF. | ||
287 | |||
288 | ON: More than 1 pkt received (per intr.) OR we are dropping | ||
289 | OFF: Only 1 pkt received | ||
290 | |||
291 | Note. We only use min and max (0, 15) settings from mit_table */ | ||
292 | |||
293 | |||
294 | if( tp->flags & HAS_INTR_MITIGATION) { | ||
295 | if( received > 1 ) { | ||
296 | if( ! tp->mit_on ) { | ||
297 | tp->mit_on = 1; | ||
298 | iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11); | ||
299 | } | ||
300 | } | ||
301 | else { | ||
302 | if( tp->mit_on ) { | ||
303 | tp->mit_on = 0; | ||
304 | iowrite32(0, tp->base_addr + CSR11); | ||
305 | } | ||
306 | } | ||
307 | } | ||
308 | |||
309 | #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */ | ||
310 | |||
311 | tulip_refill_rx(dev); | ||
312 | |||
313 | /* If RX ring is not full we are out of memory. */ | ||
314 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
315 | goto oom; | ||
316 | |||
317 | /* Remove us from polling list and enable RX intr. */ | ||
318 | |||
319 | napi_complete(napi); | ||
320 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7); | ||
321 | |||
322 | /* The last op happens after poll completion, which means the following: | ||
323 | * 1. it can race with disabling irqs in irq handler | ||
324 | * 2. it can race with disabling/enabling irqs in other poll threads | ||
325 | * 3. if an irq raised after beginning loop, it will be immediately | ||
326 | * triggered here. | ||
327 | * | ||
328 | * Summarizing: the logic results in some redundant irqs both | ||
329 | * due to races in masking and due to too late acking of already | ||
330 | * processed irqs. But it must not result in losing events. | ||
331 | */ | ||
332 | |||
333 | return work_done; | ||
334 | |||
335 | not_done: | ||
336 | if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 || | ||
337 | tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
338 | tulip_refill_rx(dev); | ||
339 | |||
340 | if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) | ||
341 | goto oom; | ||
342 | |||
343 | return work_done; | ||
344 | |||
345 | oom: /* Executed with RX ints disabled */ | ||
346 | |||
347 | /* Start timer, stop polling, but do not enable rx interrupts. */ | ||
348 | mod_timer(&tp->oom_timer, jiffies+1); | ||
349 | |||
350 | /* Think: timer_pending() was an explicit signature of bug. | ||
351 | * Timer can be pending now but fired and completed | ||
352 | * before we did napi_complete(). See? We would lose it. */ | ||
353 | |||
354 | /* remove ourselves from the polling list */ | ||
355 | napi_complete(napi); | ||
356 | |||
357 | return work_done; | ||
358 | } | ||
359 | |||
360 | #else /* CONFIG_TULIP_NAPI */ | ||
361 | |||
362 | static int tulip_rx(struct net_device *dev) | ||
363 | { | ||
364 | struct tulip_private *tp = netdev_priv(dev); | ||
365 | int entry = tp->cur_rx % RX_RING_SIZE; | ||
366 | int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx; | ||
367 | int received = 0; | ||
368 | |||
369 | if (tulip_debug > 4) | ||
370 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", | ||
371 | entry, tp->rx_ring[entry].status); | ||
372 | /* If we own the next entry, it is a new packet. Send it up. */ | ||
373 | while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) { | ||
374 | s32 status = le32_to_cpu(tp->rx_ring[entry].status); | ||
375 | short pkt_len; | ||
376 | |||
377 | if (tulip_debug > 5) | ||
378 | netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n", | ||
379 | entry, status); | ||
380 | if (--rx_work_limit < 0) | ||
381 | break; | ||
382 | |||
383 | /* | ||
384 | Omit the four octet CRC from the length. | ||
385 | (May not be considered valid until we have | ||
386 | checked status for RxLengthOver2047 bits) | ||
387 | */ | ||
388 | pkt_len = ((status >> 16) & 0x7ff) - 4; | ||
389 | /* | ||
390 | Maximum pkt_len is 1518 (1514 + vlan header) | ||
391 | Anything higher than this is always invalid | ||
392 | regardless of RxLengthOver2047 bits | ||
393 | */ | ||
394 | |||
395 | if ((status & (RxLengthOver2047 | | ||
396 | RxDescCRCError | | ||
397 | RxDescCollisionSeen | | ||
398 | RxDescRunt | | ||
399 | RxDescDescErr | | ||
400 | RxWholePkt)) != RxWholePkt || | ||
401 | pkt_len > 1518) { | ||
402 | if ((status & (RxLengthOver2047 | | ||
403 | RxWholePkt)) != RxWholePkt) { | ||
404 | /* Ignore earlier buffers. */ | ||
405 | if ((status & 0xffff) != 0x7fff) { | ||
406 | if (tulip_debug > 1) | ||
407 | netdev_warn(dev, | ||
408 | "Oversized Ethernet frame spanned multiple buffers, status %08x!\n", | ||
409 | status); | ||
410 | dev->stats.rx_length_errors++; | ||
411 | } | ||
412 | } else { | ||
413 | /* There was a fatal error. */ | ||
414 | if (tulip_debug > 2) | ||
415 | netdev_dbg(dev, "Receive error, Rx status %08x\n", | ||
416 | status); | ||
417 | dev->stats.rx_errors++; /* end of a packet.*/ | ||
418 | if (pkt_len > 1518 || | ||
419 | (status & RxDescRunt)) | ||
420 | dev->stats.rx_length_errors++; | ||
421 | if (status & 0x0004) | ||
422 | dev->stats.rx_frame_errors++; | ||
423 | if (status & 0x0002) | ||
424 | dev->stats.rx_crc_errors++; | ||
425 | if (status & 0x0001) | ||
426 | dev->stats.rx_fifo_errors++; | ||
427 | } | ||
428 | } else { | ||
429 | struct sk_buff *skb; | ||
430 | |||
431 | /* Check if the packet is long enough to accept without copying | ||
432 | to a minimally-sized skbuff. */ | ||
433 | if (pkt_len < tulip_rx_copybreak && | ||
434 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | ||
435 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | ||
436 | pci_dma_sync_single_for_cpu(tp->pdev, | ||
437 | tp->rx_buffers[entry].mapping, | ||
438 | pkt_len, PCI_DMA_FROMDEVICE); | ||
439 | #if ! defined(__alpha__) | ||
440 | skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, | ||
441 | pkt_len); | ||
442 | skb_put(skb, pkt_len); | ||
443 | #else | ||
444 | memcpy(skb_put(skb, pkt_len), | ||
445 | tp->rx_buffers[entry].skb->data, | ||
446 | pkt_len); | ||
447 | #endif | ||
448 | pci_dma_sync_single_for_device(tp->pdev, | ||
449 | tp->rx_buffers[entry].mapping, | ||
450 | pkt_len, PCI_DMA_FROMDEVICE); | ||
451 | } else { /* Pass up the skb already on the Rx ring. */ | ||
452 | char *temp = skb_put(skb = tp->rx_buffers[entry].skb, | ||
453 | pkt_len); | ||
454 | |||
455 | #ifndef final_version | ||
456 | if (tp->rx_buffers[entry].mapping != | ||
457 | le32_to_cpu(tp->rx_ring[entry].buffer1)) { | ||
458 | dev_err(&dev->dev, | ||
459 | "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n", | ||
460 | le32_to_cpu(tp->rx_ring[entry].buffer1), | ||
461 | (long long)tp->rx_buffers[entry].mapping, | ||
462 | skb->head, temp); | ||
463 | } | ||
464 | #endif | ||
465 | |||
466 | pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, | ||
467 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | ||
468 | |||
469 | tp->rx_buffers[entry].skb = NULL; | ||
470 | tp->rx_buffers[entry].mapping = 0; | ||
471 | } | ||
472 | skb->protocol = eth_type_trans(skb, dev); | ||
473 | |||
474 | netif_rx(skb); | ||
475 | |||
476 | dev->stats.rx_packets++; | ||
477 | dev->stats.rx_bytes += pkt_len; | ||
478 | } | ||
479 | received++; | ||
480 | entry = (++tp->cur_rx) % RX_RING_SIZE; | ||
481 | } | ||
482 | return received; | ||
483 | } | ||
484 | #endif /* CONFIG_TULIP_NAPI */ | ||
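Editorial aside (not part of the patch): both RX paths above extract the frame length as ((status >> 16) & 0x7ff) and then subtract the 4-byte CRC. A worked example with a hypothetical status value:

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: the length extraction used by tulip_rx() and
 * tulip_poll() above, applied to a hypothetical descriptor status. */
int main(void)
{
	uint32_t status = 0x03fc0300;		/* hypothetical RX status */
	int frame_len = (status >> 16) & 0x7ff;	/* length incl. 4-byte CRC */
	int pkt_len = frame_len - 4;		/* CRC dropped, as in the driver */

	printf("frame length %d, packet length %d\n", frame_len, pkt_len);
	return 0;
}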
485 | |||
486 | static inline unsigned int phy_interrupt (struct net_device *dev) | ||
487 | { | ||
488 | #ifdef __hppa__ | ||
489 | struct tulip_private *tp = netdev_priv(dev); | ||
490 | int csr12 = ioread32(tp->base_addr + CSR12) & 0xff; | ||
491 | |||
492 | if (csr12 != tp->csr12_shadow) { | ||
493 | /* ack interrupt */ | ||
494 | iowrite32(csr12 | 0x02, tp->base_addr + CSR12); | ||
495 | tp->csr12_shadow = csr12; | ||
496 | /* do link change stuff */ | ||
497 | spin_lock(&tp->lock); | ||
498 | tulip_check_duplex(dev); | ||
499 | spin_unlock(&tp->lock); | ||
500 | /* clear irq ack bit */ | ||
501 | iowrite32(csr12 & ~0x02, tp->base_addr + CSR12); | ||
502 | |||
503 | return 1; | ||
504 | } | ||
505 | #endif | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | /* The interrupt handler does all of the Rx thread work and cleans up | ||
511 | after the Tx thread. */ | ||
512 | irqreturn_t tulip_interrupt(int irq, void *dev_instance) | ||
513 | { | ||
514 | struct net_device *dev = (struct net_device *)dev_instance; | ||
515 | struct tulip_private *tp = netdev_priv(dev); | ||
516 | void __iomem *ioaddr = tp->base_addr; | ||
517 | int csr5; | ||
518 | int missed; | ||
519 | int rx = 0; | ||
520 | int tx = 0; | ||
521 | int oi = 0; | ||
522 | int maxrx = RX_RING_SIZE; | ||
523 | int maxtx = TX_RING_SIZE; | ||
524 | int maxoi = TX_RING_SIZE; | ||
525 | #ifdef CONFIG_TULIP_NAPI | ||
526 | int rxd = 0; | ||
527 | #else | ||
528 | int entry; | ||
529 | #endif | ||
530 | unsigned int work_count = tulip_max_interrupt_work; | ||
531 | unsigned int handled = 0; | ||
532 | |||
533 | /* Let's see whether the interrupt really is for us */ | ||
534 | csr5 = ioread32(ioaddr + CSR5); | ||
535 | |||
536 | if (tp->flags & HAS_PHY_IRQ) | ||
537 | handled = phy_interrupt (dev); | ||
538 | |||
539 | if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) | ||
540 | return IRQ_RETVAL(handled); | ||
541 | |||
542 | tp->nir++; | ||
543 | |||
544 | do { | ||
545 | |||
546 | #ifdef CONFIG_TULIP_NAPI | ||
547 | |||
548 | if (!rxd && (csr5 & (RxIntr | RxNoBuf))) { | ||
549 | rxd++; | ||
550 | /* Mask RX intrs and add the device to poll list. */ | ||
551 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7); | ||
552 | napi_schedule(&tp->napi); | ||
553 | |||
554 | if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass))) | ||
555 | break; | ||
556 | } | ||
557 | |||
558 | /* Acknowledge the interrupt sources we handle here ASAP; | ||
559 | the poll function does Rx and RxNoBuf acking */ | ||
560 | |||
561 | iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5); | ||
562 | |||
563 | #else | ||
564 | /* Acknowledge all of the current interrupt sources ASAP. */ | ||
565 | iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5); | ||
566 | |||
567 | |||
568 | if (csr5 & (RxIntr | RxNoBuf)) { | ||
569 | rx += tulip_rx(dev); | ||
570 | tulip_refill_rx(dev); | ||
571 | } | ||
572 | |||
573 | #endif /* CONFIG_TULIP_NAPI */ | ||
574 | |||
575 | if (tulip_debug > 4) | ||
576 | netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n", | ||
577 | csr5, ioread32(ioaddr + CSR5)); | ||
578 | |||
579 | |||
580 | if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) { | ||
581 | unsigned int dirty_tx; | ||
582 | |||
583 | spin_lock(&tp->lock); | ||
584 | |||
585 | for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0; | ||
586 | dirty_tx++) { | ||
587 | int entry = dirty_tx % TX_RING_SIZE; | ||
588 | int status = le32_to_cpu(tp->tx_ring[entry].status); | ||
589 | |||
590 | if (status < 0) | ||
591 | break; /* It still has not been Txed */ | ||
592 | |||
593 | /* Check for Rx filter setup frames. */ | ||
594 | if (tp->tx_buffers[entry].skb == NULL) { | ||
595 | /* test because dummy frames not mapped */ | ||
596 | if (tp->tx_buffers[entry].mapping) | ||
597 | pci_unmap_single(tp->pdev, | ||
598 | tp->tx_buffers[entry].mapping, | ||
599 | sizeof(tp->setup_frame), | ||
600 | PCI_DMA_TODEVICE); | ||
601 | continue; | ||
602 | } | ||
603 | |||
604 | if (status & 0x8000) { | ||
605 | /* There was a major error, log it. */ | ||
606 | #ifndef final_version | ||
607 | if (tulip_debug > 1) | ||
608 | netdev_dbg(dev, "Transmit error, Tx status %08x\n", | ||
609 | status); | ||
610 | #endif | ||
611 | dev->stats.tx_errors++; | ||
612 | if (status & 0x4104) | ||
613 | dev->stats.tx_aborted_errors++; | ||
614 | if (status & 0x0C00) | ||
615 | dev->stats.tx_carrier_errors++; | ||
616 | if (status & 0x0200) | ||
617 | dev->stats.tx_window_errors++; | ||
618 | if (status & 0x0002) | ||
619 | dev->stats.tx_fifo_errors++; | ||
620 | if ((status & 0x0080) && tp->full_duplex == 0) | ||
621 | dev->stats.tx_heartbeat_errors++; | ||
622 | } else { | ||
623 | dev->stats.tx_bytes += | ||
624 | tp->tx_buffers[entry].skb->len; | ||
625 | dev->stats.collisions += (status >> 3) & 15; | ||
626 | dev->stats.tx_packets++; | ||
627 | } | ||
628 | |||
629 | pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, | ||
630 | tp->tx_buffers[entry].skb->len, | ||
631 | PCI_DMA_TODEVICE); | ||
632 | |||
633 | /* Free the original skb. */ | ||
634 | dev_kfree_skb_irq(tp->tx_buffers[entry].skb); | ||
635 | tp->tx_buffers[entry].skb = NULL; | ||
636 | tp->tx_buffers[entry].mapping = 0; | ||
637 | tx++; | ||
638 | } | ||
639 | |||
640 | #ifndef final_version | ||
641 | if (tp->cur_tx - dirty_tx > TX_RING_SIZE) { | ||
642 | dev_err(&dev->dev, | ||
643 | "Out-of-sync dirty pointer, %d vs. %d\n", | ||
644 | dirty_tx, tp->cur_tx); | ||
645 | dirty_tx += TX_RING_SIZE; | ||
646 | } | ||
647 | #endif | ||
648 | |||
649 | if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) | ||
650 | netif_wake_queue(dev); | ||
651 | |||
652 | tp->dirty_tx = dirty_tx; | ||
653 | if (csr5 & TxDied) { | ||
654 | if (tulip_debug > 2) | ||
655 | dev_warn(&dev->dev, | ||
656 | "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n", | ||
657 | csr5, ioread32(ioaddr + CSR6), | ||
658 | tp->csr6); | ||
659 | tulip_restart_rxtx(tp); | ||
660 | } | ||
661 | spin_unlock(&tp->lock); | ||
662 | } | ||
663 | |||
664 | /* Log errors. */ | ||
665 | if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */ | ||
666 | if (csr5 == 0xffffffff) | ||
667 | break; | ||
668 | if (csr5 & TxJabber) | ||
669 | dev->stats.tx_errors++; | ||
670 | if (csr5 & TxFIFOUnderflow) { | ||
671 | if ((tp->csr6 & 0xC000) != 0xC000) | ||
672 | tp->csr6 += 0x4000; /* Bump up the Tx threshold */ | ||
673 | else | ||
674 | tp->csr6 |= 0x00200000; /* Store-n-forward. */ | ||
675 | /* Restart the transmit process. */ | ||
676 | tulip_restart_rxtx(tp); | ||
677 | iowrite32(0, ioaddr + CSR1); | ||
678 | } | ||
679 | if (csr5 & (RxDied | RxNoBuf)) { | ||
680 | if (tp->flags & COMET_MAC_ADDR) { | ||
681 | iowrite32(tp->mc_filter[0], ioaddr + 0xAC); | ||
682 | iowrite32(tp->mc_filter[1], ioaddr + 0xB0); | ||
683 | } | ||
684 | } | ||
685 | if (csr5 & RxDied) { /* Missed a Rx frame. */ | ||
686 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; | ||
687 | dev->stats.rx_errors++; | ||
688 | tulip_start_rxtx(tp); | ||
689 | } | ||
690 | /* | ||
691 | * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this | ||
692 | * call is ever done under the spinlock | ||
693 | */ | ||
694 | if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) { | ||
695 | if (tp->link_change) | ||
696 | (tp->link_change)(dev, csr5); | ||
697 | } | ||
698 | if (csr5 & SystemError) { | ||
699 | int error = (csr5 >> 23) & 7; | ||
700 | /* oops, we hit a PCI error. The code produced corresponds | ||
701 | * to the reason: | ||
702 | * 0 - parity error | ||
703 | * 1 - master abort | ||
704 | * 2 - target abort | ||
705 | * Note that on parity error, we should do a software reset | ||
706 | * of the chip to get it back into a sane state (according | ||
707 | * to the 21142/3 docs that is). | ||
708 | * -- rmk | ||
709 | */ | ||
710 | dev_err(&dev->dev, | ||
711 | "(%lu) System Error occurred (%d)\n", | ||
712 | tp->nir, error); | ||
713 | } | ||
714 | /* Clear all error sources, including undocumented ones! */ | ||
715 | iowrite32(0x0800f7ba, ioaddr + CSR5); | ||
716 | oi++; | ||
717 | } | ||
718 | if (csr5 & TimerInt) { | ||
719 | |||
720 | if (tulip_debug > 2) | ||
721 | dev_err(&dev->dev, | ||
722 | "Re-enabling interrupts, %08x\n", | ||
723 | csr5); | ||
724 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); | ||
725 | tp->ttimer = 0; | ||
726 | oi++; | ||
727 | } | ||
728 | if (tx > maxtx || rx > maxrx || oi > maxoi) { | ||
729 | if (tulip_debug > 1) | ||
730 | dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n", | ||
731 | csr5, tp->nir, tx, rx, oi); | ||
732 | |||
733 | /* Acknowledge all interrupt sources. */ | ||
734 | iowrite32(0x8001ffff, ioaddr + CSR5); | ||
735 | if (tp->flags & HAS_INTR_MITIGATION) { | ||
736 | /* Josip Loncaric at ICASE did extensive experimentation | ||
737 | to develop a good interrupt mitigation setting.*/ | ||
738 | iowrite32(0x8b240000, ioaddr + CSR11); | ||
739 | } else if (tp->chip_id == LC82C168) { | ||
740 | /* the LC82C168 doesn't have a hw timer.*/ | ||
741 | iowrite32(0x00, ioaddr + CSR7); | ||
742 | mod_timer(&tp->timer, RUN_AT(HZ/50)); | ||
743 | } else { | ||
744 | /* Mask all interrupting sources, set timer to | ||
745 | re-enable. */ | ||
746 | iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7); | ||
747 | iowrite32(0x0012, ioaddr + CSR11); | ||
748 | } | ||
749 | break; | ||
750 | } | ||
751 | |||
752 | work_count--; | ||
753 | if (work_count == 0) | ||
754 | break; | ||
755 | |||
756 | csr5 = ioread32(ioaddr + CSR5); | ||
757 | |||
758 | #ifdef CONFIG_TULIP_NAPI | ||
759 | if (rxd) | ||
760 | csr5 &= ~RxPollInt; | ||
761 | } while ((csr5 & (TxNoBuf | | ||
762 | TxDied | | ||
763 | TxIntr | | ||
764 | TimerInt | | ||
765 | /* Abnormal intr. */ | ||
766 | RxDied | | ||
767 | TxFIFOUnderflow | | ||
768 | TxJabber | | ||
769 | TPLnkFail | | ||
770 | SystemError )) != 0); | ||
771 | #else | ||
772 | } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0); | ||
773 | |||
774 | tulip_refill_rx(dev); | ||
775 | |||
776 | /* check if the card is in suspend mode */ | ||
777 | entry = tp->dirty_rx % RX_RING_SIZE; | ||
778 | if (tp->rx_buffers[entry].skb == NULL) { | ||
779 | if (tulip_debug > 1) | ||
780 | dev_warn(&dev->dev, | ||
781 | "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", | ||
782 | tp->nir, tp->cur_rx, tp->ttimer, rx); | ||
783 | if (tp->chip_id == LC82C168) { | ||
784 | iowrite32(0x00, ioaddr + CSR7); | ||
785 | mod_timer(&tp->timer, RUN_AT(HZ/50)); | ||
786 | } else { | ||
787 | if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) { | ||
788 | if (tulip_debug > 1) | ||
789 | dev_warn(&dev->dev, | ||
790 | "in rx suspend mode: (%lu) set timer\n", | ||
791 | tp->nir); | ||
792 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt, | ||
793 | ioaddr + CSR7); | ||
794 | iowrite32(TimerInt, ioaddr + CSR5); | ||
795 | iowrite32(12, ioaddr + CSR11); | ||
796 | tp->ttimer = 1; | ||
797 | } | ||
798 | } | ||
799 | } | ||
800 | #endif /* CONFIG_TULIP_NAPI */ | ||
801 | |||
802 | if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) { | ||
803 | dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed; | ||
804 | } | ||
805 | |||
806 | if (tulip_debug > 4) | ||
807 | netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n", | ||
808 | ioread32(ioaddr + CSR5)); | ||
809 | |||
810 | return IRQ_HANDLED; | ||
811 | } | ||
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c new file mode 100644 index 000000000000..4bd13922875d --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/media.c | |||
@@ -0,0 +1,556 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/media.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | |||
13 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/mii.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include "tulip.h" | ||
22 | |||
23 | |||
24 | /* The maximum data clock rate is 2.5 MHz. The minimum timing is usually | ||
25 | met by back-to-back PCI I/O cycles, but we insert a delay to avoid | ||
26 | "overclocking" issues or future 66 MHz PCI. */ | ||
27 | #define mdio_delay() ioread32(mdio_addr) | ||
28 | |||
29 | /* Read and write the MII registers using software-generated serial | ||
30 | MDIO protocol. It is just different enough from the EEPROM protocol | ||
31 | to not share code. The maximum data clock rate is 2.5 MHz. */ | ||
32 | #define MDIO_SHIFT_CLK 0x10000 | ||
33 | #define MDIO_DATA_WRITE0 0x00000 | ||
34 | #define MDIO_DATA_WRITE1 0x20000 | ||
35 | #define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */ | ||
36 | #define MDIO_ENB_IN 0x40000 | ||
37 | #define MDIO_DATA_READ 0x80000 | ||
38 | |||
39 | static const unsigned char comet_miireg2offset[32] = { | ||
40 | 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0, | ||
41 | 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, }; | ||
42 | |||
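On ADMtek Comet chips the MII registers are memory-mapped rather than bit-banged; tulip_mdio_read() and tulip_mdio_write() below use this table to translate an MII register number into a CSR offset, with a zero entry meaning the register is not mapped. A minimal sketch of that lookup (illustration only, not part of the driver; the helper name is hypothetical):

    static int comet_mii_peek(void __iomem *ioaddr, int location)
    {
    	/* Out-of-range or unmapped MII registers read back as 0xffff,
    	 * matching the behaviour of tulip_mdio_read() below. */
    	if ((location & ~0x1f) || !comet_miireg2offset[location])
    		return 0xffff;
    	return ioread32(ioaddr + comet_miireg2offset[location]);
    }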
43 | |||
44 | /* MII transceiver control section. | ||
45 | Read and write the MII registers using software-generated serial | ||
46 | MDIO protocol. | ||
47 | See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions") | ||
48 | or DP83840A data sheet for more details. | ||
49 | */ | ||
50 | |||
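The clause-22 management frame referenced above can be seen most directly in the 32-bit command that tulip_mdio_write() below assembles. A worked sketch of that packing (illustration only; the helper name is hypothetical, and the 32-bit preamble of ones is clocked out separately by the real code):

    static u32 mdio_build_write_cmd(int phy_id, int location, int val)
    {
    	return (0x5002 << 16) |		   /* bits 31:28 = 0101: ST=01, OP=01 (write); bits 17:16 = 10 (TA) */
    	       ((phy_id & 0x1f) << 23) |   /* bits 27:23: PHY address */
    	       ((location & 0x1f) << 18) | /* bits 22:18: register address */
    	       (val & 0xffff);		   /* bits 15:0: data to write */
    }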
51 | int tulip_mdio_read(struct net_device *dev, int phy_id, int location) | ||
52 | { | ||
53 | struct tulip_private *tp = netdev_priv(dev); | ||
54 | int i; | ||
55 | int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location; | ||
56 | int retval = 0; | ||
57 | void __iomem *ioaddr = tp->base_addr; | ||
58 | void __iomem *mdio_addr = ioaddr + CSR9; | ||
59 | unsigned long flags; | ||
60 | |||
61 | if (location & ~0x1f) | ||
62 | return 0xffff; | ||
63 | |||
64 | if (tp->chip_id == COMET && phy_id == 30) { | ||
65 | if (comet_miireg2offset[location]) | ||
66 | return ioread32(ioaddr + comet_miireg2offset[location]); | ||
67 | return 0xffff; | ||
68 | } | ||
69 | |||
70 | spin_lock_irqsave(&tp->mii_lock, flags); | ||
71 | if (tp->chip_id == LC82C168) { | ||
72 | iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0); | ||
73 | ioread32(ioaddr + 0xA0); | ||
74 | ioread32(ioaddr + 0xA0); | ||
75 | for (i = 1000; i >= 0; --i) { | ||
76 | barrier(); | ||
77 | if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000)) | ||
78 | break; | ||
79 | } | ||
80 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
81 | return retval & 0xffff; | ||
82 | } | ||
83 | |||
84 | /* Establish sync by sending at least 32 logic ones. */ | ||
85 | for (i = 32; i >= 0; i--) { | ||
86 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); | ||
87 | mdio_delay(); | ||
88 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); | ||
89 | mdio_delay(); | ||
90 | } | ||
91 | /* Shift the read command bits out. */ | ||
92 | for (i = 15; i >= 0; i--) { | ||
93 | int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; | ||
94 | |||
95 | iowrite32(MDIO_ENB | dataval, mdio_addr); | ||
96 | mdio_delay(); | ||
97 | iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); | ||
98 | mdio_delay(); | ||
99 | } | ||
100 | /* Read the two transition, 16 data, and wire-idle bits. */ | ||
101 | for (i = 19; i > 0; i--) { | ||
102 | iowrite32(MDIO_ENB_IN, mdio_addr); | ||
103 | mdio_delay(); | ||
104 | retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 1 : 0); | ||
105 | iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | ||
106 | mdio_delay(); | ||
107 | } | ||
108 | |||
109 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
110 | return (retval>>1) & 0xffff; | ||
111 | } | ||
112 | |||
113 | void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val) | ||
114 | { | ||
115 | struct tulip_private *tp = netdev_priv(dev); | ||
116 | int i; | ||
117 | int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff); | ||
118 | void __iomem *ioaddr = tp->base_addr; | ||
119 | void __iomem *mdio_addr = ioaddr + CSR9; | ||
120 | unsigned long flags; | ||
121 | |||
122 | if (location & ~0x1f) | ||
123 | return; | ||
124 | |||
125 | if (tp->chip_id == COMET && phy_id == 30) { | ||
126 | if (comet_miireg2offset[location]) | ||
127 | iowrite32(val, ioaddr + comet_miireg2offset[location]); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | spin_lock_irqsave(&tp->mii_lock, flags); | ||
132 | if (tp->chip_id == LC82C168) { | ||
133 | iowrite32(cmd, ioaddr + 0xA0); | ||
134 | for (i = 1000; i >= 0; --i) { | ||
135 | barrier(); | ||
136 | if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000)) | ||
137 | break; | ||
138 | } | ||
139 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
140 | return; | ||
141 | } | ||
142 | |||
143 | /* Establish sync by sending 32 logic ones. */ | ||
144 | for (i = 32; i >= 0; i--) { | ||
145 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr); | ||
146 | mdio_delay(); | ||
147 | iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr); | ||
148 | mdio_delay(); | ||
149 | } | ||
150 | /* Shift the command bits out. */ | ||
151 | for (i = 31; i >= 0; i--) { | ||
152 | int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0; | ||
153 | iowrite32(MDIO_ENB | dataval, mdio_addr); | ||
154 | mdio_delay(); | ||
155 | iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr); | ||
156 | mdio_delay(); | ||
157 | } | ||
158 | /* Clear out extra bits. */ | ||
159 | for (i = 2; i > 0; i--) { | ||
160 | iowrite32(MDIO_ENB_IN, mdio_addr); | ||
161 | mdio_delay(); | ||
162 | iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); | ||
163 | mdio_delay(); | ||
164 | } | ||
165 | |||
166 | spin_unlock_irqrestore(&tp->mii_lock, flags); | ||
167 | } | ||
168 | |||
169 | |||
170 | /* Set up the transceiver control registers for the selected media type. */ | ||
171 | void tulip_select_media(struct net_device *dev, int startup) | ||
172 | { | ||
173 | struct tulip_private *tp = netdev_priv(dev); | ||
174 | void __iomem *ioaddr = tp->base_addr; | ||
175 | struct mediatable *mtable = tp->mtable; | ||
176 | u32 new_csr6; | ||
177 | int i; | ||
178 | |||
179 | if (mtable) { | ||
180 | struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index]; | ||
181 | unsigned char *p = mleaf->leafdata; | ||
182 | switch (mleaf->type) { | ||
183 | case 0: /* 21140 non-MII xcvr. */ | ||
184 | if (tulip_debug > 1) | ||
185 | netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n", | ||
186 | p[1]); | ||
187 | dev->if_port = p[0]; | ||
188 | if (startup) | ||
189 | iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); | ||
190 | iowrite32(p[1], ioaddr + CSR12); | ||
191 | new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18); | ||
192 | break; | ||
193 | case 2: case 4: { | ||
194 | u16 setup[5]; | ||
195 | u32 csr13val, csr14val, csr15dir, csr15val; | ||
196 | for (i = 0; i < 5; i++) | ||
197 | setup[i] = get_u16(&p[i*2 + 1]); | ||
198 | |||
199 | dev->if_port = p[0] & MEDIA_MASK; | ||
200 | if (tulip_media_cap[dev->if_port] & MediaAlwaysFD) | ||
201 | tp->full_duplex = 1; | ||
202 | |||
203 | if (startup && mtable->has_reset) { | ||
204 | struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; | ||
205 | unsigned char *rst = rleaf->leafdata; | ||
206 | if (tulip_debug > 1) | ||
207 | netdev_dbg(dev, "Resetting the transceiver\n"); | ||
208 | for (i = 0; i < rst[0]; i++) | ||
209 | iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); | ||
210 | } | ||
211 | if (tulip_debug > 1) | ||
212 | netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n", | ||
213 | medianame[dev->if_port], | ||
214 | setup[0], setup[1]); | ||
215 | if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */ | ||
216 | csr13val = setup[0]; | ||
217 | csr14val = setup[1]; | ||
218 | csr15dir = (setup[3]<<16) | setup[2]; | ||
219 | csr15val = (setup[4]<<16) | setup[2]; | ||
220 | iowrite32(0, ioaddr + CSR13); | ||
221 | iowrite32(csr14val, ioaddr + CSR14); | ||
222 | iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ | ||
223 | iowrite32(csr15val, ioaddr + CSR15); /* Data */ | ||
224 | iowrite32(csr13val, ioaddr + CSR13); | ||
225 | } else { | ||
226 | csr13val = 1; | ||
227 | csr14val = 0; | ||
228 | csr15dir = (setup[0]<<16) | 0x0008; | ||
229 | csr15val = (setup[1]<<16) | 0x0008; | ||
230 | if (dev->if_port <= 4) | ||
231 | csr14val = t21142_csr14[dev->if_port]; | ||
232 | if (startup) { | ||
233 | iowrite32(0, ioaddr + CSR13); | ||
234 | iowrite32(csr14val, ioaddr + CSR14); | ||
235 | } | ||
236 | iowrite32(csr15dir, ioaddr + CSR15); /* Direction */ | ||
237 | iowrite32(csr15val, ioaddr + CSR15); /* Data */ | ||
238 | if (startup) iowrite32(csr13val, ioaddr + CSR13); | ||
239 | } | ||
240 | if (tulip_debug > 1) | ||
241 | netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n", | ||
242 | csr15dir, csr15val); | ||
243 | if (mleaf->type == 4) | ||
244 | new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18); | ||
245 | else | ||
246 | new_csr6 = 0x82420000; | ||
247 | break; | ||
248 | } | ||
249 | case 1: case 3: { | ||
250 | int phy_num = p[0]; | ||
251 | int init_length = p[1]; | ||
252 | u16 *misc_info, tmp_info; | ||
253 | |||
254 | dev->if_port = 11; | ||
255 | new_csr6 = 0x020E0000; | ||
256 | if (mleaf->type == 3) { /* 21142 */ | ||
257 | u16 *init_sequence = (u16*)(p+2); | ||
258 | u16 *reset_sequence = &((u16*)(p+3))[init_length]; | ||
259 | int reset_length = p[2 + init_length*2]; | ||
260 | misc_info = reset_sequence + reset_length; | ||
261 | if (startup) { | ||
262 | int timeout = 10; /* max 1 ms */ | ||
263 | for (i = 0; i < reset_length; i++) | ||
264 | iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15); | ||
265 | |||
266 | /* flush posted writes */ | ||
267 | ioread32(ioaddr + CSR15); | ||
268 | |||
269 | /* Sect 3.10.3 in DP83840A.pdf (p39) */ | ||
270 | udelay(500); | ||
271 | |||
272 | /* Section 4.2 in DP83840A.pdf (p43) */ | ||
273 | /* and IEEE 802.3 "22.2.4.1.1 Reset" */ | ||
274 | while (timeout-- && | ||
275 | (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) | ||
276 | udelay(100); | ||
277 | } | ||
278 | for (i = 0; i < init_length; i++) | ||
279 | iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15); | ||
280 | |||
281 | ioread32(ioaddr + CSR15); /* flush posted writes */ | ||
282 | } else { | ||
283 | u8 *init_sequence = p + 2; | ||
284 | u8 *reset_sequence = p + 3 + init_length; | ||
285 | int reset_length = p[2 + init_length]; | ||
286 | misc_info = (u16*)(reset_sequence + reset_length); | ||
287 | if (startup) { | ||
288 | int timeout = 10; /* max 1 ms */ | ||
289 | iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12); | ||
290 | for (i = 0; i < reset_length; i++) | ||
291 | iowrite32(reset_sequence[i], ioaddr + CSR12); | ||
292 | |||
293 | /* flush posted writes */ | ||
294 | ioread32(ioaddr + CSR12); | ||
295 | |||
296 | /* Sect 3.10.3 in DP83840A.pdf (p39) */ | ||
297 | udelay(500); | ||
298 | |||
299 | /* Section 4.2 in DP83840A.pdf (p43) */ | ||
300 | /* and IEEE 802.3 "22.2.4.1.1 Reset" */ | ||
301 | while (timeout-- && | ||
302 | (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET)) | ||
303 | udelay(100); | ||
304 | } | ||
305 | for (i = 0; i < init_length; i++) | ||
306 | iowrite32(init_sequence[i], ioaddr + CSR12); | ||
307 | |||
308 | ioread32(ioaddr + CSR12); /* flush posted writes */ | ||
309 | } | ||
310 | |||
311 | tmp_info = get_u16(&misc_info[1]); | ||
312 | if (tmp_info) | ||
313 | tp->advertising[phy_num] = tmp_info | 1; | ||
314 | if (tmp_info && startup < 2) { | ||
315 | if (tp->mii_advertise == 0) | ||
316 | tp->mii_advertise = tp->advertising[phy_num]; | ||
317 | if (tulip_debug > 1) | ||
318 | netdev_dbg(dev, " Advertising %04x on MII %d\n", | ||
319 | tp->mii_advertise, | ||
320 | tp->phys[phy_num]); | ||
321 | tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise); | ||
322 | } | ||
323 | break; | ||
324 | } | ||
325 | case 5: case 6: { | ||
326 | u16 setup[5]; | ||
327 | |||
328 | new_csr6 = 0; /* FIXME */ | ||
329 | |||
330 | for (i = 0; i < 5; i++) | ||
331 | setup[i] = get_u16(&p[i*2 + 1]); | ||
332 | |||
333 | if (startup && mtable->has_reset) { | ||
334 | struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset]; | ||
335 | unsigned char *rst = rleaf->leafdata; | ||
336 | if (tulip_debug > 1) | ||
337 | netdev_dbg(dev, "Resetting the transceiver\n"); | ||
338 | for (i = 0; i < rst[0]; i++) | ||
339 | iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15); | ||
340 | } | ||
341 | |||
342 | break; | ||
343 | } | ||
344 | default: | ||
345 | netdev_dbg(dev, " Invalid media table selection %d\n", | ||
346 | mleaf->type); | ||
347 | new_csr6 = 0x020E0000; | ||
348 | } | ||
349 | if (tulip_debug > 1) | ||
350 | netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n", | ||
351 | medianame[dev->if_port], | ||
352 | ioread32(ioaddr + CSR12) & 0xff); | ||
353 | } else if (tp->chip_id == LC82C168) { | ||
354 | if (startup && ! tp->medialock) | ||
355 | dev->if_port = tp->mii_cnt ? 11 : 0; | ||
356 | if (tulip_debug > 1) | ||
357 | netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n", | ||
358 | ioread32(ioaddr + 0xB8), | ||
359 | medianame[dev->if_port]); | ||
360 | if (tp->mii_cnt) { | ||
361 | new_csr6 = 0x810C0000; | ||
362 | iowrite32(0x0001, ioaddr + CSR15); | ||
363 | iowrite32(0x0201B07A, ioaddr + 0xB8); | ||
364 | } else if (startup) { | ||
365 | /* Start with 10mbps to do autonegotiation. */ | ||
366 | iowrite32(0x32, ioaddr + CSR12); | ||
367 | new_csr6 = 0x00420000; | ||
368 | iowrite32(0x0001B078, ioaddr + 0xB8); | ||
369 | iowrite32(0x0201B078, ioaddr + 0xB8); | ||
370 | } else if (dev->if_port == 3 || dev->if_port == 5) { | ||
371 | iowrite32(0x33, ioaddr + CSR12); | ||
372 | new_csr6 = 0x01860000; | ||
373 | /* Trigger autonegotiation. */ | ||
374 | iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8); | ||
375 | } else { | ||
376 | iowrite32(0x32, ioaddr + CSR12); | ||
377 | new_csr6 = 0x00420000; | ||
378 | iowrite32(0x1F078, ioaddr + 0xB8); | ||
379 | } | ||
380 | } else { /* Unknown chip type with no media table. */ | ||
381 | if (tp->default_port == 0) | ||
382 | dev->if_port = tp->mii_cnt ? 11 : 3; | ||
383 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
384 | new_csr6 = 0x020E0000; | ||
385 | } else if (tulip_media_cap[dev->if_port] & MediaIsFx) { | ||
386 | new_csr6 = 0x02860000; | ||
387 | } else | ||
388 | new_csr6 = 0x03860000; | ||
389 | if (tulip_debug > 1) | ||
390 | netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n", | ||
391 | medianame[dev->if_port], | ||
392 | ioread32(ioaddr + CSR12)); | ||
393 | } | ||
394 | |||
395 | tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); | ||
396 | |||
397 | mdelay(1); | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | Check the MII negotiated duplex and change the CSR6 setting if | ||
402 | required. | ||
403 | Return 0 if everything is OK. | ||
404 | Return < 0 if the transceiver is missing or has no link beat. | ||
405 | */ | ||
406 | int tulip_check_duplex(struct net_device *dev) | ||
407 | { | ||
408 | struct tulip_private *tp = netdev_priv(dev); | ||
409 | unsigned int bmsr, lpa, negotiated, new_csr6; | ||
410 | |||
411 | bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); | ||
412 | lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA); | ||
413 | if (tulip_debug > 1) | ||
414 | dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n", | ||
415 | bmsr, lpa); | ||
416 | if (bmsr == 0xffff) | ||
417 | return -2; | ||
418 | if ((bmsr & BMSR_LSTATUS) == 0) { | ||
419 | int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR); | ||
420 | if ((new_bmsr & BMSR_LSTATUS) == 0) { | ||
421 | if (tulip_debug > 1) | ||
422 | dev_info(&dev->dev, | ||
423 | "No link beat on the MII interface, status %04x\n", | ||
424 | new_bmsr); | ||
425 | return -1; | ||
426 | } | ||
427 | } | ||
428 | negotiated = lpa & tp->advertising[0]; | ||
429 | tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated); | ||
430 | |||
431 | new_csr6 = tp->csr6; | ||
432 | |||
433 | if (negotiated & LPA_100) new_csr6 &= ~TxThreshold; | ||
434 | else new_csr6 |= TxThreshold; | ||
435 | if (tp->full_duplex) new_csr6 |= FullDuplex; | ||
436 | else new_csr6 &= ~FullDuplex; | ||
437 | |||
438 | if (new_csr6 != tp->csr6) { | ||
439 | tp->csr6 = new_csr6; | ||
440 | tulip_restart_rxtx(tp); | ||
441 | |||
442 | if (tulip_debug > 0) | ||
443 | dev_info(&dev->dev, | ||
444 | "Setting %s-duplex based on MII#%d link partner capability of %04x\n", | ||
445 | tp->full_duplex ? "full" : "half", | ||
446 | tp->phys[0], lpa); | ||
447 | return 1; | ||
448 | } | ||
449 | |||
450 | return 0; | ||
451 | } | ||
452 | |||
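Elsewhere in the driver the sign of this return value is what drives carrier state and the polling interval (see pnic_timer() in pnic.c and comet_timer() in timer.c further down). A minimal usage sketch under that convention; the wrapper name is hypothetical:

    static void example_poll_duplex(struct net_device *dev)
    {
    	struct tulip_private *tp = netdev_priv(dev);
    	int ret;

    	/* pnic_timer() takes tp->lock around the check; tulip_check_duplex()
    	 * itself restarts rx/tx if the negotiated duplex changed CSR6. */
    	spin_lock_irq(&tp->lock);
    	ret = tulip_check_duplex(dev);
    	spin_unlock_irq(&tp->lock);

    	if (ret < 0)			/* no transceiver or no link beat */
    		netif_carrier_off(dev);
    	else				/* 0: unchanged, 1: duplex updated */
    		netif_carrier_on(dev);
    }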
453 | void __devinit tulip_find_mii (struct net_device *dev, int board_idx) | ||
454 | { | ||
455 | struct tulip_private *tp = netdev_priv(dev); | ||
456 | int phyn, phy_idx = 0; | ||
457 | int mii_reg0; | ||
458 | int mii_advert; | ||
459 | unsigned int to_advert, new_bmcr, ane_switch; | ||
460 | |||
461 | /* Find the connected MII xcvrs. | ||
462 | Doing this in open() would allow detecting external xcvrs later, | ||
463 | but takes too much time. */ | ||
464 | for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) { | ||
465 | int phy = phyn & 0x1f; | ||
466 | int mii_status = tulip_mdio_read (dev, phy, MII_BMSR); | ||
467 | if ((mii_status & 0x8301) == 0x8001 || | ||
468 | ((mii_status & BMSR_100BASE4) == 0 && | ||
469 | (mii_status & 0x7800) != 0)) { | ||
470 | /* preserve Becker logic, gain indentation level */ | ||
471 | } else { | ||
472 | continue; | ||
473 | } | ||
474 | |||
475 | mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR); | ||
476 | mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE); | ||
477 | ane_switch = 0; | ||
478 | |||
479 | /* if not advertising at all, generate an | ||
480 | * advertising value from the capability | ||
481 | * bits in BMSR | ||
482 | */ | ||
483 | if ((mii_advert & ADVERTISE_ALL) == 0) { | ||
484 | unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR); | ||
485 | mii_advert = ((tmpadv >> 6) & 0x3e0) | 1; | ||
486 | } | ||
487 | |||
488 | if (tp->mii_advertise) { | ||
489 | tp->advertising[phy_idx] = | ||
490 | to_advert = tp->mii_advertise; | ||
491 | } else if (tp->advertising[phy_idx]) { | ||
492 | to_advert = tp->advertising[phy_idx]; | ||
493 | } else { | ||
494 | tp->advertising[phy_idx] = | ||
495 | tp->mii_advertise = | ||
496 | to_advert = mii_advert; | ||
497 | } | ||
498 | |||
499 | tp->phys[phy_idx++] = phy; | ||
500 | |||
501 | pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n", | ||
502 | board_idx, phy, mii_reg0, mii_status, mii_advert); | ||
503 | |||
504 | /* Fixup for DLink with miswired PHY. */ | ||
505 | if (mii_advert != to_advert) { | ||
506 | pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n", | ||
507 | board_idx, to_advert, phy, mii_advert); | ||
508 | tulip_mdio_write (dev, phy, 4, to_advert); | ||
509 | } | ||
510 | |||
511 | /* Enable autonegotiation: some boards default to off. */ | ||
512 | if (tp->default_port == 0) { | ||
513 | new_bmcr = mii_reg0 | BMCR_ANENABLE; | ||
514 | if (new_bmcr != mii_reg0) { | ||
515 | new_bmcr |= BMCR_ANRESTART; | ||
516 | ane_switch = 1; | ||
517 | } | ||
518 | } | ||
519 | /* ...or disable nway, if forcing media */ | ||
520 | else { | ||
521 | new_bmcr = mii_reg0 & ~BMCR_ANENABLE; | ||
522 | if (new_bmcr != mii_reg0) | ||
523 | ane_switch = 1; | ||
524 | } | ||
525 | |||
526 | /* clear out bits we never want at this point */ | ||
527 | new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE | | ||
528 | BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK | | ||
529 | BMCR_RESET); | ||
530 | |||
531 | if (tp->full_duplex) | ||
532 | new_bmcr |= BMCR_FULLDPLX; | ||
533 | if (tulip_media_cap[tp->default_port] & MediaIs100) | ||
534 | new_bmcr |= BMCR_SPEED100; | ||
535 | |||
536 | if (new_bmcr != mii_reg0) { | ||
537 | /* some phys need the ANE switch to | ||
538 | * happen before forced media settings | ||
539 | * will "take." However, we write the | ||
540 | * same value twice in order not to | ||
541 | * confuse the sane phys. | ||
542 | */ | ||
543 | if (ane_switch) { | ||
544 | tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); | ||
545 | udelay (10); | ||
546 | } | ||
547 | tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr); | ||
548 | } | ||
549 | } | ||
550 | tp->mii_cnt = phy_idx; | ||
551 | if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) { | ||
552 | pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n", | ||
553 | board_idx); | ||
554 | tp->phys[0] = 1; | ||
555 | } | ||
556 | } | ||
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c new file mode 100644 index 000000000000..52d898bdbeb4 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/pnic.c | |||
@@ -0,0 +1,173 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/pnic.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | |||
13 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
14 | */ | ||
15 | |||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/jiffies.h> | ||
19 | #include "tulip.h" | ||
20 | |||
21 | |||
22 | void pnic_do_nway(struct net_device *dev) | ||
23 | { | ||
24 | struct tulip_private *tp = netdev_priv(dev); | ||
25 | void __iomem *ioaddr = tp->base_addr; | ||
26 | u32 phy_reg = ioread32(ioaddr + 0xB8); | ||
27 | u32 new_csr6 = tp->csr6 & ~0x40C40200; | ||
28 | |||
29 | if (phy_reg & 0x78000000) { /* Ignore baseT4 */ | ||
30 | if (phy_reg & 0x20000000) dev->if_port = 5; | ||
31 | else if (phy_reg & 0x40000000) dev->if_port = 3; | ||
32 | else if (phy_reg & 0x10000000) dev->if_port = 4; | ||
33 | else if (phy_reg & 0x08000000) dev->if_port = 0; | ||
34 | tp->nwayset = 1; | ||
35 | new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000; | ||
36 | iowrite32(0x32 | (dev->if_port & 1), ioaddr + CSR12); | ||
37 | if (dev->if_port & 1) | ||
38 | iowrite32(0x1F868, ioaddr + 0xB8); | ||
39 | if (phy_reg & 0x30000000) { | ||
40 | tp->full_duplex = 1; | ||
41 | new_csr6 |= 0x00000200; | ||
42 | } | ||
43 | if (tulip_debug > 1) | ||
44 | netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n", | ||
45 | phy_reg, medianame[dev->if_port]); | ||
46 | if (tp->csr6 != new_csr6) { | ||
47 | tp->csr6 = new_csr6; | ||
48 | /* Restart Tx */ | ||
49 | tulip_restart_rxtx(tp); | ||
50 | dev->trans_start = jiffies; | ||
51 | } | ||
52 | } | ||
53 | } | ||
54 | |||
55 | void pnic_lnk_change(struct net_device *dev, int csr5) | ||
56 | { | ||
57 | struct tulip_private *tp = netdev_priv(dev); | ||
58 | void __iomem *ioaddr = tp->base_addr; | ||
59 | int phy_reg = ioread32(ioaddr + 0xB8); | ||
60 | |||
61 | if (tulip_debug > 1) | ||
62 | netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n", | ||
63 | phy_reg, csr5); | ||
64 | if (ioread32(ioaddr + CSR5) & TPLnkFail) { | ||
65 | iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7); | ||
66 | /* If we use an external MII, then we mustn't use the | ||
67 | * internal negotiation. | ||
68 | */ | ||
69 | if (tulip_media_cap[dev->if_port] & MediaIsMII) | ||
70 | return; | ||
71 | if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) { | ||
72 | tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); | ||
73 | iowrite32(tp->csr6, ioaddr + CSR6); | ||
74 | iowrite32(0x30, ioaddr + CSR12); | ||
75 | iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */ | ||
76 | dev->trans_start = jiffies; | ||
77 | } | ||
78 | } else if (ioread32(ioaddr + CSR5) & TPLnkPass) { | ||
79 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
80 | spin_lock(&tp->lock); | ||
81 | tulip_check_duplex(dev); | ||
82 | spin_unlock(&tp->lock); | ||
83 | } else { | ||
84 | pnic_do_nway(dev); | ||
85 | } | ||
86 | iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | void pnic_timer(unsigned long data) | ||
91 | { | ||
92 | struct net_device *dev = (struct net_device *)data; | ||
93 | struct tulip_private *tp = netdev_priv(dev); | ||
94 | void __iomem *ioaddr = tp->base_addr; | ||
95 | int next_tick = 60*HZ; | ||
96 | |||
97 | if(!ioread32(ioaddr + CSR7)) { | ||
98 | /* the timer was called due to a work overflow | ||
99 | * in the interrupt handler. Skip the connection | ||
100 | * checks, the nic is definitively speaking with | ||
101 | * his link partner. | ||
102 | */ | ||
103 | goto too_good_connection; | ||
104 | } | ||
105 | |||
106 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
107 | spin_lock_irq(&tp->lock); | ||
108 | if (tulip_check_duplex(dev) > 0) | ||
109 | next_tick = 3*HZ; | ||
110 | spin_unlock_irq(&tp->lock); | ||
111 | } else { | ||
112 | int csr12 = ioread32(ioaddr + CSR12); | ||
113 | int new_csr6 = tp->csr6 & ~0x40C40200; | ||
114 | int phy_reg = ioread32(ioaddr + 0xB8); | ||
115 | int csr5 = ioread32(ioaddr + CSR5); | ||
116 | |||
117 | if (tulip_debug > 1) | ||
118 | netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n", | ||
119 | phy_reg, medianame[dev->if_port], csr5); | ||
120 | if (phy_reg & 0x04000000) { /* Remote link fault */ | ||
121 | iowrite32(0x0201F078, ioaddr + 0xB8); | ||
122 | next_tick = 1*HZ; | ||
123 | tp->nwayset = 0; | ||
124 | } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */ | ||
125 | pnic_do_nway(dev); | ||
126 | next_tick = 60*HZ; | ||
127 | } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */ | ||
128 | if (tulip_debug > 1) | ||
129 | netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n", | ||
130 | medianame[dev->if_port], | ||
131 | csr12, | ||
132 | ioread32(ioaddr + CSR5), | ||
133 | ioread32(ioaddr + 0xB8)); | ||
134 | next_tick = 3*HZ; | ||
135 | if (tp->medialock) { | ||
136 | } else if (tp->nwayset && (dev->if_port & 1)) { | ||
137 | next_tick = 1*HZ; | ||
138 | } else if (dev->if_port == 0) { | ||
139 | dev->if_port = 3; | ||
140 | iowrite32(0x33, ioaddr + CSR12); | ||
141 | new_csr6 = 0x01860000; | ||
142 | iowrite32(0x1F868, ioaddr + 0xB8); | ||
143 | } else { | ||
144 | dev->if_port = 0; | ||
145 | iowrite32(0x32, ioaddr + CSR12); | ||
146 | new_csr6 = 0x00420000; | ||
147 | iowrite32(0x1F078, ioaddr + 0xB8); | ||
148 | } | ||
149 | if (tp->csr6 != new_csr6) { | ||
150 | tp->csr6 = new_csr6; | ||
151 | /* Restart Tx */ | ||
152 | tulip_restart_rxtx(tp); | ||
153 | dev->trans_start = jiffies; | ||
154 | if (tulip_debug > 1) | ||
155 | dev_info(&dev->dev, | ||
156 | "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n", | ||
157 | medianame[dev->if_port], | ||
158 | tp->full_duplex ? "full" : "half", | ||
159 | new_csr6); | ||
160 | } | ||
161 | } | ||
162 | } | ||
163 | too_good_connection: | ||
164 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
165 | if(!ioread32(ioaddr + CSR7)) { | ||
166 | if (tulip_debug > 1) | ||
167 | dev_info(&dev->dev, "sw timer wakeup\n"); | ||
168 | disable_irq(dev->irq); | ||
169 | tulip_refill_rx(dev); | ||
170 | enable_irq(dev->irq); | ||
171 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); | ||
172 | } | ||
173 | } | ||
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c new file mode 100644 index 000000000000..93358ee4d830 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/pnic2.c | |||
@@ -0,0 +1,406 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/pnic2.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | Modified to help support PNIC_II by Kevin B. Hendricks | ||
7 | |||
8 | This software may be used and distributed according to the terms | ||
9 | of the GNU General Public License, incorporated herein by reference. | ||
10 | |||
11 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
12 | for more information on this driver. | ||
13 | |||
14 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
15 | */ | ||
16 | |||
17 | |||
18 | /* Understanding the PNIC_II - everything in this file is based | ||
19 | * on the PNIC_II_PDF datasheet which is sorely lacking in detail | ||
20 | * | ||
21 | * As I understand things, here are the registers and bits that | ||
22 | * explain the masks and constants used in this file that are | ||
23 | * either different from the 21142/3 or important for basic operation. | ||
24 | * | ||
25 | * | ||
26 | * CSR 6 (mask = 0xfe3bd1fd of bits not to change) | ||
27 | * ----- | ||
28 | * Bit 24 - SCR | ||
29 | * Bit 23 - PCS | ||
30 | * Bit 22 - TTM (Transmit Threshold Mode) | ||
31 | * Bit 18 - Port Select | ||
32 | * Bit 13 - Start - 1, Stop - 0 Transmissions | ||
33 | * Bit 11:10 - Loop Back Operation Mode | ||
34 | * Bit 9 - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set) | ||
35 | * Bit 1 - Start - 1, Stop - 0 Receive | ||
36 | * | ||
37 | * | ||
38 | * CSR 14 (mask = 0xfff0ee39 of bits not to change) | ||
39 | * ------ | ||
40 | * Bit 19 - PAUSE-Pause | ||
41 | * Bit 18 - Advertise T4 | ||
42 | * Bit 17 - Advertise 100baseTx-FD | ||
43 | * Bit 16 - Advertise 100baseTx-HD | ||
44 | * Bit 12 - LTE - Link Test Enable | ||
45 | * Bit 7 - ANE - Auto Negotiate Enable | ||
46 | * Bit 6 - HDE - Advertise 10baseT-HD | ||
47 | * Bit 2 - Reset to Power down - kept as 1 for normal operation | ||
48 | * Bit 1 - Loop Back enable for 10baseT MCC | ||
49 | * | ||
50 | * | ||
51 | * CSR 12 | ||
52 | * ------ | ||
53 | * Bit 25 - Partner can do T4 | ||
54 | * Bit 24 - Partner can do 100baseTx-FD | ||
55 | * Bit 23 - Partner can do 100baseTx-HD | ||
56 | * Bit 22 - Partner can do 10baseT-FD | ||
57 | * Bit 21 - Partner can do 10baseT-HD | ||
58 | * Bit 15 - LPN is 1 if all above bits are valid, otherwise 0 | ||
59 | * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate) | ||
60 | * Bit 3 - Autopolarity state | ||
61 | * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed | ||
62 | * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed | ||
63 | * | ||
64 | * | ||
65 | * Data Port Selection Info | ||
66 | *------------------------- | ||
67 | * | ||
68 | * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT | ||
69 | * 1 0 0 (X) 0 (X) 1 NWAY | ||
70 | * 0 0 1 0 (X) 0 10baseT | ||
71 | * 0 1 0 1 1 (X) 100baseT | ||
72 | * | ||
73 | * | ||
74 | */ | ||
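The CSR12 bits listed above are what pnic2_lnk_change() further down picks apart by hand. As an illustration only (not part of the driver; the struct and function names are hypothetical), the link-partner field could be decoded like this:

    struct pnic2_lpar_caps {
    	int valid;	/* bit 15 (LPN): partner bits below are valid   */
    	int t4;		/* bit 25: partner can do T4                    */
    	int tx100_fd;	/* bit 24: partner can do 100baseTx-FD          */
    	int tx100_hd;	/* bit 23: partner can do 100baseTx-HD          */
    	int t10_fd;	/* bit 22: partner can do 10baseT-FD            */
    	int t10_hd;	/* bit 21: partner can do 10baseT-HD            */
    	int ls10_fail;	/* bit 2 (LS10B): 1 means 10baseT link failed   */
    	int ls100_fail;	/* bit 1 (LS100B): 1 means 100baseT link failed */
    };

    static void pnic2_decode_csr12(u32 csr12, struct pnic2_lpar_caps *lp)
    {
    	lp->valid      = !!(csr12 & (1 << 15));
    	lp->t4         = !!(csr12 & (1 << 25));
    	lp->tx100_fd   = !!(csr12 & (1 << 24));
    	lp->tx100_hd   = !!(csr12 & (1 << 23));
    	lp->t10_fd     = !!(csr12 & (1 << 22));
    	lp->t10_hd     = !!(csr12 & (1 << 21));
    	lp->ls10_fail  = !!(csr12 & (1 << 2));
    	lp->ls100_fail = !!(csr12 & (1 << 1));
    }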
75 | |||
76 | |||
77 | |||
78 | #include "tulip.h" | ||
79 | #include <linux/delay.h> | ||
80 | |||
81 | |||
82 | void pnic2_timer(unsigned long data) | ||
83 | { | ||
84 | struct net_device *dev = (struct net_device *)data; | ||
85 | struct tulip_private *tp = netdev_priv(dev); | ||
86 | void __iomem *ioaddr = tp->base_addr; | ||
87 | int next_tick = 60*HZ; | ||
88 | |||
89 | if (tulip_debug > 3) | ||
90 | dev_info(&dev->dev, "PNIC2 negotiation status %08x\n", | ||
91 | ioread32(ioaddr + CSR12)); | ||
92 | |||
93 | if (next_tick) { | ||
94 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
95 | } | ||
96 | } | ||
97 | |||
98 | |||
99 | void pnic2_start_nway(struct net_device *dev) | ||
100 | { | ||
101 | struct tulip_private *tp = netdev_priv(dev); | ||
102 | void __iomem *ioaddr = tp->base_addr; | ||
103 | int csr14; | ||
104 | int csr12; | ||
105 | |||
106 | /* set up what to advertise during the negotiation */ | ||
107 | |||
108 | /* load in csr14 and mask off bits not to touch | ||
109 | * comment at top of file explains mask value | ||
110 | */ | ||
111 | csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39); | ||
112 | |||
113 | /* bit 17 - advertise 100baseTx-FD */ | ||
114 | if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000; | ||
115 | |||
116 | /* bit 16 - advertise 100baseTx-HD */ | ||
117 | if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000; | ||
118 | |||
119 | /* bit 6 - advertise 10baseT-HD */ | ||
120 | if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040; | ||
121 | |||
122 | /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable | ||
123 | * and bit 0 Don't PowerDown 10baseT | ||
124 | */ | ||
125 | csr14 |= 0x00001184; | ||
126 | |||
127 | if (tulip_debug > 1) | ||
128 | netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n", | ||
129 | csr14); | ||
130 | |||
131 | /* tell pnic2_lnk_change we are doing an nway negotiation */ | ||
132 | dev->if_port = 0; | ||
133 | tp->nway = tp->mediasense = 1; | ||
134 | tp->nwayset = tp->lpar = 0; | ||
135 | |||
136 | /* now we have to set up csr6 for NWAY state */ | ||
137 | |||
138 | tp->csr6 = ioread32(ioaddr + CSR6); | ||
139 | if (tulip_debug > 1) | ||
140 | netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6); | ||
141 | |||
142 | /* mask off any bits not to touch | ||
143 | * comment at top of file explains mask value | ||
144 | */ | ||
145 | tp->csr6 = tp->csr6 & 0xfe3bd1fd; | ||
146 | |||
147 | /* don't forget that bit 9 is also used for advertising */ | ||
148 | /* advertise 10baseT-FD for the negotiation (bit 9) */ | ||
149 | if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200; | ||
150 | |||
151 | /* set bit 24 for nway negotiation mode ... | ||
152 | * see Data Port Selection comment at top of file | ||
153 | * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1) | ||
154 | */ | ||
155 | tp->csr6 |= 0x01000000; | ||
156 | iowrite32(csr14, ioaddr + CSR14); | ||
157 | iowrite32(tp->csr6, ioaddr + CSR6); | ||
158 | udelay(100); | ||
159 | |||
160 | /* all set up so now force the negotiation to begin */ | ||
161 | |||
162 | /* read in current values and mask off all but the | ||
163 | * Autonegotiation bits 14:12. Writing a 001 to those bits | ||
164 | * should start the autonegotiation | ||
165 | */ | ||
166 | csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff); | ||
167 | csr12 |= 0x1000; | ||
168 | iowrite32(csr12, ioaddr + CSR12); | ||
169 | } | ||
170 | |||
171 | |||
172 | |||
173 | void pnic2_lnk_change(struct net_device *dev, int csr5) | ||
174 | { | ||
175 | struct tulip_private *tp = netdev_priv(dev); | ||
176 | void __iomem *ioaddr = tp->base_addr; | ||
177 | int csr14; | ||
178 | |||
179 | /* read the status register to find out what is up */ | ||
180 | int csr12 = ioread32(ioaddr + CSR12); | ||
181 | |||
182 | if (tulip_debug > 1) | ||
183 | dev_info(&dev->dev, | ||
184 | "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n", | ||
185 | csr12, csr5, ioread32(ioaddr + CSR14)); | ||
186 | |||
187 | /* If NWay finished and we have a negotiated partner capability, | ||
188 | * check bits 14:12 for bit pattern 101 - all is good | ||
189 | */ | ||
190 | if (tp->nway && !tp->nwayset) { | ||
191 | |||
192 | /* we did an auto negotiation */ | ||
193 | |||
194 | if ((csr12 & 0x7000) == 0x5000) { | ||
195 | |||
196 | /* negotiation ended successfully */ | ||
197 | |||
198 | /* get the link partner's reply and mask out all but | ||
199 | * bits 24-21 which show the partners capabilities | ||
200 | * and match those to what we advertised | ||
201 | * | ||
202 | * then begin to interpret the results of the negotiation. | ||
203 | * Always go in this order: (we are ignoring T4 for now) | ||
204 | * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD | ||
205 | */ | ||
206 | |||
207 | int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise; | ||
208 | tp->lpar = (csr12 >> 16); | ||
209 | tp->nwayset = 1; | ||
210 | |||
211 | if (negotiated & 0x0100) dev->if_port = 5; | ||
212 | else if (negotiated & 0x0080) dev->if_port = 3; | ||
213 | else if (negotiated & 0x0040) dev->if_port = 4; | ||
214 | else if (negotiated & 0x0020) dev->if_port = 0; | ||
215 | else { | ||
216 | if (tulip_debug > 1) | ||
217 | dev_info(&dev->dev, | ||
218 | "funny autonegotiate result csr12 %08x advertising %04x\n", | ||
219 | csr12, tp->sym_advertise); | ||
220 | tp->nwayset = 0; | ||
221 | /* so check if 100baseTx link state is okay */ | ||
222 | if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180)) | ||
223 | dev->if_port = 3; | ||
224 | } | ||
225 | |||
226 | /* now record the duplex that was negotiated */ | ||
227 | tp->full_duplex = 0; | ||
228 | if ((dev->if_port == 4) || (dev->if_port == 5)) | ||
229 | tp->full_duplex = 1; | ||
230 | |||
231 | if (tulip_debug > 1) { | ||
232 | if (tp->nwayset) | ||
233 | dev_info(&dev->dev, | ||
234 | "Switching to %s based on link negotiation %04x & %04x = %04x\n", | ||
235 | medianame[dev->if_port], | ||
236 | tp->sym_advertise, tp->lpar, | ||
237 | negotiated); | ||
238 | } | ||
239 | |||
240 | /* remember to turn off bit 7 - autonegotiate | ||
241 | * enable so we can properly end nway mode and | ||
242 | * set duplex (i.e. use csr6<9> again) | ||
243 | */ | ||
244 | csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); | ||
245 | iowrite32(csr14,ioaddr + CSR14); | ||
246 | |||
247 | |||
248 | /* now set the data port and operating mode | ||
249 | * (see the Data Port Selection comments at | ||
250 | * the top of the file) | ||
251 | */ | ||
252 | |||
253 | /* get current csr6 and mask off bits not to touch */ | ||
254 | /* see comment at top of file */ | ||
255 | |||
256 | tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); | ||
257 | |||
258 | /* so if using if_port 3 or 5 then select the 100baseT | ||
259 | * port else select the 10baseT port. | ||
260 | * See the Data Port Selection table at the top | ||
261 | * of the file which was taken from the PNIC_II.PDF | ||
262 | * datasheet | ||
263 | */ | ||
264 | if (dev->if_port & 1) tp->csr6 |= 0x01840000; | ||
265 | else tp->csr6 |= 0x00400000; | ||
266 | |||
267 | /* now set the full duplex bit appropriately */ | ||
268 | if (tp->full_duplex) tp->csr6 |= 0x00000200; | ||
269 | |||
270 | iowrite32(1, ioaddr + CSR13); | ||
271 | |||
272 | if (tulip_debug > 2) | ||
273 | netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n", | ||
274 | tp->csr6, | ||
275 | ioread32(ioaddr + CSR6), | ||
276 | ioread32(ioaddr + CSR12)); | ||
277 | |||
278 | /* now the following actually writes out the | ||
279 | * new csr6 values | ||
280 | */ | ||
281 | tulip_start_rxtx(tp); | ||
282 | |||
283 | return; | ||
284 | |||
285 | } else { | ||
286 | dev_info(&dev->dev, | ||
287 | "Autonegotiation failed, using %s, link beat status %04x\n", | ||
288 | medianame[dev->if_port], csr12); | ||
289 | |||
290 | /* remember to turn off bit 7 - autonegotiate | ||
291 | * enable so we don't forget | ||
292 | */ | ||
293 | csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); | ||
294 | iowrite32(csr14,ioaddr + CSR14); | ||
295 | |||
296 | /* what should we do when autonegotiate fails? | ||
297 | * should we try again or default to the baseline | ||
298 | * case? I just don't know. | ||
299 | * | ||
300 | * for now default to some baseline case | ||
301 | */ | ||
302 | |||
303 | dev->if_port = 0; | ||
304 | tp->nway = 0; | ||
305 | tp->nwayset = 1; | ||
306 | |||
307 | /* set to 10baseTx-HD - see Data Port Selection | ||
308 | * comment given at the top of the file | ||
309 | */ | ||
310 | tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); | ||
311 | tp->csr6 |= 0x00400000; | ||
312 | |||
313 | tulip_restart_rxtx(tp); | ||
314 | |||
315 | return; | ||
316 | |||
317 | } | ||
318 | } | ||
319 | |||
320 | if ((tp->nwayset && (csr5 & 0x08000000) && | ||
321 | (dev->if_port == 3 || dev->if_port == 5) && | ||
322 | (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) { | ||
323 | |||
324 | /* Link blew? Maybe restart NWay. */ | ||
325 | |||
326 | if (tulip_debug > 2) | ||
327 | netdev_dbg(dev, "Ugh! Link blew?\n"); | ||
328 | |||
329 | del_timer_sync(&tp->timer); | ||
330 | pnic2_start_nway(dev); | ||
331 | tp->timer.expires = RUN_AT(3*HZ); | ||
332 | add_timer(&tp->timer); | ||
333 | |||
334 | return; | ||
335 | } | ||
336 | |||
337 | |||
338 | if (dev->if_port == 3 || dev->if_port == 5) { | ||
339 | |||
340 | /* we are at 100mb and a potential link change occurred */ | ||
341 | |||
342 | if (tulip_debug > 1) | ||
343 | dev_info(&dev->dev, "PNIC2 %s link beat %s\n", | ||
344 | medianame[dev->if_port], | ||
345 | (csr12 & 2) ? "failed" : "good"); | ||
346 | |||
347 | /* check 100 link beat */ | ||
348 | |||
349 | tp->nway = 0; | ||
350 | tp->nwayset = 1; | ||
351 | |||
352 | /* if failed then try doing an nway to get in sync */ | ||
353 | if ((csr12 & 2) && ! tp->medialock) { | ||
354 | del_timer_sync(&tp->timer); | ||
355 | pnic2_start_nway(dev); | ||
356 | tp->timer.expires = RUN_AT(3*HZ); | ||
357 | add_timer(&tp->timer); | ||
358 | } | ||
359 | |||
360 | return; | ||
361 | } | ||
362 | |||
363 | if (dev->if_port == 0 || dev->if_port == 4) { | ||
364 | |||
365 | /* we are at 10mb and a potential link change occurred */ | ||
366 | |||
367 | if (tulip_debug > 1) | ||
368 | dev_info(&dev->dev, "PNIC2 %s link beat %s\n", | ||
369 | medianame[dev->if_port], | ||
370 | (csr12 & 4) ? "failed" : "good"); | ||
371 | |||
372 | |||
373 | tp->nway = 0; | ||
374 | tp->nwayset = 1; | ||
375 | |||
376 | /* if failed, try doing an nway to get in sync */ | ||
377 | if ((csr12 & 4) && ! tp->medialock) { | ||
378 | del_timer_sync(&tp->timer); | ||
379 | pnic2_start_nway(dev); | ||
380 | tp->timer.expires = RUN_AT(3*HZ); | ||
381 | add_timer(&tp->timer); | ||
382 | } | ||
383 | |||
384 | return; | ||
385 | } | ||
386 | |||
387 | |||
388 | if (tulip_debug > 1) | ||
389 | dev_info(&dev->dev, "PNIC2 Link Change Default?\n"); | ||
390 | |||
391 | /* if all else fails default to trying 10baseT-HD */ | ||
392 | dev->if_port = 0; | ||
393 | |||
394 | /* make sure autonegotiate enable is off */ | ||
395 | csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f); | ||
396 | iowrite32(csr14,ioaddr + CSR14); | ||
397 | |||
398 | /* set to 10baseTx-HD - see Data Port Selection | ||
399 | * comment given at the top of the file | ||
400 | */ | ||
401 | tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd); | ||
402 | tp->csr6 |= 0x00400000; | ||
403 | |||
404 | tulip_restart_rxtx(tp); | ||
405 | } | ||
406 | |||
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c new file mode 100644 index 000000000000..2017faf2d0e6 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/timer.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/timer.c | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | |||
13 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
14 | */ | ||
15 | |||
16 | |||
17 | #include "tulip.h" | ||
18 | |||
19 | |||
20 | void tulip_media_task(struct work_struct *work) | ||
21 | { | ||
22 | struct tulip_private *tp = | ||
23 | container_of(work, struct tulip_private, media_work); | ||
24 | struct net_device *dev = tp->dev; | ||
25 | void __iomem *ioaddr = tp->base_addr; | ||
26 | u32 csr12 = ioread32(ioaddr + CSR12); | ||
27 | int next_tick = 2*HZ; | ||
28 | unsigned long flags; | ||
29 | |||
30 | if (tulip_debug > 2) { | ||
31 | netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n", | ||
32 | medianame[dev->if_port], | ||
33 | ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6), | ||
34 | csr12, ioread32(ioaddr + CSR13), | ||
35 | ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15)); | ||
36 | } | ||
37 | switch (tp->chip_id) { | ||
38 | case DC21140: | ||
39 | case DC21142: | ||
40 | case MX98713: | ||
41 | case COMPEX9881: | ||
42 | case DM910X: | ||
43 | default: { | ||
44 | struct medialeaf *mleaf; | ||
45 | unsigned char *p; | ||
46 | if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */ | ||
47 | /* Not much that can be done. | ||
48 | Assume this is a generic MII or SYM transceiver. */ | ||
49 | next_tick = 60*HZ; | ||
50 | if (tulip_debug > 2) | ||
51 | netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n", | ||
52 | ioread32(ioaddr + CSR6), | ||
53 | csr12 & 0xff); | ||
54 | break; | ||
55 | } | ||
56 | mleaf = &tp->mtable->mleaf[tp->cur_index]; | ||
57 | p = mleaf->leafdata; | ||
58 | switch (mleaf->type) { | ||
59 | case 0: case 4: { | ||
60 | /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */ | ||
61 | int offset = mleaf->type == 4 ? 5 : 2; | ||
62 | s8 bitnum = p[offset]; | ||
63 | if (p[offset+1] & 0x80) { | ||
64 | if (tulip_debug > 1) | ||
65 | netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n", | ||
66 | csr12); | ||
67 | if (mleaf->type == 4) { | ||
68 | if (mleaf->media == 3 && (csr12 & 0x02)) | ||
69 | goto select_next_media; | ||
70 | } | ||
71 | break; | ||
72 | } | ||
73 | if (tulip_debug > 2) | ||
74 | netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n", | ||
75 | csr12, (bitnum >> 1) & 7, | ||
76 | (csr12 & (1 << ((bitnum >> 1) & 7))) != 0, | ||
77 | (bitnum >= 0)); | ||
78 | /* Check that the specified bit has the proper value. */ | ||
79 | if ((bitnum < 0) != | ||
80 | ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) { | ||
81 | if (tulip_debug > 2) | ||
82 | netdev_dbg(dev, "Link beat detected for %s\n", | ||
83 | medianame[mleaf->media & MEDIA_MASK]); | ||
84 | if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */ | ||
85 | goto actually_mii; | ||
86 | netif_carrier_on(dev); | ||
87 | break; | ||
88 | } | ||
89 | netif_carrier_off(dev); | ||
90 | if (tp->medialock) | ||
91 | break; | ||
92 | select_next_media: | ||
93 | if (--tp->cur_index < 0) { | ||
94 | /* We start again, but should instead look for default. */ | ||
95 | tp->cur_index = tp->mtable->leafcount - 1; | ||
96 | } | ||
97 | dev->if_port = tp->mtable->mleaf[tp->cur_index].media; | ||
98 | if (tulip_media_cap[dev->if_port] & MediaIsFD) | ||
99 | goto select_next_media; /* Skip FD entries. */ | ||
100 | if (tulip_debug > 1) | ||
101 | netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n", | ||
102 | medianame[mleaf->media & MEDIA_MASK], | ||
103 | medianame[tp->mtable->mleaf[tp->cur_index].media]); | ||
104 | tulip_select_media(dev, 0); | ||
105 | /* Restart the transmit process. */ | ||
106 | tulip_restart_rxtx(tp); | ||
107 | next_tick = (24*HZ)/10; | ||
108 | break; | ||
109 | } | ||
110 | case 1: case 3: /* 21140, 21142 MII */ | ||
111 | actually_mii: | ||
112 | if (tulip_check_duplex(dev) < 0) { | ||
113 | netif_carrier_off(dev); | ||
114 | next_tick = 3*HZ; | ||
115 | } else { | ||
116 | netif_carrier_on(dev); | ||
117 | next_tick = 60*HZ; | ||
118 | } | ||
119 | break; | ||
120 | case 2: /* 21142 serial block has no link beat. */ | ||
121 | default: | ||
122 | break; | ||
123 | } | ||
124 | } | ||
125 | break; | ||
126 | } | ||
127 | |||
128 | |||
129 | spin_lock_irqsave(&tp->lock, flags); | ||
130 | if (tp->timeout_recovery) { | ||
131 | tulip_tx_timeout_complete(tp, ioaddr); | ||
132 | tp->timeout_recovery = 0; | ||
133 | } | ||
134 | spin_unlock_irqrestore(&tp->lock, flags); | ||
135 | |||
136 | /* mod_timer synchronizes us with potential add_timer calls | ||
137 | * from interrupts. | ||
138 | */ | ||
139 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
140 | } | ||
141 | |||
142 | |||
143 | void mxic_timer(unsigned long data) | ||
144 | { | ||
145 | struct net_device *dev = (struct net_device *)data; | ||
146 | struct tulip_private *tp = netdev_priv(dev); | ||
147 | void __iomem *ioaddr = tp->base_addr; | ||
148 | int next_tick = 60*HZ; | ||
149 | |||
150 | if (tulip_debug > 3) { | ||
151 | dev_info(&dev->dev, "MXIC negotiation status %08x\n", | ||
152 | ioread32(ioaddr + CSR12)); | ||
153 | } | ||
154 | if (next_tick) { | ||
155 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
156 | } | ||
157 | } | ||
158 | |||
159 | |||
160 | void comet_timer(unsigned long data) | ||
161 | { | ||
162 | struct net_device *dev = (struct net_device *)data; | ||
163 | struct tulip_private *tp = netdev_priv(dev); | ||
164 | int next_tick = 60*HZ; | ||
165 | |||
166 | if (tulip_debug > 1) | ||
167 | netdev_dbg(dev, "Comet link status %04x partner capability %04x\n", | ||
168 | tulip_mdio_read(dev, tp->phys[0], 1), | ||
169 | tulip_mdio_read(dev, tp->phys[0], 5)); | ||
170 | /* mod_timer synchronizes us with potential add_timer calls | ||
171 | * from interrupts. | ||
172 | */ | ||
173 | if (tulip_check_duplex(dev) < 0) | ||
174 | netif_carrier_off(dev); | ||
175 | else | ||
176 | netif_carrier_on(dev); | ||
177 | mod_timer(&tp->timer, RUN_AT(next_tick)); | ||
178 | } | ||
179 | |||
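Annotation, not part of the patch: a minimal, self-contained sketch of the timer/work-queue interplay timer.c relies on, written against the timer API of this era (unsigned long data callbacks); all demo_* names are invented. The periodic timer only schedules the work item from timer context, and the work item, after doing the real media sensing in process context, re-arms the timer with mod_timer(), which is what makes the "mod_timer synchronizes us with potential add_timer calls" comment above hold.

/* demo_timer_work.c: illustration only, not part of the tulip patch. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list timer;
	struct work_struct media_work;
};

static struct demo_priv demo;

static void demo_media_task(struct work_struct *work)
{
	struct demo_priv *p = container_of(work, struct demo_priv, media_work);

	/* Media sensing would run here, in process context. */

	/* Re-arm the timer; mod_timer() copes with a concurrent add_timer(). */
	mod_timer(&p->timer, jiffies + 2 * HZ);
}

static void demo_timer(unsigned long data)
{
	struct demo_priv *p = (struct demo_priv *)data;

	/* Timer (softirq) context: defer the real work to a workqueue. */
	schedule_work(&p->media_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo.media_work, demo_media_task);
	init_timer(&demo.timer);
	demo.timer.data = (unsigned long)&demo;
	demo.timer.function = demo_timer;
	mod_timer(&demo.timer, jiffies + 2 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&demo.timer);
	cancel_work_sync(&demo.media_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");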
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h new file mode 100644 index 000000000000..9db528967da9 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/tulip.h | |||
@@ -0,0 +1,573 @@ | |||
1 | /* | ||
2 | drivers/net/tulip/tulip.h | ||
3 | |||
4 | Copyright 2000,2001 The Linux Kernel Team | ||
5 | Written/copyright 1994-2001 by Donald Becker. | ||
6 | |||
7 | This software may be used and distributed according to the terms | ||
8 | of the GNU General Public License, incorporated herein by reference. | ||
9 | |||
10 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
11 | for more information on this driver. | ||
12 | |||
13 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
14 | */ | ||
15 | |||
16 | #ifndef __NET_TULIP_H__ | ||
17 | #define __NET_TULIP_H__ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/types.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/netdevice.h> | ||
23 | #include <linux/ethtool.h> | ||
24 | #include <linux/timer.h> | ||
25 | #include <linux/delay.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <asm/io.h> | ||
28 | #include <asm/irq.h> | ||
29 | #include <asm/unaligned.h> | ||
30 | |||
31 | |||
32 | |||
33 | /* undefine, or define to various debugging levels (>4 == obscene levels) */ | ||
34 | #define TULIP_DEBUG 1 | ||
35 | |||
36 | #ifdef CONFIG_TULIP_MMIO | ||
37 | #define TULIP_BAR 1 /* CBMA */ | ||
38 | #else | ||
39 | #define TULIP_BAR 0 /* CBIO */ | ||
40 | #endif | ||
41 | |||
42 | |||
43 | |||
44 | struct tulip_chip_table { | ||
45 | char *chip_name; | ||
46 | int io_size; | ||
47 | int valid_intrs; /* CSR7 interrupt enable settings */ | ||
48 | int flags; | ||
49 | void (*media_timer) (unsigned long); | ||
50 | work_func_t media_task; | ||
51 | }; | ||
52 | |||
53 | |||
54 | enum tbl_flag { | ||
55 | HAS_MII = 0x00001, | ||
56 | HAS_MEDIA_TABLE = 0x00002, | ||
57 | CSR12_IN_SROM = 0x00004, | ||
58 | ALWAYS_CHECK_MII = 0x00008, | ||
59 | HAS_ACPI = 0x00010, | ||
60 | MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */ | ||
61 | HAS_PNICNWAY = 0x00080, | ||
62 | HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */ | ||
63 | HAS_INTR_MITIGATION = 0x00100, | ||
64 | IS_ASIX = 0x00200, | ||
65 | HAS_8023X = 0x00400, | ||
66 | COMET_MAC_ADDR = 0x00800, | ||
67 | HAS_PCI_MWI = 0x01000, | ||
68 | HAS_PHY_IRQ = 0x02000, | ||
69 | HAS_SWAPPED_SEEPROM = 0x04000, | ||
70 | NEEDS_FAKE_MEDIA_TABLE = 0x08000, | ||
71 | COMET_PM = 0x10000, | ||
72 | }; | ||
73 | |||
74 | |||
75 | /* chip types. careful! order is VERY IMPORTANT here, as these | ||
76 | * are used throughout the driver as indices into arrays */ | ||
77 | /* Note 21142 == 21143. */ | ||
78 | enum chips { | ||
79 | DC21040 = 0, | ||
80 | DC21041 = 1, | ||
81 | DC21140 = 2, | ||
82 | DC21142 = 3, DC21143 = 3, | ||
83 | LC82C168, | ||
84 | MX98713, | ||
85 | MX98715, | ||
86 | MX98725, | ||
87 | AX88140, | ||
88 | PNIC2, | ||
89 | COMET, | ||
90 | COMPEX9881, | ||
91 | I21145, | ||
92 | DM910X, | ||
93 | CONEXANT, | ||
94 | }; | ||
95 | |||
96 | |||
97 | enum MediaIs { | ||
98 | MediaIsFD = 1, | ||
99 | MediaAlwaysFD = 2, | ||
100 | MediaIsMII = 4, | ||
101 | MediaIsFx = 8, | ||
102 | MediaIs100 = 16 | ||
103 | }; | ||
104 | |||
105 | |||
106 | /* Offsets to the Command and Status Registers, "CSRs". All accesses | ||
107 | must use longword instructions and be quadword aligned. */ | ||
108 | enum tulip_offsets { | ||
109 | CSR0 = 0, | ||
110 | CSR1 = 0x08, | ||
111 | CSR2 = 0x10, | ||
112 | CSR3 = 0x18, | ||
113 | CSR4 = 0x20, | ||
114 | CSR5 = 0x28, | ||
115 | CSR6 = 0x30, | ||
116 | CSR7 = 0x38, | ||
117 | CSR8 = 0x40, | ||
118 | CSR9 = 0x48, | ||
119 | CSR10 = 0x50, | ||
120 | CSR11 = 0x58, | ||
121 | CSR12 = 0x60, | ||
122 | CSR13 = 0x68, | ||
123 | CSR14 = 0x70, | ||
124 | CSR15 = 0x78, | ||
125 | CSR18 = 0x88, | ||
126 | CSR19 = 0x8c, | ||
127 | CSR20 = 0x90, | ||
128 | CSR27 = 0xAC, | ||
129 | CSR28 = 0xB0, | ||
130 | }; | ||
131 | |||
132 | /* register offset and bits for CFDD PCI config reg */ | ||
133 | enum pci_cfg_driver_reg { | ||
134 | CFDD = 0x40, | ||
135 | CFDD_Sleep = (1 << 31), | ||
136 | CFDD_Snooze = (1 << 30), | ||
137 | }; | ||
138 | |||
139 | #define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber) | ||
140 | |||
141 | /* The bits in the CSR5 status registers, mostly interrupt sources. */ | ||
142 | enum status_bits { | ||
143 | TimerInt = 0x800, | ||
144 | SystemError = 0x2000, | ||
145 | TPLnkFail = 0x1000, | ||
146 | TPLnkPass = 0x10, | ||
147 | NormalIntr = 0x10000, | ||
148 | AbnormalIntr = 0x8000, | ||
149 | RxJabber = 0x200, | ||
150 | RxDied = 0x100, | ||
151 | RxNoBuf = 0x80, | ||
152 | RxIntr = 0x40, | ||
153 | TxFIFOUnderflow = 0x20, | ||
154 | RxErrIntr = 0x10, | ||
155 | TxJabber = 0x08, | ||
156 | TxNoBuf = 0x04, | ||
157 | TxDied = 0x02, | ||
158 | TxIntr = 0x01, | ||
159 | }; | ||
160 | |||
161 | /* bit mask for CSR5 TX/RX process state */ | ||
162 | #define CSR5_TS 0x00700000 | ||
163 | #define CSR5_RS 0x000e0000 | ||
164 | |||
165 | enum tulip_mode_bits { | ||
166 | TxThreshold = (1 << 22), | ||
167 | FullDuplex = (1 << 9), | ||
168 | TxOn = 0x2000, | ||
169 | AcceptBroadcast = 0x0100, | ||
170 | AcceptAllMulticast = 0x0080, | ||
171 | AcceptAllPhys = 0x0040, | ||
172 | AcceptRunt = 0x0008, | ||
173 | RxOn = 0x0002, | ||
174 | RxTx = (TxOn | RxOn), | ||
175 | }; | ||
176 | |||
177 | |||
178 | enum tulip_busconfig_bits { | ||
179 | MWI = (1 << 24), | ||
180 | MRL = (1 << 23), | ||
181 | MRM = (1 << 21), | ||
182 | CALShift = 14, | ||
183 | BurstLenShift = 8, | ||
184 | }; | ||
185 | |||
186 | |||
187 | /* The Tulip Rx and Tx buffer descriptors. */ | ||
188 | struct tulip_rx_desc { | ||
189 | __le32 status; | ||
190 | __le32 length; | ||
191 | __le32 buffer1; | ||
192 | __le32 buffer2; | ||
193 | }; | ||
194 | |||
195 | |||
196 | struct tulip_tx_desc { | ||
197 | __le32 status; | ||
198 | __le32 length; | ||
199 | __le32 buffer1; | ||
200 | __le32 buffer2; /* We use only buffer 1. */ | ||
201 | }; | ||
202 | |||
203 | |||
204 | enum desc_status_bits { | ||
205 | DescOwned = 0x80000000, | ||
206 | DescWholePkt = 0x60000000, | ||
207 | DescEndPkt = 0x40000000, | ||
208 | DescStartPkt = 0x20000000, | ||
209 | DescEndRing = 0x02000000, | ||
210 | DescUseLink = 0x01000000, | ||
211 | |||
212 | /* | ||
213 | * Error summary flag is the logical OR of the 'CRC Error', 'Collision Seen', | ||
214 | * 'Frame Too Long', 'Runt' and 'Descriptor Error' flags generated | ||
215 | * within the tulip chip. | ||
216 | */ | ||
217 | RxDescErrorSummary = 0x8000, | ||
218 | RxDescCRCError = 0x0002, | ||
219 | RxDescCollisionSeen = 0x0040, | ||
220 | |||
221 | /* | ||
222 | * 'Frame Too Long' flag is set if packet length including CRC exceeds | ||
223 | * 1518. However, a full sized VLAN tagged frame is 1522 bytes | ||
224 | * including CRC. | ||
225 | * | ||
226 | * The tulip chip does not block oversized frames, and if this flag is | ||
227 | * set on a receive descriptor it does not indicate the frame has been | ||
228 | * truncated. The receive descriptor also includes the actual length. | ||
229 | * Therefore we can safely ignore this flag and check the length | ||
230 | * ourselves. | ||
231 | */ | ||
232 | RxDescFrameTooLong = 0x0080, | ||
233 | RxDescRunt = 0x0800, | ||
234 | RxDescDescErr = 0x4000, | ||
235 | RxWholePkt = 0x00000300, | ||
236 | /* | ||
237 | * Top three bits of 14 bit frame length (status bits 27-29) should | ||
238 | * never be set as that would make the frame over 2047 bytes. The Receive | ||
239 | * Watchdog flag (bit 4) may indicate the length is over 2048 and the | ||
240 | * length field is invalid. | ||
241 | */ | ||
242 | RxLengthOver2047 = 0x38000010 | ||
243 | }; | ||
244 | |||
245 | |||
246 | enum t21143_csr6_bits { | ||
247 | csr6_sc = (1<<31), | ||
248 | csr6_ra = (1<<30), | ||
249 | csr6_ign_dest_msb = (1<<26), | ||
250 | csr6_mbo = (1<<25), | ||
251 | csr6_scr = (1<<24), /* scramble mode flag: can't be set */ | ||
252 | csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */ | ||
253 | csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */ | ||
254 | csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */ | ||
255 | csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */ | ||
256 | csr6_ps = (1<<18), /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */ | ||
257 | csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */ | ||
258 | csr6_trh = (1<<15), /* Transmit Threshold high bit */ | ||
259 | csr6_trl = (1<<14), /* Transmit Threshold low bit */ | ||
260 | |||
261 | /*************************************************************** | ||
262 | * This table shows transmit threshold values based on media * | ||
263 | * and these two registers (from PNIC1 & 2 docs) Note: this is * | ||
264 | * all meaningless if sf is set. * | ||
265 | ***************************************************************/ | ||
266 | |||
267 | /*********************************** | ||
268 | * (trh,trl) * 100BaseTX * 10BaseT * | ||
269 | *********************************** | ||
270 | * (0,0) * 128 * 72 * | ||
271 | * (0,1) * 256 * 96 * | ||
272 | * (1,0) * 512 * 128 * | ||
273 | * (1,1) * 1024 * 160 * | ||
274 | ***********************************/ | ||
275 | |||
276 | csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */ | ||
277 | csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */ | ||
278 | csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */ | ||
279 | /* set both and you get (PHY) loopback */ | ||
280 | csr6_fd = (1<<9), /* Full duplex mode, disables heartbeat, no loopback */ | ||
281 | csr6_pm = (1<<7), /* Pass All Multicast */ | ||
282 | csr6_pr = (1<<6), /* Promiscuous mode */ | ||
283 | csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */ | ||
284 | csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */ | ||
285 | csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */ | ||
286 | csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */ | ||
287 | csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */ | ||
288 | |||
289 | csr6_mask_capture = (csr6_sc | csr6_ca), | ||
290 | csr6_mask_defstate = (csr6_mask_capture | csr6_mbo), | ||
291 | csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps), | ||
292 | csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl), | ||
293 | csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd), | ||
294 | csr6_mask_fullpromisc = (csr6_pr | csr6_pm), | ||
295 | csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if), | ||
296 | csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd), | ||
297 | }; | ||
298 | |||
299 | enum tulip_comet_csr13_bits { | ||
300 | /* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they | ||
301 | * determine which link status transition wakes up if LSCE is | ||
302 | * enabled */ | ||
303 | comet_csr13_linkoffe = (1 << 17), | ||
304 | comet_csr13_linkone = (1 << 16), | ||
305 | comet_csr13_wfre = (1 << 10), | ||
306 | comet_csr13_mpre = (1 << 9), | ||
307 | comet_csr13_lsce = (1 << 8), | ||
308 | comet_csr13_wfr = (1 << 2), | ||
309 | comet_csr13_mpr = (1 << 1), | ||
310 | comet_csr13_lsc = (1 << 0), | ||
311 | }; | ||
312 | |||
313 | enum tulip_comet_csr18_bits { | ||
314 | comet_csr18_pmes_sticky = (1 << 24), | ||
315 | comet_csr18_pm_mode = (1 << 19), | ||
316 | comet_csr18_apm_mode = (1 << 18), | ||
317 | comet_csr18_d3a = (1 << 7) | ||
318 | }; | ||
319 | |||
320 | enum tulip_comet_csr20_bits { | ||
321 | comet_csr20_pmes = (1 << 15), | ||
322 | }; | ||
323 | |||
324 | /* Keep the ring sizes a power of two for efficiency. | ||
325 | Making the Tx ring too large decreases the effectiveness of channel | ||
326 | bonding and packet priority. | ||
327 | There are no ill effects from too-large receive rings. */ | ||
328 | |||
329 | #define TX_RING_SIZE 32 | ||
330 | #define RX_RING_SIZE 128 | ||
331 | #define MEDIA_MASK 31 | ||
332 | |||
333 | /* The receiver on the DC21143 rev 65 can fail to close the last | ||
334 | * receive descriptor in certain circumstances (see errata) when | ||
335 | * using MWI. This can only occur if the receive buffer ends on | ||
336 | * a cache line boundary, so the "+ 4" below ensures it doesn't. | ||
337 | */ | ||
338 | #define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */ | ||
339 | |||
340 | #define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ | ||
341 | |||
342 | #if defined(__sparc__) || defined(__hppa__) | ||
343 | /* The UltraSparc PCI controllers will disconnect at every 64-byte | ||
344 | * crossing anyways so it makes no sense to tell Tulip to burst | ||
345 | * any more than that. | ||
346 | */ | ||
347 | #define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */ | ||
348 | #else | ||
349 | #define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */ | ||
350 | #endif | ||
351 | |||
352 | |||
353 | /* Ring-wrap flag in length field, used for the last ring entry. | ||
354 | 0x01000000 means chain on buffer2 address, | ||
355 | 0x02000000 means use the ring start address in CSR2/3. | ||
356 | Note: Some work-alike chips do not function correctly in chained mode. | ||
357 | The ASIX chip works only in chained mode. | ||
358 | Thus we indicate ring mode, but always write the 'next' field for | ||
359 | chained mode as well. | ||
360 | */ | ||
361 | #define DESC_RING_WRAP 0x02000000 | ||
362 | |||
363 | |||
364 | #define EEPROM_SIZE 512 /* 2 << EEPROM_ADDRLEN */ | ||
365 | |||
366 | |||
367 | #define RUN_AT(x) (jiffies + (x)) | ||
368 | |||
369 | #define get_u16(ptr) get_unaligned_le16((ptr)) | ||
370 | |||
371 | struct medialeaf { | ||
372 | u8 type; | ||
373 | u8 media; | ||
374 | unsigned char *leafdata; | ||
375 | }; | ||
376 | |||
377 | |||
378 | struct mediatable { | ||
379 | u16 defaultmedia; | ||
380 | u8 leafcount; | ||
381 | u8 csr12dir; /* General purpose pin directions. */ | ||
382 | unsigned has_mii:1; | ||
383 | unsigned has_nonmii:1; | ||
384 | unsigned has_reset:6; | ||
385 | u32 csr15dir; | ||
386 | u32 csr15val; /* 21143 NWay setting. */ | ||
387 | struct medialeaf mleaf[0]; | ||
388 | }; | ||
389 | |||
390 | |||
391 | struct mediainfo { | ||
392 | struct mediainfo *next; | ||
393 | int info_type; | ||
394 | int index; | ||
395 | unsigned char *info; | ||
396 | }; | ||
397 | |||
398 | struct ring_info { | ||
399 | struct sk_buff *skb; | ||
400 | dma_addr_t mapping; | ||
401 | }; | ||
402 | |||
403 | |||
404 | struct tulip_private { | ||
405 | const char *product_name; | ||
406 | struct net_device *next_module; | ||
407 | struct tulip_rx_desc *rx_ring; | ||
408 | struct tulip_tx_desc *tx_ring; | ||
409 | dma_addr_t rx_ring_dma; | ||
410 | dma_addr_t tx_ring_dma; | ||
411 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | ||
412 | struct ring_info tx_buffers[TX_RING_SIZE]; | ||
413 | /* The addresses of receive-in-place skbuffs. */ | ||
414 | struct ring_info rx_buffers[RX_RING_SIZE]; | ||
415 | u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */ | ||
416 | int chip_id; | ||
417 | int revision; | ||
418 | int flags; | ||
419 | struct napi_struct napi; | ||
420 | struct timer_list timer; /* Media selection timer. */ | ||
421 | struct timer_list oom_timer; /* Out of memory timer. */ | ||
422 | u32 mc_filter[2]; | ||
423 | spinlock_t lock; | ||
424 | spinlock_t mii_lock; | ||
425 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ | ||
426 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ | ||
427 | |||
428 | #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION | ||
429 | int mit_on; | ||
430 | #endif | ||
431 | unsigned int full_duplex:1; /* Full-duplex operation requested. */ | ||
432 | unsigned int full_duplex_lock:1; | ||
433 | unsigned int fake_addr:1; /* Multiport board faked address. */ | ||
434 | unsigned int default_port:4; /* Last dev->if_port value. */ | ||
435 | unsigned int media2:4; /* Secondary monitored media port. */ | ||
436 | unsigned int medialock:1; /* Don't sense media type. */ | ||
437 | unsigned int mediasense:1; /* Media sensing in progress. */ | ||
438 | unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */ | ||
439 | unsigned int timeout_recovery:1; | ||
440 | unsigned int csr0; /* CSR0 setting. */ | ||
441 | unsigned int csr6; /* Current CSR6 control settings. */ | ||
442 | unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */ | ||
443 | void (*link_change) (struct net_device * dev, int csr5); | ||
444 | struct ethtool_wolinfo wolinfo; /* WOL settings */ | ||
445 | u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */ | ||
446 | u16 lpar; /* 21143 Link partner ability. */ | ||
447 | u16 advertising[4]; | ||
448 | signed char phys[4], mii_cnt; /* MII device addresses. */ | ||
449 | struct mediatable *mtable; | ||
450 | int cur_index; /* Current media index. */ | ||
451 | int saved_if_port; | ||
452 | struct pci_dev *pdev; | ||
453 | int ttimer; | ||
454 | int susp_rx; | ||
455 | unsigned long nir; | ||
456 | void __iomem *base_addr; | ||
457 | int csr12_shadow; | ||
458 | int pad0; /* Used for 8-byte alignment */ | ||
459 | struct work_struct media_work; | ||
460 | struct net_device *dev; | ||
461 | }; | ||
462 | |||
463 | |||
464 | struct eeprom_fixup { | ||
465 | char *name; | ||
466 | unsigned char addr0; | ||
467 | unsigned char addr1; | ||
468 | unsigned char addr2; | ||
469 | u16 newtable[32]; /* Max length below. */ | ||
470 | }; | ||
471 | |||
472 | |||
473 | /* 21142.c */ | ||
474 | extern u16 t21142_csr14[]; | ||
475 | void t21142_media_task(struct work_struct *work); | ||
476 | void t21142_start_nway(struct net_device *dev); | ||
477 | void t21142_lnk_change(struct net_device *dev, int csr5); | ||
478 | |||
479 | |||
480 | /* PNIC2.c */ | ||
481 | void pnic2_lnk_change(struct net_device *dev, int csr5); | ||
482 | void pnic2_timer(unsigned long data); | ||
483 | void pnic2_start_nway(struct net_device *dev); | ||
484 | void pnic2_lnk_change(struct net_device *dev, int csr5); | ||
485 | |||
486 | /* eeprom.c */ | ||
487 | void tulip_parse_eeprom(struct net_device *dev); | ||
488 | int tulip_read_eeprom(struct net_device *dev, int location, int addr_len); | ||
489 | |||
490 | /* interrupt.c */ | ||
491 | extern unsigned int tulip_max_interrupt_work; | ||
492 | extern int tulip_rx_copybreak; | ||
493 | irqreturn_t tulip_interrupt(int irq, void *dev_instance); | ||
494 | int tulip_refill_rx(struct net_device *dev); | ||
495 | #ifdef CONFIG_TULIP_NAPI | ||
496 | int tulip_poll(struct napi_struct *napi, int budget); | ||
497 | #endif | ||
498 | |||
499 | |||
500 | /* media.c */ | ||
501 | int tulip_mdio_read(struct net_device *dev, int phy_id, int location); | ||
502 | void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value); | ||
503 | void tulip_select_media(struct net_device *dev, int startup); | ||
504 | int tulip_check_duplex(struct net_device *dev); | ||
505 | void tulip_find_mii (struct net_device *dev, int board_idx); | ||
506 | |||
507 | /* pnic.c */ | ||
508 | void pnic_do_nway(struct net_device *dev); | ||
509 | void pnic_lnk_change(struct net_device *dev, int csr5); | ||
510 | void pnic_timer(unsigned long data); | ||
511 | |||
512 | /* timer.c */ | ||
513 | void tulip_media_task(struct work_struct *work); | ||
514 | void mxic_timer(unsigned long data); | ||
515 | void comet_timer(unsigned long data); | ||
516 | |||
517 | /* tulip_core.c */ | ||
518 | extern int tulip_debug; | ||
519 | extern const char * const medianame[]; | ||
520 | extern const char tulip_media_cap[]; | ||
521 | extern struct tulip_chip_table tulip_tbl[]; | ||
522 | void oom_timer(unsigned long data); | ||
523 | extern u8 t21040_csr13[]; | ||
524 | |||
525 | static inline void tulip_start_rxtx(struct tulip_private *tp) | ||
526 | { | ||
527 | void __iomem *ioaddr = tp->base_addr; | ||
528 | iowrite32(tp->csr6 | RxTx, ioaddr + CSR6); | ||
529 | barrier(); | ||
530 | (void) ioread32(ioaddr + CSR6); /* mmio sync */ | ||
531 | } | ||
532 | |||
533 | static inline void tulip_stop_rxtx(struct tulip_private *tp) | ||
534 | { | ||
535 | void __iomem *ioaddr = tp->base_addr; | ||
536 | u32 csr6 = ioread32(ioaddr + CSR6); | ||
537 | |||
538 | if (csr6 & RxTx) { | ||
539 | unsigned i=1300/10; | ||
540 | iowrite32(csr6 & ~RxTx, ioaddr + CSR6); | ||
541 | barrier(); | ||
542 | /* wait until in-flight frame completes. | ||
543 | * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin) | ||
544 | * Typically expect this loop to end in < 50 us on 100BT. | ||
545 | */ | ||
546 | while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS))) | ||
547 | udelay(10); | ||
548 | |||
549 | if (!i) | ||
550 | netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n", | ||
551 | ioread32(ioaddr + CSR5), | ||
552 | ioread32(ioaddr + CSR6)); | ||
553 | } | ||
554 | } | ||
555 | |||
556 | static inline void tulip_restart_rxtx(struct tulip_private *tp) | ||
557 | { | ||
558 | tulip_stop_rxtx(tp); | ||
559 | udelay(5); | ||
560 | tulip_start_rxtx(tp); | ||
561 | } | ||
562 | |||
563 | static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr) | ||
564 | { | ||
565 | /* Stop and restart the chip's Tx processes. */ | ||
566 | tulip_restart_rxtx(tp); | ||
567 | /* Trigger an immediate transmit demand. */ | ||
568 | iowrite32(0, ioaddr + CSR1); | ||
569 | |||
570 | tp->dev->stats.tx_errors++; | ||
571 | } | ||
572 | |||
573 | #endif /* __NET_TULIP_H__ */ | ||
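A quick worked check of the drain budget used by tulip_stop_rxtx() above (annotation, not part of the patch): a maximum-size frame at 10BASE-T occupies 1500 bytes x 8 bits / 10 Mbit/s = 1200 us on the wire; adding the 100 us margin gives 1300 us, and since CSR5 is polled in udelay(10) steps the loop bound works out to 1300 / 10 = 130 iterations, which is exactly the "unsigned i = 1300/10" initializer.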
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c new file mode 100644 index 000000000000..1246998a677c --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c | |||
@@ -0,0 +1,2011 @@ | |||
1 | /* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. | ||
2 | |||
3 | Copyright 2000,2001 The Linux Kernel Team | ||
4 | Written/copyright 1994-2001 by Donald Becker. | ||
5 | |||
6 | This software may be used and distributed according to the terms | ||
7 | of the GNU General Public License, incorporated herein by reference. | ||
8 | |||
9 | Please refer to Documentation/DocBook/tulip-user.{pdf,ps,html} | ||
10 | for more information on this driver. | ||
11 | |||
12 | Please submit bugs to http://bugzilla.kernel.org/ . | ||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) "tulip: " fmt | ||
16 | |||
17 | #define DRV_NAME "tulip" | ||
18 | #ifdef CONFIG_TULIP_NAPI | ||
19 | #define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */ | ||
20 | #else | ||
21 | #define DRV_VERSION "1.1.15" | ||
22 | #endif | ||
23 | #define DRV_RELDATE "Feb 27, 2007" | ||
24 | |||
25 | |||
26 | #include <linux/module.h> | ||
27 | #include <linux/pci.h> | ||
28 | #include <linux/slab.h> | ||
29 | #include "tulip.h" | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/mii.h> | ||
35 | #include <linux/crc32.h> | ||
36 | #include <asm/unaligned.h> | ||
37 | #include <asm/uaccess.h> | ||
38 | |||
39 | #ifdef CONFIG_SPARC | ||
40 | #include <asm/prom.h> | ||
41 | #endif | ||
42 | |||
43 | static char version[] __devinitdata = | ||
44 | "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n"; | ||
45 | |||
46 | /* A few user-configurable values. */ | ||
47 | |||
48 | /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ | ||
49 | static unsigned int max_interrupt_work = 25; | ||
50 | |||
51 | #define MAX_UNITS 8 | ||
52 | /* Used to pass the full-duplex flag, etc. */ | ||
53 | static int full_duplex[MAX_UNITS]; | ||
54 | static int options[MAX_UNITS]; | ||
55 | static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */ | ||
56 | |||
57 | /* The possible media types that can be set in options[] are: */ | ||
58 | const char * const medianame[32] = { | ||
59 | "10baseT", "10base2", "AUI", "100baseTx", | ||
60 | "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx", | ||
61 | "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII", | ||
62 | "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4", | ||
63 | "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19", | ||
64 | "","","","", "","","","", "","","","Transceiver reset", | ||
65 | }; | ||
66 | |||
67 | /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */ | ||
68 | #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \ | ||
69 | defined(CONFIG_SPARC) || defined(__ia64__) || \ | ||
70 | defined(__sh__) || defined(__mips__) | ||
71 | static int rx_copybreak = 1518; | ||
72 | #else | ||
73 | static int rx_copybreak = 100; | ||
74 | #endif | ||
75 | |||
76 | /* | ||
77 | Set the bus performance register. | ||
78 | Typical: Set 16 longword cache alignment, no burst limit. | ||
79 | Cache alignment bits 15:14 Burst length 13:8 | ||
80 | 0000 No alignment 0x00000000 unlimited 0800 8 longwords | ||
81 | 4000 8 longwords 0100 1 longword 1000 16 longwords | ||
82 | 8000 16 longwords 0200 2 longwords 2000 32 longwords | ||
83 | C000 32 longwords 0400 4 longwords | ||
84 | Warning: many older 486 systems are broken and require setting 0x00A04800 | ||
85 | 8 longword cache alignment, 8 longword burst. | ||
86 | ToDo: Non-Intel setting could be better. | ||
87 | */ | ||
88 | |||
89 | #if defined(__alpha__) || defined(__ia64__) | ||
90 | static int csr0 = 0x01A00000 | 0xE000; | ||
91 | #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__) | ||
92 | static int csr0 = 0x01A00000 | 0x8000; | ||
93 | #elif defined(CONFIG_SPARC) || defined(__hppa__) | ||
94 | /* The UltraSparc PCI controllers will disconnect at every 64-byte | ||
95 | * crossing anyways so it makes no sense to tell Tulip to burst | ||
96 | * any more than that. | ||
97 | */ | ||
98 | static int csr0 = 0x01A00000 | 0x9000; | ||
99 | #elif defined(__arm__) || defined(__sh__) | ||
100 | static int csr0 = 0x01A00000 | 0x4800; | ||
101 | #elif defined(__mips__) | ||
102 | static int csr0 = 0x00200000 | 0x4000; | ||
103 | #else | ||
104 | #warning Processor architecture undefined! | ||
105 | static int csr0 = 0x00A00000 | 0x4800; | ||
106 | #endif | ||
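Decoding the i386/x86-64 default above as a worked example (annotation, not part of the patch): csr0 = 0x01A00000 | 0x8000 sets MWI | MRL | MRM (bits 24, 23 and 21 of CSR0, per enum tulip_busconfig_bits in tulip.h), selects 16-longword cache alignment (0x8000 in bits 15:14 of the table above) and leaves the burst-length field (bits 13:8) at zero, i.e. unlimited bursts. The 486 value quoted in the warning, 0x00A04800, is the same register without MWI, with 8-longword alignment (0x4000) and an 8-longword burst limit (0x0800).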
107 | |||
108 | /* Operational parameters that usually are not changed. */ | ||
109 | /* Time in jiffies before concluding the transmitter is hung. */ | ||
110 | #define TX_TIMEOUT (4*HZ) | ||
111 | |||
112 | |||
113 | MODULE_AUTHOR("The Linux Kernel Team"); | ||
114 | MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver"); | ||
115 | MODULE_LICENSE("GPL"); | ||
116 | MODULE_VERSION(DRV_VERSION); | ||
117 | module_param(tulip_debug, int, 0); | ||
118 | module_param(max_interrupt_work, int, 0); | ||
119 | module_param(rx_copybreak, int, 0); | ||
120 | module_param(csr0, int, 0); | ||
121 | module_param_array(options, int, NULL, 0); | ||
122 | module_param_array(full_duplex, int, NULL, 0); | ||
123 | |||
124 | #ifdef TULIP_DEBUG | ||
125 | int tulip_debug = TULIP_DEBUG; | ||
126 | #else | ||
127 | int tulip_debug = 1; | ||
128 | #endif | ||
129 | |||
130 | static void tulip_timer(unsigned long data) | ||
131 | { | ||
132 | struct net_device *dev = (struct net_device *)data; | ||
133 | struct tulip_private *tp = netdev_priv(dev); | ||
134 | |||
135 | if (netif_running(dev)) | ||
136 | schedule_work(&tp->media_work); | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * This table is used during operation for capabilities and the media timer. | ||
141 | * | ||
142 | * It is indexed via the values in 'enum chips' | ||
143 | */ | ||
144 | |||
145 | struct tulip_chip_table tulip_tbl[] = { | ||
146 | { }, /* placeholder for array, slot unused currently */ | ||
147 | { }, /* placeholder for array, slot unused currently */ | ||
148 | |||
149 | /* DC21140 */ | ||
150 | { "Digital DS21140 Tulip", 128, 0x0001ebef, | ||
151 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer, | ||
152 | tulip_media_task }, | ||
153 | |||
154 | /* DC21142, DC21143 */ | ||
155 | { "Digital DS21142/43 Tulip", 128, 0x0801fbff, | ||
156 | HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY | ||
157 | | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task }, | ||
158 | |||
159 | /* LC82C168 */ | ||
160 | { "Lite-On 82c168 PNIC", 256, 0x0001fbef, | ||
161 | HAS_MII | HAS_PNICNWAY, pnic_timer, }, | ||
162 | |||
163 | /* MX98713 */ | ||
164 | { "Macronix 98713 PMAC", 128, 0x0001ebef, | ||
165 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, | ||
166 | |||
167 | /* MX98715 */ | ||
168 | { "Macronix 98715 PMAC", 256, 0x0001ebef, | ||
169 | HAS_MEDIA_TABLE, mxic_timer, }, | ||
170 | |||
171 | /* MX98725 */ | ||
172 | { "Macronix 98725 PMAC", 256, 0x0001ebef, | ||
173 | HAS_MEDIA_TABLE, mxic_timer, }, | ||
174 | |||
175 | /* AX88140 */ | ||
176 | { "ASIX AX88140", 128, 0x0001fbff, | ||
177 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | ||
178 | | IS_ASIX, tulip_timer, tulip_media_task }, | ||
179 | |||
180 | /* PNIC2 */ | ||
181 | { "Lite-On PNIC-II", 256, 0x0801fbff, | ||
182 | HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, }, | ||
183 | |||
184 | /* COMET */ | ||
185 | { "ADMtek Comet", 256, 0x0001abef, | ||
186 | HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, }, | ||
187 | |||
188 | /* COMPEX9881 */ | ||
189 | { "Compex 9881 PMAC", 128, 0x0001ebef, | ||
190 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, }, | ||
191 | |||
192 | /* I21145 */ | ||
193 | { "Intel DS21145 Tulip", 128, 0x0801fbff, | ||
194 | HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | ||
195 | | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, | ||
196 | |||
197 | /* DM910X */ | ||
198 | #ifdef CONFIG_TULIP_DM910X | ||
199 | { "Davicom DM9102/DM9102A", 128, 0x0001ebef, | ||
200 | HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, | ||
201 | tulip_timer, tulip_media_task }, | ||
202 | #else | ||
203 | { NULL }, | ||
204 | #endif | ||
205 | |||
206 | /* RS7112 */ | ||
207 | { "Conexant LANfinity", 256, 0x0001ebef, | ||
208 | HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task }, | ||
209 | |||
210 | }; | ||
211 | |||
212 | |||
213 | static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = { | ||
214 | { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 }, | ||
215 | { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 }, | ||
216 | { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 }, | ||
217 | { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 }, | ||
218 | { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, | ||
219 | /* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/ | ||
220 | { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 }, | ||
221 | { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 }, | ||
222 | { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
223 | { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
224 | { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
225 | { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
226 | { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
227 | { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
228 | { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
229 | { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
230 | { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
231 | { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
232 | { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, | ||
233 | { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, | ||
234 | #ifdef CONFIG_TULIP_DM910X | ||
235 | { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, | ||
236 | { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, | ||
237 | #endif | ||
238 | { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
239 | { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, | ||
240 | { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
241 | { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
242 | { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
243 | { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
244 | { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT }, | ||
245 | { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
246 | { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
247 | { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
248 | { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
249 | { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */ | ||
250 | { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */ | ||
251 | { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */ | ||
252 | { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, | ||
253 | { } /* terminate list */ | ||
254 | }; | ||
255 | MODULE_DEVICE_TABLE(pci, tulip_pci_tbl); | ||
256 | |||
257 | |||
258 | /* A full-duplex map for media types. */ | ||
259 | const char tulip_media_cap[32] = | ||
260 | {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, }; | ||
261 | |||
262 | static void tulip_tx_timeout(struct net_device *dev); | ||
263 | static void tulip_init_ring(struct net_device *dev); | ||
264 | static void tulip_free_ring(struct net_device *dev); | ||
265 | static netdev_tx_t tulip_start_xmit(struct sk_buff *skb, | ||
266 | struct net_device *dev); | ||
267 | static int tulip_open(struct net_device *dev); | ||
268 | static int tulip_close(struct net_device *dev); | ||
269 | static void tulip_up(struct net_device *dev); | ||
270 | static void tulip_down(struct net_device *dev); | ||
271 | static struct net_device_stats *tulip_get_stats(struct net_device *dev); | ||
272 | static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
273 | static void set_rx_mode(struct net_device *dev); | ||
274 | static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts); | ||
275 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
276 | static void poll_tulip(struct net_device *dev); | ||
277 | #endif | ||
278 | |||
279 | static void tulip_set_power_state (struct tulip_private *tp, | ||
280 | int sleep, int snooze) | ||
281 | { | ||
282 | if (tp->flags & HAS_ACPI) { | ||
283 | u32 tmp, newtmp; | ||
284 | pci_read_config_dword (tp->pdev, CFDD, &tmp); | ||
285 | newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze); | ||
286 | if (sleep) | ||
287 | newtmp |= CFDD_Sleep; | ||
288 | else if (snooze) | ||
289 | newtmp |= CFDD_Snooze; | ||
290 | if (tmp != newtmp) | ||
291 | pci_write_config_dword (tp->pdev, CFDD, newtmp); | ||
292 | } | ||
293 | |||
294 | } | ||
295 | |||
296 | |||
297 | static void tulip_up(struct net_device *dev) | ||
298 | { | ||
299 | struct tulip_private *tp = netdev_priv(dev); | ||
300 | void __iomem *ioaddr = tp->base_addr; | ||
301 | int next_tick = 3*HZ; | ||
302 | u32 reg; | ||
303 | int i; | ||
304 | |||
305 | #ifdef CONFIG_TULIP_NAPI | ||
306 | napi_enable(&tp->napi); | ||
307 | #endif | ||
308 | |||
309 | /* Wake the chip from sleep/snooze mode. */ | ||
310 | tulip_set_power_state (tp, 0, 0); | ||
311 | |||
312 | /* Disable all WOL events */ | ||
313 | pci_enable_wake(tp->pdev, PCI_D3hot, 0); | ||
314 | pci_enable_wake(tp->pdev, PCI_D3cold, 0); | ||
315 | tulip_set_wolopts(tp->pdev, 0); | ||
316 | |||
317 | /* On some chip revs we must set the MII/SYM port before the reset!? */ | ||
318 | if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii)) | ||
319 | iowrite32(0x00040000, ioaddr + CSR6); | ||
320 | |||
321 | /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */ | ||
322 | iowrite32(0x00000001, ioaddr + CSR0); | ||
323 | pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ | ||
324 | udelay(100); | ||
325 | |||
326 | /* Deassert reset. | ||
327 | Wait the specified 50 PCI cycles after a reset by initializing | ||
328 | Tx and Rx queues and the address filter list. */ | ||
329 | iowrite32(tp->csr0, ioaddr + CSR0); | ||
330 | pci_read_config_dword(tp->pdev, PCI_COMMAND, ®); /* flush write */ | ||
331 | udelay(100); | ||
332 | |||
333 | if (tulip_debug > 1) | ||
334 | netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq); | ||
335 | |||
336 | iowrite32(tp->rx_ring_dma, ioaddr + CSR3); | ||
337 | iowrite32(tp->tx_ring_dma, ioaddr + CSR4); | ||
338 | tp->cur_rx = tp->cur_tx = 0; | ||
339 | tp->dirty_rx = tp->dirty_tx = 0; | ||
340 | |||
341 | if (tp->flags & MC_HASH_ONLY) { | ||
342 | u32 addr_low = get_unaligned_le32(dev->dev_addr); | ||
343 | u32 addr_high = get_unaligned_le16(dev->dev_addr + 4); | ||
344 | if (tp->chip_id == AX88140) { | ||
345 | iowrite32(0, ioaddr + CSR13); | ||
346 | iowrite32(addr_low, ioaddr + CSR14); | ||
347 | iowrite32(1, ioaddr + CSR13); | ||
348 | iowrite32(addr_high, ioaddr + CSR14); | ||
349 | } else if (tp->flags & COMET_MAC_ADDR) { | ||
350 | iowrite32(addr_low, ioaddr + 0xA4); | ||
351 | iowrite32(addr_high, ioaddr + 0xA8); | ||
352 | iowrite32(0, ioaddr + CSR27); | ||
353 | iowrite32(0, ioaddr + CSR28); | ||
354 | } | ||
355 | } else { | ||
356 | /* This is set_rx_mode(), but without starting the transmitter. */ | ||
357 | u16 *eaddrs = (u16 *)dev->dev_addr; | ||
358 | u16 *setup_frm = &tp->setup_frame[15*6]; | ||
359 | dma_addr_t mapping; | ||
360 | |||
361 | /* 21140 bug: you must add the broadcast address. */ | ||
362 | memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame)); | ||
363 | /* Fill the final entry of the table with our physical address. */ | ||
364 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; | ||
365 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; | ||
366 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; | ||
367 | |||
368 | mapping = pci_map_single(tp->pdev, tp->setup_frame, | ||
369 | sizeof(tp->setup_frame), | ||
370 | PCI_DMA_TODEVICE); | ||
371 | tp->tx_buffers[tp->cur_tx].skb = NULL; | ||
372 | tp->tx_buffers[tp->cur_tx].mapping = mapping; | ||
373 | |||
374 | /* Put the setup frame on the Tx list. */ | ||
375 | tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192); | ||
376 | tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); | ||
377 | tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned); | ||
378 | |||
379 | tp->cur_tx++; | ||
380 | } | ||
381 | |||
382 | tp->saved_if_port = dev->if_port; | ||
383 | if (dev->if_port == 0) | ||
384 | dev->if_port = tp->default_port; | ||
385 | |||
386 | /* Allow selecting a default media. */ | ||
387 | i = 0; | ||
388 | if (tp->mtable == NULL) | ||
389 | goto media_picked; | ||
390 | if (dev->if_port) { | ||
391 | int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 : | ||
392 | (dev->if_port == 12 ? 0 : dev->if_port); | ||
393 | for (i = 0; i < tp->mtable->leafcount; i++) | ||
394 | if (tp->mtable->mleaf[i].media == looking_for) { | ||
395 | dev_info(&dev->dev, | ||
396 | "Using user-specified media %s\n", | ||
397 | medianame[dev->if_port]); | ||
398 | goto media_picked; | ||
399 | } | ||
400 | } | ||
401 | if ((tp->mtable->defaultmedia & 0x0800) == 0) { | ||
402 | int looking_for = tp->mtable->defaultmedia & MEDIA_MASK; | ||
403 | for (i = 0; i < tp->mtable->leafcount; i++) | ||
404 | if (tp->mtable->mleaf[i].media == looking_for) { | ||
405 | dev_info(&dev->dev, | ||
406 | "Using EEPROM-set media %s\n", | ||
407 | medianame[looking_for]); | ||
408 | goto media_picked; | ||
409 | } | ||
410 | } | ||
411 | /* Start sensing first non-full-duplex media. */ | ||
412 | for (i = tp->mtable->leafcount - 1; | ||
413 | (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--) | ||
414 | ; | ||
415 | media_picked: | ||
416 | |||
417 | tp->csr6 = 0; | ||
418 | tp->cur_index = i; | ||
419 | tp->nwayset = 0; | ||
420 | |||
421 | if (dev->if_port) { | ||
422 | if (tp->chip_id == DC21143 && | ||
423 | (tulip_media_cap[dev->if_port] & MediaIsMII)) { | ||
424 | /* We must reset the media CSRs when we force-select MII mode. */ | ||
425 | iowrite32(0x0000, ioaddr + CSR13); | ||
426 | iowrite32(0x0000, ioaddr + CSR14); | ||
427 | iowrite32(0x0008, ioaddr + CSR15); | ||
428 | } | ||
429 | tulip_select_media(dev, 1); | ||
430 | } else if (tp->chip_id == DC21142) { | ||
431 | if (tp->mii_cnt) { | ||
432 | tulip_select_media(dev, 1); | ||
433 | if (tulip_debug > 1) | ||
434 | dev_info(&dev->dev, | ||
435 | "Using MII transceiver %d, status %04x\n", | ||
436 | tp->phys[0], | ||
437 | tulip_mdio_read(dev, tp->phys[0], 1)); | ||
438 | iowrite32(csr6_mask_defstate, ioaddr + CSR6); | ||
439 | tp->csr6 = csr6_mask_hdcap; | ||
440 | dev->if_port = 11; | ||
441 | iowrite32(0x0000, ioaddr + CSR13); | ||
442 | iowrite32(0x0000, ioaddr + CSR14); | ||
443 | } else | ||
444 | t21142_start_nway(dev); | ||
445 | } else if (tp->chip_id == PNIC2) { | ||
446 | /* for initial startup advertise 10/100 Full and Half */ | ||
447 | tp->sym_advertise = 0x01E0; | ||
448 | /* enable autonegotiate end interrupt */ | ||
449 | iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5); | ||
450 | iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7); | ||
451 | pnic2_start_nway(dev); | ||
452 | } else if (tp->chip_id == LC82C168 && ! tp->medialock) { | ||
453 | if (tp->mii_cnt) { | ||
454 | dev->if_port = 11; | ||
455 | tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0); | ||
456 | iowrite32(0x0001, ioaddr + CSR15); | ||
457 | } else if (ioread32(ioaddr + CSR5) & TPLnkPass) | ||
458 | pnic_do_nway(dev); | ||
459 | else { | ||
460 | /* Start with 10mbps to do autonegotiation. */ | ||
461 | iowrite32(0x32, ioaddr + CSR12); | ||
462 | tp->csr6 = 0x00420000; | ||
463 | iowrite32(0x0001B078, ioaddr + 0xB8); | ||
464 | iowrite32(0x0201B078, ioaddr + 0xB8); | ||
465 | next_tick = 1*HZ; | ||
466 | } | ||
467 | } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) && | ||
468 | ! tp->medialock) { | ||
469 | dev->if_port = 0; | ||
470 | tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0); | ||
471 | iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); | ||
472 | } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) { | ||
473 | /* Provided by BOLO, Macronix - 12/10/1998. */ | ||
474 | dev->if_port = 0; | ||
475 | tp->csr6 = 0x01a80200; | ||
476 | iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80); | ||
477 | iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0); | ||
478 | } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) { | ||
479 | /* Enable automatic Tx underrun recovery. */ | ||
480 | iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88); | ||
481 | dev->if_port = tp->mii_cnt ? 11 : 0; | ||
482 | tp->csr6 = 0x00040000; | ||
483 | } else if (tp->chip_id == AX88140) { | ||
484 | tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100; | ||
485 | } else | ||
486 | tulip_select_media(dev, 1); | ||
487 | |||
488 | /* Start the chip's Tx to process setup frame. */ | ||
489 | tulip_stop_rxtx(tp); | ||
490 | barrier(); | ||
491 | udelay(5); | ||
492 | iowrite32(tp->csr6 | TxOn, ioaddr + CSR6); | ||
493 | |||
494 | /* Enable interrupts by setting the interrupt mask. */ | ||
495 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5); | ||
496 | iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7); | ||
497 | tulip_start_rxtx(tp); | ||
498 | iowrite32(0, ioaddr + CSR2); /* Rx poll demand */ | ||
499 | |||
500 | if (tulip_debug > 2) { | ||
501 | netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n", | ||
502 | ioread32(ioaddr + CSR0), | ||
503 | ioread32(ioaddr + CSR5), | ||
504 | ioread32(ioaddr + CSR6)); | ||
505 | } | ||
506 | |||
507 | /* Set the timer to switch to check for link beat and perhaps switch | ||
508 | to an alternate media type. */ | ||
509 | tp->timer.expires = RUN_AT(next_tick); | ||
510 | add_timer(&tp->timer); | ||
511 | #ifdef CONFIG_TULIP_NAPI | ||
512 | init_timer(&tp->oom_timer); | ||
513 | tp->oom_timer.data = (unsigned long)dev; | ||
514 | tp->oom_timer.function = oom_timer; | ||
515 | #endif | ||
516 | } | ||
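Annotation, not part of the patch: the setup-frame fill in tulip_up() above is easier to see in isolation. Below is a hypothetical userspace helper (demo_* name invented, endianness fixed to little-endian for clarity) that lays the frame out the same way: 16 slots of 12 bytes, each slot holding the three 16-bit words of one MAC address with every word written twice (only the low half of each 32-bit longword is significant to the chip), and all slots pre-filled with the broadcast address, which the 21140 requires anyway.

/* Illustration only: build a 21x4x perfect-filter setup frame. */
#include <stdint.h>
#include <string.h>

static void demo_build_setup_frame(uint16_t setup_frame[96],
                                   const uint8_t mac[6])
{
	uint16_t *p = &setup_frame[15 * 6];	/* last of the 16 slots */
	int i;

	/* Fill every slot with ff:ff:ff:ff:ff:ff (broadcast). */
	memset(setup_frame, 0xff, 96 * sizeof(uint16_t));

	/* Put our own address in the final slot, each word written twice. */
	for (i = 0; i < 3; i++) {
		uint16_t w = mac[2 * i] | (uint16_t)(mac[2 * i + 1] << 8);
		*p++ = w;
		*p++ = w;
	}
}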
517 | |||
518 | static int | ||
519 | tulip_open(struct net_device *dev) | ||
520 | { | ||
521 | int retval; | ||
522 | |||
523 | tulip_init_ring (dev); | ||
524 | |||
525 | retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); | ||
526 | if (retval) | ||
527 | goto free_ring; | ||
528 | |||
529 | tulip_up (dev); | ||
530 | |||
531 | netif_start_queue (dev); | ||
532 | |||
533 | return 0; | ||
534 | |||
535 | free_ring: | ||
536 | tulip_free_ring (dev); | ||
537 | return retval; | ||
538 | } | ||
539 | |||
540 | |||
541 | static void tulip_tx_timeout(struct net_device *dev) | ||
542 | { | ||
543 | struct tulip_private *tp = netdev_priv(dev); | ||
544 | void __iomem *ioaddr = tp->base_addr; | ||
545 | unsigned long flags; | ||
546 | |||
547 | spin_lock_irqsave (&tp->lock, flags); | ||
548 | |||
549 | if (tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
550 | /* Do nothing -- the media monitor should handle this. */ | ||
551 | if (tulip_debug > 1) | ||
552 | dev_warn(&dev->dev, | ||
553 | "Transmit timeout using MII device\n"); | ||
554 | } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 || | ||
555 | tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 || | ||
556 | tp->chip_id == DM910X) { | ||
557 | dev_warn(&dev->dev, | ||
558 | "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n", | ||
559 | ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12), | ||
560 | ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), | ||
561 | ioread32(ioaddr + CSR15)); | ||
562 | tp->timeout_recovery = 1; | ||
563 | schedule_work(&tp->media_work); | ||
564 | goto out_unlock; | ||
565 | } else if (tp->chip_id == PNIC2) { | ||
566 | dev_warn(&dev->dev, | ||
567 | "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n", | ||
568 | (int)ioread32(ioaddr + CSR5), | ||
569 | (int)ioread32(ioaddr + CSR6), | ||
570 | (int)ioread32(ioaddr + CSR7), | ||
571 | (int)ioread32(ioaddr + CSR12)); | ||
572 | } else { | ||
573 | dev_warn(&dev->dev, | ||
574 | "Transmit timed out, status %08x, CSR12 %08x, resetting...\n", | ||
575 | ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12)); | ||
576 | dev->if_port = 0; | ||
577 | } | ||
578 | |||
579 | #if defined(way_too_many_messages) | ||
580 | if (tulip_debug > 3) { | ||
581 | int i; | ||
582 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
583 | u8 *buf = (u8 *)(tp->rx_ring[i].buffer1); | ||
584 | int j; | ||
585 | printk(KERN_DEBUG | ||
586 | "%2d: %08x %08x %08x %08x %02x %02x %02x\n", | ||
587 | i, | ||
588 | (unsigned int)tp->rx_ring[i].status, | ||
589 | (unsigned int)tp->rx_ring[i].length, | ||
590 | (unsigned int)tp->rx_ring[i].buffer1, | ||
591 | (unsigned int)tp->rx_ring[i].buffer2, | ||
592 | buf[0], buf[1], buf[2]); | ||
593 | for (j = 0; buf[j] != 0xee && j < 1600; j++) | ||
594 | if (j < 100) | ||
595 | pr_cont(" %02x", buf[j]); | ||
596 | pr_cont(" j=%d\n", j); | ||
597 | } | ||
598 | printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring); | ||
599 | for (i = 0; i < RX_RING_SIZE; i++) | ||
600 | pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status); | ||
601 | printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring); | ||
602 | for (i = 0; i < TX_RING_SIZE; i++) | ||
603 | pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status); | ||
604 | pr_cont("\n"); | ||
605 | } | ||
606 | #endif | ||
607 | |||
608 | tulip_tx_timeout_complete(tp, ioaddr); | ||
609 | |||
610 | out_unlock: | ||
611 | spin_unlock_irqrestore (&tp->lock, flags); | ||
612 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
613 | netif_wake_queue (dev); | ||
614 | } | ||
615 | |||
616 | |||
617 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | ||
618 | static void tulip_init_ring(struct net_device *dev) | ||
619 | { | ||
620 | struct tulip_private *tp = netdev_priv(dev); | ||
621 | int i; | ||
622 | |||
623 | tp->susp_rx = 0; | ||
624 | tp->ttimer = 0; | ||
625 | tp->nir = 0; | ||
626 | |||
627 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
628 | tp->rx_ring[i].status = 0x00000000; | ||
629 | tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ); | ||
630 | tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1)); | ||
631 | tp->rx_buffers[i].skb = NULL; | ||
632 | tp->rx_buffers[i].mapping = 0; | ||
633 | } | ||
634 | /* Mark the last entry as wrapping the ring. */ | ||
635 | tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP); | ||
636 | tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma); | ||
637 | |||
638 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
639 | dma_addr_t mapping; | ||
640 | |||
641 | /* Note the receive buffer must be longword aligned. | ||
642 | dev_alloc_skb() provides 16 byte alignment. But do *not* | ||
643 | use skb_reserve() to align the IP header! */ | ||
644 | struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); | ||
645 | tp->rx_buffers[i].skb = skb; | ||
646 | if (skb == NULL) | ||
647 | break; | ||
648 | mapping = pci_map_single(tp->pdev, skb->data, | ||
649 | PKT_BUF_SZ, PCI_DMA_FROMDEVICE); | ||
650 | tp->rx_buffers[i].mapping = mapping; | ||
651 | skb->dev = dev; /* Mark as being used by this device. */ | ||
652 | tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */ | ||
653 | tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); | ||
654 | } | ||
655 | tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | ||
656 | |||
657 | /* The Tx buffer descriptor is filled in as needed, but we | ||
658 | do need to clear the ownership bit. */ | ||
659 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
660 | tp->tx_buffers[i].skb = NULL; | ||
661 | tp->tx_buffers[i].mapping = 0; | ||
662 | tp->tx_ring[i].status = 0x00000000; | ||
663 | tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1)); | ||
664 | } | ||
665 | tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma); | ||
666 | } | ||
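Annotation, not part of the patch: the Rx-ring wiring done by tulip_init_ring() above follows the dual ring/chained convention described at DESC_RING_WRAP in tulip.h. A simplified sketch (demo_* names invented; cpu_to_le32 conversion and DMA mapping omitted): every buffer2 chains to the next descriptor's bus address, and the last descriptor both sets the wrap flag and chains back to the start.

/* Illustration only: link a descriptor ring for both ring and chained mode. */
#include <stdint.h>

#define DEMO_RING_SIZE 128
#define DEMO_DESC_RING_WRAP 0x02000000u
#define DEMO_PKT_BUF_SZ (1536 + 4)

struct demo_desc {
	uint32_t status;
	uint32_t length;
	uint32_t buffer1;
	uint32_t buffer2;
};

static void demo_chain_ring(struct demo_desc *ring, uint32_t ring_bus_addr)
{
	int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		ring[i].status = 0;
		ring[i].length = DEMO_PKT_BUF_SZ;
		/* Chained mode: point buffer2 at the next descriptor. */
		ring[i].buffer2 = ring_bus_addr +
				  sizeof(struct demo_desc) * (i + 1);
	}
	/* Last entry: wrap flag for ring mode, chain back for chained mode. */
	ring[DEMO_RING_SIZE - 1].length = DEMO_PKT_BUF_SZ | DEMO_DESC_RING_WRAP;
	ring[DEMO_RING_SIZE - 1].buffer2 = ring_bus_addr;
}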
667 | |||
668 | static netdev_tx_t | ||
669 | tulip_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
670 | { | ||
671 | struct tulip_private *tp = netdev_priv(dev); | ||
672 | int entry; | ||
673 | u32 flag; | ||
674 | dma_addr_t mapping; | ||
675 | unsigned long flags; | ||
676 | |||
677 | spin_lock_irqsave(&tp->lock, flags); | ||
678 | |||
679 | /* Calculate the next Tx descriptor entry. */ | ||
680 | entry = tp->cur_tx % TX_RING_SIZE; | ||
681 | |||
682 | tp->tx_buffers[entry].skb = skb; | ||
683 | mapping = pci_map_single(tp->pdev, skb->data, | ||
684 | skb->len, PCI_DMA_TODEVICE); | ||
685 | tp->tx_buffers[entry].mapping = mapping; | ||
686 | tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping); | ||
687 | |||
688 | if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */ | ||
689 | flag = 0x60000000; /* No interrupt */ | ||
690 | } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) { | ||
691 | flag = 0xe0000000; /* Tx-done intr. */ | ||
692 | } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) { | ||
693 | flag = 0x60000000; /* No Tx-done intr. */ | ||
694 | } else { /* Leave room for set_rx_mode() to fill entries. */ | ||
695 | flag = 0xe0000000; /* Tx-done intr. */ | ||
696 | netif_stop_queue(dev); | ||
697 | } | ||
698 | if (entry == TX_RING_SIZE-1) | ||
699 | flag = 0xe0000000 | DESC_RING_WRAP; | ||
700 | |||
701 | tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag); | ||
702 | /* if we were using Transmit Automatic Polling, we would need a | ||
703 | * wmb() here. */ | ||
704 | tp->tx_ring[entry].status = cpu_to_le32(DescOwned); | ||
705 | wmb(); | ||
706 | |||
707 | tp->cur_tx++; | ||
708 | |||
709 | /* Trigger an immediate transmit demand. */ | ||
710 | iowrite32(0, tp->base_addr + CSR1); | ||
711 | |||
712 | spin_unlock_irqrestore(&tp->lock, flags); | ||
713 | |||
714 | return NETDEV_TX_OK; | ||
715 | } | ||
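Annotation, not part of the patch: a stand-alone sketch (demo_* names invented) of the descriptor-flag ladder in tulip_start_xmit() above. A Tx-done interrupt is requested only when the ring reaches the half-full mark or is nearly exhausted; the last two slots are kept free for set_rx_mode()'s setup frame, at which point the queue is stopped, and the final ring entry additionally carries DESC_RING_WRAP.

/* Illustration only: pick the per-descriptor control bits from ring occupancy.
 * 0x60000000 marks a whole (first+last segment) packet with no interrupt;
 * 0xe0000000 additionally requests a Tx-done interrupt.
 */
#include <stdint.h>
#include <stdbool.h>

#define DEMO_TX_RING_SIZE 32
#define DEMO_DESC_RING_WRAP 0x02000000u

static uint32_t demo_tx_flag(unsigned int cur_tx, unsigned int dirty_tx,
			     unsigned int entry, bool *stop_queue)
{
	unsigned int in_flight = cur_tx - dirty_tx;
	uint32_t flag;

	*stop_queue = false;
	if (in_flight < DEMO_TX_RING_SIZE / 2)
		flag = 0x60000000;		/* typical path, no interrupt */
	else if (in_flight == DEMO_TX_RING_SIZE / 2)
		flag = 0xe0000000;		/* ask for a Tx-done interrupt */
	else if (in_flight < DEMO_TX_RING_SIZE - 2)
		flag = 0x60000000;		/* no Tx-done interrupt */
	else {
		flag = 0xe0000000;		/* nearly full: interrupt... */
		*stop_queue = true;		/* ...and stop the queue */
	}
	if (entry == DEMO_TX_RING_SIZE - 1)
		flag = 0xe0000000 | DEMO_DESC_RING_WRAP;
	return flag;
}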
716 | |||
717 | static void tulip_clean_tx_ring(struct tulip_private *tp) | ||
718 | { | ||
719 | unsigned int dirty_tx; | ||
720 | |||
721 | for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0; | ||
722 | dirty_tx++) { | ||
723 | int entry = dirty_tx % TX_RING_SIZE; | ||
724 | int status = le32_to_cpu(tp->tx_ring[entry].status); | ||
725 | |||
726 | if (status < 0) { | ||
727 | tp->dev->stats.tx_errors++; /* It wasn't Txed */ | ||
728 | tp->tx_ring[entry].status = 0; | ||
729 | } | ||
730 | |||
731 | /* Check for Tx filter setup frames. */ | ||
732 | if (tp->tx_buffers[entry].skb == NULL) { | ||
733 | /* test because dummy frames not mapped */ | ||
734 | if (tp->tx_buffers[entry].mapping) | ||
735 | pci_unmap_single(tp->pdev, | ||
736 | tp->tx_buffers[entry].mapping, | ||
737 | sizeof(tp->setup_frame), | ||
738 | PCI_DMA_TODEVICE); | ||
739 | continue; | ||
740 | } | ||
741 | |||
742 | pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, | ||
743 | tp->tx_buffers[entry].skb->len, | ||
744 | PCI_DMA_TODEVICE); | ||
745 | |||
746 | /* Free the original skb. */ | ||
747 | dev_kfree_skb_irq(tp->tx_buffers[entry].skb); | ||
748 | tp->tx_buffers[entry].skb = NULL; | ||
749 | tp->tx_buffers[entry].mapping = 0; | ||
750 | } | ||
751 | } | ||
752 | |||
753 | static void tulip_down (struct net_device *dev) | ||
754 | { | ||
755 | struct tulip_private *tp = netdev_priv(dev); | ||
756 | void __iomem *ioaddr = tp->base_addr; | ||
757 | unsigned long flags; | ||
758 | |||
759 | cancel_work_sync(&tp->media_work); | ||
760 | |||
761 | #ifdef CONFIG_TULIP_NAPI | ||
762 | napi_disable(&tp->napi); | ||
763 | #endif | ||
764 | |||
765 | del_timer_sync (&tp->timer); | ||
766 | #ifdef CONFIG_TULIP_NAPI | ||
767 | del_timer_sync (&tp->oom_timer); | ||
768 | #endif | ||
769 | spin_lock_irqsave (&tp->lock, flags); | ||
770 | |||
771 | /* Disable interrupts by clearing the interrupt mask. */ | ||
772 | iowrite32 (0x00000000, ioaddr + CSR7); | ||
773 | |||
774 | /* Stop the Tx and Rx processes. */ | ||
775 | tulip_stop_rxtx(tp); | ||
776 | |||
777 | /* prepare receive buffers */ | ||
778 | tulip_refill_rx(dev); | ||
779 | |||
780 | /* release any unconsumed transmit buffers */ | ||
781 | tulip_clean_tx_ring(tp); | ||
782 | |||
783 | if (ioread32(ioaddr + CSR6) != 0xffffffff) | ||
784 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; | ||
785 | |||
786 | spin_unlock_irqrestore (&tp->lock, flags); | ||
787 | |||
788 | init_timer(&tp->timer); | ||
789 | tp->timer.data = (unsigned long)dev; | ||
790 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; | ||
791 | |||
792 | dev->if_port = tp->saved_if_port; | ||
793 | |||
794 | /* Leave the driver in snooze, not sleep, mode. */ | ||
795 | tulip_set_power_state (tp, 0, 1); | ||
796 | } | ||
797 | |||
798 | static void tulip_free_ring (struct net_device *dev) | ||
799 | { | ||
800 | struct tulip_private *tp = netdev_priv(dev); | ||
801 | int i; | ||
802 | |||
803 | /* Free all the skbuffs in the Rx queue. */ | ||
804 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
805 | struct sk_buff *skb = tp->rx_buffers[i].skb; | ||
806 | dma_addr_t mapping = tp->rx_buffers[i].mapping; | ||
807 | |||
808 | tp->rx_buffers[i].skb = NULL; | ||
809 | tp->rx_buffers[i].mapping = 0; | ||
810 | |||
811 | tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */ | ||
812 | tp->rx_ring[i].length = 0; | ||
813 | /* An invalid address. */ | ||
814 | tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0); | ||
815 | if (skb) { | ||
816 | pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ, | ||
817 | PCI_DMA_FROMDEVICE); | ||
818 | dev_kfree_skb (skb); | ||
819 | } | ||
820 | } | ||
821 | |||
822 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
823 | struct sk_buff *skb = tp->tx_buffers[i].skb; | ||
824 | |||
825 | if (skb != NULL) { | ||
826 | pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping, | ||
827 | skb->len, PCI_DMA_TODEVICE); | ||
828 | dev_kfree_skb (skb); | ||
829 | } | ||
830 | tp->tx_buffers[i].skb = NULL; | ||
831 | tp->tx_buffers[i].mapping = 0; | ||
832 | } | ||
833 | } | ||
834 | |||
835 | static int tulip_close (struct net_device *dev) | ||
836 | { | ||
837 | struct tulip_private *tp = netdev_priv(dev); | ||
838 | void __iomem *ioaddr = tp->base_addr; | ||
839 | |||
840 | netif_stop_queue (dev); | ||
841 | |||
842 | tulip_down (dev); | ||
843 | |||
844 | if (tulip_debug > 1) | ||
845 | netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", | ||
846 | ioread32 (ioaddr + CSR5)); | ||
847 | |||
848 | free_irq (dev->irq, dev); | ||
849 | |||
850 | tulip_free_ring (dev); | ||
851 | |||
852 | return 0; | ||
853 | } | ||
854 | |||
855 | static struct net_device_stats *tulip_get_stats(struct net_device *dev) | ||
856 | { | ||
857 | struct tulip_private *tp = netdev_priv(dev); | ||
858 | void __iomem *ioaddr = tp->base_addr; | ||
859 | |||
860 | if (netif_running(dev)) { | ||
861 | unsigned long flags; | ||
862 | |||
863 | spin_lock_irqsave (&tp->lock, flags); | ||
864 | |||
865 | dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff; | ||
866 | |||
867 | spin_unlock_irqrestore(&tp->lock, flags); | ||
868 | } | ||
869 | |||
870 | return &dev->stats; | ||
871 | } | ||
872 | |||
873 | |||
874 | static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | ||
875 | { | ||
876 | struct tulip_private *np = netdev_priv(dev); | ||
877 | strcpy(info->driver, DRV_NAME); | ||
878 | strcpy(info->version, DRV_VERSION); | ||
879 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
880 | } | ||
881 | |||
882 | |||
883 | static int tulip_ethtool_set_wol(struct net_device *dev, | ||
884 | struct ethtool_wolinfo *wolinfo) | ||
885 | { | ||
886 | struct tulip_private *tp = netdev_priv(dev); | ||
887 | |||
888 | if (wolinfo->wolopts & (~tp->wolinfo.supported)) | ||
889 | return -EOPNOTSUPP; | ||
890 | |||
891 | tp->wolinfo.wolopts = wolinfo->wolopts; | ||
892 | device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts); | ||
893 | return 0; | ||
894 | } | ||
895 | |||
896 | static void tulip_ethtool_get_wol(struct net_device *dev, | ||
897 | struct ethtool_wolinfo *wolinfo) | ||
898 | { | ||
899 | struct tulip_private *tp = netdev_priv(dev); | ||
900 | |||
901 | wolinfo->supported = tp->wolinfo.supported; | ||
902 | wolinfo->wolopts = tp->wolinfo.wolopts; | ||
903 | return; | ||
904 | } | ||
905 | |||
906 | |||
907 | static const struct ethtool_ops ops = { | ||
908 | .get_drvinfo = tulip_get_drvinfo, | ||
909 | .set_wol = tulip_ethtool_set_wol, | ||
910 | .get_wol = tulip_ethtool_get_wol, | ||
911 | }; | ||
912 | |||
913 | /* Provide ioctl() calls to examine the MII xcvr state. */ | ||
914 | static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) | ||
915 | { | ||
916 | struct tulip_private *tp = netdev_priv(dev); | ||
917 | void __iomem *ioaddr = tp->base_addr; | ||
918 | struct mii_ioctl_data *data = if_mii(rq); | ||
919 | const unsigned int phy_idx = 0; | ||
920 | int phy = tp->phys[phy_idx] & 0x1f; | ||
921 | unsigned int regnum = data->reg_num; | ||
922 | |||
923 | switch (cmd) { | ||
924 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | ||
925 | if (tp->mii_cnt) | ||
926 | data->phy_id = phy; | ||
927 | else if (tp->flags & HAS_NWAY) | ||
928 | data->phy_id = 32; | ||
929 | else if (tp->chip_id == COMET) | ||
930 | data->phy_id = 1; | ||
931 | else | ||
932 | return -ENODEV; | ||
933 | |||
934 | case SIOCGMIIREG: /* Read MII PHY register. */ | ||
935 | if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { | ||
936 | int csr12 = ioread32 (ioaddr + CSR12); | ||
937 | int csr14 = ioread32 (ioaddr + CSR14); | ||
938 | switch (regnum) { | ||
939 | case 0: | ||
940 | if (((csr14<<5) & 0x1000) || | ||
941 | (dev->if_port == 5 && tp->nwayset)) | ||
942 | data->val_out = 0x1000; | ||
943 | else | ||
944 | data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0) | ||
945 | | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0); | ||
946 | break; | ||
947 | case 1: | ||
948 | data->val_out = | ||
949 | 0x1848 + | ||
950 | ((csr12&0x7000) == 0x5000 ? 0x20 : 0) + | ||
951 | ((csr12&0x06) == 6 ? 0 : 4); | ||
952 | data->val_out |= 0x6048; | ||
953 | break; | ||
954 | case 4: | ||
955 | /* Advertised value, bogus 10baseTx-FD value from CSR6. */ | ||
956 | data->val_out = | ||
957 | ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) + | ||
958 | ((csr14 >> 1) & 0x20) + 1; | ||
959 | data->val_out |= ((csr14 >> 9) & 0x03C0); | ||
960 | break; | ||
961 | case 5: data->val_out = tp->lpar; break; | ||
962 | default: data->val_out = 0; break; | ||
963 | } | ||
964 | } else { | ||
965 | data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum); | ||
966 | } | ||
967 | return 0; | ||
968 | |||
969 | case SIOCSMIIREG: /* Write MII PHY register. */ | ||
970 | if (regnum & ~0x1f) | ||
971 | return -EINVAL; | ||
972 | if (data->phy_id == phy) { | ||
973 | u16 value = data->val_in; | ||
974 | switch (regnum) { | ||
975 | case 0: /* Check for autonegotiation on or reset. */ | ||
976 | tp->full_duplex_lock = (value & 0x9000) ? 0 : 1; | ||
977 | if (tp->full_duplex_lock) | ||
978 | tp->full_duplex = (value & 0x0100) ? 1 : 0; | ||
979 | break; | ||
980 | case 4: | ||
981 | tp->advertising[phy_idx] = | ||
982 | tp->mii_advertise = data->val_in; | ||
983 | break; | ||
984 | } | ||
985 | } | ||
986 | if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) { | ||
987 | u16 value = data->val_in; | ||
988 | if (regnum == 0) { | ||
989 | if ((value & 0x1200) == 0x1200) { | ||
990 | if (tp->chip_id == PNIC2) { | ||
991 | pnic2_start_nway (dev); | ||
992 | } else { | ||
993 | t21142_start_nway (dev); | ||
994 | } | ||
995 | } | ||
996 | } else if (regnum == 4) | ||
997 | tp->sym_advertise = value; | ||
998 | } else { | ||
999 | tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in); | ||
1000 | } | ||
1001 | return 0; | ||
1002 | default: | ||
1003 | return -EOPNOTSUPP; | ||
1004 | } | ||
1005 | |||
1006 | return -EOPNOTSUPP; | ||
1007 | } | ||
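/*
 * Editorial note, not part of the commit: the SIOCGMIIPHY case above
 * intentionally falls through into SIOCGMIIREG, so that asking for the PHY
 * address also returns the contents of the requested register -- the usual
 * convention for MII ioctls of this era (compare mii_ioctl in
 * drivers/net/mii.c).  Later kernels would make the intent explicit with an
 * annotation along these lines (purely illustrative):
 *
 *		else
 *			return -ENODEV;
 *		fallthrough;		// or a "fall through" comment
 *	case SIOCGMIIREG:
 */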
1008 | |||
1009 | |||
1010 | /* Set or clear the multicast filter for this adaptor. | ||
1011 | Note that we only use exclusion around actually queueing the | ||
1012 | new frame, not around filling tp->setup_frame. This is non-deterministic | ||
1013 | when re-entered but still correct. */ | ||
1014 | |||
1015 | #undef set_bit_le | ||
1016 | #define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0) | ||
1017 | |||
1018 | static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev) | ||
1019 | { | ||
1020 | struct tulip_private *tp = netdev_priv(dev); | ||
1021 | u16 hash_table[32]; | ||
1022 | struct netdev_hw_addr *ha; | ||
1023 | int i; | ||
1024 | u16 *eaddrs; | ||
1025 | |||
1026 | memset(hash_table, 0, sizeof(hash_table)); | ||
1027 | set_bit_le(255, hash_table); /* Broadcast entry */ | ||
1028 | /* This should work on big-endian machines as well. */ | ||
1029 | netdev_for_each_mc_addr(ha, dev) { | ||
1030 | int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff; | ||
1031 | |||
1032 | set_bit_le(index, hash_table); | ||
1033 | } | ||
1034 | for (i = 0; i < 32; i++) { | ||
1035 | *setup_frm++ = hash_table[i]; | ||
1036 | *setup_frm++ = hash_table[i]; | ||
1037 | } | ||
1038 | setup_frm = &tp->setup_frame[13*6]; | ||
1039 | |||
1040 | /* Fill the final entry with our physical address. */ | ||
1041 | eaddrs = (u16 *)dev->dev_addr; | ||
1042 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; | ||
1043 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; | ||
1044 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; | ||
1045 | } | ||
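/*
 * Editorial sketch, not part of the commit: the imperfect filter built above
 * is a 512-bin hash.  Each multicast address is reduced to a 9-bit bucket
 * index with ether_crc_le(ETH_ALEN, addr) & 0x1ff, and set_bit_le() sets
 * that bit in the 64-byte hash_table[] in little-endian bit order.  Each
 * 16-bit word is then written twice because only the low-address shortword
 * of every 32-bit setup-frame slot is used by the chip; duplicating keeps
 * the frame correct on big-endian machines as well.  The core of the bucket
 * computation, in isolation (function name is illustrative):
 */
static void example_hash_bucket(const u8 *mc_addr, u8 *hash_table /* 64 bytes */)
{
	int index = ether_crc_le(ETH_ALEN, mc_addr) & 0x1ff;	/* 0..511 */

	hash_table[index / 8] |= 1 << (index % 8);		/* set_bit_le() */
}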
1046 | |||
1047 | static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) | ||
1048 | { | ||
1049 | struct tulip_private *tp = netdev_priv(dev); | ||
1050 | struct netdev_hw_addr *ha; | ||
1051 | u16 *eaddrs; | ||
1052 | |||
1053 | /* We have <= 14 addresses so we can use the wonderful | ||
1054 | 16 address perfect filtering of the Tulip. */ | ||
1055 | netdev_for_each_mc_addr(ha, dev) { | ||
1056 | eaddrs = (u16 *) ha->addr; | ||
1057 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
1058 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
1059 | *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; | ||
1060 | } | ||
1061 | /* Fill the unused entries with the broadcast address. */ | ||
1062 | memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12); | ||
1063 | setup_frm = &tp->setup_frame[15*6]; | ||
1064 | |||
1065 | /* Fill the final entry with our physical address. */ | ||
1066 | eaddrs = (u16 *)dev->dev_addr; | ||
1067 | *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0]; | ||
1068 | *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1]; | ||
1069 | *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2]; | ||
1070 | } | ||
1071 | |||
1072 | |||
1073 | static void set_rx_mode(struct net_device *dev) | ||
1074 | { | ||
1075 | struct tulip_private *tp = netdev_priv(dev); | ||
1076 | void __iomem *ioaddr = tp->base_addr; | ||
1077 | int csr6; | ||
1078 | |||
1079 | csr6 = ioread32(ioaddr + CSR6) & ~0x00D5; | ||
1080 | |||
1081 | tp->csr6 &= ~0x00D5; | ||
1082 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | ||
1083 | tp->csr6 |= AcceptAllMulticast | AcceptAllPhys; | ||
1084 | csr6 |= AcceptAllMulticast | AcceptAllPhys; | ||
1085 | } else if ((netdev_mc_count(dev) > 1000) || | ||
1086 | (dev->flags & IFF_ALLMULTI)) { | ||
1087 | /* Too many to filter well -- accept all multicasts. */ | ||
1088 | tp->csr6 |= AcceptAllMulticast; | ||
1089 | csr6 |= AcceptAllMulticast; | ||
1090 | } else if (tp->flags & MC_HASH_ONLY) { | ||
1091 | /* Some work-alikes have only a 64-entry hash filter table. */ | ||
1092 | /* Should verify correctness on big-endian/__powerpc__ */ | ||
1093 | struct netdev_hw_addr *ha; | ||
1094 | if (netdev_mc_count(dev) > 64) { | ||
1095 | /* Arbitrary non-effective limit. */ | ||
1096 | tp->csr6 |= AcceptAllMulticast; | ||
1097 | csr6 |= AcceptAllMulticast; | ||
1098 | } else { | ||
1099 | u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ | ||
1100 | int filterbit; | ||
1101 | netdev_for_each_mc_addr(ha, dev) { | ||
1102 | if (tp->flags & COMET_MAC_ADDR) | ||
1103 | filterbit = ether_crc_le(ETH_ALEN, | ||
1104 | ha->addr); | ||
1105 | else | ||
1106 | filterbit = ether_crc(ETH_ALEN, | ||
1107 | ha->addr) >> 26; | ||
1108 | filterbit &= 0x3f; | ||
1109 | mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); | ||
1110 | if (tulip_debug > 2) | ||
1111 | dev_info(&dev->dev, | ||
1112 | "Added filter for %pM %08x bit %d\n", | ||
1113 | ha->addr, | ||
1114 | ether_crc(ETH_ALEN, ha->addr), | ||
1115 | filterbit); | ||
1116 | } | ||
1117 | if (mc_filter[0] == tp->mc_filter[0] && | ||
1118 | mc_filter[1] == tp->mc_filter[1]) | ||
1119 | ; /* No change. */ | ||
1120 | else if (tp->flags & IS_ASIX) { | ||
1121 | iowrite32(2, ioaddr + CSR13); | ||
1122 | iowrite32(mc_filter[0], ioaddr + CSR14); | ||
1123 | iowrite32(3, ioaddr + CSR13); | ||
1124 | iowrite32(mc_filter[1], ioaddr + CSR14); | ||
1125 | } else if (tp->flags & COMET_MAC_ADDR) { | ||
1126 | iowrite32(mc_filter[0], ioaddr + CSR27); | ||
1127 | iowrite32(mc_filter[1], ioaddr + CSR28); | ||
1128 | } | ||
1129 | tp->mc_filter[0] = mc_filter[0]; | ||
1130 | tp->mc_filter[1] = mc_filter[1]; | ||
1131 | } | ||
1132 | } else { | ||
1133 | unsigned long flags; | ||
1134 | u32 tx_flags = 0x08000000 | 192; | ||
1135 | |||
1136 | /* Note that only the low-address shortword of setup_frame is valid! | ||
1137 | The values are doubled for big-endian architectures. */ | ||
1138 | if (netdev_mc_count(dev) > 14) { | ||
1139 | /* Must use a multicast hash table. */ | ||
1140 | build_setup_frame_hash(tp->setup_frame, dev); | ||
1141 | tx_flags = 0x08400000 | 192; | ||
1142 | } else { | ||
1143 | build_setup_frame_perfect(tp->setup_frame, dev); | ||
1144 | } | ||
1145 | |||
1146 | spin_lock_irqsave(&tp->lock, flags); | ||
1147 | |||
1148 | if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) { | ||
1149 | /* Same setup recently queued, we need not add it. */ | ||
1150 | } else { | ||
1151 | unsigned int entry; | ||
1152 | int dummy = -1; | ||
1153 | |||
1154 | /* Now add this frame to the Tx list. */ | ||
1155 | |||
1156 | entry = tp->cur_tx++ % TX_RING_SIZE; | ||
1157 | |||
1158 | if (entry != 0) { | ||
1159 | /* Avoid a chip erratum by prefixing a dummy entry. */ | ||
1160 | tp->tx_buffers[entry].skb = NULL; | ||
1161 | tp->tx_buffers[entry].mapping = 0; | ||
1162 | tp->tx_ring[entry].length = | ||
1163 | (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0; | ||
1164 | tp->tx_ring[entry].buffer1 = 0; | ||
1165 | /* Must set DescOwned later to avoid race with chip */ | ||
1166 | dummy = entry; | ||
1167 | entry = tp->cur_tx++ % TX_RING_SIZE; | ||
1168 | |||
1169 | } | ||
1170 | |||
1171 | tp->tx_buffers[entry].skb = NULL; | ||
1172 | tp->tx_buffers[entry].mapping = | ||
1173 | pci_map_single(tp->pdev, tp->setup_frame, | ||
1174 | sizeof(tp->setup_frame), | ||
1175 | PCI_DMA_TODEVICE); | ||
1176 | /* Put the setup frame on the Tx list. */ | ||
1177 | if (entry == TX_RING_SIZE-1) | ||
1178 | tx_flags |= DESC_RING_WRAP; /* Wrap ring. */ | ||
1179 | tp->tx_ring[entry].length = cpu_to_le32(tx_flags); | ||
1180 | tp->tx_ring[entry].buffer1 = | ||
1181 | cpu_to_le32(tp->tx_buffers[entry].mapping); | ||
1182 | tp->tx_ring[entry].status = cpu_to_le32(DescOwned); | ||
1183 | if (dummy >= 0) | ||
1184 | tp->tx_ring[dummy].status = cpu_to_le32(DescOwned); | ||
1185 | if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) | ||
1186 | netif_stop_queue(dev); | ||
1187 | |||
1188 | /* Trigger an immediate transmit demand. */ | ||
1189 | iowrite32(0, ioaddr + CSR1); | ||
1190 | } | ||
1191 | |||
1192 | spin_unlock_irqrestore(&tp->lock, flags); | ||
1193 | } | ||
1194 | |||
1195 | iowrite32(csr6, ioaddr + CSR6); | ||
1196 | } | ||
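/*
 * Editorial note, not part of the commit: when a setup frame has to be
 * queued, the code above may first insert a dummy descriptor (the erratum
 * workaround) and only marks it DescOwned after the real setup descriptor
 * has been fully written and handed over; giving the dummy to the chip any
 * earlier could let the DMA engine race past it into a half-initialised
 * setup descriptor.  The final write of 0 to CSR1 is the usual transmit
 * poll demand that tells the chip to rescan its Tx list.
 */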
1197 | |||
1198 | #ifdef CONFIG_TULIP_MWI | ||
1199 | static void __devinit tulip_mwi_config (struct pci_dev *pdev, | ||
1200 | struct net_device *dev) | ||
1201 | { | ||
1202 | struct tulip_private *tp = netdev_priv(dev); | ||
1203 | u8 cache; | ||
1204 | u16 pci_command; | ||
1205 | u32 csr0; | ||
1206 | |||
1207 | if (tulip_debug > 3) | ||
1208 | netdev_dbg(dev, "tulip_mwi_config()\n"); | ||
1209 | |||
1210 | tp->csr0 = csr0 = 0; | ||
1211 | |||
1212 | /* if we have any cache line size at all, we can do MRM and MWI */ | ||
1213 | csr0 |= MRM | MWI; | ||
1214 | |||
1215 | /* Enable MWI in the standard PCI command bit. | ||
1216 | * Check for the case where MWI is desired but not available | ||
1217 | */ | ||
1218 | pci_try_set_mwi(pdev); | ||
1219 | |||
1220 | /* read result from hardware (in case bit refused to enable) */ | ||
1221 | pci_read_config_word(pdev, PCI_COMMAND, &pci_command); | ||
1222 | if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE))) | ||
1223 | csr0 &= ~MWI; | ||
1224 | |||
1225 | /* if cache line size hardwired to zero, no MWI */ | ||
1226 | pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); | ||
1227 | if ((csr0 & MWI) && (cache == 0)) { | ||
1228 | csr0 &= ~MWI; | ||
1229 | pci_clear_mwi(pdev); | ||
1230 | } | ||
1231 | |||
1232 | /* assign per-cacheline-size cache alignment and | ||
1233 | * burst length values | ||
1234 | */ | ||
1235 | switch (cache) { | ||
1236 | case 8: | ||
1237 | csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift); | ||
1238 | break; | ||
1239 | case 16: | ||
1240 | csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift); | ||
1241 | break; | ||
1242 | case 32: | ||
1243 | csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift); | ||
1244 | break; | ||
1245 | default: | ||
1246 | cache = 0; | ||
1247 | break; | ||
1248 | } | ||
1249 | |||
1250 | /* if we have a good cache line size, we by now have a good | ||
1251 | * csr0, so save it and exit | ||
1252 | */ | ||
1253 | if (cache) | ||
1254 | goto out; | ||
1255 | |||
1256 | /* we don't have a good csr0 or cache line size, disable MWI */ | ||
1257 | if (csr0 & MWI) { | ||
1258 | pci_clear_mwi(pdev); | ||
1259 | csr0 &= ~MWI; | ||
1260 | } | ||
1261 | |||
1262 | /* sane defaults for burst length and cache alignment | ||
1263 | * originally from de4x5 driver | ||
1264 | */ | ||
1265 | csr0 |= (8 << BurstLenShift) | (1 << CALShift); | ||
1266 | |||
1267 | out: | ||
1268 | tp->csr0 = csr0; | ||
1269 | if (tulip_debug > 2) | ||
1270 | netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n", | ||
1271 | cache, csr0); | ||
1272 | } | ||
1273 | #endif | ||
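/*
 * Editorial note, not part of the commit: the CSR0 bits assembled here are
 * the 21x4x bus-mode controls -- MRM/MRL/MWI enable the PCI Memory Read
 * Multiple, Memory Read Line and Memory Write and Invalidate commands,
 * while the CALShift and BurstLenShift fields set the cache-alignment
 * boundary and the maximum DMA burst length.  The switch above simply picks
 * matching values for the 8/16/32-longword cache line sizes reported in PCI
 * config space, and falls back to the conservative de4x5 defaults when the
 * cache line size register is unusable.
 */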
1274 | |||
1275 | /* | ||
1276 | * Chips that have the MRM/reserved-bit quirk and the burst quirk, | ||
1277 | * namely the DM910X and the on-chip ULi devices. | ||
1278 | */ | ||
1279 | |||
1280 | static int tulip_uli_dm_quirk(struct pci_dev *pdev) | ||
1281 | { | ||
1282 | if (pdev->vendor == 0x1282 && pdev->device == 0x9102) | ||
1283 | return 1; | ||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | static const struct net_device_ops tulip_netdev_ops = { | ||
1288 | .ndo_open = tulip_open, | ||
1289 | .ndo_start_xmit = tulip_start_xmit, | ||
1290 | .ndo_tx_timeout = tulip_tx_timeout, | ||
1291 | .ndo_stop = tulip_close, | ||
1292 | .ndo_get_stats = tulip_get_stats, | ||
1293 | .ndo_do_ioctl = private_ioctl, | ||
1294 | .ndo_set_multicast_list = set_rx_mode, | ||
1295 | .ndo_change_mtu = eth_change_mtu, | ||
1296 | .ndo_set_mac_address = eth_mac_addr, | ||
1297 | .ndo_validate_addr = eth_validate_addr, | ||
1298 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1299 | .ndo_poll_controller = poll_tulip, | ||
1300 | #endif | ||
1301 | }; | ||
1302 | |||
1303 | DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = { | ||
1304 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) }, | ||
1305 | { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) }, | ||
1306 | { }, | ||
1307 | }; | ||
1308 | |||
1309 | static int __devinit tulip_init_one (struct pci_dev *pdev, | ||
1310 | const struct pci_device_id *ent) | ||
1311 | { | ||
1312 | struct tulip_private *tp; | ||
1313 | /* See note below on the multiport cards. */ | ||
1314 | static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; | ||
1315 | static int last_irq; | ||
1316 | static int multiport_cnt; /* For four-port boards w/one EEPROM */ | ||
1317 | int i, irq; | ||
1318 | unsigned short sum; | ||
1319 | unsigned char *ee_data; | ||
1320 | struct net_device *dev; | ||
1321 | void __iomem *ioaddr; | ||
1322 | static int board_idx = -1; | ||
1323 | int chip_idx = ent->driver_data; | ||
1324 | const char *chip_name = tulip_tbl[chip_idx].chip_name; | ||
1325 | unsigned int eeprom_missing = 0; | ||
1326 | unsigned int force_csr0 = 0; | ||
1327 | |||
1328 | #ifndef MODULE | ||
1329 | if (tulip_debug > 0) | ||
1330 | printk_once(KERN_INFO "%s", version); | ||
1331 | #endif | ||
1332 | |||
1333 | board_idx++; | ||
1334 | |||
1335 | /* | ||
1336 | * LanMedia boards wire a tulip chip to a WAN interface and need a | ||
1337 | * very different driver (the lmc driver). | ||
1338 | */ | ||
1339 | |||
1340 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) { | ||
1341 | pr_err("skipping LMC card\n"); | ||
1342 | return -ENODEV; | ||
1343 | } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE && | ||
1344 | (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 || | ||
1345 | pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 || | ||
1346 | pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) { | ||
1347 | pr_err("skipping SBE T3E3 port\n"); | ||
1348 | return -ENODEV; | ||
1349 | } | ||
1350 | |||
1351 | /* | ||
1352 | * DM910x chips should be handled by the dmfe driver, except | ||
1353 | * on-board chips on SPARC systems. Also, early DM9100s need | ||
1354 | * software CRC which only the dmfe driver supports. | ||
1355 | */ | ||
1356 | |||
1357 | #ifdef CONFIG_TULIP_DM910X | ||
1358 | if (chip_idx == DM910X) { | ||
1359 | struct device_node *dp; | ||
1360 | |||
1361 | if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && | ||
1362 | pdev->revision < 0x30) { | ||
1363 | pr_info("skipping early DM9100 with Crc bug (use dmfe)\n"); | ||
1364 | return -ENODEV; | ||
1365 | } | ||
1366 | |||
1367 | dp = pci_device_to_OF_node(pdev); | ||
1368 | if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { | ||
1369 | pr_info("skipping DM910x expansion card (use dmfe)\n"); | ||
1370 | return -ENODEV; | ||
1371 | } | ||
1372 | } | ||
1373 | #endif | ||
1374 | |||
1375 | /* | ||
1376 | * Look for early PCI chipsets that are reported to hang | ||
1377 | * unless the workarounds are enabled. | ||
1378 | */ | ||
1379 | |||
1380 | /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache | ||
1381 | aligned. Aries might need this too. The Saturn errata are not | ||
1382 | pretty reading but thankfully it's an old 486 chipset. | ||
1383 | |||
1384 | 2. The dreaded SiS496 486 chipset. Same workaround as Intel | ||
1385 | Saturn. | ||
1386 | */ | ||
1387 | |||
1388 | if (pci_dev_present(early_486_chipsets)) { | ||
1389 | csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift); | ||
1390 | force_csr0 = 1; | ||
1391 | } | ||
1392 | |||
1393 | /* bugfix: the ASIX must have a burst limit or horrible things happen. */ | ||
1394 | if (chip_idx == AX88140) { | ||
1395 | if ((csr0 & 0x3f00) == 0) | ||
1396 | csr0 |= 0x2000; | ||
1397 | } | ||
1398 | |||
1399 | /* PNIC doesn't have MWI/MRL/MRM... */ | ||
1400 | if (chip_idx == LC82C168) | ||
1401 | csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */ | ||
1402 | |||
1403 | /* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */ | ||
1404 | if (tulip_uli_dm_quirk(pdev)) { | ||
1405 | csr0 &= ~0x01f100ff; | ||
1406 | #if defined(CONFIG_SPARC) | ||
1407 | csr0 = (csr0 & ~0xff00) | 0xe000; | ||
1408 | #endif | ||
1409 | } | ||
1410 | /* | ||
1411 | * And back to business | ||
1412 | */ | ||
1413 | |||
1414 | i = pci_enable_device(pdev); | ||
1415 | if (i) { | ||
1416 | pr_err("Cannot enable tulip board #%d, aborting\n", board_idx); | ||
1417 | return i; | ||
1418 | } | ||
1419 | |||
1420 | /* The chip will fail to enter a low-power state later unless | ||
1421 | * first explicitly commanded into D0 */ | ||
1422 | if (pci_set_power_state(pdev, PCI_D0)) { | ||
1423 | pr_notice("Failed to set power state to D0\n"); | ||
1424 | } | ||
1425 | |||
1426 | irq = pdev->irq; | ||
1427 | |||
1428 | /* alloc_etherdev ensures aligned and zeroed private structures */ | ||
1429 | dev = alloc_etherdev (sizeof (*tp)); | ||
1430 | if (!dev) { | ||
1431 | pr_err("ether device alloc failed, aborting\n"); | ||
1432 | return -ENOMEM; | ||
1433 | } | ||
1434 | |||
1435 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
1436 | if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) { | ||
1437 | pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n", | ||
1438 | pci_name(pdev), | ||
1439 | (unsigned long long)pci_resource_len (pdev, 0), | ||
1440 | (unsigned long long)pci_resource_start (pdev, 0)); | ||
1441 | goto err_out_free_netdev; | ||
1442 | } | ||
1443 | |||
1444 | /* grab all resources from both PIO and MMIO regions, as we | ||
1445 | * don't want anyone else messing around with our hardware */ | ||
1446 | if (pci_request_regions (pdev, DRV_NAME)) | ||
1447 | goto err_out_free_netdev; | ||
1448 | |||
1449 | ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size); | ||
1450 | |||
1451 | if (!ioaddr) | ||
1452 | goto err_out_free_res; | ||
1453 | |||
1454 | /* | ||
1455 | * initialize private data structure 'tp' | ||
1456 | * it is zeroed and aligned in alloc_etherdev | ||
1457 | */ | ||
1458 | tp = netdev_priv(dev); | ||
1459 | tp->dev = dev; | ||
1460 | |||
1461 | tp->rx_ring = pci_alloc_consistent(pdev, | ||
1462 | sizeof(struct tulip_rx_desc) * RX_RING_SIZE + | ||
1463 | sizeof(struct tulip_tx_desc) * TX_RING_SIZE, | ||
1464 | &tp->rx_ring_dma); | ||
1465 | if (!tp->rx_ring) | ||
1466 | goto err_out_mtable; | ||
1467 | tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE); | ||
1468 | tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE; | ||
1469 | |||
1470 | tp->chip_id = chip_idx; | ||
1471 | tp->flags = tulip_tbl[chip_idx].flags; | ||
1472 | |||
1473 | tp->wolinfo.supported = 0; | ||
1474 | tp->wolinfo.wolopts = 0; | ||
1475 | /* COMET: Enable power management only for AN983B */ | ||
1476 | if (chip_idx == COMET ) { | ||
1477 | u32 sig; | ||
1478 | pci_read_config_dword (pdev, 0x80, &sig); | ||
1479 | if (sig == 0x09811317) { | ||
1480 | tp->flags |= COMET_PM; | ||
1481 | tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC; | ||
1482 | pr_info("%s: Enabled WOL support for AN983B\n", | ||
1483 | __func__); | ||
1484 | } | ||
1485 | } | ||
1486 | tp->pdev = pdev; | ||
1487 | tp->base_addr = ioaddr; | ||
1488 | tp->revision = pdev->revision; | ||
1489 | tp->csr0 = csr0; | ||
1490 | spin_lock_init(&tp->lock); | ||
1491 | spin_lock_init(&tp->mii_lock); | ||
1492 | init_timer(&tp->timer); | ||
1493 | tp->timer.data = (unsigned long)dev; | ||
1494 | tp->timer.function = tulip_tbl[tp->chip_id].media_timer; | ||
1495 | |||
1496 | INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); | ||
1497 | |||
1498 | dev->base_addr = (unsigned long)ioaddr; | ||
1499 | |||
1500 | #ifdef CONFIG_TULIP_MWI | ||
1501 | if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) | ||
1502 | tulip_mwi_config (pdev, dev); | ||
1503 | #endif | ||
1504 | |||
1505 | /* Stop the chip's Tx and Rx processes. */ | ||
1506 | tulip_stop_rxtx(tp); | ||
1507 | |||
1508 | pci_set_master(pdev); | ||
1509 | |||
1510 | #ifdef CONFIG_GSC | ||
1511 | if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) { | ||
1512 | switch (pdev->subsystem_device) { | ||
1513 | default: | ||
1514 | break; | ||
1515 | case 0x1061: | ||
1516 | case 0x1062: | ||
1517 | case 0x1063: | ||
1518 | case 0x1098: | ||
1519 | case 0x1099: | ||
1520 | case 0x10EE: | ||
1521 | tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE; | ||
1522 | chip_name = "GSC DS21140 Tulip"; | ||
1523 | } | ||
1524 | } | ||
1525 | #endif | ||
1526 | |||
1527 | /* Clear the missed-packet counter. */ | ||
1528 | ioread32(ioaddr + CSR8); | ||
1529 | |||
1530 | /* The station address ROM is read byte serially. The register must | ||
1531 | be polled, waiting for the value to be read bit serially from the | ||
1532 | EEPROM. | ||
1533 | */ | ||
1534 | ee_data = tp->eeprom; | ||
1535 | memset(ee_data, 0, sizeof(tp->eeprom)); | ||
1536 | sum = 0; | ||
1537 | if (chip_idx == LC82C168) { | ||
1538 | for (i = 0; i < 3; i++) { | ||
1539 | int value, boguscnt = 100000; | ||
1540 | iowrite32(0x600 | i, ioaddr + 0x98); | ||
1541 | do { | ||
1542 | value = ioread32(ioaddr + CSR9); | ||
1543 | } while (value < 0 && --boguscnt > 0); | ||
1544 | put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); | ||
1545 | sum += value & 0xffff; | ||
1546 | } | ||
1547 | } else if (chip_idx == COMET) { | ||
1548 | /* No need to read the EEPROM. */ | ||
1549 | put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); | ||
1550 | put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); | ||
1551 | for (i = 0; i < 6; i ++) | ||
1552 | sum += dev->dev_addr[i]; | ||
1553 | } else { | ||
1554 | /* A serial EEPROM interface, we read now and sort it out later. */ | ||
1555 | int sa_offset = 0; | ||
1556 | int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6; | ||
1557 | int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16); | ||
1558 | |||
1559 | if (ee_max_addr > sizeof(tp->eeprom)) | ||
1560 | ee_max_addr = sizeof(tp->eeprom); | ||
1561 | |||
1562 | for (i = 0; i < ee_max_addr ; i += sizeof(u16)) { | ||
1563 | u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size); | ||
1564 | ee_data[i] = data & 0xff; | ||
1565 | ee_data[i + 1] = data >> 8; | ||
1566 | } | ||
1567 | |||
1568 | /* DEC now has a specification (see Notes) but early board makers | ||
1569 | just put the address in the first EEPROM locations. */ | ||
1570 | /* This does memcmp(ee_data, ee_data+16, 8) */ | ||
1571 | for (i = 0; i < 8; i ++) | ||
1572 | if (ee_data[i] != ee_data[16+i]) | ||
1573 | sa_offset = 20; | ||
1574 | if (chip_idx == CONEXANT) { | ||
1575 | /* Check that the tuple type and length is correct. */ | ||
1576 | if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6) | ||
1577 | sa_offset = 0x19A; | ||
1578 | } else if (ee_data[0] == 0xff && ee_data[1] == 0xff && | ||
1579 | ee_data[2] == 0) { | ||
1580 | sa_offset = 2; /* Grrr, damn Matrox boards. */ | ||
1581 | multiport_cnt = 4; | ||
1582 | } | ||
1583 | #ifdef CONFIG_MIPS_COBALT | ||
1584 | if ((pdev->bus->number == 0) && | ||
1585 | ((PCI_SLOT(pdev->devfn) == 7) || | ||
1586 | (PCI_SLOT(pdev->devfn) == 12))) { | ||
1587 | /* Cobalt MAC address in first EEPROM locations. */ | ||
1588 | sa_offset = 0; | ||
1589 | /* Ensure our media table fixup gets applied */ | ||
1590 | memcpy(ee_data + 16, ee_data, 8); | ||
1591 | } | ||
1592 | #endif | ||
1593 | #ifdef CONFIG_GSC | ||
1594 | /* Check to see if we have a broken srom */ | ||
1595 | if (ee_data[0] == 0x61 && ee_data[1] == 0x10) { | ||
1596 | /* pci_vendor_id and subsystem_id are swapped */ | ||
1597 | ee_data[0] = ee_data[2]; | ||
1598 | ee_data[1] = ee_data[3]; | ||
1599 | ee_data[2] = 0x61; | ||
1600 | ee_data[3] = 0x10; | ||
1601 | |||
1602 | /* HSC-PCI boards need to be byte-swapped and shifted | ||
1603 | * up 1 word. This shift needs to happen at the end | ||
1604 | * of the MAC first because of the 2 byte overlap. | ||
1605 | */ | ||
1606 | for (i = 4; i >= 0; i -= 2) { | ||
1607 | ee_data[17 + i + 3] = ee_data[17 + i]; | ||
1608 | ee_data[16 + i + 5] = ee_data[16 + i]; | ||
1609 | } | ||
1610 | } | ||
1611 | #endif | ||
1612 | |||
1613 | for (i = 0; i < 6; i ++) { | ||
1614 | dev->dev_addr[i] = ee_data[i + sa_offset]; | ||
1615 | sum += ee_data[i + sa_offset]; | ||
1616 | } | ||
1617 | } | ||
1618 | /* Lite-On boards have the address byte-swapped. */ | ||
1619 | if ((dev->dev_addr[0] == 0xA0 || | ||
1620 | dev->dev_addr[0] == 0xC0 || | ||
1621 | dev->dev_addr[0] == 0x02) && | ||
1622 | dev->dev_addr[1] == 0x00) | ||
1623 | for (i = 0; i < 6; i+=2) { | ||
1624 | char tmp = dev->dev_addr[i]; | ||
1625 | dev->dev_addr[i] = dev->dev_addr[i+1]; | ||
1626 | dev->dev_addr[i+1] = tmp; | ||
1627 | } | ||
1628 | /* On the Zynx 315 Etherarray and other multiport boards only the | ||
1629 | first Tulip has an EEPROM. | ||
1630 | On Sparc systems the mac address is held in the OBP property | ||
1631 | "local-mac-address". | ||
1632 | The addresses of the subsequent ports are derived from the first. | ||
1633 | Many PCI BIOSes also incorrectly report the IRQ line, so we correct | ||
1634 | that here as well. */ | ||
1635 | if (sum == 0 || sum == 6*0xff) { | ||
1636 | #if defined(CONFIG_SPARC) | ||
1637 | struct device_node *dp = pci_device_to_OF_node(pdev); | ||
1638 | const unsigned char *addr; | ||
1639 | int len; | ||
1640 | #endif | ||
1641 | eeprom_missing = 1; | ||
1642 | for (i = 0; i < 5; i++) | ||
1643 | dev->dev_addr[i] = last_phys_addr[i]; | ||
1644 | dev->dev_addr[i] = last_phys_addr[i] + 1; | ||
1645 | #if defined(CONFIG_SPARC) | ||
1646 | addr = of_get_property(dp, "local-mac-address", &len); | ||
1647 | if (addr && len == 6) | ||
1648 | memcpy(dev->dev_addr, addr, 6); | ||
1649 | #endif | ||
1650 | #if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */ | ||
1651 | if (last_irq) | ||
1652 | irq = last_irq; | ||
1653 | #endif | ||
1654 | } | ||
1655 | |||
1656 | for (i = 0; i < 6; i++) | ||
1657 | last_phys_addr[i] = dev->dev_addr[i]; | ||
1658 | last_irq = irq; | ||
1659 | dev->irq = irq; | ||
1660 | |||
1661 | /* The lower four bits are the media type. */ | ||
1662 | if (board_idx >= 0 && board_idx < MAX_UNITS) { | ||
1663 | if (options[board_idx] & MEDIA_MASK) | ||
1664 | tp->default_port = options[board_idx] & MEDIA_MASK; | ||
1665 | if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0) | ||
1666 | tp->full_duplex = 1; | ||
1667 | if (mtu[board_idx] > 0) | ||
1668 | dev->mtu = mtu[board_idx]; | ||
1669 | } | ||
1670 | if (dev->mem_start & MEDIA_MASK) | ||
1671 | tp->default_port = dev->mem_start & MEDIA_MASK; | ||
1672 | if (tp->default_port) { | ||
1673 | pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n", | ||
1674 | board_idx, medianame[tp->default_port & MEDIA_MASK]); | ||
1675 | tp->medialock = 1; | ||
1676 | if (tulip_media_cap[tp->default_port] & MediaAlwaysFD) | ||
1677 | tp->full_duplex = 1; | ||
1678 | } | ||
1679 | if (tp->full_duplex) | ||
1680 | tp->full_duplex_lock = 1; | ||
1681 | |||
1682 | if (tulip_media_cap[tp->default_port] & MediaIsMII) { | ||
1683 | static const u16 media2advert[] = { | ||
1684 | 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 | ||
1685 | }; | ||
1686 | tp->mii_advertise = media2advert[tp->default_port - 9]; | ||
1687 | tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ | ||
1688 | } | ||
1689 | |||
1690 | if (tp->flags & HAS_MEDIA_TABLE) { | ||
1691 | sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */ | ||
1692 | tulip_parse_eeprom(dev); | ||
1693 | strcpy(dev->name, "eth%d"); /* un-hack */ | ||
1694 | } | ||
1695 | |||
1696 | if ((tp->flags & ALWAYS_CHECK_MII) || | ||
1697 | (tp->mtable && tp->mtable->has_mii) || | ||
1698 | ( ! tp->mtable && (tp->flags & HAS_MII))) { | ||
1699 | if (tp->mtable && tp->mtable->has_mii) { | ||
1700 | for (i = 0; i < tp->mtable->leafcount; i++) | ||
1701 | if (tp->mtable->mleaf[i].media == 11) { | ||
1702 | tp->cur_index = i; | ||
1703 | tp->saved_if_port = dev->if_port; | ||
1704 | tulip_select_media(dev, 2); | ||
1705 | dev->if_port = tp->saved_if_port; | ||
1706 | break; | ||
1707 | } | ||
1708 | } | ||
1709 | |||
1710 | /* Find the connected MII xcvrs. | ||
1711 | Doing this in open() would allow detecting external xcvrs | ||
1712 | later, but would take too much time. */ | ||
1713 | tulip_find_mii (dev, board_idx); | ||
1714 | } | ||
1715 | |||
1716 | /* The Tulip-specific entries in the device structure. */ | ||
1717 | dev->netdev_ops = &tulip_netdev_ops; | ||
1718 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1719 | #ifdef CONFIG_TULIP_NAPI | ||
1720 | netif_napi_add(dev, &tp->napi, tulip_poll, 16); | ||
1721 | #endif | ||
1722 | SET_ETHTOOL_OPS(dev, &ops); | ||
1723 | |||
1724 | if (register_netdev(dev)) | ||
1725 | goto err_out_free_ring; | ||
1726 | |||
1727 | pci_set_drvdata(pdev, dev); | ||
1728 | |||
1729 | dev_info(&dev->dev, | ||
1730 | #ifdef CONFIG_TULIP_MMIO | ||
1731 | "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n", | ||
1732 | #else | ||
1733 | "%s rev %d at Port %#llx,%s %pM, IRQ %d\n", | ||
1734 | #endif | ||
1735 | chip_name, pdev->revision, | ||
1736 | (unsigned long long)pci_resource_start(pdev, TULIP_BAR), | ||
1737 | eeprom_missing ? " EEPROM not present," : "", | ||
1738 | dev->dev_addr, irq); | ||
1739 | |||
1740 | if (tp->chip_id == PNIC2) | ||
1741 | tp->link_change = pnic2_lnk_change; | ||
1742 | else if (tp->flags & HAS_NWAY) | ||
1743 | tp->link_change = t21142_lnk_change; | ||
1744 | else if (tp->flags & HAS_PNICNWAY) | ||
1745 | tp->link_change = pnic_lnk_change; | ||
1746 | |||
1747 | /* Reset the xcvr interface and turn on heartbeat. */ | ||
1748 | switch (chip_idx) { | ||
1749 | case DC21140: | ||
1750 | case DM910X: | ||
1751 | default: | ||
1752 | if (tp->mtable) | ||
1753 | iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12); | ||
1754 | break; | ||
1755 | case DC21142: | ||
1756 | if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) { | ||
1757 | iowrite32(csr6_mask_defstate, ioaddr + CSR6); | ||
1758 | iowrite32(0x0000, ioaddr + CSR13); | ||
1759 | iowrite32(0x0000, ioaddr + CSR14); | ||
1760 | iowrite32(csr6_mask_hdcap, ioaddr + CSR6); | ||
1761 | } else | ||
1762 | t21142_start_nway(dev); | ||
1763 | break; | ||
1764 | case PNIC2: | ||
1765 | /* just do a reset for sanity's sake */ | ||
1766 | iowrite32(0x0000, ioaddr + CSR13); | ||
1767 | iowrite32(0x0000, ioaddr + CSR14); | ||
1768 | break; | ||
1769 | case LC82C168: | ||
1770 | if ( ! tp->mii_cnt) { | ||
1771 | tp->nway = 1; | ||
1772 | tp->nwayset = 0; | ||
1773 | iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6); | ||
1774 | iowrite32(0x30, ioaddr + CSR12); | ||
1775 | iowrite32(0x0001F078, ioaddr + CSR6); | ||
1776 | iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */ | ||
1777 | } | ||
1778 | break; | ||
1779 | case MX98713: | ||
1780 | case COMPEX9881: | ||
1781 | iowrite32(0x00000000, ioaddr + CSR6); | ||
1782 | iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */ | ||
1783 | iowrite32(0x00000001, ioaddr + CSR13); | ||
1784 | break; | ||
1785 | case MX98715: | ||
1786 | case MX98725: | ||
1787 | iowrite32(0x01a80000, ioaddr + CSR6); | ||
1788 | iowrite32(0xFFFFFFFF, ioaddr + CSR14); | ||
1789 | iowrite32(0x00001000, ioaddr + CSR12); | ||
1790 | break; | ||
1791 | case COMET: | ||
1792 | /* No initialization necessary. */ | ||
1793 | break; | ||
1794 | } | ||
1795 | |||
1796 | /* put the chip in snooze mode until opened */ | ||
1797 | tulip_set_power_state (tp, 0, 1); | ||
1798 | |||
1799 | return 0; | ||
1800 | |||
1801 | err_out_free_ring: | ||
1802 | pci_free_consistent (pdev, | ||
1803 | sizeof (struct tulip_rx_desc) * RX_RING_SIZE + | ||
1804 | sizeof (struct tulip_tx_desc) * TX_RING_SIZE, | ||
1805 | tp->rx_ring, tp->rx_ring_dma); | ||
1806 | |||
1807 | err_out_mtable: | ||
1808 | kfree (tp->mtable); | ||
1809 | pci_iounmap(pdev, ioaddr); | ||
1810 | |||
1811 | err_out_free_res: | ||
1812 | pci_release_regions (pdev); | ||
1813 | |||
1814 | err_out_free_netdev: | ||
1815 | free_netdev (dev); | ||
1816 | return -ENODEV; | ||
1817 | } | ||
1818 | |||
1819 | |||
1820 | /* set the registers according to the given wolopts */ | ||
1821 | static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts) | ||
1822 | { | ||
1823 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1824 | struct tulip_private *tp = netdev_priv(dev); | ||
1825 | void __iomem *ioaddr = tp->base_addr; | ||
1826 | |||
1827 | if (tp->flags & COMET_PM) { | ||
1828 | |||
1829 | unsigned int tmp; | ||
1830 | |||
1831 | tmp = ioread32(ioaddr + CSR18); | ||
1832 | tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a); | ||
1833 | tmp |= comet_csr18_pm_mode; | ||
1834 | iowrite32(tmp, ioaddr + CSR18); | ||
1835 | |||
1836 | /* Set the Wake-up Control/Status Register to the given WOL options */ | ||
1837 | tmp = ioread32(ioaddr + CSR13); | ||
1838 | tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre); | ||
1839 | if (wolopts & WAKE_MAGIC) | ||
1840 | tmp |= comet_csr13_mpre; | ||
1841 | if (wolopts & WAKE_PHY) | ||
1842 | tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce; | ||
1843 | /* Clear the event flags */ | ||
1844 | tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc; | ||
1845 | iowrite32(tmp, ioaddr + CSR13); | ||
1846 | } | ||
1847 | } | ||
1848 | |||
1849 | #ifdef CONFIG_PM | ||
1850 | |||
1851 | |||
1852 | static int tulip_suspend (struct pci_dev *pdev, pm_message_t state) | ||
1853 | { | ||
1854 | pci_power_t pstate; | ||
1855 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1856 | struct tulip_private *tp = netdev_priv(dev); | ||
1857 | |||
1858 | if (!dev) | ||
1859 | return -EINVAL; | ||
1860 | |||
1861 | if (!netif_running(dev)) | ||
1862 | goto save_state; | ||
1863 | |||
1864 | tulip_down(dev); | ||
1865 | |||
1866 | netif_device_detach(dev); | ||
1867 | free_irq(dev->irq, dev); | ||
1868 | |||
1869 | save_state: | ||
1870 | pci_save_state(pdev); | ||
1871 | pci_disable_device(pdev); | ||
1872 | pstate = pci_choose_state(pdev, state); | ||
1873 | if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) { | ||
1874 | int rc; | ||
1875 | |||
1876 | tulip_set_wolopts(pdev, tp->wolinfo.wolopts); | ||
1877 | rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts); | ||
1878 | if (rc) | ||
1879 | pr_err("pci_enable_wake failed (%d)\n", rc); | ||
1880 | } | ||
1881 | pci_set_power_state(pdev, pstate); | ||
1882 | |||
1883 | return 0; | ||
1884 | } | ||
1885 | |||
1886 | |||
1887 | static int tulip_resume(struct pci_dev *pdev) | ||
1888 | { | ||
1889 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1890 | struct tulip_private *tp = netdev_priv(dev); | ||
1891 | void __iomem *ioaddr = tp->base_addr; | ||
1892 | int retval; | ||
1893 | unsigned int tmp; | ||
1894 | |||
1895 | if (!dev) | ||
1896 | return -EINVAL; | ||
1897 | |||
1898 | pci_set_power_state(pdev, PCI_D0); | ||
1899 | pci_restore_state(pdev); | ||
1900 | |||
1901 | if (!netif_running(dev)) | ||
1902 | return 0; | ||
1903 | |||
1904 | if ((retval = pci_enable_device(pdev))) { | ||
1905 | pr_err("pci_enable_device failed in resume\n"); | ||
1906 | return retval; | ||
1907 | } | ||
1908 | |||
1909 | if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { | ||
1910 | pr_err("request_irq failed in resume\n"); | ||
1911 | return retval; | ||
1912 | } | ||
1913 | |||
1914 | if (tp->flags & COMET_PM) { | ||
1915 | pci_enable_wake(pdev, PCI_D3hot, 0); | ||
1916 | pci_enable_wake(pdev, PCI_D3cold, 0); | ||
1917 | |||
1918 | /* Clear the PMES flag */ | ||
1919 | tmp = ioread32(ioaddr + CSR20); | ||
1920 | tmp |= comet_csr20_pmes; | ||
1921 | iowrite32(tmp, ioaddr + CSR20); | ||
1922 | |||
1923 | /* Disable all wake-up events */ | ||
1924 | tulip_set_wolopts(pdev, 0); | ||
1925 | } | ||
1926 | netif_device_attach(dev); | ||
1927 | |||
1928 | if (netif_running(dev)) | ||
1929 | tulip_up(dev); | ||
1930 | |||
1931 | return 0; | ||
1932 | } | ||
1933 | |||
1934 | #endif /* CONFIG_PM */ | ||
1935 | |||
1936 | |||
1937 | static void __devexit tulip_remove_one (struct pci_dev *pdev) | ||
1938 | { | ||
1939 | struct net_device *dev = pci_get_drvdata (pdev); | ||
1940 | struct tulip_private *tp; | ||
1941 | |||
1942 | if (!dev) | ||
1943 | return; | ||
1944 | |||
1945 | tp = netdev_priv(dev); | ||
1946 | unregister_netdev(dev); | ||
1947 | pci_free_consistent (pdev, | ||
1948 | sizeof (struct tulip_rx_desc) * RX_RING_SIZE + | ||
1949 | sizeof (struct tulip_tx_desc) * TX_RING_SIZE, | ||
1950 | tp->rx_ring, tp->rx_ring_dma); | ||
1951 | kfree (tp->mtable); | ||
1952 | pci_iounmap(pdev, tp->base_addr); | ||
1953 | free_netdev (dev); | ||
1954 | pci_release_regions (pdev); | ||
1955 | pci_set_drvdata (pdev, NULL); | ||
1956 | |||
1957 | /* pci_power_off (pdev, -1); */ | ||
1958 | } | ||
1959 | |||
1960 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1961 | /* | ||
1962 | * Polling 'interrupt' - used by things like netconsole to send skbs | ||
1963 | * without having to re-enable interrupts. It's not called while | ||
1964 | * the interrupt routine is executing. | ||
1965 | */ | ||
1966 | |||
1967 | static void poll_tulip (struct net_device *dev) | ||
1968 | { | ||
1969 | /* disable_irq here is not very nice, but with the lockless | ||
1970 | interrupt handler we have no other choice. */ | ||
1971 | disable_irq(dev->irq); | ||
1972 | tulip_interrupt (dev->irq, dev); | ||
1973 | enable_irq(dev->irq); | ||
1974 | } | ||
1975 | #endif | ||
1976 | |||
1977 | static struct pci_driver tulip_driver = { | ||
1978 | .name = DRV_NAME, | ||
1979 | .id_table = tulip_pci_tbl, | ||
1980 | .probe = tulip_init_one, | ||
1981 | .remove = __devexit_p(tulip_remove_one), | ||
1982 | #ifdef CONFIG_PM | ||
1983 | .suspend = tulip_suspend, | ||
1984 | .resume = tulip_resume, | ||
1985 | #endif /* CONFIG_PM */ | ||
1986 | }; | ||
1987 | |||
1988 | |||
1989 | static int __init tulip_init (void) | ||
1990 | { | ||
1991 | #ifdef MODULE | ||
1992 | pr_info("%s", version); | ||
1993 | #endif | ||
1994 | |||
1995 | /* copy module parms into globals */ | ||
1996 | tulip_rx_copybreak = rx_copybreak; | ||
1997 | tulip_max_interrupt_work = max_interrupt_work; | ||
1998 | |||
1999 | /* probe for and init boards */ | ||
2000 | return pci_register_driver(&tulip_driver); | ||
2001 | } | ||
2002 | |||
2003 | |||
2004 | static void __exit tulip_cleanup (void) | ||
2005 | { | ||
2006 | pci_unregister_driver (&tulip_driver); | ||
2007 | } | ||
2008 | |||
2009 | |||
2010 | module_init(tulip_init); | ||
2011 | module_exit(tulip_cleanup); | ||
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c new file mode 100644 index 000000000000..9e63f406f72d --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/uli526x.c | |||
@@ -0,0 +1,1850 @@ | |||
1 | /* | ||
2 | This program is free software; you can redistribute it and/or | ||
3 | modify it under the terms of the GNU General Public License | ||
4 | as published by the Free Software Foundation; either version 2 | ||
5 | of the License, or (at your option) any later version. | ||
6 | |||
7 | This program is distributed in the hope that it will be useful, | ||
8 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | GNU General Public License for more details. | ||
11 | |||
12 | |||
13 | */ | ||
14 | |||
15 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
16 | |||
17 | #define DRV_NAME "uli526x" | ||
18 | #define DRV_VERSION "0.9.3" | ||
19 | #define DRV_RELDATE "2005-7-29" | ||
20 | |||
21 | #include <linux/module.h> | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/timer.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/ioport.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/pci.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/spinlock.h> | ||
37 | #include <linux/dma-mapping.h> | ||
38 | #include <linux/bitops.h> | ||
39 | |||
40 | #include <asm/processor.h> | ||
41 | #include <asm/io.h> | ||
42 | #include <asm/dma.h> | ||
43 | #include <asm/uaccess.h> | ||
44 | |||
45 | |||
46 | /* Board/System/Debug information/definition ---------------- */ | ||
47 | #define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ | ||
48 | #define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/ | ||
49 | |||
50 | #define ULI526X_IO_SIZE 0x100 | ||
51 | #define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */ | ||
52 | #define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */ | ||
53 | #define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */ | ||
54 | #define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */ | ||
55 | #define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT) | ||
56 | #define TX_BUF_ALLOC 0x600 | ||
57 | #define RX_ALLOC_SIZE 0x620 | ||
58 | #define ULI526X_RESET 1 | ||
59 | #define CR0_DEFAULT 0 | ||
60 | #define CR6_DEFAULT 0x22200000 | ||
61 | #define CR7_DEFAULT 0x180c1 | ||
62 | #define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */ | ||
63 | #define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */ | ||
64 | #define MAX_PACKET_SIZE 1514 | ||
65 | #define ULI5261_MAX_MULTICAST 14 | ||
66 | #define RX_COPY_SIZE 100 | ||
67 | #define MAX_CHECK_PACKET 0x8000 | ||
68 | |||
69 | #define ULI526X_10MHF 0 | ||
70 | #define ULI526X_100MHF 1 | ||
71 | #define ULI526X_10MFD 4 | ||
72 | #define ULI526X_100MFD 5 | ||
73 | #define ULI526X_AUTO 8 | ||
74 | |||
75 | #define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */ | ||
76 | #define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */ | ||
77 | #define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */ | ||
78 | #define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */ | ||
79 | #define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */ | ||
80 | #define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */ | ||
81 | |||
82 | #define ULI526X_TIMER_WUT (jiffies + HZ * 1) /* timer wakeup time: 1 second */ | ||
83 | #define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time: 8 s */ | ||
84 | #define ULI526X_TX_KICK (4*HZ/2) /* tx packet kick-out time: 2 s */ | ||
85 | |||
86 | #define ULI526X_DBUG(dbug_now, msg, value) \ | ||
87 | do { \ | ||
88 | if (uli526x_debug || (dbug_now)) \ | ||
89 | pr_err("%s %lx\n", (msg), (long) (value)); \ | ||
90 | } while (0) | ||
91 | |||
92 | #define SHOW_MEDIA_TYPE(mode) \ | ||
93 | pr_err("Change Speed to %sMhz %s duplex\n", \ | ||
94 | mode & 1 ? "100" : "10", \ | ||
95 | mode & 4 ? "full" : "half"); | ||
96 | |||
97 | |||
98 | /* CR9 definition: SROM/MII */ | ||
99 | #define CR9_SROM_READ 0x4800 | ||
100 | #define CR9_SRCS 0x1 | ||
101 | #define CR9_SRCLK 0x2 | ||
102 | #define CR9_CRDOUT 0x8 | ||
103 | #define SROM_DATA_0 0x0 | ||
104 | #define SROM_DATA_1 0x4 | ||
105 | #define PHY_DATA_1 0x20000 | ||
106 | #define PHY_DATA_0 0x00000 | ||
107 | #define MDCLKH 0x10000 | ||
108 | |||
109 | #define PHY_POWER_DOWN 0x800 | ||
110 | |||
111 | #define SROM_V41_CODE 0x14 | ||
112 | |||
113 | #define SROM_CLK_WRITE(data, ioaddr) \ | ||
114 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
115 | udelay(5); \ | ||
116 | outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \ | ||
117 | udelay(5); \ | ||
118 | outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \ | ||
119 | udelay(5); | ||
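/*
 * Editorial sketch, not part of the commit: SROM_CLK_WRITE expands to three
 * statements, so using it as the body of an un-braced "if" or loop would
 * only cover the first outl().  A safer formulation (purely illustrative,
 * and renamed so as not to imply a change to the driver) wraps the body in
 * the usual do { ... } while (0) idiom:
 */
#define SROM_CLK_WRITE_SAFE(data, ioaddr)				\
	do {								\
		outl((data) | CR9_SROM_READ | CR9_SRCS, (ioaddr));	\
		udelay(5);						\
		outl((data) | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,	\
		     (ioaddr));						\
		udelay(5);						\
		outl((data) | CR9_SROM_READ | CR9_SRCS, (ioaddr));	\
		udelay(5);						\
	} while (0)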
120 | |||
121 | /* Structure/enum declaration ------------------------------- */ | ||
122 | struct tx_desc { | ||
123 | __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ | ||
124 | char *tx_buf_ptr; /* Data for us */ | ||
125 | struct tx_desc *next_tx_desc; | ||
126 | } __attribute__(( aligned(32) )); | ||
127 | |||
128 | struct rx_desc { | ||
129 | __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */ | ||
130 | struct sk_buff *rx_skb_ptr; /* Data for us */ | ||
131 | struct rx_desc *next_rx_desc; | ||
132 | } __attribute__(( aligned(32) )); | ||
133 | |||
134 | struct uli526x_board_info { | ||
135 | u32 chip_id; /* Chip vendor/Device ID */ | ||
136 | struct net_device *next_dev; /* next device */ | ||
137 | struct pci_dev *pdev; /* PCI device */ | ||
138 | spinlock_t lock; | ||
139 | |||
140 | long ioaddr; /* I/O base address */ | ||
141 | u32 cr0_data; | ||
142 | u32 cr5_data; | ||
143 | u32 cr6_data; | ||
144 | u32 cr7_data; | ||
145 | u32 cr15_data; | ||
146 | |||
147 | /* pointer for memory physical address */ | ||
148 | dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */ | ||
149 | dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */ | ||
150 | dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */ | ||
151 | dma_addr_t first_tx_desc_dma; | ||
152 | dma_addr_t first_rx_desc_dma; | ||
153 | |||
154 | /* descriptor pointer */ | ||
155 | unsigned char *buf_pool_ptr; /* Tx buffer pool memory */ | ||
156 | unsigned char *buf_pool_start; /* Tx buffer pool align dword */ | ||
157 | unsigned char *desc_pool_ptr; /* descriptor pool memory */ | ||
158 | struct tx_desc *first_tx_desc; | ||
159 | struct tx_desc *tx_insert_ptr; | ||
160 | struct tx_desc *tx_remove_ptr; | ||
161 | struct rx_desc *first_rx_desc; | ||
162 | struct rx_desc *rx_insert_ptr; | ||
163 | struct rx_desc *rx_ready_ptr; /* next received packet to process */ | ||
164 | unsigned long tx_packet_cnt; /* transmitted packet count */ | ||
165 | unsigned long rx_avail_cnt; /* available rx descriptor count */ | ||
166 | unsigned long interval_rx_cnt; /* rx packets received per timer interval */ | ||
167 | |||
168 | u16 dbug_cnt; | ||
169 | u16 NIC_capability; /* NIC media capability */ | ||
170 | u16 PHY_reg4; /* Saved Phyxcer register 4 value */ | ||
171 | |||
172 | u8 media_mode; /* user specify media mode */ | ||
173 | u8 op_mode; /* real work media mode */ | ||
174 | u8 phy_addr; | ||
175 | u8 link_failed; /* set once the link has failed */ | ||
176 | u8 wait_reset; /* Hardware failed, need to reset */ | ||
177 | struct timer_list timer; | ||
178 | |||
179 | /* Driver defined statistic counter */ | ||
180 | unsigned long tx_fifo_underrun; | ||
181 | unsigned long tx_loss_carrier; | ||
182 | unsigned long tx_no_carrier; | ||
183 | unsigned long tx_late_collision; | ||
184 | unsigned long tx_excessive_collision; | ||
185 | unsigned long tx_jabber_timeout; | ||
186 | unsigned long reset_count; | ||
187 | unsigned long reset_cr8; | ||
188 | unsigned long reset_fatal; | ||
189 | unsigned long reset_TXtimeout; | ||
190 | |||
191 | /* NIC SROM data */ | ||
192 | unsigned char srom[128]; | ||
193 | u8 init; | ||
194 | }; | ||
195 | |||
196 | enum uli526x_offsets { | ||
197 | DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20, | ||
198 | DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48, | ||
199 | DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70, | ||
200 | DCR15 = 0x78 | ||
201 | }; | ||
202 | |||
203 | enum uli526x_CR6_bits { | ||
204 | CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80, | ||
205 | CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000, | ||
206 | CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000 | ||
207 | }; | ||
208 | |||
209 | /* Global variable declaration ----------------------------- */ | ||
210 | static int __devinitdata printed_version; | ||
211 | static const char version[] __devinitconst = | ||
212 | "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")"; | ||
213 | |||
214 | static int uli526x_debug; | ||
215 | static unsigned char uli526x_media_mode = ULI526X_AUTO; | ||
216 | static u32 uli526x_cr6_user_set; | ||
217 | |||
218 | /* For module input parameter */ | ||
219 | static int debug; | ||
220 | static u32 cr6set; | ||
221 | static int mode = 8; | ||
222 | |||
223 | /* function declaration ------------------------------------- */ | ||
224 | static int uli526x_open(struct net_device *); | ||
225 | static netdev_tx_t uli526x_start_xmit(struct sk_buff *, | ||
226 | struct net_device *); | ||
227 | static int uli526x_stop(struct net_device *); | ||
228 | static void uli526x_set_filter_mode(struct net_device *); | ||
229 | static const struct ethtool_ops netdev_ethtool_ops; | ||
230 | static u16 read_srom_word(long, int); | ||
231 | static irqreturn_t uli526x_interrupt(int, void *); | ||
232 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
233 | static void uli526x_poll(struct net_device *dev); | ||
234 | #endif | ||
235 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); | ||
236 | static void allocate_rx_buffer(struct uli526x_board_info *); | ||
237 | static void update_cr6(u32, unsigned long); | ||
238 | static void send_filter_frame(struct net_device *, int); | ||
239 | static u16 phy_read(unsigned long, u8, u8, u32); | ||
240 | static u16 phy_readby_cr10(unsigned long, u8, u8); | ||
241 | static void phy_write(unsigned long, u8, u8, u16, u32); | ||
242 | static void phy_writeby_cr10(unsigned long, u8, u8, u16); | ||
243 | static void phy_write_1bit(unsigned long, u32, u32); | ||
244 | static u16 phy_read_1bit(unsigned long, u32); | ||
245 | static u8 uli526x_sense_speed(struct uli526x_board_info *); | ||
246 | static void uli526x_process_mode(struct uli526x_board_info *); | ||
247 | static void uli526x_timer(unsigned long); | ||
248 | static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *); | ||
249 | static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *); | ||
250 | static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *); | ||
251 | static void uli526x_dynamic_reset(struct net_device *); | ||
252 | static void uli526x_free_rxbuffer(struct uli526x_board_info *); | ||
253 | static void uli526x_init(struct net_device *); | ||
254 | static void uli526x_set_phyxcer(struct uli526x_board_info *); | ||
255 | |||
256 | /* ULI526X network board routine ---------------------------- */ | ||
257 | |||
258 | static const struct net_device_ops netdev_ops = { | ||
259 | .ndo_open = uli526x_open, | ||
260 | .ndo_stop = uli526x_stop, | ||
261 | .ndo_start_xmit = uli526x_start_xmit, | ||
262 | .ndo_set_multicast_list = uli526x_set_filter_mode, | ||
263 | .ndo_change_mtu = eth_change_mtu, | ||
264 | .ndo_set_mac_address = eth_mac_addr, | ||
265 | .ndo_validate_addr = eth_validate_addr, | ||
266 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
267 | .ndo_poll_controller = uli526x_poll, | ||
268 | #endif | ||
269 | }; | ||
270 | |||
271 | /* | ||
272 | * Search for a ULI526X board, allocate space and register it | ||
273 | */ | ||
274 | |||
275 | static int __devinit uli526x_init_one (struct pci_dev *pdev, | ||
276 | const struct pci_device_id *ent) | ||
277 | { | ||
278 | struct uli526x_board_info *db; /* board information structure */ | ||
279 | struct net_device *dev; | ||
280 | int i, err; | ||
281 | |||
282 | ULI526X_DBUG(0, "uli526x_init_one()", 0); | ||
283 | |||
284 | if (!printed_version++) | ||
285 | pr_info("%s\n", version); | ||
286 | |||
287 | /* Init network device */ | ||
288 | dev = alloc_etherdev(sizeof(*db)); | ||
289 | if (dev == NULL) | ||
290 | return -ENOMEM; | ||
291 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
292 | |||
293 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
294 | pr_warn("32-bit PCI DMA not available\n"); | ||
295 | err = -ENODEV; | ||
296 | goto err_out_free; | ||
297 | } | ||
298 | |||
299 | /* Enable Master/IO access, Disable memory access */ | ||
300 | err = pci_enable_device(pdev); | ||
301 | if (err) | ||
302 | goto err_out_free; | ||
303 | |||
304 | if (!pci_resource_start(pdev, 0)) { | ||
305 | pr_err("I/O base is zero\n"); | ||
306 | err = -ENODEV; | ||
307 | goto err_out_disable; | ||
308 | } | ||
309 | |||
310 | if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) { | ||
311 | pr_err("Allocated I/O size too small\n"); | ||
312 | err = -ENODEV; | ||
313 | goto err_out_disable; | ||
314 | } | ||
315 | |||
316 | if (pci_request_regions(pdev, DRV_NAME)) { | ||
317 | pr_err("Failed to request PCI regions\n"); | ||
318 | err = -ENODEV; | ||
319 | goto err_out_disable; | ||
320 | } | ||
321 | |||
322 | /* Init system & device */ | ||
323 | db = netdev_priv(dev); | ||
324 | |||
325 | /* Allocate Tx/Rx descriptor memory */ | ||
326 | db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); | ||
327 | if(db->desc_pool_ptr == NULL) | ||
328 | { | ||
329 | err = -ENOMEM; | ||
330 | goto err_out_nomem; | ||
331 | } | ||
332 | db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); | ||
333 | if(db->buf_pool_ptr == NULL) | ||
334 | { | ||
335 | err = -ENOMEM; | ||
336 | goto err_out_nomem; | ||
337 | } | ||
338 | |||
339 | db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; | ||
340 | db->first_tx_desc_dma = db->desc_pool_dma_ptr; | ||
341 | db->buf_pool_start = db->buf_pool_ptr; | ||
342 | db->buf_pool_dma_start = db->buf_pool_dma_ptr; | ||
343 | |||
344 | db->chip_id = ent->driver_data; | ||
345 | db->ioaddr = pci_resource_start(pdev, 0); | ||
346 | |||
347 | db->pdev = pdev; | ||
348 | db->init = 1; | ||
349 | |||
350 | dev->base_addr = db->ioaddr; | ||
351 | dev->irq = pdev->irq; | ||
352 | pci_set_drvdata(pdev, dev); | ||
353 | |||
354 | /* Register some necessary functions */ | ||
355 | dev->netdev_ops = &netdev_ops; | ||
356 | dev->ethtool_ops = &netdev_ethtool_ops; | ||
357 | |||
358 | spin_lock_init(&db->lock); | ||
359 | |||
360 | |||
361 | /* read 64 words of SROM data */ | ||
362 | for (i = 0; i < 64; i++) | ||
363 | ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); | ||
364 | |||
365 | /* Set Node address */ | ||
366 | if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ | ||
367 | { | ||
368 | outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode | ||
369 | outl(0x1c0, db->ioaddr + DCR13); //Reset diagnostic pointer port | ||
370 | outl(0, db->ioaddr + DCR14); //Clear reset port | ||
371 | outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer | ||
372 | outl(0, db->ioaddr + DCR14); //Clear reset port | ||
373 | outl(0, db->ioaddr + DCR13); //Clear CR13 | ||
374 | outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port | ||
375 | //Read MAC address from CR14 | ||
376 | for (i = 0; i < 6; i++) | ||
377 | dev->dev_addr[i] = inl(db->ioaddr + DCR14); | ||
378 | //Read end | ||
379 | outl(0, db->ioaddr + DCR13); //Clear CR13 | ||
380 | outl(0, db->ioaddr + DCR0); //Clear CR0 | ||
381 | udelay(10); | ||
382 | } | ||
383 | else /* SROM exists */ | ||
384 | { | ||
385 | for (i = 0; i < 6; i++) | ||
386 | dev->dev_addr[i] = db->srom[20 + i]; | ||
387 | } | ||
388 | err = register_netdev (dev); | ||
389 | if (err) | ||
390 | goto err_out_res; | ||
391 | |||
392 | netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", | ||
393 | ent->driver_data >> 16, pci_name(pdev), | ||
394 | dev->dev_addr, dev->irq); | ||
395 | |||
396 | pci_set_master(pdev); | ||
397 | |||
398 | return 0; | ||
399 | |||
400 | err_out_res: | ||
401 | pci_release_regions(pdev); | ||
402 | err_out_nomem: | ||
403 | if(db->desc_pool_ptr) | ||
404 | pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, | ||
405 | db->desc_pool_ptr, db->desc_pool_dma_ptr); | ||
406 | |||
407 | if(db->buf_pool_ptr != NULL) | ||
408 | pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
409 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
410 | err_out_disable: | ||
411 | pci_disable_device(pdev); | ||
412 | err_out_free: | ||
413 | pci_set_drvdata(pdev, NULL); | ||
414 | free_netdev(dev); | ||
415 | |||
416 | return err; | ||
417 | } | ||
418 | |||
419 | |||
420 | static void __devexit uli526x_remove_one (struct pci_dev *pdev) | ||
421 | { | ||
422 | struct net_device *dev = pci_get_drvdata(pdev); | ||
423 | struct uli526x_board_info *db = netdev_priv(dev); | ||
424 | |||
425 | ULI526X_DBUG(0, "uli526x_remove_one()", 0); | ||
426 | |||
427 | pci_free_consistent(db->pdev, sizeof(struct tx_desc) * | ||
428 | DESC_ALL_CNT + 0x20, db->desc_pool_ptr, | ||
429 | db->desc_pool_dma_ptr); | ||
430 | pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, | ||
431 | db->buf_pool_ptr, db->buf_pool_dma_ptr); | ||
432 | unregister_netdev(dev); | ||
433 | pci_release_regions(pdev); | ||
434 | free_netdev(dev); /* free board information */ | ||
435 | pci_set_drvdata(pdev, NULL); | ||
436 | pci_disable_device(pdev); | ||
437 | ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); | ||
438 | } | ||
439 | |||
440 | |||
441 | /* | ||
442 | * Open the interface. | ||
443 | * The interface is opened whenever "ifconfig" activates it. | ||
444 | */ | ||
445 | |||
446 | static int uli526x_open(struct net_device *dev) | ||
447 | { | ||
448 | int ret; | ||
449 | struct uli526x_board_info *db = netdev_priv(dev); | ||
450 | |||
451 | ULI526X_DBUG(0, "uli526x_open", 0); | ||
452 | |||
453 | /* system variable init */ | ||
454 | db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set; | ||
455 | db->tx_packet_cnt = 0; | ||
456 | db->rx_avail_cnt = 0; | ||
457 | db->link_failed = 1; | ||
458 | netif_carrier_off(dev); | ||
459 | db->wait_reset = 0; | ||
460 | |||
461 | db->NIC_capability = 0xf; /* All capability*/ | ||
462 | db->PHY_reg4 = 0x1e0; | ||
463 | |||
464 | /* CR6 operation mode decision */ | ||
465 | db->cr6_data |= ULI526X_TXTH_256; | ||
466 | db->cr0_data = CR0_DEFAULT; | ||
467 | |||
468 | /* Initialize ULI526X board */ | ||
469 | uli526x_init(dev); | ||
470 | |||
471 | ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev); | ||
472 | if (ret) | ||
473 | return ret; | ||
474 | |||
475 | /* Activate the network interface */ | ||
476 | netif_wake_queue(dev); | ||
477 | |||
478 | /* Set and activate the timer */ | ||
479 | init_timer(&db->timer); | ||
480 | db->timer.expires = ULI526X_TIMER_WUT + HZ * 2; | ||
481 | db->timer.data = (unsigned long)dev; | ||
482 | db->timer.function = uli526x_timer; | ||
483 | add_timer(&db->timer); | ||
484 | |||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | |||
489 | /* Initialize ULI526X board | ||
490 | * Reset ULI526X board | ||
491 | * Initialize TX/Rx descriptor chain structure | ||
492 | * Send the set-up frame | ||
493 | * Enable Tx/Rx machine | ||
494 | */ | ||
495 | |||
496 | static void uli526x_init(struct net_device *dev) | ||
497 | { | ||
498 | struct uli526x_board_info *db = netdev_priv(dev); | ||
499 | unsigned long ioaddr = db->ioaddr; | ||
500 | u8 phy_tmp; | ||
501 | u8 timeout; | ||
502 | u16 phy_value; | ||
503 | u16 phy_reg_reset; | ||
504 | |||
505 | |||
506 | ULI526X_DBUG(0, "uli526x_init()", 0); | ||
507 | |||
508 | /* Reset M526x MAC controller */ | ||
509 | outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ | ||
510 | udelay(100); | ||
511 | outl(db->cr0_data, ioaddr + DCR0); | ||
512 | udelay(5); | ||
513 | |||
514 | /* PHY addr: on some boards the M5261/M5263 PHY address is not 1 */ | ||
515 | db->phy_addr = 1; | ||
516 | for(phy_tmp=0;phy_tmp<32;phy_tmp++) | ||
517 | { | ||
518 | phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add | ||
519 | if(phy_value != 0xffff&&phy_value!=0) | ||
520 | { | ||
521 | db->phy_addr = phy_tmp; | ||
522 | break; | ||
523 | } | ||
524 | } | ||
525 | if(phy_tmp == 32) | ||
526 | pr_warn("Can not find the phy address!!!\n"); | ||
527 | /* Parse SROM and media mode */ | ||
528 | db->media_mode = uli526x_media_mode; | ||
529 | |||
530 | /* phyxcer capability setting */ | ||
531 | phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); | ||
532 | phy_reg_reset = (phy_reg_reset | 0x8000); | ||
533 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); | ||
534 | |||
535 | /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management | ||
536 | * functions") or phy data sheet for details on phy reset | ||
537 | */ | ||
538 | udelay(500); | ||
539 | timeout = 10; | ||
540 | while (timeout-- && | ||
541 | phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) | ||
542 | udelay(100); | ||
543 | |||
544 | /* Process Phyxcer Media Mode */ | ||
545 | uli526x_set_phyxcer(db); | ||
546 | |||
547 | /* Media Mode Process */ | ||
548 | if ( !(db->media_mode & ULI526X_AUTO) ) | ||
549 | db->op_mode = db->media_mode; /* Force Mode */ | ||
550 | |||
551 | /* Initialize Transmit/Receive descriptors and CR3/4 */ | ||
552 | uli526x_descriptor_init(db, ioaddr); | ||
553 | |||
554 | /* Init CR6 to program M526X operation */ | ||
555 | update_cr6(db->cr6_data, ioaddr); | ||
556 | |||
557 | /* Send setup frame */ | ||
558 | send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */ | ||
559 | |||
560 | /* Init CR7, interrupt active bit */ | ||
561 | db->cr7_data = CR7_DEFAULT; | ||
562 | outl(db->cr7_data, ioaddr + DCR7); | ||
563 | |||
564 | /* Init CR15, Tx jabber and Rx watchdog timer */ | ||
565 | outl(db->cr15_data, ioaddr + DCR15); | ||
566 | |||
567 | /* Enable ULI526X Tx/Rx function */ | ||
568 | db->cr6_data |= CR6_RXSC | CR6_TXSC; | ||
569 | update_cr6(db->cr6_data, ioaddr); | ||
570 | } | ||
571 | |||
572 | |||
573 | /* | ||
574 | * Hardware starts transmission. | ||
575 | * Send a packet from the upper layer to the medium. | ||
576 | */ | ||
577 | |||
578 | static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb, | ||
579 | struct net_device *dev) | ||
580 | { | ||
581 | struct uli526x_board_info *db = netdev_priv(dev); | ||
582 | struct tx_desc *txptr; | ||
583 | unsigned long flags; | ||
584 | |||
585 | ULI526X_DBUG(0, "uli526x_start_xmit", 0); | ||
586 | |||
587 | /* Resource flag check */ | ||
588 | netif_stop_queue(dev); | ||
589 | |||
590 | /* Too large packet check */ | ||
591 | if (skb->len > MAX_PACKET_SIZE) { | ||
592 | netdev_err(dev, "big packet = %d\n", (u16)skb->len); | ||
593 | dev_kfree_skb(skb); | ||
594 | return NETDEV_TX_OK; | ||
595 | } | ||
596 | |||
597 | spin_lock_irqsave(&db->lock, flags); | ||
598 | |||
599 | /* No Tx resource check; this should never happen normally */ | ||
600 | if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) { | ||
601 | spin_unlock_irqrestore(&db->lock, flags); | ||
602 | netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt); | ||
603 | return NETDEV_TX_BUSY; | ||
604 | } | ||
605 | |||
606 | /* Disable NIC interrupt */ | ||
607 | outl(0, dev->base_addr + DCR7); | ||
608 | |||
609 | /* transmit this packet */ | ||
610 | txptr = db->tx_insert_ptr; | ||
611 | skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len); | ||
612 | txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len); | ||
613 | |||
614 | /* Point to next transmit free descriptor */ | ||
615 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
616 | |||
617 | /* Transmit Packet Process */ | ||
618 | if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { | ||
619 | txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ | ||
620 | db->tx_packet_cnt++; /* Ready to send */ | ||
621 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
622 | dev->trans_start = jiffies; /* saved time stamp */ | ||
623 | } | ||
624 | |||
625 | /* Tx resource check */ | ||
626 | if ( db->tx_packet_cnt < TX_FREE_DESC_CNT ) | ||
627 | netif_wake_queue(dev); | ||
628 | |||
629 | /* Restore CR7 to enable interrupt */ | ||
630 | spin_unlock_irqrestore(&db->lock, flags); | ||
631 | outl(db->cr7_data, dev->base_addr + DCR7); | ||
632 | |||
633 | /* free this SKB */ | ||
634 | dev_kfree_skb(skb); | ||
635 | |||
636 | return NETDEV_TX_OK; | ||
637 | } | ||
638 | |||
639 | |||
640 | /* | ||
641 | * Stop the interface. | ||
642 | * The interface is stopped when it is brought down. | ||
643 | */ | ||
644 | |||
645 | static int uli526x_stop(struct net_device *dev) | ||
646 | { | ||
647 | struct uli526x_board_info *db = netdev_priv(dev); | ||
648 | unsigned long ioaddr = dev->base_addr; | ||
649 | |||
650 | ULI526X_DBUG(0, "uli526x_stop", 0); | ||
651 | |||
652 | /* disable system */ | ||
653 | netif_stop_queue(dev); | ||
654 | |||
655 | /* delete the timer */ | ||
656 | del_timer_sync(&db->timer); | ||
657 | |||
658 | /* Reset & stop ULI526X board */ | ||
659 | outl(ULI526X_RESET, ioaddr + DCR0); | ||
660 | udelay(5); | ||
661 | phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); | ||
662 | |||
663 | /* free interrupt */ | ||
664 | free_irq(dev->irq, dev); | ||
665 | |||
666 | /* free allocated rx buffer */ | ||
667 | uli526x_free_rxbuffer(db); | ||
668 | |||
669 | return 0; | ||
670 | } | ||
671 | |||
672 | |||
673 | /* | ||
674 | * M5261/M5263 interrupt handler | ||
675 | * Pass received packets to the upper layer, free transmitted packets | ||
676 | */ | ||
677 | |||
678 | static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | ||
679 | { | ||
680 | struct net_device *dev = dev_id; | ||
681 | struct uli526x_board_info *db = netdev_priv(dev); | ||
682 | unsigned long ioaddr = dev->base_addr; | ||
683 | unsigned long flags; | ||
684 | |||
685 | spin_lock_irqsave(&db->lock, flags); | ||
686 | outl(0, ioaddr + DCR7); | ||
687 | |||
688 | /* Got ULI526X status */ | ||
689 | db->cr5_data = inl(ioaddr + DCR5); | ||
690 | outl(db->cr5_data, ioaddr + DCR5); | ||
691 | if ( !(db->cr5_data & 0x180c1) ) { | ||
692 | /* Restore CR7 to enable interrupt mask */ | ||
693 | outl(db->cr7_data, ioaddr + DCR7); | ||
694 | spin_unlock_irqrestore(&db->lock, flags); | ||
695 | return IRQ_HANDLED; | ||
696 | } | ||
697 | |||
698 | /* Check system status */ | ||
699 | if (db->cr5_data & 0x2000) { | ||
700 | /* a system bus error happened */ | ||
701 | ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data); | ||
702 | db->reset_fatal++; | ||
703 | db->wait_reset = 1; /* Need to RESET */ | ||
704 | spin_unlock_irqrestore(&db->lock, flags); | ||
705 | return IRQ_HANDLED; | ||
706 | } | ||
707 | |||
708 | /* Receive the incoming packets */ | ||
709 | if ( (db->cr5_data & 0x40) && db->rx_avail_cnt ) | ||
710 | uli526x_rx_packet(dev, db); | ||
711 | |||
712 | /* reallocate rx descriptor buffer */ | ||
713 | if (db->rx_avail_cnt<RX_DESC_CNT) | ||
714 | allocate_rx_buffer(db); | ||
715 | |||
716 | /* Free the transmitted descriptor */ | ||
717 | if ( db->cr5_data & 0x01) | ||
718 | uli526x_free_tx_pkt(dev, db); | ||
719 | |||
720 | /* Restore CR7 to enable interrupt mask */ | ||
721 | outl(db->cr7_data, ioaddr + DCR7); | ||
722 | |||
723 | spin_unlock_irqrestore(&db->lock, flags); | ||
724 | return IRQ_HANDLED; | ||
725 | } | ||
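
The handler above decodes CR5 with raw masks. As a hedged aid to reading it, those masks can be named as below; the values come from the code itself, the names are assumptions rather than datasheet register names:

    /* Illustrative names for the CR5 bits tested in uli526x_interrupt(). */
    #define ULI_CR5_TX_DONE         0x00000001  /* a Tx descriptor completed            */
    #define ULI_CR5_RX_DONE         0x00000040  /* a frame arrived in an Rx descriptor  */
    #define ULI_CR5_SYS_ERROR       0x00002000  /* system/PCI bus error, forces a reset */
    #define ULI_CR5_HANDLED_MASK    0x000180c1  /* union of events the ISR reacts to    */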
726 | |||
727 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
728 | static void uli526x_poll(struct net_device *dev) | ||
729 | { | ||
730 | /* ISR grabs the irqsave lock, so this should be safe */ | ||
731 | uli526x_interrupt(dev->irq, dev); | ||
732 | } | ||
733 | #endif | ||
734 | |||
735 | /* | ||
736 | * Free TX resource after TX complete | ||
737 | */ | ||
738 | |||
739 | static void uli526x_free_tx_pkt(struct net_device *dev, | ||
740 | struct uli526x_board_info * db) | ||
741 | { | ||
742 | struct tx_desc *txptr; | ||
743 | u32 tdes0; | ||
744 | |||
745 | txptr = db->tx_remove_ptr; | ||
746 | while(db->tx_packet_cnt) { | ||
747 | tdes0 = le32_to_cpu(txptr->tdes0); | ||
748 | if (tdes0 & 0x80000000) | ||
749 | break; | ||
750 | |||
751 | /* A packet transmission completed */ | ||
752 | db->tx_packet_cnt--; | ||
753 | dev->stats.tx_packets++; | ||
754 | |||
755 | /* Transmit statistic counter */ | ||
756 | if ( tdes0 != 0x7fffffff ) { | ||
757 | dev->stats.collisions += (tdes0 >> 3) & 0xf; | ||
758 | dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff; | ||
759 | if (tdes0 & TDES0_ERR_MASK) { | ||
760 | dev->stats.tx_errors++; | ||
761 | if (tdes0 & 0x0002) { /* UnderRun */ | ||
762 | db->tx_fifo_underrun++; | ||
763 | if ( !(db->cr6_data & CR6_SFT) ) { | ||
764 | db->cr6_data = db->cr6_data | CR6_SFT; | ||
765 | update_cr6(db->cr6_data, db->ioaddr); | ||
766 | } | ||
767 | } | ||
768 | if (tdes0 & 0x0100) | ||
769 | db->tx_excessive_collision++; | ||
770 | if (tdes0 & 0x0200) | ||
771 | db->tx_late_collision++; | ||
772 | if (tdes0 & 0x0400) | ||
773 | db->tx_no_carrier++; | ||
774 | if (tdes0 & 0x0800) | ||
775 | db->tx_loss_carrier++; | ||
776 | if (tdes0 & 0x4000) | ||
777 | db->tx_jabber_timeout++; | ||
778 | } | ||
779 | } | ||
780 | |||
781 | txptr = txptr->next_tx_desc; | ||
782 | }/* End of while */ | ||
783 | |||
784 | /* Update TX remove pointer to next */ | ||
785 | db->tx_remove_ptr = txptr; | ||
786 | |||
787 | /* Resource available check */ | ||
788 | if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT ) | ||
789 | netif_wake_queue(dev); /* Active upper layer, send again */ | ||
790 | } | ||
791 | |||
792 | |||
793 | /* | ||
794 | * Receive the incoming packet and pass it to the upper layer | ||
795 | */ | ||
796 | |||
797 | static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db) | ||
798 | { | ||
799 | struct rx_desc *rxptr; | ||
800 | struct sk_buff *skb; | ||
801 | int rxlen; | ||
802 | u32 rdes0; | ||
803 | |||
804 | rxptr = db->rx_ready_ptr; | ||
805 | |||
806 | while(db->rx_avail_cnt) { | ||
807 | rdes0 = le32_to_cpu(rxptr->rdes0); | ||
808 | if (rdes0 & 0x80000000) /* packet owner check */ | ||
809 | { | ||
810 | break; | ||
811 | } | ||
812 | |||
813 | db->rx_avail_cnt--; | ||
814 | db->interval_rx_cnt++; | ||
815 | |||
816 | pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE); | ||
817 | if ( (rdes0 & 0x300) != 0x300) { | ||
818 | /* A packet without First/Last flag */ | ||
819 | /* reuse this SKB */ | ||
820 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
821 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
822 | } else { | ||
823 | /* A packet with First/Last flag */ | ||
824 | rxlen = ( (rdes0 >> 16) & 0x3fff) - 4; | ||
825 | |||
826 | /* error summary bit check */ | ||
827 | if (rdes0 & 0x8000) { | ||
828 | /* This is an error packet */ | ||
829 | dev->stats.rx_errors++; | ||
830 | if (rdes0 & 1) | ||
831 | dev->stats.rx_fifo_errors++; | ||
832 | if (rdes0 & 2) | ||
833 | dev->stats.rx_crc_errors++; | ||
834 | if (rdes0 & 0x80) | ||
835 | dev->stats.rx_length_errors++; | ||
836 | } | ||
837 | |||
838 | if ( !(rdes0 & 0x8000) || | ||
839 | ((db->cr6_data & CR6_PM) && (rxlen>6)) ) { | ||
840 | struct sk_buff *new_skb = NULL; | ||
841 | |||
842 | skb = rxptr->rx_skb_ptr; | ||
843 | |||
844 | /* Good packet, send to upper layer */ | ||
845 | /* Short packets are copied into a new SKB */ | ||
846 | if ((rxlen < RX_COPY_SIZE) && | ||
847 | (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) { | ||
848 | skb = new_skb; | ||
849 | /* size less than COPY_SIZE, allocate a rxlen SKB */ | ||
850 | skb_reserve(skb, 2); /* 16byte align */ | ||
851 | memcpy(skb_put(skb, rxlen), | ||
852 | skb_tail_pointer(rxptr->rx_skb_ptr), | ||
853 | rxlen); | ||
854 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
855 | } else | ||
856 | skb_put(skb, rxlen); | ||
857 | |||
858 | skb->protocol = eth_type_trans(skb, dev); | ||
859 | netif_rx(skb); | ||
860 | dev->stats.rx_packets++; | ||
861 | dev->stats.rx_bytes += rxlen; | ||
862 | |||
863 | } else { | ||
864 | /* Reuse the SKB when the packet has errors */ | ||
865 | ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0); | ||
866 | uli526x_reuse_skb(db, rxptr->rx_skb_ptr); | ||
867 | } | ||
868 | } | ||
869 | |||
870 | rxptr = rxptr->next_rx_desc; | ||
871 | } | ||
872 | |||
873 | db->rx_ready_ptr = rxptr; | ||
874 | } | ||
875 | |||
876 | |||
877 | /* | ||
878 | * Set ULI526X multicast address | ||
879 | */ | ||
880 | |||
881 | static void uli526x_set_filter_mode(struct net_device * dev) | ||
882 | { | ||
883 | struct uli526x_board_info *db = netdev_priv(dev); | ||
884 | unsigned long flags; | ||
885 | |||
886 | ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0); | ||
887 | spin_lock_irqsave(&db->lock, flags); | ||
888 | |||
889 | if (dev->flags & IFF_PROMISC) { | ||
890 | ULI526X_DBUG(0, "Enable PROM Mode", 0); | ||
891 | db->cr6_data |= CR6_PM | CR6_PBF; | ||
892 | update_cr6(db->cr6_data, db->ioaddr); | ||
893 | spin_unlock_irqrestore(&db->lock, flags); | ||
894 | return; | ||
895 | } | ||
896 | |||
897 | if (dev->flags & IFF_ALLMULTI || | ||
898 | netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) { | ||
899 | ULI526X_DBUG(0, "Pass all multicast address", | ||
900 | netdev_mc_count(dev)); | ||
901 | db->cr6_data &= ~(CR6_PM | CR6_PBF); | ||
902 | db->cr6_data |= CR6_PAM; | ||
903 | spin_unlock_irqrestore(&db->lock, flags); | ||
904 | return; | ||
905 | } | ||
906 | |||
907 | ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev)); | ||
908 | send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */ | ||
909 | spin_unlock_irqrestore(&db->lock, flags); | ||
910 | } | ||
911 | |||
912 | static void | ||
913 | ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd) | ||
914 | { | ||
915 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
916 | SUPPORTED_10baseT_Full | | ||
917 | SUPPORTED_100baseT_Half | | ||
918 | SUPPORTED_100baseT_Full | | ||
919 | SUPPORTED_Autoneg | | ||
920 | SUPPORTED_MII); | ||
921 | |||
922 | ecmd->advertising = (ADVERTISED_10baseT_Half | | ||
923 | ADVERTISED_10baseT_Full | | ||
924 | ADVERTISED_100baseT_Half | | ||
925 | ADVERTISED_100baseT_Full | | ||
926 | ADVERTISED_Autoneg | | ||
927 | ADVERTISED_MII); | ||
928 | |||
929 | |||
930 | ecmd->port = PORT_MII; | ||
931 | ecmd->phy_address = db->phy_addr; | ||
932 | |||
933 | ecmd->transceiver = XCVR_EXTERNAL; | ||
934 | |||
935 | ethtool_cmd_speed_set(ecmd, SPEED_10); | ||
936 | ecmd->duplex = DUPLEX_HALF; | ||
937 | |||
938 | if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD) | ||
939 | { | ||
940 | ethtool_cmd_speed_set(ecmd, SPEED_100); | ||
941 | } | ||
942 | if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD) | ||
943 | { | ||
944 | ecmd->duplex = DUPLEX_FULL; | ||
945 | } | ||
946 | if(db->link_failed) | ||
947 | { | ||
948 | ethtool_cmd_speed_set(ecmd, -1); | ||
949 | ecmd->duplex = -1; | ||
950 | } | ||
951 | |||
952 | if (db->media_mode & ULI526X_AUTO) | ||
953 | { | ||
954 | ecmd->autoneg = AUTONEG_ENABLE; | ||
955 | } | ||
956 | } | ||
957 | |||
958 | static void netdev_get_drvinfo(struct net_device *dev, | ||
959 | struct ethtool_drvinfo *info) | ||
960 | { | ||
961 | struct uli526x_board_info *np = netdev_priv(dev); | ||
962 | |||
963 | strcpy(info->driver, DRV_NAME); | ||
964 | strcpy(info->version, DRV_VERSION); | ||
965 | if (np->pdev) | ||
966 | strcpy(info->bus_info, pci_name(np->pdev)); | ||
967 | else | ||
968 | sprintf(info->bus_info, "EISA 0x%lx %d", | ||
969 | dev->base_addr, dev->irq); | ||
970 | } | ||
971 | |||
972 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { | ||
973 | struct uli526x_board_info *np = netdev_priv(dev); | ||
974 | |||
975 | ULi_ethtool_gset(np, cmd); | ||
976 | |||
977 | return 0; | ||
978 | } | ||
979 | |||
980 | static u32 netdev_get_link(struct net_device *dev) { | ||
981 | struct uli526x_board_info *np = netdev_priv(dev); | ||
982 | |||
983 | if(np->link_failed) | ||
984 | return 0; | ||
985 | else | ||
986 | return 1; | ||
987 | } | ||
988 | |||
989 | static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
990 | { | ||
991 | wol->supported = WAKE_PHY | WAKE_MAGIC; | ||
992 | wol->wolopts = 0; | ||
993 | } | ||
994 | |||
995 | static const struct ethtool_ops netdev_ethtool_ops = { | ||
996 | .get_drvinfo = netdev_get_drvinfo, | ||
997 | .get_settings = netdev_get_settings, | ||
998 | .get_link = netdev_get_link, | ||
999 | .get_wol = uli526x_get_wol, | ||
1000 | }; | ||
1001 | |||
1002 | /* | ||
1003 | * A periodic timer routine | ||
1004 | * Dynamic media sense, allocate Rx buffer... | ||
1005 | */ | ||
1006 | |||
1007 | static void uli526x_timer(unsigned long data) | ||
1008 | { | ||
1009 | u32 tmp_cr8; | ||
1010 | unsigned char tmp_cr12=0; | ||
1011 | struct net_device *dev = (struct net_device *) data; | ||
1012 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1013 | unsigned long flags; | ||
1014 | |||
1015 | //ULI526X_DBUG(0, "uli526x_timer()", 0); | ||
1016 | spin_lock_irqsave(&db->lock, flags); | ||
1017 | |||
1018 | |||
1019 | /* Dynamic reset ULI526X : system error or transmit time-out */ | ||
1020 | tmp_cr8 = inl(db->ioaddr + DCR8); | ||
1021 | if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { | ||
1022 | db->reset_cr8++; | ||
1023 | db->wait_reset = 1; | ||
1024 | } | ||
1025 | db->interval_rx_cnt = 0; | ||
1026 | |||
1027 | /* TX polling kick monitor */ | ||
1028 | if ( db->tx_packet_cnt && | ||
1029 | time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { | ||
1030 | outl(0x1, dev->base_addr + DCR1); // Tx polling again | ||
1031 | |||
1032 | // TX Timeout | ||
1033 | if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { | ||
1034 | db->reset_TXtimeout++; | ||
1035 | db->wait_reset = 1; | ||
1036 | netdev_err(dev, " Tx timeout - resetting\n"); | ||
1037 | } | ||
1038 | } | ||
1039 | |||
1040 | if (db->wait_reset) { | ||
1041 | ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt); | ||
1042 | db->reset_count++; | ||
1043 | uli526x_dynamic_reset(dev); | ||
1044 | db->timer.expires = ULI526X_TIMER_WUT; | ||
1045 | add_timer(&db->timer); | ||
1046 | spin_unlock_irqrestore(&db->lock, flags); | ||
1047 | return; | ||
1048 | } | ||
1049 | |||
1050 | /* Link status check, Dynamic media type change */ | ||
1051 | if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) | ||
1052 | tmp_cr12 = 3; | ||
1053 | |||
1054 | if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { | ||
1055 | /* Link Failed */ | ||
1056 | ULI526X_DBUG(0, "Link Failed", tmp_cr12); | ||
1057 | netif_carrier_off(dev); | ||
1058 | netdev_info(dev, "NIC Link is Down\n"); | ||
1059 | db->link_failed = 1; | ||
1060 | |||
1061 | /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ | ||
1062 | /* Not needed in AUTO mode */ | ||
1063 | if ( !(db->media_mode & 0x8) ) | ||
1064 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); | ||
1065 | |||
1066 | /* AUTO mode, if INT phyxcer link failed, select EXT device */ | ||
1067 | if (db->media_mode & ULI526X_AUTO) { | ||
1068 | db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ | ||
1069 | update_cr6(db->cr6_data, db->ioaddr); | ||
1070 | } | ||
1071 | } else | ||
1072 | if ((tmp_cr12 & 0x3) && db->link_failed) { | ||
1073 | ULI526X_DBUG(0, "Link OK", tmp_cr12); | ||
1074 | db->link_failed = 0; | ||
1075 | |||
1076 | /* Auto Sense Speed */ | ||
1077 | if ( (db->media_mode & ULI526X_AUTO) && | ||
1078 | uli526x_sense_speed(db) ) | ||
1079 | db->link_failed = 1; | ||
1080 | uli526x_process_mode(db); | ||
1081 | |||
1082 | if(db->link_failed==0) | ||
1083 | { | ||
1084 | netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n", | ||
1085 | (db->op_mode == ULI526X_100MHF || | ||
1086 | db->op_mode == ULI526X_100MFD) | ||
1087 | ? 100 : 10, | ||
1088 | (db->op_mode == ULI526X_10MFD || | ||
1089 | db->op_mode == ULI526X_100MFD) | ||
1090 | ? "Full" : "Half"); | ||
1091 | netif_carrier_on(dev); | ||
1092 | } | ||
1093 | /* SHOW_MEDIA_TYPE(db->op_mode); */ | ||
1094 | } | ||
1095 | else if(!(tmp_cr12 & 0x3) && db->link_failed) | ||
1096 | { | ||
1097 | if(db->init==1) | ||
1098 | { | ||
1099 | netdev_info(dev, "NIC Link is Down\n"); | ||
1100 | netif_carrier_off(dev); | ||
1101 | } | ||
1102 | } | ||
1103 | db->init=0; | ||
1104 | |||
1105 | /* Timer active again */ | ||
1106 | db->timer.expires = ULI526X_TIMER_WUT; | ||
1107 | add_timer(&db->timer); | ||
1108 | spin_unlock_irqrestore(&db->lock, flags); | ||
1109 | } | ||
1110 | |||
1111 | |||
1112 | /* | ||
1113 | * Stop ULI526X board | ||
1114 | * Free Tx/Rx allocated memory | ||
1115 | * Init system variable | ||
1116 | */ | ||
1117 | |||
1118 | static void uli526x_reset_prepare(struct net_device *dev) | ||
1119 | { | ||
1120 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1121 | |||
1122 | /* Stop MAC controller */ | ||
1123 | db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ | ||
1124 | update_cr6(db->cr6_data, dev->base_addr); | ||
1125 | outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ | ||
1126 | outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); | ||
1127 | |||
1128 | /* Disable upper layer interface */ | ||
1129 | netif_stop_queue(dev); | ||
1130 | |||
1131 | /* Free allocated Rx buffers */ | ||
1132 | uli526x_free_rxbuffer(db); | ||
1133 | |||
1134 | /* system variable init */ | ||
1135 | db->tx_packet_cnt = 0; | ||
1136 | db->rx_avail_cnt = 0; | ||
1137 | db->link_failed = 1; | ||
1138 | db->init=1; | ||
1139 | db->wait_reset = 0; | ||
1140 | } | ||
1141 | |||
1142 | |||
1143 | /* | ||
1144 | * Dynamic reset the ULI526X board | ||
1145 | * Stop ULI526X board | ||
1146 | * Free Tx/Rx allocated memory | ||
1147 | * Reset ULI526X board | ||
1148 | * Re-initialize ULI526X board | ||
1149 | */ | ||
1150 | |||
1151 | static void uli526x_dynamic_reset(struct net_device *dev) | ||
1152 | { | ||
1153 | ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0); | ||
1154 | |||
1155 | uli526x_reset_prepare(dev); | ||
1156 | |||
1157 | /* Re-initialize ULI526X board */ | ||
1158 | uli526x_init(dev); | ||
1159 | |||
1160 | /* Restart upper layer interface */ | ||
1161 | netif_wake_queue(dev); | ||
1162 | } | ||
1163 | |||
1164 | |||
1165 | #ifdef CONFIG_PM | ||
1166 | |||
1167 | /* | ||
1168 | * Suspend the interface. | ||
1169 | */ | ||
1170 | |||
1171 | static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1172 | { | ||
1173 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1174 | pci_power_t power_state; | ||
1175 | int err; | ||
1176 | |||
1177 | ULI526X_DBUG(0, "uli526x_suspend", 0); | ||
1178 | |||
1179 | if (!netdev_priv(dev)) | ||
1180 | return 0; | ||
1181 | |||
1182 | pci_save_state(pdev); | ||
1183 | |||
1184 | if (!netif_running(dev)) | ||
1185 | return 0; | ||
1186 | |||
1187 | netif_device_detach(dev); | ||
1188 | uli526x_reset_prepare(dev); | ||
1189 | |||
1190 | power_state = pci_choose_state(pdev, state); | ||
1191 | pci_enable_wake(pdev, power_state, 0); | ||
1192 | err = pci_set_power_state(pdev, power_state); | ||
1193 | if (err) { | ||
1194 | netif_device_attach(dev); | ||
1195 | /* Re-initialize ULI526X board */ | ||
1196 | uli526x_init(dev); | ||
1197 | /* Restart upper layer interface */ | ||
1198 | netif_wake_queue(dev); | ||
1199 | } | ||
1200 | |||
1201 | return err; | ||
1202 | } | ||
1203 | |||
1204 | /* | ||
1205 | * Resume the interface. | ||
1206 | */ | ||
1207 | |||
1208 | static int uli526x_resume(struct pci_dev *pdev) | ||
1209 | { | ||
1210 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1211 | int err; | ||
1212 | |||
1213 | ULI526X_DBUG(0, "uli526x_resume", 0); | ||
1214 | |||
1215 | if (!netdev_priv(dev)) | ||
1216 | return 0; | ||
1217 | |||
1218 | pci_restore_state(pdev); | ||
1219 | |||
1220 | if (!netif_running(dev)) | ||
1221 | return 0; | ||
1222 | |||
1223 | err = pci_set_power_state(pdev, PCI_D0); | ||
1224 | if (err) { | ||
1225 | netdev_warn(dev, "Could not put device into D0\n"); | ||
1226 | return err; | ||
1227 | } | ||
1228 | |||
1229 | netif_device_attach(dev); | ||
1230 | /* Re-initialize ULI526X board */ | ||
1231 | uli526x_init(dev); | ||
1232 | /* Restart upper layer interface */ | ||
1233 | netif_wake_queue(dev); | ||
1234 | |||
1235 | return 0; | ||
1236 | } | ||
1237 | |||
1238 | #else /* !CONFIG_PM */ | ||
1239 | |||
1240 | #define uli526x_suspend NULL | ||
1241 | #define uli526x_resume NULL | ||
1242 | |||
1243 | #endif /* !CONFIG_PM */ | ||
1244 | |||
1245 | |||
1246 | /* | ||
1247 | * Free all allocated Rx buffers | ||
1248 | */ | ||
1249 | |||
1250 | static void uli526x_free_rxbuffer(struct uli526x_board_info * db) | ||
1251 | { | ||
1252 | ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0); | ||
1253 | |||
1254 | /* free allocated rx buffer */ | ||
1255 | while (db->rx_avail_cnt) { | ||
1256 | dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr); | ||
1257 | db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc; | ||
1258 | db->rx_avail_cnt--; | ||
1259 | } | ||
1260 | } | ||
1261 | |||
1262 | |||
1263 | /* | ||
1264 | * Reuse the SK buffer | ||
1265 | */ | ||
1266 | |||
1267 | static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb) | ||
1268 | { | ||
1269 | struct rx_desc *rxptr = db->rx_insert_ptr; | ||
1270 | |||
1271 | if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { | ||
1272 | rxptr->rx_skb_ptr = skb; | ||
1273 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, | ||
1274 | skb_tail_pointer(skb), | ||
1275 | RX_ALLOC_SIZE, | ||
1276 | PCI_DMA_FROMDEVICE)); | ||
1277 | wmb(); | ||
1278 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1279 | db->rx_avail_cnt++; | ||
1280 | db->rx_insert_ptr = rxptr->next_rx_desc; | ||
1281 | } else | ||
1282 | ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt); | ||
1283 | } | ||
1284 | |||
1285 | |||
1286 | /* | ||
1287 | * Initialize Transmit/Receive descriptors | ||
1288 | * using a chain structure, and allocate Tx/Rx buffers | ||
1289 | */ | ||
1290 | |||
1291 | static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr) | ||
1292 | { | ||
1293 | struct tx_desc *tmp_tx; | ||
1294 | struct rx_desc *tmp_rx; | ||
1295 | unsigned char *tmp_buf; | ||
1296 | dma_addr_t tmp_tx_dma, tmp_rx_dma; | ||
1297 | dma_addr_t tmp_buf_dma; | ||
1298 | int i; | ||
1299 | |||
1300 | ULI526X_DBUG(0, "uli526x_descriptor_init()", 0); | ||
1301 | |||
1302 | /* tx descriptor start pointer */ | ||
1303 | db->tx_insert_ptr = db->first_tx_desc; | ||
1304 | db->tx_remove_ptr = db->first_tx_desc; | ||
1305 | outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ | ||
1306 | |||
1307 | /* rx descriptor start pointer */ | ||
1308 | db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1309 | db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; | ||
1310 | db->rx_insert_ptr = db->first_rx_desc; | ||
1311 | db->rx_ready_ptr = db->first_rx_desc; | ||
1312 | outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ | ||
1313 | |||
1314 | /* Init Transmit chain */ | ||
1315 | tmp_buf = db->buf_pool_start; | ||
1316 | tmp_buf_dma = db->buf_pool_dma_start; | ||
1317 | tmp_tx_dma = db->first_tx_desc_dma; | ||
1318 | for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) { | ||
1319 | tmp_tx->tx_buf_ptr = tmp_buf; | ||
1320 | tmp_tx->tdes0 = cpu_to_le32(0); | ||
1321 | tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */ | ||
1322 | tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma); | ||
1323 | tmp_tx_dma += sizeof(struct tx_desc); | ||
1324 | tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma); | ||
1325 | tmp_tx->next_tx_desc = tmp_tx + 1; | ||
1326 | tmp_buf = tmp_buf + TX_BUF_ALLOC; | ||
1327 | tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC; | ||
1328 | } | ||
1329 | (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma); | ||
1330 | tmp_tx->next_tx_desc = db->first_tx_desc; | ||
1331 | |||
1332 | /* Init Receive descriptor chain */ | ||
1333 | tmp_rx_dma=db->first_rx_desc_dma; | ||
1334 | for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) { | ||
1335 | tmp_rx->rdes0 = cpu_to_le32(0); | ||
1336 | tmp_rx->rdes1 = cpu_to_le32(0x01000600); | ||
1337 | tmp_rx_dma += sizeof(struct rx_desc); | ||
1338 | tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma); | ||
1339 | tmp_rx->next_rx_desc = tmp_rx + 1; | ||
1340 | } | ||
1341 | (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma); | ||
1342 | tmp_rx->next_rx_desc = db->first_rx_desc; | ||
1343 | |||
1344 | /* pre-allocate Rx buffer */ | ||
1345 | allocate_rx_buffer(db); | ||
1346 | } | ||
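
Both rings live in a single DMA allocation: TX_DESC_CNT transmit descriptors followed by RX_DESC_CNT receive descriptors, each chained to the next through tdes3/rdes3 and wrapped back to the first. A small, hypothetical self-check (not part of the driver) that the software chains built above really do wrap:

    /* Hypothetical sanity check: walk both software chains once around. */
    static void uli526x_check_rings(struct uli526x_board_info *db)
    {
            struct tx_desc *tx = db->first_tx_desc;
            struct rx_desc *rx = db->first_rx_desc;
            int i;

            for (i = 0; i < TX_DESC_CNT; i++)
                    tx = tx->next_tx_desc;
            BUG_ON(tx != db->first_tx_desc);        /* Tx chain must wrap */

            for (i = 0; i < RX_DESC_CNT; i++)
                    rx = rx->next_rx_desc;
            BUG_ON(rx != db->first_rx_desc);        /* Rx chain must wrap */
    }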
1347 | |||
1348 | |||
1349 | /* | ||
1350 | * Update CR6 value | ||
1351 | * First stop the ULI526X, then write the new value and restart | ||
1352 | */ | ||
1353 | |||
1354 | static void update_cr6(u32 cr6_data, unsigned long ioaddr) | ||
1355 | { | ||
1356 | |||
1357 | outl(cr6_data, ioaddr + DCR6); | ||
1358 | udelay(5); | ||
1359 | } | ||
1360 | |||
1361 | |||
1362 | /* | ||
1363 | * Send a setup frame for M5261/M5263 | ||
1364 | * This setup frame initializes the ULI526X address filter mode | ||
1365 | */ | ||
1366 | |||
1367 | #ifdef __BIG_ENDIAN | ||
1368 | #define FLT_SHIFT 16 | ||
1369 | #else | ||
1370 | #define FLT_SHIFT 0 | ||
1371 | #endif | ||
1372 | |||
1373 | static void send_filter_frame(struct net_device *dev, int mc_cnt) | ||
1374 | { | ||
1375 | struct uli526x_board_info *db = netdev_priv(dev); | ||
1376 | struct netdev_hw_addr *ha; | ||
1377 | struct tx_desc *txptr; | ||
1378 | u16 * addrptr; | ||
1379 | u32 * suptr; | ||
1380 | int i; | ||
1381 | |||
1382 | ULI526X_DBUG(0, "send_filter_frame()", 0); | ||
1383 | |||
1384 | txptr = db->tx_insert_ptr; | ||
1385 | suptr = (u32 *) txptr->tx_buf_ptr; | ||
1386 | |||
1387 | /* Node address */ | ||
1388 | addrptr = (u16 *) dev->dev_addr; | ||
1389 | *suptr++ = addrptr[0] << FLT_SHIFT; | ||
1390 | *suptr++ = addrptr[1] << FLT_SHIFT; | ||
1391 | *suptr++ = addrptr[2] << FLT_SHIFT; | ||
1392 | |||
1393 | /* broadcast address */ | ||
1394 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1395 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1396 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1397 | |||
1398 | /* fill in the multicast addresses */ | ||
1399 | netdev_for_each_mc_addr(ha, dev) { | ||
1400 | addrptr = (u16 *) ha->addr; | ||
1401 | *suptr++ = addrptr[0] << FLT_SHIFT; | ||
1402 | *suptr++ = addrptr[1] << FLT_SHIFT; | ||
1403 | *suptr++ = addrptr[2] << FLT_SHIFT; | ||
1404 | } | ||
1405 | |||
1406 | for (i = netdev_mc_count(dev); i < 14; i++) { | ||
1407 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1408 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1409 | *suptr++ = 0xffff << FLT_SHIFT; | ||
1410 | } | ||
1411 | |||
1412 | /* prepare the setup frame */ | ||
1413 | db->tx_insert_ptr = txptr->next_tx_desc; | ||
1414 | txptr->tdes1 = cpu_to_le32(0x890000c0); | ||
1415 | |||
1416 | /* Resource Check and Send the setup packet */ | ||
1417 | if (db->tx_packet_cnt < TX_DESC_CNT) { | ||
1418 | /* Resource Empty */ | ||
1419 | db->tx_packet_cnt++; | ||
1420 | txptr->tdes0 = cpu_to_le32(0x80000000); | ||
1421 | update_cr6(db->cr6_data | 0x2000, dev->base_addr); | ||
1422 | outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ | ||
1423 | update_cr6(db->cr6_data, dev->base_addr); | ||
1424 | dev->trans_start = jiffies; | ||
1425 | } else | ||
1426 | netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); | ||
1427 | } | ||
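
The buffer written above is the tulip-style perfect-filter setup frame: 16 address slots of three 32-bit words, each word carrying one 16-bit half of a MAC address (shifted by FLT_SHIFT on big-endian machines). Slot 0 holds the station address, slot 1 the broadcast address, the next slots the multicast list, and the remainder is padded with broadcast. A hypothetical helper equivalent to the repeated pattern above:

    /* Hypothetical helper: write one MAC address into one filter slot,
     * mirroring the word layout used by send_filter_frame(). */
    static u32 *fill_filter_slot(u32 *slot, const u8 *addr)
    {
            const u16 *a = (const u16 *)addr;

            *slot++ = a[0] << FLT_SHIFT;
            *slot++ = a[1] << FLT_SHIFT;
            *slot++ = a[2] << FLT_SHIFT;
            return slot;
    }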
1428 | |||
1429 | |||
1430 | /* | ||
1431 | * Allocate Rx buffers, | ||
1432 | * filling as many Rx descriptors as possible | ||
1433 | */ | ||
1434 | |||
1435 | static void allocate_rx_buffer(struct uli526x_board_info *db) | ||
1436 | { | ||
1437 | struct rx_desc *rxptr; | ||
1438 | struct sk_buff *skb; | ||
1439 | |||
1440 | rxptr = db->rx_insert_ptr; | ||
1441 | |||
1442 | while(db->rx_avail_cnt < RX_DESC_CNT) { | ||
1443 | if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) | ||
1444 | break; | ||
1445 | rxptr->rx_skb_ptr = skb; /* FIXME (?) */ | ||
1446 | rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, | ||
1447 | skb_tail_pointer(skb), | ||
1448 | RX_ALLOC_SIZE, | ||
1449 | PCI_DMA_FROMDEVICE)); | ||
1450 | wmb(); | ||
1451 | rxptr->rdes0 = cpu_to_le32(0x80000000); | ||
1452 | rxptr = rxptr->next_rx_desc; | ||
1453 | db->rx_avail_cnt++; | ||
1454 | } | ||
1455 | |||
1456 | db->rx_insert_ptr = rxptr; | ||
1457 | } | ||
1458 | |||
1459 | |||
1460 | /* | ||
1461 | * Read one word of data from the serial ROM | ||
1462 | */ | ||
1463 | |||
1464 | static u16 read_srom_word(long ioaddr, int offset) | ||
1465 | { | ||
1466 | int i; | ||
1467 | u16 srom_data = 0; | ||
1468 | long cr9_ioaddr = ioaddr + DCR9; | ||
1469 | |||
1470 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1471 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1472 | |||
1473 | /* Send the Read Command 110b */ | ||
1474 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1475 | SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); | ||
1476 | SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); | ||
1477 | |||
1478 | /* Send the offset */ | ||
1479 | for (i = 5; i >= 0; i--) { | ||
1480 | srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; | ||
1481 | SROM_CLK_WRITE(srom_data, cr9_ioaddr); | ||
1482 | } | ||
1483 | |||
1484 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1485 | |||
1486 | for (i = 16; i > 0; i--) { | ||
1487 | outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); | ||
1488 | udelay(5); | ||
1489 | srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); | ||
1490 | outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); | ||
1491 | udelay(5); | ||
1492 | } | ||
1493 | |||
1494 | outl(CR9_SROM_READ, cr9_ioaddr); | ||
1495 | return srom_data; | ||
1496 | } | ||
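
read_srom_word() bit-bangs a 93C46-style serial EEPROM through CR9: assert chip select, clock out the read command (110) and a 6-bit word offset, then clock in 16 data bits. The probe routine above pulls the station address from bytes 20..25, i.e. words 10..12; a minimal usage sketch with a hypothetical wrapper name:

    /* Hypothetical wrapper: rebuild the MAC address from SROM words 10..12,
     * matching the byte offsets (20..25) used in uli526x_init_one(). */
    static void uli526x_mac_from_srom(long ioaddr, u8 *mac)
    {
            __le16 buf[3];
            int i;

            for (i = 0; i < 3; i++)
                    buf[i] = cpu_to_le16(read_srom_word(ioaddr, 10 + i));
            memcpy(mac, buf, ETH_ALEN);
    }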
1497 | |||
1498 | |||
1499 | /* | ||
1500 | * Auto sense the media mode | ||
1501 | */ | ||
1502 | |||
1503 | static u8 uli526x_sense_speed(struct uli526x_board_info * db) | ||
1504 | { | ||
1505 | u8 ErrFlag = 0; | ||
1506 | u16 phy_mode; | ||
1507 | |||
1508 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1509 | phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); | ||
1510 | |||
1511 | if ( (phy_mode & 0x24) == 0x24 ) { | ||
1512 | |||
1513 | phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); | ||
1514 | if(phy_mode&0x8000) | ||
1515 | phy_mode = 0x8000; | ||
1516 | else if(phy_mode&0x4000) | ||
1517 | phy_mode = 0x4000; | ||
1518 | else if(phy_mode&0x2000) | ||
1519 | phy_mode = 0x2000; | ||
1520 | else | ||
1521 | phy_mode = 0x1000; | ||
1522 | |||
1523 | switch (phy_mode) { | ||
1524 | case 0x1000: db->op_mode = ULI526X_10MHF; break; | ||
1525 | case 0x2000: db->op_mode = ULI526X_10MFD; break; | ||
1526 | case 0x4000: db->op_mode = ULI526X_100MHF; break; | ||
1527 | case 0x8000: db->op_mode = ULI526X_100MFD; break; | ||
1528 | default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break; | ||
1529 | } | ||
1530 | } else { | ||
1531 | db->op_mode = ULI526X_10MHF; | ||
1532 | ULI526X_DBUG(0, "Link Failed :", phy_mode); | ||
1533 | ErrFlag = 1; | ||
1534 | } | ||
1535 | |||
1536 | return ErrFlag; | ||
1537 | } | ||
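
The (register 5 & 0x01e0) << 7 shift above lines up the link-partner ability bits (MII register 5, LPA) so that the highest set bit selects the best common mode. An equivalent, more explicit decoding as a hypothetical helper:

    /* Hypothetical equivalent of the shift-and-switch above: pick the best
     * mode advertised by the link partner in MII register 5 (LPA). */
    static u8 lpa_to_op_mode(u16 lpa)
    {
            if (lpa & 0x0100)
                    return ULI526X_100MFD;  /* 100BASE-TX full duplex */
            if (lpa & 0x0080)
                    return ULI526X_100MHF;  /* 100BASE-TX half duplex */
            if (lpa & 0x0040)
                    return ULI526X_10MFD;   /* 10BASE-T full duplex   */
            return ULI526X_10MHF;           /* 10BASE-T half duplex   */
    }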
1538 | |||
1539 | |||
1540 | /* | ||
1541 | * Set 10/100 phyxcer capability | ||
1542 | * AUTO mode : phyxcer register 4 holds the NIC capability | ||
1543 | * Force mode: phyxcer register 4 holds the forced media type | ||
1544 | */ | ||
1545 | |||
1546 | static void uli526x_set_phyxcer(struct uli526x_board_info *db) | ||
1547 | { | ||
1548 | u16 phy_reg; | ||
1549 | |||
1550 | /* Phyxcer capability setting */ | ||
1551 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; | ||
1552 | |||
1553 | if (db->media_mode & ULI526X_AUTO) { | ||
1554 | /* AUTO Mode */ | ||
1555 | phy_reg |= db->PHY_reg4; | ||
1556 | } else { | ||
1557 | /* Force Mode */ | ||
1558 | switch(db->media_mode) { | ||
1559 | case ULI526X_10MHF: phy_reg |= 0x20; break; | ||
1560 | case ULI526X_10MFD: phy_reg |= 0x40; break; | ||
1561 | case ULI526X_100MHF: phy_reg |= 0x80; break; | ||
1562 | case ULI526X_100MFD: phy_reg |= 0x100; break; | ||
1563 | } | ||
1564 | |||
1565 | } | ||
1566 | |||
1567 | /* Write new capability to Phyxcer Reg4 */ | ||
1568 | if ( !(phy_reg & 0x01e0)) { | ||
1569 | phy_reg|=db->PHY_reg4; | ||
1570 | db->media_mode|=ULI526X_AUTO; | ||
1571 | } | ||
1572 | phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); | ||
1573 | |||
1574 | /* Restart Auto-Negotiation */ | ||
1575 | phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); | ||
1576 | udelay(50); | ||
1577 | } | ||
1578 | |||
1579 | |||
1580 | /* | ||
1581 | * Process op-mode | ||
1582 | * AUTO mode : PHY controller in Auto-negotiation Mode | ||
1583 | * Force mode: PHY controller in force mode with HUB | ||
1584 | * N-way force capability with SWITCH | ||
1585 | */ | ||
1586 | |||
1587 | static void uli526x_process_mode(struct uli526x_board_info *db) | ||
1588 | { | ||
1589 | u16 phy_reg; | ||
1590 | |||
1591 | /* Full Duplex Mode Check */ | ||
1592 | if (db->op_mode & 0x4) | ||
1593 | db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */ | ||
1594 | else | ||
1595 | db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */ | ||
1596 | |||
1597 | update_cr6(db->cr6_data, db->ioaddr); | ||
1598 | |||
1599 | /* Forced 10/100M phyxcer mode requires this */ | ||
1600 | if ( !(db->media_mode & 0x8)) { | ||
1601 | /* Force Mode */ | ||
1602 | phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); | ||
1603 | if ( !(phy_reg & 0x1) ) { | ||
1604 | /* partner without N-Way capability */ | ||
1605 | phy_reg = 0x0; | ||
1606 | switch(db->op_mode) { | ||
1607 | case ULI526X_10MHF: phy_reg = 0x0; break; | ||
1608 | case ULI526X_10MFD: phy_reg = 0x100; break; | ||
1609 | case ULI526X_100MHF: phy_reg = 0x2000; break; | ||
1610 | case ULI526X_100MFD: phy_reg = 0x2100; break; | ||
1611 | } | ||
1612 | phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); | ||
1613 | } | ||
1614 | } | ||
1615 | } | ||
1616 | |||
1617 | |||
1618 | /* | ||
1619 | * Write a word to Phy register | ||
1620 | */ | ||
1621 | |||
1622 | static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id) | ||
1623 | { | ||
1624 | u16 i; | ||
1625 | unsigned long ioaddr; | ||
1626 | |||
1627 | if(chip_id == PCI_ULI5263_ID) | ||
1628 | { | ||
1629 | phy_writeby_cr10(iobase, phy_addr, offset, phy_data); | ||
1630 | return; | ||
1631 | } | ||
1632 | /* M5261/M5263 Chip */ | ||
1633 | ioaddr = iobase + DCR9; | ||
1634 | |||
1635 | /* Send 35 synchronization clocks to the PHY controller */ | ||
1636 | for (i = 0; i < 35; i++) | ||
1637 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1638 | |||
1639 | /* Send start command(01) to Phy */ | ||
1640 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1641 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1642 | |||
1643 | /* Send write command(01) to Phy */ | ||
1644 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1645 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1646 | |||
1647 | /* Send Phy address */ | ||
1648 | for (i = 0x10; i > 0; i = i >> 1) | ||
1649 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1650 | |||
1651 | /* Send register address */ | ||
1652 | for (i = 0x10; i > 0; i = i >> 1) | ||
1653 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1654 | |||
1655 | /* write turnaround (10) before the data */ | ||
1656 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1657 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1658 | |||
1659 | /* Write a word data to PHY controller */ | ||
1660 | for ( i = 0x8000; i > 0; i >>= 1) | ||
1661 | phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1662 | |||
1663 | } | ||
1664 | |||
1665 | |||
1666 | /* | ||
1667 | * Read a word data from phy register | ||
1668 | */ | ||
1669 | |||
1670 | static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) | ||
1671 | { | ||
1672 | int i; | ||
1673 | u16 phy_data; | ||
1674 | unsigned long ioaddr; | ||
1675 | |||
1676 | if(chip_id == PCI_ULI5263_ID) | ||
1677 | return phy_readby_cr10(iobase, phy_addr, offset); | ||
1678 | /* M5261/M5263 Chip */ | ||
1679 | ioaddr = iobase + DCR9; | ||
1680 | |||
1681 | /* Send 35 synchronization clocks to the PHY controller */ | ||
1682 | for (i = 0; i < 35; i++) | ||
1683 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1684 | |||
1685 | /* Send start command(01) to Phy */ | ||
1686 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1687 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1688 | |||
1689 | /* Send read command(10) to Phy */ | ||
1690 | phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); | ||
1691 | phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); | ||
1692 | |||
1693 | /* Send Phy address */ | ||
1694 | for (i = 0x10; i > 0; i = i >> 1) | ||
1695 | phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1696 | |||
1697 | /* Send register address */ | ||
1698 | for (i = 0x10; i > 0; i = i >> 1) | ||
1699 | phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); | ||
1700 | |||
1701 | /* Skip transition state */ | ||
1702 | phy_read_1bit(ioaddr, chip_id); | ||
1703 | |||
1704 | /* read 16bit data */ | ||
1705 | for (phy_data = 0, i = 0; i < 16; i++) { | ||
1706 | phy_data <<= 1; | ||
1707 | phy_data |= phy_read_1bit(ioaddr, chip_id); | ||
1708 | } | ||
1709 | |||
1710 | return phy_data; | ||
1711 | } | ||
1712 | |||
1713 | static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) | ||
1714 | { | ||
1715 | unsigned long ioaddr,cr10_value; | ||
1716 | |||
1717 | ioaddr = iobase + DCR10; | ||
1718 | cr10_value = phy_addr; | ||
1719 | cr10_value = (cr10_value<<5) + offset; | ||
1720 | cr10_value = (cr10_value<<16) + 0x08000000; | ||
1721 | outl(cr10_value,ioaddr); | ||
1722 | udelay(1); | ||
1723 | while(1) | ||
1724 | { | ||
1725 | cr10_value = inl(ioaddr); | ||
1726 | if(cr10_value&0x10000000) | ||
1727 | break; | ||
1728 | } | ||
1729 | return cr10_value & 0x0ffff; | ||
1730 | } | ||
1731 | |||
1732 | static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) | ||
1733 | { | ||
1734 | unsigned long ioaddr,cr10_value; | ||
1735 | |||
1736 | ioaddr = iobase + DCR10; | ||
1737 | cr10_value = phy_addr; | ||
1738 | cr10_value = (cr10_value<<5) + offset; | ||
1739 | cr10_value = (cr10_value<<16) + 0x04000000 + phy_data; | ||
1740 | outl(cr10_value,ioaddr); | ||
1741 | udelay(1); | ||
1742 | } | ||
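
On the M5263 (PCI_ULI5263_ID) the MII management access goes through CR10 instead of the CR9 bit-bang path. The command word packed by the two helpers above has the following layout; the values are read off the code, the names are illustrative rather than datasheet names:

    #define ULI_CR10_READ_CMD    0x08000000          /* bit 27: start a register read    */
    #define ULI_CR10_WRITE_CMD   0x04000000          /* bit 26: start a register write   */
    #define ULI_CR10_READ_DONE   0x10000000          /* bit 28: read data valid (polled) */
    #define ULI_CR10_PHY_ADDR(p) ((u32)(p) << 21)    /* bits 25..21: PHY address         */
    #define ULI_CR10_REG(r)      ((u32)(r) << 16)    /* bits 20..16: register offset     */
                                                     /* bits 15..0 : write data / result */
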
1743 | /* | ||
1744 | * Write one data bit to the PHY controller | ||
1745 | */ | ||
1746 | |||
1747 | static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) | ||
1748 | { | ||
1749 | outl(phy_data , ioaddr); /* MII Clock Low */ | ||
1750 | udelay(1); | ||
1751 | outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ | ||
1752 | udelay(1); | ||
1753 | outl(phy_data , ioaddr); /* MII Clock Low */ | ||
1754 | udelay(1); | ||
1755 | } | ||
1756 | |||
1757 | |||
1758 | /* | ||
1759 | * Read one data bit from the PHY controller | ||
1760 | */ | ||
1761 | |||
1762 | static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) | ||
1763 | { | ||
1764 | u16 phy_data; | ||
1765 | |||
1766 | outl(0x50000 , ioaddr); | ||
1767 | udelay(1); | ||
1768 | phy_data = ( inl(ioaddr) >> 19 ) & 0x1; | ||
1769 | outl(0x40000 , ioaddr); | ||
1770 | udelay(1); | ||
1771 | |||
1772 | return phy_data; | ||
1773 | } | ||
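
Taken together, phy_write_1bit() and phy_read_1bit() bit-bang an IEEE 802.3 clause-22 management frame over CR9: preamble, start (01), opcode (01 write, 10 read), 5-bit PHY address, 5-bit register, turnaround, then 16 data bits. A short usage sketch (hypothetical helper) that checks the link the same way uli526x_sense_speed() does, reading BMSR twice so the latched link bit reflects the current state:

    /* Hypothetical usage sketch: nonzero when the link is up and
     * auto-negotiation has completed (BMSR bits 2 and 5, the same 0x24
     * test used by uli526x_sense_speed()). */
    static int uli526x_link_ok(struct uli526x_board_info *db)
    {
            u16 bmsr;

            bmsr = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
            bmsr = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
            return (bmsr & 0x0024) == 0x0024;
    }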
1774 | |||
1775 | |||
1776 | static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = { | ||
1777 | { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID }, | ||
1778 | { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID }, | ||
1779 | { 0, } | ||
1780 | }; | ||
1781 | MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl); | ||
1782 | |||
1783 | |||
1784 | static struct pci_driver uli526x_driver = { | ||
1785 | .name = "uli526x", | ||
1786 | .id_table = uli526x_pci_tbl, | ||
1787 | .probe = uli526x_init_one, | ||
1788 | .remove = __devexit_p(uli526x_remove_one), | ||
1789 | .suspend = uli526x_suspend, | ||
1790 | .resume = uli526x_resume, | ||
1791 | }; | ||
1792 | |||
1793 | MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw"); | ||
1794 | MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver"); | ||
1795 | MODULE_LICENSE("GPL"); | ||
1796 | |||
1797 | module_param(debug, int, 0644); | ||
1798 | module_param(mode, int, 0); | ||
1799 | module_param(cr6set, int, 0); | ||
1800 | MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)"); | ||
1801 | MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA"); | ||
1802 | |||
1803 | /* Description: | ||
1804 | * When the user loads the module with insmod, the system invokes | ||
1805 | * init_module() to register the driver's services. | ||
1806 | */ | ||
1807 | |||
1808 | static int __init uli526x_init_module(void) | ||
1809 | { | ||
1810 | |||
1811 | pr_info("%s\n", version); | ||
1812 | printed_version = 1; | ||
1813 | |||
1814 | ULI526X_DBUG(0, "init_module() ", debug); | ||
1815 | |||
1816 | if (debug) | ||
1817 | uli526x_debug = debug; /* set debug flag */ | ||
1818 | if (cr6set) | ||
1819 | uli526x_cr6_user_set = cr6set; | ||
1820 | |||
1821 | switch (mode) { | ||
1822 | case ULI526X_10MHF: | ||
1823 | case ULI526X_100MHF: | ||
1824 | case ULI526X_10MFD: | ||
1825 | case ULI526X_100MFD: | ||
1826 | uli526x_media_mode = mode; | ||
1827 | break; | ||
1828 | default: | ||
1829 | uli526x_media_mode = ULI526X_AUTO; | ||
1830 | break; | ||
1831 | } | ||
1832 | |||
1833 | return pci_register_driver(&uli526x_driver); | ||
1834 | } | ||
1835 | |||
1836 | |||
1837 | /* | ||
1838 | * Description: | ||
1839 | * When the user removes the module with rmmod, the system invokes | ||
1840 | * cleanup_module() to unregister all registered services. | ||
1841 | */ | ||
1842 | |||
1843 | static void __exit uli526x_cleanup_module(void) | ||
1844 | { | ||
1845 | ULI526X_DBUG(0, "uli526x_clean_module() ", debug); | ||
1846 | pci_unregister_driver(&uli526x_driver); | ||
1847 | } | ||
1848 | |||
1849 | module_init(uli526x_init_module); | ||
1850 | module_exit(uli526x_cleanup_module); | ||
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c new file mode 100644 index 000000000000..862eadf07191 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c | |||
@@ -0,0 +1,1670 @@ | |||
1 | /* winbond-840.c: A Linux PCI network adapter device driver. */ | ||
2 | /* | ||
3 | Written 1998-2001 by Donald Becker. | ||
4 | |||
5 | This software may be used and distributed according to the terms of | ||
6 | the GNU General Public License (GPL), incorporated herein by reference. | ||
7 | Drivers based on or derived from this code fall under the GPL and must | ||
8 | retain the authorship, copyright and license notice. This file is not | ||
9 | a complete program and may only be used when the entire operating | ||
10 | system is licensed under the GPL. | ||
11 | |||
12 | The author may be reached as becker@scyld.com, or C/O | ||
13 | Scyld Computing Corporation | ||
14 | 410 Severn Ave., Suite 210 | ||
15 | Annapolis MD 21403 | ||
16 | |||
17 | Support and updates available at | ||
18 | http://www.scyld.com/network/drivers.html | ||
19 | |||
20 | Do not remove the copyright information. | ||
21 | Do not change the version information unless an improvement has been made. | ||
22 | Merely removing my name, as Compex has done in the past, does not count | ||
23 | as an improvement. | ||
24 | |||
25 | Changelog: | ||
26 | * ported to 2.4 | ||
27 | ??? | ||
28 | * spin lock update, memory barriers, new style dma mappings | ||
29 | limit each tx buffer to < 1024 bytes | ||
30 | remove DescIntr from Rx descriptors (that's a Tx flag) | ||
31 | remove next pointer from Tx descriptors | ||
32 | synchronize tx_q_bytes | ||
33 | software reset in tx_timeout | ||
34 | Copyright (C) 2000 Manfred Spraul | ||
35 | * further cleanups | ||
36 | power management. | ||
37 | support for big endian descriptors | ||
38 | Copyright (C) 2001 Manfred Spraul | ||
39 | * ethtool support (jgarzik) | ||
40 | * Replace some MII-related magic numbers with constants (jgarzik) | ||
41 | |||
42 | TODO: | ||
43 | * enable pci_power_off | ||
44 | * Wake-On-LAN | ||
45 | */ | ||
46 | |||
47 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
48 | |||
49 | #define DRV_NAME "winbond-840" | ||
50 | #define DRV_VERSION "1.01-e" | ||
51 | #define DRV_RELDATE "Sep-11-2006" | ||
52 | |||
53 | |||
54 | /* Automatically extracted configuration info: | ||
55 | probe-func: winbond840_probe | ||
56 | config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840 | ||
57 | |||
58 | c-help-name: Winbond W89c840 PCI Ethernet support | ||
59 | c-help-symbol: CONFIG_WINBOND_840 | ||
60 | c-help: This driver is for the Winbond W89c840 chip. It also works with | ||
61 | c-help: the TX9882 chip on the Compex RL100-ATX board. | ||
62 | c-help: More specific information and updates are available from | ||
63 | c-help: http://www.scyld.com/network/drivers.html | ||
64 | */ | ||
65 | |||
66 | /* The user-configurable values. | ||
67 | These may be modified when a driver module is loaded.*/ | ||
68 | |||
69 | static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ | ||
70 | static int max_interrupt_work = 20; | ||
71 | /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). | ||
72 | The '840 uses a 64 element hash table based on the Ethernet CRC. */ | ||
73 | static int multicast_filter_limit = 32; | ||
74 | |||
75 | /* Set the copy breakpoint for the copy-only-tiny-frames scheme. | ||
76 | Setting to > 1518 effectively disables this feature. */ | ||
77 | static int rx_copybreak; | ||
78 | |||
79 | /* Used to pass the media type, etc. | ||
80 | Both 'options[]' and 'full_duplex[]' should exist for driver | ||
81 | interoperability. | ||
82 | The media type is usually passed in 'options[]'. | ||
83 | */ | ||
84 | #define MAX_UNITS 8 /* More are supported, limit only on options */ | ||
85 | static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | ||
86 | static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; | ||
87 | |||
88 | /* Operational parameters that are set at compile time. */ | ||
89 | |||
90 | /* Keep the ring sizes a power of two for compile efficiency. | ||
91 | The compiler will convert <unsigned>'%'<2^N> into a bit mask. | ||
92 | Making the Tx ring too large decreases the effectiveness of channel | ||
93 | bonding and packet priority. | ||
94 | There are no ill effects from too-large receive rings. */ | ||
95 | #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ | ||
96 | #define TX_QUEUE_LEN_RESTART 5 | ||
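[Editor's note] The power-of-two remark in the comment above is what keeps the ring-index arithmetic cheap: the driver indexes its rings with expressions like cur_tx % TX_RING_SIZE, and with a power-of-two size the compiler lowers the modulo to a bit mask. A minimal sketch, assuming a stand-in RING_SIZE constant (the real RX_RING_SIZE/TX_RING_SIZE come from tulip.h and are not part of this diff):

/* Editor's sketch, not part of the driver: with a power-of-two size the
 * modulo below compiles to (cur + 1) & (RING_SIZE - 1), i.e. no division
 * on the hot path. RING_SIZE stands in for RX_RING_SIZE/TX_RING_SIZE.
 */
#define RING_SIZE 16	/* must be a power of two */

static unsigned int next_ring_entry(unsigned int cur)
{
	return (cur + 1) % RING_SIZE;
}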
97 | |||
98 | #define TX_BUFLIMIT (1024-128) | ||
99 | |||
100 | /* The presumed FIFO size for working around the Tx-FIFO-overflow bug. | ||
101 | To avoid overflowing we don't queue again until we have room for a | ||
102 | full-size packet. | ||
103 | */ | ||
104 | #define TX_FIFO_SIZE (2048) | ||
105 | #define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16) | ||
106 | |||
107 | |||
108 | /* Operational parameters that usually are not changed. */ | ||
109 | /* Time in jiffies before concluding the transmitter is hung. */ | ||
110 | #define TX_TIMEOUT (2*HZ) | ||
111 | |||
112 | /* Include files, designed to support most kernel versions 2.0.0 and later. */ | ||
113 | #include <linux/module.h> | ||
114 | #include <linux/kernel.h> | ||
115 | #include <linux/string.h> | ||
116 | #include <linux/timer.h> | ||
117 | #include <linux/errno.h> | ||
118 | #include <linux/ioport.h> | ||
119 | #include <linux/interrupt.h> | ||
120 | #include <linux/pci.h> | ||
121 | #include <linux/dma-mapping.h> | ||
122 | #include <linux/netdevice.h> | ||
123 | #include <linux/etherdevice.h> | ||
124 | #include <linux/skbuff.h> | ||
125 | #include <linux/init.h> | ||
126 | #include <linux/delay.h> | ||
127 | #include <linux/ethtool.h> | ||
128 | #include <linux/mii.h> | ||
129 | #include <linux/rtnetlink.h> | ||
130 | #include <linux/crc32.h> | ||
131 | #include <linux/bitops.h> | ||
132 | #include <asm/uaccess.h> | ||
133 | #include <asm/processor.h> /* Processor type for cache alignment. */ | ||
134 | #include <asm/io.h> | ||
135 | #include <asm/irq.h> | ||
136 | |||
137 | #include "tulip.h" | ||
138 | |||
139 | #undef PKT_BUF_SZ /* tulip.h also defines this */ | ||
140 | #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ | ||
141 | |||
142 | /* These identify the driver base version and may not be removed. */ | ||
143 | static const char version[] __initconst = | ||
144 | "v" DRV_VERSION " (2.4 port) " | ||
145 | DRV_RELDATE " Donald Becker <becker@scyld.com>\n" | ||
146 | " http://www.scyld.com/network/drivers.html\n"; | ||
147 | |||
148 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | ||
149 | MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver"); | ||
150 | MODULE_LICENSE("GPL"); | ||
151 | MODULE_VERSION(DRV_VERSION); | ||
152 | |||
153 | module_param(max_interrupt_work, int, 0); | ||
154 | module_param(debug, int, 0); | ||
155 | module_param(rx_copybreak, int, 0); | ||
156 | module_param(multicast_filter_limit, int, 0); | ||
157 | module_param_array(options, int, NULL, 0); | ||
158 | module_param_array(full_duplex, int, NULL, 0); | ||
159 | MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt"); | ||
160 | MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)"); | ||
161 | MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames"); | ||
162 | MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses"); | ||
163 | MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex"); | ||
164 | MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)"); | ||
165 | |||
166 | /* | ||
167 | Theory of Operation | ||
168 | |||
169 | I. Board Compatibility | ||
170 | |||
171 | This driver is for the Winbond w89c840 chip. | ||
172 | |||
173 | II. Board-specific settings | ||
174 | |||
175 | None. | ||
176 | |||
177 | III. Driver operation | ||
178 | |||
179 | This chip is very similar to the Digital 21*4* "Tulip" family. The first | ||
180 | twelve registers and the descriptor format are nearly identical. Read a | ||
181 | Tulip manual for operational details. | ||
182 | |||
183 | A significant difference is that the multicast filter and station address are | ||
184 | stored in registers rather than loaded through a pseudo-transmit packet. | ||
185 | |||
186 | Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a | ||
187 | full-sized packet we must use both data buffers in a descriptor. Thus the | ||
188 | driver uses ring mode where descriptors are implicitly sequential in memory, | ||
189 | rather than using the second descriptor address as a chain pointer to | ||
190 | subsequent descriptors. | ||
191 | |||
192 | IV. Notes | ||
193 | |||
194 | If you are going to almost clone a Tulip, why not go all the way and avoid | ||
195 | the need for a new driver? | ||
196 | |||
197 | IVb. References | ||
198 | |||
199 | http://www.scyld.com/expert/100mbps.html | ||
200 | http://www.scyld.com/expert/NWay.html | ||
201 | http://www.winbond.com.tw/ | ||
202 | |||
203 | IVc. Errata | ||
204 | |||
205 | A horrible bug exists in the transmit FIFO. Apparently the chip doesn't | ||
206 | correctly detect a full FIFO, and queuing more than 2048 bytes may result in | ||
207 | silent data corruption. | ||
208 | |||
209 | Test with 'ping -s 10000' on a fast computer. | ||
210 | |||
211 | */ | ||
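[Editor's note] A short illustration of the ring-mode point in section III and the errata in section IVc above: because each data buffer is limited to just under 1 KB, a full-sized frame is described with both buffers of a single descriptor, exactly as start_tx() later in this file does; and because the chip cannot be trusted past 2048 queued bytes, the driver stops the queue once less than a full frame of headroom remains (TX_BUG_FIFO_LIMIT = 2048 - 1514 - 16 = 518 bytes, per the defines above). A minimal sketch reusing the driver's own layout and constants (struct w840_tx_desc and TX_BUFLIMIT are defined later in this file; DescWholePkt comes from tulip.h):

/* Editor's sketch, not part of the driver: split one frame across the two
 * data buffers of a single Tx descriptor, mirroring start_tx() below.
 */
static void sketch_fill_tx_desc(struct w840_tx_desc *desc,
				dma_addr_t addr, unsigned int len)
{
	desc->buffer1 = addr;
	if (len < TX_BUFLIMIT) {
		desc->length = DescWholePkt | len;
	} else {
		unsigned int rest = len - TX_BUFLIMIT;

		desc->buffer2 = addr + TX_BUFLIMIT;	/* remainder of the frame */
		desc->length = DescWholePkt | (rest << 11) | TX_BUFLIMIT;
	}
}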
212 | |||
213 | |||
214 | |||
215 | /* | ||
216 | PCI probe table. | ||
217 | */ | ||
218 | enum chip_capability_flags { | ||
219 | CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8, | ||
220 | }; | ||
221 | |||
222 | static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = { | ||
223 | { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 }, | ||
224 | { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, | ||
225 | { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, | ||
226 | { } | ||
227 | }; | ||
228 | MODULE_DEVICE_TABLE(pci, w840_pci_tbl); | ||
229 | |||
230 | enum { | ||
231 | netdev_res_size = 128, /* size of PCI BAR resource */ | ||
232 | }; | ||
233 | |||
234 | struct pci_id_info { | ||
235 | const char *name; | ||
236 | int drv_flags; /* Driver use, intended as capability flags. */ | ||
237 | }; | ||
238 | |||
239 | static const struct pci_id_info pci_id_tbl[] __devinitdata = { | ||
240 | { /* Sometimes a Level-One switch card. */ | ||
241 | "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII}, | ||
242 | { "Winbond W89c840", CanHaveMII | HasBrokenTx}, | ||
243 | { "Compex RL100-ATX", CanHaveMII | HasBrokenTx}, | ||
244 | { } /* terminate list. */ | ||
245 | }; | ||
246 | |||
247 | /* This driver was written to use PCI memory space; however, some x86 systems | ||
248 | work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config | ||
249 | */ | ||
250 | |||
251 | /* Offsets to the Command and Status Registers, "CSRs". | ||
252 | While similar to the Tulip, these registers are longword aligned. | ||
253 | Note: It's not useful to define symbolic names for every register bit in | ||
254 | the device. The names can only partially document the semantics and would | ||
255 | make the driver longer and more difficult to read. | ||
256 | */ | ||
257 | enum w840_offsets { | ||
258 | PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08, | ||
259 | RxRingPtr=0x0C, TxRingPtr=0x10, | ||
260 | IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C, | ||
261 | RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C, | ||
262 | CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */ | ||
263 | MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40, | ||
264 | CurTxDescAddr=0x4C, CurTxBufAddr=0x50, | ||
265 | }; | ||
266 | |||
267 | /* Bits in the NetworkConfig register. */ | ||
268 | enum rx_mode_bits { | ||
269 | AcceptErr=0x80, | ||
270 | RxAcceptBroadcast=0x20, AcceptMulticast=0x10, | ||
271 | RxAcceptAllPhys=0x08, AcceptMyPhys=0x02, | ||
272 | }; | ||
273 | |||
274 | enum mii_reg_bits { | ||
275 | MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000, | ||
276 | MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000, | ||
277 | }; | ||
278 | |||
279 | /* The Tulip Rx and Tx buffer descriptors. */ | ||
280 | struct w840_rx_desc { | ||
281 | s32 status; | ||
282 | s32 length; | ||
283 | u32 buffer1; | ||
284 | u32 buffer2; | ||
285 | }; | ||
286 | |||
287 | struct w840_tx_desc { | ||
288 | s32 status; | ||
289 | s32 length; | ||
290 | u32 buffer1, buffer2; | ||
291 | }; | ||
292 | |||
293 | #define MII_CNT 1 /* winbond only supports one MII */ | ||
294 | struct netdev_private { | ||
295 | struct w840_rx_desc *rx_ring; | ||
296 | dma_addr_t rx_addr[RX_RING_SIZE]; | ||
297 | struct w840_tx_desc *tx_ring; | ||
298 | dma_addr_t tx_addr[TX_RING_SIZE]; | ||
299 | dma_addr_t ring_dma_addr; | ||
300 | /* The addresses of receive-in-place skbuffs. */ | ||
301 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | ||
302 | /* The saved address of a sent-in-place packet/buffer, for later free(). */ | ||
303 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | ||
304 | struct net_device_stats stats; | ||
305 | struct timer_list timer; /* Media monitoring timer. */ | ||
306 | /* Frequently used values: keep some adjacent for cache effect. */ | ||
307 | spinlock_t lock; | ||
308 | int chip_id, drv_flags; | ||
309 | struct pci_dev *pci_dev; | ||
310 | int csr6; | ||
311 | struct w840_rx_desc *rx_head_desc; | ||
312 | unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ | ||
313 | unsigned int rx_buf_sz; /* Based on MTU+slack. */ | ||
314 | unsigned int cur_tx, dirty_tx; | ||
315 | unsigned int tx_q_bytes; | ||
316 | unsigned int tx_full; /* The Tx queue is full. */ | ||
317 | /* MII transceiver section. */ | ||
318 | int mii_cnt; /* Number of MII devices found. */ | ||
319 | unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */ | ||
320 | u32 mii; | ||
321 | struct mii_if_info mii_if; | ||
322 | void __iomem *base_addr; | ||
323 | }; | ||
324 | |||
325 | static int eeprom_read(void __iomem *ioaddr, int location); | ||
326 | static int mdio_read(struct net_device *dev, int phy_id, int location); | ||
327 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | ||
328 | static int netdev_open(struct net_device *dev); | ||
329 | static int update_link(struct net_device *dev); | ||
330 | static void netdev_timer(unsigned long data); | ||
331 | static void init_rxtx_rings(struct net_device *dev); | ||
332 | static void free_rxtx_rings(struct netdev_private *np); | ||
333 | static void init_registers(struct net_device *dev); | ||
334 | static void tx_timeout(struct net_device *dev); | ||
335 | static int alloc_ringdesc(struct net_device *dev); | ||
336 | static void free_ringdesc(struct netdev_private *np); | ||
337 | static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); | ||
338 | static irqreturn_t intr_handler(int irq, void *dev_instance); | ||
339 | static void netdev_error(struct net_device *dev, int intr_status); | ||
340 | static int netdev_rx(struct net_device *dev); | ||
341 | static u32 __set_rx_mode(struct net_device *dev); | ||
342 | static void set_rx_mode(struct net_device *dev); | ||
343 | static struct net_device_stats *get_stats(struct net_device *dev); | ||
344 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
345 | static const struct ethtool_ops netdev_ethtool_ops; | ||
346 | static int netdev_close(struct net_device *dev); | ||
347 | |||
348 | static const struct net_device_ops netdev_ops = { | ||
349 | .ndo_open = netdev_open, | ||
350 | .ndo_stop = netdev_close, | ||
351 | .ndo_start_xmit = start_tx, | ||
352 | .ndo_get_stats = get_stats, | ||
353 | .ndo_set_multicast_list = set_rx_mode, | ||
354 | .ndo_do_ioctl = netdev_ioctl, | ||
355 | .ndo_tx_timeout = tx_timeout, | ||
356 | .ndo_change_mtu = eth_change_mtu, | ||
357 | .ndo_set_mac_address = eth_mac_addr, | ||
358 | .ndo_validate_addr = eth_validate_addr, | ||
359 | }; | ||
360 | |||
361 | static int __devinit w840_probe1 (struct pci_dev *pdev, | ||
362 | const struct pci_device_id *ent) | ||
363 | { | ||
364 | struct net_device *dev; | ||
365 | struct netdev_private *np; | ||
366 | static int find_cnt; | ||
367 | int chip_idx = ent->driver_data; | ||
368 | int irq; | ||
369 | int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; | ||
370 | void __iomem *ioaddr; | ||
371 | |||
372 | i = pci_enable_device(pdev); | ||
373 | if (i) return i; | ||
374 | |||
375 | pci_set_master(pdev); | ||
376 | |||
377 | irq = pdev->irq; | ||
378 | |||
379 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { | ||
380 | pr_warn("Device %s disabled due to DMA limitations\n", | ||
381 | pci_name(pdev)); | ||
382 | return -EIO; | ||
383 | } | ||
384 | dev = alloc_etherdev(sizeof(*np)); | ||
385 | if (!dev) | ||
386 | return -ENOMEM; | ||
387 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
388 | |||
389 | if (pci_request_regions(pdev, DRV_NAME)) | ||
390 | goto err_out_netdev; | ||
391 | |||
392 | ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size); | ||
393 | if (!ioaddr) | ||
394 | goto err_out_free_res; | ||
395 | |||
396 | for (i = 0; i < 3; i++) | ||
397 | ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i)); | ||
398 | |||
399 | /* Reset the chip to erase previous misconfiguration. | ||
400 | No hold time required! */ | ||
401 | iowrite32(0x00000001, ioaddr + PCIBusCfg); | ||
402 | |||
403 | dev->base_addr = (unsigned long)ioaddr; | ||
404 | dev->irq = irq; | ||
405 | |||
406 | np = netdev_priv(dev); | ||
407 | np->pci_dev = pdev; | ||
408 | np->chip_id = chip_idx; | ||
409 | np->drv_flags = pci_id_tbl[chip_idx].drv_flags; | ||
410 | spin_lock_init(&np->lock); | ||
411 | np->mii_if.dev = dev; | ||
412 | np->mii_if.mdio_read = mdio_read; | ||
413 | np->mii_if.mdio_write = mdio_write; | ||
414 | np->base_addr = ioaddr; | ||
415 | |||
416 | pci_set_drvdata(pdev, dev); | ||
417 | |||
418 | if (dev->mem_start) | ||
419 | option = dev->mem_start; | ||
420 | |||
421 | /* The lower four bits are the media type. */ | ||
422 | if (option > 0) { | ||
423 | if (option & 0x200) | ||
424 | np->mii_if.full_duplex = 1; | ||
425 | if (option & 15) | ||
426 | dev_info(&dev->dev, | ||
427 | "ignoring user supplied media type %d", | ||
428 | option & 15); | ||
429 | } | ||
430 | if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) | ||
431 | np->mii_if.full_duplex = 1; | ||
432 | |||
433 | if (np->mii_if.full_duplex) | ||
434 | np->mii_if.force_media = 1; | ||
435 | |||
436 | /* The chip-specific entries in the device structure. */ | ||
437 | dev->netdev_ops = &netdev_ops; | ||
438 | dev->ethtool_ops = &netdev_ethtool_ops; | ||
439 | dev->watchdog_timeo = TX_TIMEOUT; | ||
440 | |||
441 | i = register_netdev(dev); | ||
442 | if (i) | ||
443 | goto err_out_cleardev; | ||
444 | |||
445 | dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n", | ||
446 | pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq); | ||
447 | |||
448 | if (np->drv_flags & CanHaveMII) { | ||
449 | int phy, phy_idx = 0; | ||
450 | for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) { | ||
451 | int mii_status = mdio_read(dev, phy, MII_BMSR); | ||
452 | if (mii_status != 0xffff && mii_status != 0x0000) { | ||
453 | np->phys[phy_idx++] = phy; | ||
454 | np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); | ||
455 | np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+ | ||
456 | mdio_read(dev, phy, MII_PHYSID2); | ||
457 | dev_info(&dev->dev, | ||
458 | "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n", | ||
459 | np->mii, phy, mii_status, | ||
460 | np->mii_if.advertising); | ||
461 | } | ||
462 | } | ||
463 | np->mii_cnt = phy_idx; | ||
464 | np->mii_if.phy_id = np->phys[0]; | ||
465 | if (phy_idx == 0) { | ||
466 | dev_warn(&dev->dev, | ||
467 | "MII PHY not found -- this device may not operate correctly\n"); | ||
468 | } | ||
469 | } | ||
470 | |||
471 | find_cnt++; | ||
472 | return 0; | ||
473 | |||
474 | err_out_cleardev: | ||
475 | pci_set_drvdata(pdev, NULL); | ||
476 | pci_iounmap(pdev, ioaddr); | ||
477 | err_out_free_res: | ||
478 | pci_release_regions(pdev); | ||
479 | err_out_netdev: | ||
480 | free_netdev (dev); | ||
481 | return -ENODEV; | ||
482 | } | ||
483 | |||
484 | |||
485 | /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are | ||
486 | often serial bit streams generated by the host processor. | ||
487 | The example below is for the common 93c46 EEPROM, 64 16 bit words. */ | ||
488 | |||
489 | /* Delay between EEPROM clock transitions. | ||
490 | No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need | ||
491 | a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that | ||
492 | made udelay() unreliable. | ||
493 | The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is | ||
494 | deprecated. | ||
495 | */ | ||
496 | #define eeprom_delay(ee_addr) ioread32(ee_addr) | ||
497 | |||
498 | enum EEPROM_Ctrl_Bits { | ||
499 | EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805, | ||
500 | EE_ChipSelect=0x801, EE_DataIn=0x08, | ||
501 | }; | ||
502 | |||
503 | /* The EEPROM commands include the always-set leading bit. */ | ||
504 | enum EEPROM_Cmds { | ||
505 | EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6), | ||
506 | }; | ||
507 | |||
508 | static int eeprom_read(void __iomem *addr, int location) | ||
509 | { | ||
510 | int i; | ||
511 | int retval = 0; | ||
512 | void __iomem *ee_addr = addr + EECtrl; | ||
513 | int read_cmd = location | EE_ReadCmd; | ||
514 | iowrite32(EE_ChipSelect, ee_addr); | ||
515 | |||
516 | /* Shift the read command bits out. */ | ||
517 | for (i = 10; i >= 0; i--) { | ||
518 | short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0; | ||
519 | iowrite32(dataval, ee_addr); | ||
520 | eeprom_delay(ee_addr); | ||
521 | iowrite32(dataval | EE_ShiftClk, ee_addr); | ||
522 | eeprom_delay(ee_addr); | ||
523 | } | ||
524 | iowrite32(EE_ChipSelect, ee_addr); | ||
525 | eeprom_delay(ee_addr); | ||
526 | |||
527 | for (i = 16; i > 0; i--) { | ||
528 | iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr); | ||
529 | eeprom_delay(ee_addr); | ||
530 | retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0); | ||
531 | iowrite32(EE_ChipSelect, ee_addr); | ||
532 | eeprom_delay(ee_addr); | ||
533 | } | ||
534 | |||
535 | /* Terminate the EEPROM access. */ | ||
536 | iowrite32(0, ee_addr); | ||
537 | return retval; | ||
538 | } | ||
539 | |||
540 | /* MII transceiver control section. | ||
541 | Read and write the MII registers using software-generated serial | ||
542 | MDIO protocol. See the MII specifications or DP83840A data sheet | ||
543 | for details. | ||
544 | |||
545 | The maximum data clock rate is 2.5 MHz. The minimum timing is usually | ||
546 | met by back-to-back 33 MHz PCI cycles. */ | ||
547 | #define mdio_delay(mdio_addr) ioread32(mdio_addr) | ||
548 | |||
549 | /* Set iff an MII transceiver on any interface requires an mdio preamble. | ||
550 | This is only set with older transceivers, so the extra | ||
551 | code size of a per-interface flag is not worthwhile. */ | ||
552 | static char mii_preamble_required = 1; | ||
553 | |||
554 | #define MDIO_WRITE0 (MDIO_EnbOutput) | ||
555 | #define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput) | ||
556 | |||
557 | /* Generate the preamble required for initial synchronization and | ||
558 | a few older transceivers. */ | ||
559 | static void mdio_sync(void __iomem *mdio_addr) | ||
560 | { | ||
561 | int bits = 32; | ||
562 | |||
563 | /* Establish sync by sending at least 32 logic ones. */ | ||
564 | while (--bits >= 0) { | ||
565 | iowrite32(MDIO_WRITE1, mdio_addr); | ||
566 | mdio_delay(mdio_addr); | ||
567 | iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); | ||
568 | mdio_delay(mdio_addr); | ||
569 | } | ||
570 | } | ||
571 | |||
572 | static int mdio_read(struct net_device *dev, int phy_id, int location) | ||
573 | { | ||
574 | struct netdev_private *np = netdev_priv(dev); | ||
575 | void __iomem *mdio_addr = np->base_addr + MIICtrl; | ||
576 | int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; | ||
577 | int i, retval = 0; | ||
578 | |||
579 | if (mii_preamble_required) | ||
580 | mdio_sync(mdio_addr); | ||
581 | |||
582 | /* Shift the read command bits out. */ | ||
583 | for (i = 15; i >= 0; i--) { | ||
584 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | ||
585 | |||
586 | iowrite32(dataval, mdio_addr); | ||
587 | mdio_delay(mdio_addr); | ||
588 | iowrite32(dataval | MDIO_ShiftClk, mdio_addr); | ||
589 | mdio_delay(mdio_addr); | ||
590 | } | ||
591 | /* Read the two transition bits, the 16 data bits, and the wire-idle bits. */ | ||
592 | for (i = 20; i > 0; i--) { | ||
593 | iowrite32(MDIO_EnbIn, mdio_addr); | ||
594 | mdio_delay(mdio_addr); | ||
595 | retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0); | ||
596 | iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | ||
597 | mdio_delay(mdio_addr); | ||
598 | } | ||
599 | return (retval>>1) & 0xffff; | ||
600 | } | ||
601 | |||
602 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value) | ||
603 | { | ||
604 | struct netdev_private *np = netdev_priv(dev); | ||
605 | void __iomem *mdio_addr = np->base_addr + MIICtrl; | ||
606 | int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; | ||
607 | int i; | ||
608 | |||
609 | if (location == 4 && phy_id == np->phys[0]) | ||
610 | np->mii_if.advertising = value; | ||
611 | |||
612 | if (mii_preamble_required) | ||
613 | mdio_sync(mdio_addr); | ||
614 | |||
615 | /* Shift the command bits out. */ | ||
616 | for (i = 31; i >= 0; i--) { | ||
617 | int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; | ||
618 | |||
619 | iowrite32(dataval, mdio_addr); | ||
620 | mdio_delay(mdio_addr); | ||
621 | iowrite32(dataval | MDIO_ShiftClk, mdio_addr); | ||
622 | mdio_delay(mdio_addr); | ||
623 | } | ||
624 | /* Clear out extra bits. */ | ||
625 | for (i = 2; i > 0; i--) { | ||
626 | iowrite32(MDIO_EnbIn, mdio_addr); | ||
627 | mdio_delay(mdio_addr); | ||
628 | iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); | ||
629 | mdio_delay(mdio_addr); | ||
630 | } | ||
631 | } | ||
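[Editor's note] As a usage illustration of the bit-banged MDIO helpers above: restarting autonegotiation amounts to reading the BMCR and writing it back with the enable/restart bits set. The driver itself reaches the same effect through mii_nway_restart() in its ethtool nway_reset hook; the sketch below only shows what that boils down to at the mdio_read/mdio_write level (BMCR bit names come from <linux/mii.h>, already included above).

/* Editor's sketch, not part of the driver: restart autonegotiation on one
 * PHY using the helpers above.
 */
static void sketch_restart_autoneg(struct net_device *dev, int phy_id)
{
	int bmcr = mdio_read(dev, phy_id, MII_BMCR);

	mdio_write(dev, phy_id, MII_BMCR, bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
}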
632 | |||
633 | |||
634 | static int netdev_open(struct net_device *dev) | ||
635 | { | ||
636 | struct netdev_private *np = netdev_priv(dev); | ||
637 | void __iomem *ioaddr = np->base_addr; | ||
638 | int i; | ||
639 | |||
640 | iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ | ||
641 | |||
642 | netif_device_detach(dev); | ||
643 | i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); | ||
644 | if (i) | ||
645 | goto out_err; | ||
646 | |||
647 | if (debug > 1) | ||
648 | netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq); | ||
649 | |||
650 | if((i=alloc_ringdesc(dev))) | ||
651 | goto out_err; | ||
652 | |||
653 | spin_lock_irq(&np->lock); | ||
654 | netif_device_attach(dev); | ||
655 | init_registers(dev); | ||
656 | spin_unlock_irq(&np->lock); | ||
657 | |||
658 | netif_start_queue(dev); | ||
659 | if (debug > 2) | ||
660 | netdev_dbg(dev, "Done netdev_open()\n"); | ||
661 | |||
662 | /* Set the timer to check for link beat. */ | ||
663 | init_timer(&np->timer); | ||
664 | np->timer.expires = jiffies + 1*HZ; | ||
665 | np->timer.data = (unsigned long)dev; | ||
666 | np->timer.function = netdev_timer; /* timer handler */ | ||
667 | add_timer(&np->timer); | ||
668 | return 0; | ||
669 | out_err: | ||
670 | netif_device_attach(dev); | ||
671 | return i; | ||
672 | } | ||
673 | |||
674 | #define MII_DAVICOM_DM9101 0x0181b800 | ||
675 | |||
676 | static int update_link(struct net_device *dev) | ||
677 | { | ||
678 | struct netdev_private *np = netdev_priv(dev); | ||
679 | int duplex, fasteth, result, mii_reg; | ||
680 | |||
681 | /* BMSR */ | ||
682 | mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); | ||
683 | |||
684 | if (mii_reg == 0xffff) | ||
685 | return np->csr6; | ||
686 | /* reread: the link status bit is sticky */ | ||
687 | mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); | ||
688 | if (!(mii_reg & 0x4)) { | ||
689 | if (netif_carrier_ok(dev)) { | ||
690 | if (debug) | ||
691 | dev_info(&dev->dev, | ||
692 | "MII #%d reports no link. Disabling watchdog\n", | ||
693 | np->phys[0]); | ||
694 | netif_carrier_off(dev); | ||
695 | } | ||
696 | return np->csr6; | ||
697 | } | ||
698 | if (!netif_carrier_ok(dev)) { | ||
699 | if (debug) | ||
700 | dev_info(&dev->dev, | ||
701 | "MII #%d link is back. Enabling watchdog\n", | ||
702 | np->phys[0]); | ||
703 | netif_carrier_on(dev); | ||
704 | } | ||
705 | |||
706 | if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { | ||
707 | /* If the link partner doesn't support autonegotiation | ||
708 | * the MII detects its abilities with the "parallel detection". | ||
709 | * Some MIIs update the LPA register to the result of the parallel | ||
710 | * detection, some don't. | ||
711 | * The Davicom PHY [at least 0181b800] doesn't. | ||
712 | * Instead, bits 9 and 13 of the BMCR are updated to the result | ||
713 | * of the negotiation. | ||
714 | */ | ||
715 | mii_reg = mdio_read(dev, np->phys[0], MII_BMCR); | ||
716 | duplex = mii_reg & BMCR_FULLDPLX; | ||
717 | fasteth = mii_reg & BMCR_SPEED100; | ||
718 | } else { | ||
719 | int negotiated; | ||
720 | mii_reg = mdio_read(dev, np->phys[0], MII_LPA); | ||
721 | negotiated = mii_reg & np->mii_if.advertising; | ||
722 | |||
723 | duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL); | ||
724 | fasteth = negotiated & 0x380; | ||
725 | } | ||
726 | duplex |= np->mii_if.force_media; | ||
727 | /* remove fastether and fullduplex */ | ||
728 | result = np->csr6 & ~0x20000200; | ||
729 | if (duplex) | ||
730 | result |= 0x200; | ||
731 | if (fasteth) | ||
732 | result |= 0x20000000; | ||
733 | if (result != np->csr6 && debug) | ||
734 | dev_info(&dev->dev, | ||
735 | "Setting %dMBit-%s-duplex based on MII#%d\n", | ||
736 | fasteth ? 100 : 10, duplex ? "full" : "half", | ||
737 | np->phys[0]); | ||
738 | return result; | ||
739 | } | ||
740 | |||
741 | #define RXTX_TIMEOUT 2000 | ||
742 | static inline void update_csr6(struct net_device *dev, int new) | ||
743 | { | ||
744 | struct netdev_private *np = netdev_priv(dev); | ||
745 | void __iomem *ioaddr = np->base_addr; | ||
746 | int limit = RXTX_TIMEOUT; | ||
747 | |||
748 | if (!netif_device_present(dev)) | ||
749 | new = 0; | ||
750 | if (new==np->csr6) | ||
751 | return; | ||
752 | /* stop both Tx and Rx processes */ | ||
753 | iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig); | ||
754 | /* wait until they have really stopped */ | ||
755 | for (;;) { | ||
756 | int csr5 = ioread32(ioaddr + IntrStatus); | ||
757 | int t; | ||
758 | |||
759 | t = (csr5 >> 17) & 0x07; | ||
760 | if (t==0||t==1) { | ||
761 | /* rx stopped */ | ||
762 | t = (csr5 >> 20) & 0x07; | ||
763 | if (t==0||t==1) | ||
764 | break; | ||
765 | } | ||
766 | |||
767 | limit--; | ||
768 | if(!limit) { | ||
769 | dev_info(&dev->dev, | ||
770 | "couldn't stop rxtx, IntrStatus %xh\n", csr5); | ||
771 | break; | ||
772 | } | ||
773 | udelay(1); | ||
774 | } | ||
775 | np->csr6 = new; | ||
776 | /* and restart them with the new configuration */ | ||
777 | iowrite32(np->csr6, ioaddr + NetworkConfig); | ||
778 | if (new & 0x200) | ||
779 | np->mii_if.full_duplex = 1; | ||
780 | } | ||
781 | |||
782 | static void netdev_timer(unsigned long data) | ||
783 | { | ||
784 | struct net_device *dev = (struct net_device *)data; | ||
785 | struct netdev_private *np = netdev_priv(dev); | ||
786 | void __iomem *ioaddr = np->base_addr; | ||
787 | |||
788 | if (debug > 2) | ||
789 | netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n", | ||
790 | ioread32(ioaddr + IntrStatus), | ||
791 | ioread32(ioaddr + NetworkConfig)); | ||
792 | spin_lock_irq(&np->lock); | ||
793 | update_csr6(dev, update_link(dev)); | ||
794 | spin_unlock_irq(&np->lock); | ||
795 | np->timer.expires = jiffies + 10*HZ; | ||
796 | add_timer(&np->timer); | ||
797 | } | ||
798 | |||
799 | static void init_rxtx_rings(struct net_device *dev) | ||
800 | { | ||
801 | struct netdev_private *np = netdev_priv(dev); | ||
802 | int i; | ||
803 | |||
804 | np->rx_head_desc = &np->rx_ring[0]; | ||
805 | np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; | ||
806 | |||
807 | /* Initialize all Rx descriptors. */ | ||
808 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
809 | np->rx_ring[i].length = np->rx_buf_sz; | ||
810 | np->rx_ring[i].status = 0; | ||
811 | np->rx_skbuff[i] = NULL; | ||
812 | } | ||
813 | /* Mark the last entry as wrapping the ring. */ | ||
814 | np->rx_ring[i-1].length |= DescEndRing; | ||
815 | |||
816 | /* Fill in the Rx buffers. Handle allocation failure gracefully. */ | ||
817 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
818 | struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); | ||
819 | np->rx_skbuff[i] = skb; | ||
820 | if (skb == NULL) | ||
821 | break; | ||
822 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, | ||
823 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); | ||
824 | |||
825 | np->rx_ring[i].buffer1 = np->rx_addr[i]; | ||
826 | np->rx_ring[i].status = DescOwned; | ||
827 | } | ||
828 | |||
829 | np->cur_rx = 0; | ||
830 | np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); | ||
831 | |||
832 | /* Initialize the Tx descriptors */ | ||
833 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
834 | np->tx_skbuff[i] = NULL; | ||
835 | np->tx_ring[i].status = 0; | ||
836 | } | ||
837 | np->tx_full = 0; | ||
838 | np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0; | ||
839 | |||
840 | iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr); | ||
841 | iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE, | ||
842 | np->base_addr + TxRingPtr); | ||
843 | |||
844 | } | ||
845 | |||
846 | static void free_rxtx_rings(struct netdev_private* np) | ||
847 | { | ||
848 | int i; | ||
849 | /* Free all the skbuffs in the Rx queue. */ | ||
850 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
851 | np->rx_ring[i].status = 0; | ||
852 | if (np->rx_skbuff[i]) { | ||
853 | pci_unmap_single(np->pci_dev, | ||
854 | np->rx_addr[i], | ||
855 | np->rx_skbuff[i]->len, | ||
856 | PCI_DMA_FROMDEVICE); | ||
857 | dev_kfree_skb(np->rx_skbuff[i]); | ||
858 | } | ||
859 | np->rx_skbuff[i] = NULL; | ||
860 | } | ||
861 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
862 | if (np->tx_skbuff[i]) { | ||
863 | pci_unmap_single(np->pci_dev, | ||
864 | np->tx_addr[i], | ||
865 | np->tx_skbuff[i]->len, | ||
866 | PCI_DMA_TODEVICE); | ||
867 | dev_kfree_skb(np->tx_skbuff[i]); | ||
868 | } | ||
869 | np->tx_skbuff[i] = NULL; | ||
870 | } | ||
871 | } | ||
872 | |||
873 | static void init_registers(struct net_device *dev) | ||
874 | { | ||
875 | struct netdev_private *np = netdev_priv(dev); | ||
876 | void __iomem *ioaddr = np->base_addr; | ||
877 | int i; | ||
878 | |||
879 | for (i = 0; i < 6; i++) | ||
880 | iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i); | ||
881 | |||
882 | /* Initialize other registers. */ | ||
883 | #ifdef __BIG_ENDIAN | ||
884 | i = (1<<20); /* Big-endian descriptors */ | ||
885 | #else | ||
886 | i = 0; | ||
887 | #endif | ||
888 | i |= (0x04<<2); /* skip length 4 u32 */ | ||
889 | i |= 0x02; /* give Rx priority */ | ||
890 | |||
891 | /* Configure the PCI bus bursts and FIFO thresholds. | ||
892 | 486: Set 8 longword cache alignment, 8 longword burst. | ||
893 | 586: Set 16 longword cache alignment, no burst limit. | ||
894 | Cache alignment bits 15:14          Burst length 13:8 | ||
895 | 0000 <not allowed>       0000 align to cache   0800 8 longwords | ||
896 | 4000 8 longwords         0100 1 longword       1000 16 longwords | ||
897 | 8000 16 longwords        0200 2 longwords      2000 32 longwords | ||
898 | C000 32 longwords        0400 4 longwords */ | ||
899 | |||
900 | #if defined (__i386__) && !defined(MODULE) | ||
901 | /* When not a module we can work around broken '486 PCI boards. */ | ||
902 | if (boot_cpu_data.x86 <= 4) { | ||
903 | i |= 0x4800; | ||
904 | dev_info(&dev->dev, | ||
905 | "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n"); | ||
906 | } else { | ||
907 | i |= 0xE000; | ||
908 | } | ||
909 | #elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__) | ||
910 | i |= 0xE000; | ||
911 | #elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC) | ||
912 | i |= 0x4800; | ||
913 | #else | ||
914 | #warning Processor architecture undefined | ||
915 | i |= 0x4800; | ||
916 | #endif | ||
917 | iowrite32(i, ioaddr + PCIBusCfg); | ||
918 | |||
919 | np->csr6 = 0; | ||
920 | /* 128 byte Tx threshold; | ||
921 | Transmit on; Receive on; */ | ||
922 | update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev)); | ||
923 | |||
924 | /* Clear and Enable interrupts by setting the interrupt mask. */ | ||
925 | iowrite32(0x1A0F5, ioaddr + IntrStatus); | ||
926 | iowrite32(0x1A0F5, ioaddr + IntrEnable); | ||
927 | |||
928 | iowrite32(0, ioaddr + RxStartDemand); | ||
929 | } | ||
930 | |||
931 | static void tx_timeout(struct net_device *dev) | ||
932 | { | ||
933 | struct netdev_private *np = netdev_priv(dev); | ||
934 | void __iomem *ioaddr = np->base_addr; | ||
935 | |||
936 | dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", | ||
937 | ioread32(ioaddr + IntrStatus)); | ||
938 | |||
939 | { | ||
940 | int i; | ||
941 | printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); | ||
942 | for (i = 0; i < RX_RING_SIZE; i++) | ||
943 | printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status); | ||
944 | printk(KERN_CONT "\n"); | ||
945 | printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); | ||
946 | for (i = 0; i < TX_RING_SIZE; i++) | ||
947 | printk(KERN_CONT " %08x", np->tx_ring[i].status); | ||
948 | printk(KERN_CONT "\n"); | ||
949 | } | ||
950 | printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n", | ||
951 | np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); | ||
952 | printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); | ||
953 | |||
954 | disable_irq(dev->irq); | ||
955 | spin_lock_irq(&np->lock); | ||
956 | /* | ||
957 | * Under high load dirty_tx and the internal tx descriptor pointer | ||
958 | * come out of sync, thus perform a software reset and reinitialize | ||
959 | * everything. | ||
960 | */ | ||
961 | |||
962 | iowrite32(1, np->base_addr+PCIBusCfg); | ||
963 | udelay(1); | ||
964 | |||
965 | free_rxtx_rings(np); | ||
966 | init_rxtx_rings(dev); | ||
967 | init_registers(dev); | ||
968 | spin_unlock_irq(&np->lock); | ||
969 | enable_irq(dev->irq); | ||
970 | |||
971 | netif_wake_queue(dev); | ||
972 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
973 | np->stats.tx_errors++; | ||
974 | } | ||
975 | |||
976 | /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ | ||
977 | static int alloc_ringdesc(struct net_device *dev) | ||
978 | { | ||
979 | struct netdev_private *np = netdev_priv(dev); | ||
980 | |||
981 | np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); | ||
982 | |||
983 | np->rx_ring = pci_alloc_consistent(np->pci_dev, | ||
984 | sizeof(struct w840_rx_desc)*RX_RING_SIZE + | ||
985 | sizeof(struct w840_tx_desc)*TX_RING_SIZE, | ||
986 | &np->ring_dma_addr); | ||
987 | if(!np->rx_ring) | ||
988 | return -ENOMEM; | ||
989 | init_rxtx_rings(dev); | ||
990 | return 0; | ||
991 | } | ||
992 | |||
993 | static void free_ringdesc(struct netdev_private *np) | ||
994 | { | ||
995 | pci_free_consistent(np->pci_dev, | ||
996 | sizeof(struct w840_rx_desc)*RX_RING_SIZE + | ||
997 | sizeof(struct w840_tx_desc)*TX_RING_SIZE, | ||
998 | np->rx_ring, np->ring_dma_addr); | ||
999 | |||
1000 | } | ||
1001 | |||
1002 | static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1003 | { | ||
1004 | struct netdev_private *np = netdev_priv(dev); | ||
1005 | unsigned entry; | ||
1006 | |||
1007 | /* Caution: the write order is important here, set the field | ||
1008 | with the "ownership" bits last. */ | ||
1009 | |||
1010 | /* Calculate the next Tx descriptor entry. */ | ||
1011 | entry = np->cur_tx % TX_RING_SIZE; | ||
1012 | |||
1013 | np->tx_addr[entry] = pci_map_single(np->pci_dev, | ||
1014 | skb->data,skb->len, PCI_DMA_TODEVICE); | ||
1015 | np->tx_skbuff[entry] = skb; | ||
1016 | |||
1017 | np->tx_ring[entry].buffer1 = np->tx_addr[entry]; | ||
1018 | if (skb->len < TX_BUFLIMIT) { | ||
1019 | np->tx_ring[entry].length = DescWholePkt | skb->len; | ||
1020 | } else { | ||
1021 | int len = skb->len - TX_BUFLIMIT; | ||
1022 | |||
1023 | np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT; | ||
1024 | np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT; | ||
1025 | } | ||
1026 | if(entry == TX_RING_SIZE-1) | ||
1027 | np->tx_ring[entry].length |= DescEndRing; | ||
1028 | |||
1029 | /* Now acquire the irq spinlock. | ||
1030 | * The difficult race is the ordering between | ||
1031 | * increasing np->cur_tx and setting DescOwned: | ||
1032 | * - if np->cur_tx is increased first the interrupt | ||
1033 | * handler could consider the packet as transmitted | ||
1034 | * since DescOwned is cleared. | ||
1035 | * - If DescOwned is set first the NIC could report the | ||
1036 | * packet as sent, but the interrupt handler would ignore it | ||
1037 | * since the np->cur_tx was not yet increased. | ||
1038 | */ | ||
1039 | spin_lock_irq(&np->lock); | ||
1040 | np->cur_tx++; | ||
1041 | |||
1042 | wmb(); /* flush length, buffer1, buffer2 */ | ||
1043 | np->tx_ring[entry].status = DescOwned; | ||
1044 | wmb(); /* flush status and kick the hardware */ | ||
1045 | iowrite32(0, np->base_addr + TxStartDemand); | ||
1046 | np->tx_q_bytes += skb->len; | ||
1047 | /* Work around horrible bug in the chip by marking the queue as full | ||
1048 | when we do not have FIFO room for a maximum sized packet. */ | ||
1049 | if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN || | ||
1050 | ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) { | ||
1051 | netif_stop_queue(dev); | ||
1052 | wmb(); | ||
1053 | np->tx_full = 1; | ||
1054 | } | ||
1055 | spin_unlock_irq(&np->lock); | ||
1056 | |||
1057 | if (debug > 4) { | ||
1058 | netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", | ||
1059 | np->cur_tx, entry); | ||
1060 | } | ||
1061 | return NETDEV_TX_OK; | ||
1062 | } | ||
1063 | |||
1064 | static void netdev_tx_done(struct net_device *dev) | ||
1065 | { | ||
1066 | struct netdev_private *np = netdev_priv(dev); | ||
1067 | for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { | ||
1068 | int entry = np->dirty_tx % TX_RING_SIZE; | ||
1069 | int tx_status = np->tx_ring[entry].status; | ||
1070 | |||
1071 | if (tx_status < 0) | ||
1072 | break; | ||
1073 | if (tx_status & 0x8000) { /* There was an error, log it. */ | ||
1074 | #ifndef final_version | ||
1075 | if (debug > 1) | ||
1076 | netdev_dbg(dev, "Transmit error, Tx status %08x\n", | ||
1077 | tx_status); | ||
1078 | #endif | ||
1079 | np->stats.tx_errors++; | ||
1080 | if (tx_status & 0x0104) np->stats.tx_aborted_errors++; | ||
1081 | if (tx_status & 0x0C80) np->stats.tx_carrier_errors++; | ||
1082 | if (tx_status & 0x0200) np->stats.tx_window_errors++; | ||
1083 | if (tx_status & 0x0002) np->stats.tx_fifo_errors++; | ||
1084 | if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0) | ||
1085 | np->stats.tx_heartbeat_errors++; | ||
1086 | } else { | ||
1087 | #ifndef final_version | ||
1088 | if (debug > 3) | ||
1089 | netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n", | ||
1090 | entry, tx_status); | ||
1091 | #endif | ||
1092 | np->stats.tx_bytes += np->tx_skbuff[entry]->len; | ||
1093 | np->stats.collisions += (tx_status >> 3) & 15; | ||
1094 | np->stats.tx_packets++; | ||
1095 | } | ||
1096 | /* Free the original skb. */ | ||
1097 | pci_unmap_single(np->pci_dev,np->tx_addr[entry], | ||
1098 | np->tx_skbuff[entry]->len, | ||
1099 | PCI_DMA_TODEVICE); | ||
1100 | np->tx_q_bytes -= np->tx_skbuff[entry]->len; | ||
1101 | dev_kfree_skb_irq(np->tx_skbuff[entry]); | ||
1102 | np->tx_skbuff[entry] = NULL; | ||
1103 | } | ||
1104 | if (np->tx_full && | ||
1105 | np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART && | ||
1106 | np->tx_q_bytes < TX_BUG_FIFO_LIMIT) { | ||
1107 | /* The ring is no longer full, clear tbusy. */ | ||
1108 | np->tx_full = 0; | ||
1109 | wmb(); | ||
1110 | netif_wake_queue(dev); | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | /* The interrupt handler does all of the Rx thread work and cleans up | ||
1115 | after the Tx thread. */ | ||
1116 | static irqreturn_t intr_handler(int irq, void *dev_instance) | ||
1117 | { | ||
1118 | struct net_device *dev = (struct net_device *)dev_instance; | ||
1119 | struct netdev_private *np = netdev_priv(dev); | ||
1120 | void __iomem *ioaddr = np->base_addr; | ||
1121 | int work_limit = max_interrupt_work; | ||
1122 | int handled = 0; | ||
1123 | |||
1124 | if (!netif_device_present(dev)) | ||
1125 | return IRQ_NONE; | ||
1126 | do { | ||
1127 | u32 intr_status = ioread32(ioaddr + IntrStatus); | ||
1128 | |||
1129 | /* Acknowledge all of the current interrupt sources ASAP. */ | ||
1130 | iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus); | ||
1131 | |||
1132 | if (debug > 4) | ||
1133 | netdev_dbg(dev, "Interrupt, status %04x\n", intr_status); | ||
1134 | |||
1135 | if ((intr_status & (NormalIntr|AbnormalIntr)) == 0) | ||
1136 | break; | ||
1137 | |||
1138 | handled = 1; | ||
1139 | |||
1140 | if (intr_status & (RxIntr | RxNoBuf)) | ||
1141 | netdev_rx(dev); | ||
1142 | if (intr_status & RxNoBuf) | ||
1143 | iowrite32(0, ioaddr + RxStartDemand); | ||
1144 | |||
1145 | if (intr_status & (TxNoBuf | TxIntr) && | ||
1146 | np->cur_tx != np->dirty_tx) { | ||
1147 | spin_lock(&np->lock); | ||
1148 | netdev_tx_done(dev); | ||
1149 | spin_unlock(&np->lock); | ||
1150 | } | ||
1151 | |||
1152 | /* Abnormal error summary/uncommon events handlers. */ | ||
1153 | if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError | | ||
1154 | TimerInt | TxDied)) | ||
1155 | netdev_error(dev, intr_status); | ||
1156 | |||
1157 | if (--work_limit < 0) { | ||
1158 | dev_warn(&dev->dev, | ||
1159 | "Too much work at interrupt, status=0x%04x\n", | ||
1160 | intr_status); | ||
1161 | /* Set the timer to re-enable the other interrupts after | ||
1162 | 10*82usec ticks. */ | ||
1163 | spin_lock(&np->lock); | ||
1164 | if (netif_device_present(dev)) { | ||
1165 | iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable); | ||
1166 | iowrite32(10, ioaddr + GPTimer); | ||
1167 | } | ||
1168 | spin_unlock(&np->lock); | ||
1169 | break; | ||
1170 | } | ||
1171 | } while (1); | ||
1172 | |||
1173 | if (debug > 3) | ||
1174 | netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n", | ||
1175 | ioread32(ioaddr + IntrStatus)); | ||
1176 | return IRQ_RETVAL(handled); | ||
1177 | } | ||
1178 | |||
1179 | /* This routine is logically part of the interrupt handler, but separated | ||
1180 | for clarity and better register allocation. */ | ||
1181 | static int netdev_rx(struct net_device *dev) | ||
1182 | { | ||
1183 | struct netdev_private *np = netdev_priv(dev); | ||
1184 | int entry = np->cur_rx % RX_RING_SIZE; | ||
1185 | int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; | ||
1186 | |||
1187 | if (debug > 4) { | ||
1188 | netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n", | ||
1189 | entry, np->rx_ring[entry].status); | ||
1190 | } | ||
1191 | |||
1192 | /* If EOP is set on the next entry, it's a new packet. Send it up. */ | ||
1193 | while (--work_limit >= 0) { | ||
1194 | struct w840_rx_desc *desc = np->rx_head_desc; | ||
1195 | s32 status = desc->status; | ||
1196 | |||
1197 | if (debug > 4) | ||
1198 | netdev_dbg(dev, " netdev_rx() status was %08x\n", | ||
1199 | status); | ||
1200 | if (status < 0) | ||
1201 | break; | ||
1202 | if ((status & 0x38008300) != 0x0300) { | ||
1203 | if ((status & 0x38000300) != 0x0300) { | ||
1204 | /* Ignore earlier buffers. */ | ||
1205 | if ((status & 0xffff) != 0x7fff) { | ||
1206 | dev_warn(&dev->dev, | ||
1207 | "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n", | ||
1208 | np->cur_rx, status); | ||
1209 | np->stats.rx_length_errors++; | ||
1210 | } | ||
1211 | } else if (status & 0x8000) { | ||
1212 | /* There was a fatal error. */ | ||
1213 | if (debug > 2) | ||
1214 | netdev_dbg(dev, "Receive error, Rx status %08x\n", | ||
1215 | status); | ||
1216 | np->stats.rx_errors++; /* end of a packet.*/ | ||
1217 | if (status & 0x0890) np->stats.rx_length_errors++; | ||
1218 | if (status & 0x004C) np->stats.rx_frame_errors++; | ||
1219 | if (status & 0x0002) np->stats.rx_crc_errors++; | ||
1220 | } | ||
1221 | } else { | ||
1222 | struct sk_buff *skb; | ||
1223 | /* Omit the four octet CRC from the length. */ | ||
1224 | int pkt_len = ((status >> 16) & 0x7ff) - 4; | ||
1225 | |||
1226 | #ifndef final_version | ||
1227 | if (debug > 4) | ||
1228 | netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n", | ||
1229 | pkt_len, status); | ||
1230 | #endif | ||
1231 | /* Check if the packet is long enough to accept without copying | ||
1232 | to a minimally-sized skbuff. */ | ||
1233 | if (pkt_len < rx_copybreak && | ||
1234 | (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { | ||
1235 | skb_reserve(skb, 2); /* 16 byte align the IP header */ | ||
1236 | pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], | ||
1237 | np->rx_skbuff[entry]->len, | ||
1238 | PCI_DMA_FROMDEVICE); | ||
1239 | skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); | ||
1240 | skb_put(skb, pkt_len); | ||
1241 | pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], | ||
1242 | np->rx_skbuff[entry]->len, | ||
1243 | PCI_DMA_FROMDEVICE); | ||
1244 | } else { | ||
1245 | pci_unmap_single(np->pci_dev,np->rx_addr[entry], | ||
1246 | np->rx_skbuff[entry]->len, | ||
1247 | PCI_DMA_FROMDEVICE); | ||
1248 | skb_put(skb = np->rx_skbuff[entry], pkt_len); | ||
1249 | np->rx_skbuff[entry] = NULL; | ||
1250 | } | ||
1251 | #ifndef final_version /* Remove after testing. */ | ||
1252 | /* You will want this info for the initial debug. */ | ||
1253 | if (debug > 5) | ||
1254 | netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n", | ||
1255 | &skb->data[0], &skb->data[6], | ||
1256 | skb->data[12], skb->data[13], | ||
1257 | &skb->data[14]); | ||
1258 | #endif | ||
1259 | skb->protocol = eth_type_trans(skb, dev); | ||
1260 | netif_rx(skb); | ||
1261 | np->stats.rx_packets++; | ||
1262 | np->stats.rx_bytes += pkt_len; | ||
1263 | } | ||
1264 | entry = (++np->cur_rx) % RX_RING_SIZE; | ||
1265 | np->rx_head_desc = &np->rx_ring[entry]; | ||
1266 | } | ||
1267 | |||
1268 | /* Refill the Rx ring buffers. */ | ||
1269 | for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { | ||
1270 | struct sk_buff *skb; | ||
1271 | entry = np->dirty_rx % RX_RING_SIZE; | ||
1272 | if (np->rx_skbuff[entry] == NULL) { | ||
1273 | skb = dev_alloc_skb(np->rx_buf_sz); | ||
1274 | np->rx_skbuff[entry] = skb; | ||
1275 | if (skb == NULL) | ||
1276 | break; /* Better luck next round. */ | ||
1277 | np->rx_addr[entry] = pci_map_single(np->pci_dev, | ||
1278 | skb->data, | ||
1279 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1280 | np->rx_ring[entry].buffer1 = np->rx_addr[entry]; | ||
1281 | } | ||
1282 | wmb(); | ||
1283 | np->rx_ring[entry].status = DescOwned; | ||
1284 | } | ||
1285 | |||
1286 | return 0; | ||
1287 | } | ||
1288 | |||
1289 | static void netdev_error(struct net_device *dev, int intr_status) | ||
1290 | { | ||
1291 | struct netdev_private *np = netdev_priv(dev); | ||
1292 | void __iomem *ioaddr = np->base_addr; | ||
1293 | |||
1294 | if (debug > 2) | ||
1295 | netdev_dbg(dev, "Abnormal event, %08x\n", intr_status); | ||
1296 | if (intr_status == 0xffffffff) | ||
1297 | return; | ||
1298 | spin_lock(&np->lock); | ||
1299 | if (intr_status & TxFIFOUnderflow) { | ||
1300 | int new; | ||
1301 | /* Bump up the Tx threshold */ | ||
1302 | #if 0 | ||
1303 | /* This causes lots of dropped packets, | ||
1304 | * and under high load even tx_timeouts | ||
1305 | */ | ||
1306 | new = np->csr6 + 0x4000; | ||
1307 | #else | ||
1308 | new = (np->csr6 >> 14)&0x7f; | ||
1309 | if (new < 64) | ||
1310 | new *= 2; | ||
1311 | else | ||
1312 | new = 127; /* load full packet before starting */ | ||
1313 | new = (np->csr6 & ~(0x7F << 14)) | (new<<14); | ||
1314 | #endif | ||
1315 | netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new); | ||
1316 | update_csr6(dev, new); | ||
1317 | } | ||
1318 | if (intr_status & RxDied) { /* Missed a Rx frame. */ | ||
1319 | np->stats.rx_errors++; | ||
1320 | } | ||
1321 | if (intr_status & TimerInt) { | ||
1322 | /* Re-enable other interrupts. */ | ||
1323 | if (netif_device_present(dev)) | ||
1324 | iowrite32(0x1A0F5, ioaddr + IntrEnable); | ||
1325 | } | ||
1326 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | ||
1327 | iowrite32(0, ioaddr + RxStartDemand); | ||
1328 | spin_unlock(&np->lock); | ||
1329 | } | ||
1330 | |||
1331 | static struct net_device_stats *get_stats(struct net_device *dev) | ||
1332 | { | ||
1333 | struct netdev_private *np = netdev_priv(dev); | ||
1334 | void __iomem *ioaddr = np->base_addr; | ||
1335 | |||
1336 | /* The chip only needs to report frames silently dropped. */ | ||
1337 | spin_lock_irq(&np->lock); | ||
1338 | if (netif_running(dev) && netif_device_present(dev)) | ||
1339 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | ||
1340 | spin_unlock_irq(&np->lock); | ||
1341 | |||
1342 | return &np->stats; | ||
1343 | } | ||
1344 | |||
1345 | |||
1346 | static u32 __set_rx_mode(struct net_device *dev) | ||
1347 | { | ||
1348 | struct netdev_private *np = netdev_priv(dev); | ||
1349 | void __iomem *ioaddr = np->base_addr; | ||
1350 | u32 mc_filter[2]; /* Multicast hash filter */ | ||
1351 | u32 rx_mode; | ||
1352 | |||
1353 | if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ | ||
1354 | memset(mc_filter, 0xff, sizeof(mc_filter)); | ||
1355 | rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys | ||
1356 | | AcceptMyPhys; | ||
1357 | } else if ((netdev_mc_count(dev) > multicast_filter_limit) || | ||
1358 | (dev->flags & IFF_ALLMULTI)) { | ||
1359 | /* Too many to match, or accept all multicasts. */ | ||
1360 | memset(mc_filter, 0xff, sizeof(mc_filter)); | ||
1361 | rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; | ||
1362 | } else { | ||
1363 | struct netdev_hw_addr *ha; | ||
1364 | |||
1365 | memset(mc_filter, 0, sizeof(mc_filter)); | ||
1366 | netdev_for_each_mc_addr(ha, dev) { | ||
1367 | int filbit; | ||
1368 | |||
1369 | filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F; | ||
1370 | filbit &= 0x3f; | ||
1371 | mc_filter[filbit >> 5] |= 1 << (filbit & 31); | ||
1372 | } | ||
1373 | rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; | ||
1374 | } | ||
1375 | iowrite32(mc_filter[0], ioaddr + MulticastFilter0); | ||
1376 | iowrite32(mc_filter[1], ioaddr + MulticastFilter1); | ||
1377 | return rx_mode; | ||
1378 | } | ||
1379 | |||
1380 | static void set_rx_mode(struct net_device *dev) | ||
1381 | { | ||
1382 | struct netdev_private *np = netdev_priv(dev); | ||
1383 | u32 rx_mode = __set_rx_mode(dev); | ||
1384 | spin_lock_irq(&np->lock); | ||
1385 | update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode); | ||
1386 | spin_unlock_irq(&np->lock); | ||
1387 | } | ||
1388 | |||
1389 | static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) | ||
1390 | { | ||
1391 | struct netdev_private *np = netdev_priv(dev); | ||
1392 | |||
1393 | strcpy (info->driver, DRV_NAME); | ||
1394 | strcpy (info->version, DRV_VERSION); | ||
1395 | strcpy (info->bus_info, pci_name(np->pci_dev)); | ||
1396 | } | ||
1397 | |||
1398 | static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1399 | { | ||
1400 | struct netdev_private *np = netdev_priv(dev); | ||
1401 | int rc; | ||
1402 | |||
1403 | spin_lock_irq(&np->lock); | ||
1404 | rc = mii_ethtool_gset(&np->mii_if, cmd); | ||
1405 | spin_unlock_irq(&np->lock); | ||
1406 | |||
1407 | return rc; | ||
1408 | } | ||
1409 | |||
1410 | static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1411 | { | ||
1412 | struct netdev_private *np = netdev_priv(dev); | ||
1413 | int rc; | ||
1414 | |||
1415 | spin_lock_irq(&np->lock); | ||
1416 | rc = mii_ethtool_sset(&np->mii_if, cmd); | ||
1417 | spin_unlock_irq(&np->lock); | ||
1418 | |||
1419 | return rc; | ||
1420 | } | ||
1421 | |||
1422 | static int netdev_nway_reset(struct net_device *dev) | ||
1423 | { | ||
1424 | struct netdev_private *np = netdev_priv(dev); | ||
1425 | return mii_nway_restart(&np->mii_if); | ||
1426 | } | ||
1427 | |||
1428 | static u32 netdev_get_link(struct net_device *dev) | ||
1429 | { | ||
1430 | struct netdev_private *np = netdev_priv(dev); | ||
1431 | return mii_link_ok(&np->mii_if); | ||
1432 | } | ||
1433 | |||
1434 | static u32 netdev_get_msglevel(struct net_device *dev) | ||
1435 | { | ||
1436 | return debug; | ||
1437 | } | ||
1438 | |||
1439 | static void netdev_set_msglevel(struct net_device *dev, u32 value) | ||
1440 | { | ||
1441 | debug = value; | ||
1442 | } | ||
1443 | |||
1444 | static const struct ethtool_ops netdev_ethtool_ops = { | ||
1445 | .get_drvinfo = netdev_get_drvinfo, | ||
1446 | .get_settings = netdev_get_settings, | ||
1447 | .set_settings = netdev_set_settings, | ||
1448 | .nway_reset = netdev_nway_reset, | ||
1449 | .get_link = netdev_get_link, | ||
1450 | .get_msglevel = netdev_get_msglevel, | ||
1451 | .set_msglevel = netdev_set_msglevel, | ||
1452 | }; | ||
1453 | |||
1454 | static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
1455 | { | ||
1456 | struct mii_ioctl_data *data = if_mii(rq); | ||
1457 | struct netdev_private *np = netdev_priv(dev); | ||
1458 | |||
1459 | switch(cmd) { | ||
1460 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | ||
1461 | data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f; | ||
1462 | /* Fall Through */ | ||
1463 | |||
1464 | case SIOCGMIIREG: /* Read MII PHY register. */ | ||
1465 | spin_lock_irq(&np->lock); | ||
1466 | data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f); | ||
1467 | spin_unlock_irq(&np->lock); | ||
1468 | return 0; | ||
1469 | |||
1470 | case SIOCSMIIREG: /* Write MII PHY register. */ | ||
1471 | spin_lock_irq(&np->lock); | ||
1472 | mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); | ||
1473 | spin_unlock_irq(&np->lock); | ||
1474 | return 0; | ||
1475 | default: | ||
1476 | return -EOPNOTSUPP; | ||
1477 | } | ||
1478 | } | ||
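The SIOCGMIIPHY/SIOCGMIIREG handler above implements the standard MII ioctl interface that tools such as mii-tool use. For context only, a minimal userspace sketch (hypothetical, assuming an interface named eth0) that reads the PHY's basic mode status register through it:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	/* The MII data sits inside the ifreq union, mirroring if_mii() in-kernel */
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* assumed interface name */

	if (fd < 0 || ioctl(fd, SIOCGMIIPHY, &ifr) < 0)	/* fills mii->phy_id */
		return 1;
	mii->reg_num = MII_BMSR;			/* basic mode status register */
	if (ioctl(fd, SIOCGMIIREG, &ifr) < 0)		/* handled by netdev_ioctl() above */
		return 1;
	printf("PHY %u BMSR 0x%04x\n",
	       (unsigned)mii->phy_id, (unsigned)mii->val_out);
	return 0;
}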
1479 | |||
1480 | static int netdev_close(struct net_device *dev) | ||
1481 | { | ||
1482 | struct netdev_private *np = netdev_priv(dev); | ||
1483 | void __iomem *ioaddr = np->base_addr; | ||
1484 | |||
1485 | netif_stop_queue(dev); | ||
1486 | |||
1487 | if (debug > 1) { | ||
1488 | netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n", | ||
1489 | ioread32(ioaddr + IntrStatus), | ||
1490 | ioread32(ioaddr + NetworkConfig)); | ||
1491 | netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n", | ||
1492 | np->cur_tx, np->dirty_tx, | ||
1493 | np->cur_rx, np->dirty_rx); | ||
1494 | } | ||
1495 | |||
1496 | /* Stop the chip's Tx and Rx processes. */ | ||
1497 | spin_lock_irq(&np->lock); | ||
1498 | netif_device_detach(dev); | ||
1499 | update_csr6(dev, 0); | ||
1500 | iowrite32(0x0000, ioaddr + IntrEnable); | ||
1501 | spin_unlock_irq(&np->lock); | ||
1502 | |||
1503 | free_irq(dev->irq, dev); | ||
1504 | wmb(); | ||
1505 | netif_device_attach(dev); | ||
1506 | |||
1507 | if (ioread32(ioaddr + NetworkConfig) != 0xffffffff) | ||
1508 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | ||
1509 | |||
1510 | #ifdef __i386__ | ||
1511 | if (debug > 2) { | ||
1512 | int i; | ||
1513 | |||
1514 | printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring); | ||
1515 | for (i = 0; i < TX_RING_SIZE; i++) | ||
1516 | printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", | ||
1517 | i, np->tx_ring[i].length, | ||
1518 | np->tx_ring[i].status, np->tx_ring[i].buffer1); | ||
1519 | printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); | ||
1520 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1521 | printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n", | ||
1522 | i, np->rx_ring[i].length, | ||
1523 | np->rx_ring[i].status, np->rx_ring[i].buffer1); | ||
1524 | } | ||
1525 | } | ||
1526 | #endif /* __i386__ debugging only */ | ||
1527 | |||
1528 | del_timer_sync(&np->timer); | ||
1529 | |||
1530 | free_rxtx_rings(np); | ||
1531 | free_ringdesc(np); | ||
1532 | |||
1533 | return 0; | ||
1534 | } | ||
1535 | |||
1536 | static void __devexit w840_remove1 (struct pci_dev *pdev) | ||
1537 | { | ||
1538 | struct net_device *dev = pci_get_drvdata(pdev); | ||
1539 | |||
1540 | if (dev) { | ||
1541 | struct netdev_private *np = netdev_priv(dev); | ||
1542 | unregister_netdev(dev); | ||
1543 | pci_release_regions(pdev); | ||
1544 | pci_iounmap(pdev, np->base_addr); | ||
1545 | free_netdev(dev); | ||
1546 | } | ||
1547 | |||
1548 | pci_set_drvdata(pdev, NULL); | ||
1549 | } | ||
1550 | |||
1551 | #ifdef CONFIG_PM | ||
1552 | |||
1553 | /* | ||
1554 | * suspend/resume synchronization: | ||
1555 | * - open, close, do_ioctl: | ||
1556 | * rtnl_lock, & netif_device_detach after the rtnl_unlock. | ||
1557 | * - get_stats: | ||
1558 | * spin_lock_irq(np->lock), doesn't touch hw if not present | ||
1559 | * - start_xmit: | ||
1560 | * synchronize_irq + netif_tx_disable; | ||
1561 | * - tx_timeout: | ||
1562 | * netif_device_detach + netif_tx_disable; | ||
1563 | * - set_multicast_list | ||
1564 | * netif_device_detach + netif_tx_disable; | ||
1565 | * - interrupt handler | ||
1566 | * doesn't touch hw if not present, synchronize_irq waits for | ||
1567 | * running instances of the interrupt handler. | ||
1568 | * | ||
1569 | * Disabling hw requires clearing csr6 & IntrEnable. | ||
1570 | * update_csr6 and all functions that write IntrEnable check netif_device_present | ||
1571 | * before setting any bits. | ||
1572 | * | ||
1573 | * Detach must occur under spin_unlock_irq(); interrupts from a detached | ||
1574 | * device would cause an irq storm. | ||
1575 | */ | ||
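As a purely illustrative fragment (not code from this driver, reusing this file's ioaddr and IntrEnable definitions), the rule described above reduces to a guard of this shape in every helper that touches the hardware:

/* Hypothetical helper, illustration only: a write to IntrEnable is skipped
 * once the device has been detached for suspend. */
static void example_write_intr_enable(struct net_device *dev,
				      void __iomem *ioaddr, u32 bits)
{
	if (!netif_device_present(dev))	/* detached: hardware may be powered off */
		return;
	iowrite32(bits, ioaddr + IntrEnable);
}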
1576 | static int w840_suspend (struct pci_dev *pdev, pm_message_t state) | ||
1577 | { | ||
1578 | struct net_device *dev = pci_get_drvdata (pdev); | ||
1579 | struct netdev_private *np = netdev_priv(dev); | ||
1580 | void __iomem *ioaddr = np->base_addr; | ||
1581 | |||
1582 | rtnl_lock(); | ||
1583 | if (netif_running (dev)) { | ||
1584 | del_timer_sync(&np->timer); | ||
1585 | |||
1586 | spin_lock_irq(&np->lock); | ||
1587 | netif_device_detach(dev); | ||
1588 | update_csr6(dev, 0); | ||
1589 | iowrite32(0, ioaddr + IntrEnable); | ||
1590 | spin_unlock_irq(&np->lock); | ||
1591 | |||
1592 | synchronize_irq(dev->irq); | ||
1593 | netif_tx_disable(dev); | ||
1594 | |||
1595 | np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; | ||
1596 | |||
1597 | /* no more hardware accesses behind this line. */ | ||
1598 | |||
1599 | BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable)); | ||
1600 | |||
1601 | /* pci_power_off(pdev, -1); */ | ||
1602 | |||
1603 | free_rxtx_rings(np); | ||
1604 | } else { | ||
1605 | netif_device_detach(dev); | ||
1606 | } | ||
1607 | rtnl_unlock(); | ||
1608 | return 0; | ||
1609 | } | ||
1610 | |||
1611 | static int w840_resume (struct pci_dev *pdev) | ||
1612 | { | ||
1613 | struct net_device *dev = pci_get_drvdata (pdev); | ||
1614 | struct netdev_private *np = netdev_priv(dev); | ||
1615 | int retval = 0; | ||
1616 | |||
1617 | rtnl_lock(); | ||
1618 | if (netif_device_present(dev)) | ||
1619 | goto out; /* device not suspended */ | ||
1620 | if (netif_running(dev)) { | ||
1621 | if ((retval = pci_enable_device(pdev))) { | ||
1622 | dev_err(&dev->dev, | ||
1623 | "pci_enable_device failed in resume\n"); | ||
1624 | goto out; | ||
1625 | } | ||
1626 | spin_lock_irq(&np->lock); | ||
1627 | iowrite32(1, np->base_addr+PCIBusCfg); | ||
1628 | ioread32(np->base_addr+PCIBusCfg); | ||
1629 | udelay(1); | ||
1630 | netif_device_attach(dev); | ||
1631 | init_rxtx_rings(dev); | ||
1632 | init_registers(dev); | ||
1633 | spin_unlock_irq(&np->lock); | ||
1634 | |||
1635 | netif_wake_queue(dev); | ||
1636 | |||
1637 | mod_timer(&np->timer, jiffies + 1*HZ); | ||
1638 | } else { | ||
1639 | netif_device_attach(dev); | ||
1640 | } | ||
1641 | out: | ||
1642 | rtnl_unlock(); | ||
1643 | return retval; | ||
1644 | } | ||
1645 | #endif | ||
1646 | |||
1647 | static struct pci_driver w840_driver = { | ||
1648 | .name = DRV_NAME, | ||
1649 | .id_table = w840_pci_tbl, | ||
1650 | .probe = w840_probe1, | ||
1651 | .remove = __devexit_p(w840_remove1), | ||
1652 | #ifdef CONFIG_PM | ||
1653 | .suspend = w840_suspend, | ||
1654 | .resume = w840_resume, | ||
1655 | #endif | ||
1656 | }; | ||
1657 | |||
1658 | static int __init w840_init(void) | ||
1659 | { | ||
1660 | printk(version); | ||
1661 | return pci_register_driver(&w840_driver); | ||
1662 | } | ||
1663 | |||
1664 | static void __exit w840_exit(void) | ||
1665 | { | ||
1666 | pci_unregister_driver(&w840_driver); | ||
1667 | } | ||
1668 | |||
1669 | module_init(w840_init); | ||
1670 | module_exit(w840_exit); | ||
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c new file mode 100644 index 000000000000..988b8eb24d37 --- /dev/null +++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c | |||
@@ -0,0 +1,1154 @@ | |||
1 | /* | ||
2 | * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards | ||
3 | * | ||
4 | * This software is (C) by the respective authors, and licensed under the GPL | ||
5 | * License. | ||
6 | * | ||
7 | * Written by Arjan van de Ven for Red Hat, Inc. | ||
8 | * Based on work by Jeff Garzik, Doug Ledford and Donald Becker | ||
9 | * | ||
10 | * This software may be used and distributed according to the terms | ||
11 | * of the GNU General Public License, incorporated herein by reference. | ||
12 | * | ||
13 | * | ||
14 | * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $ | ||
15 | */ | ||
16 | |||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/string.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/ioport.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/etherdevice.h> | ||
29 | #include <linux/skbuff.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/bitops.h> | ||
33 | |||
34 | #include <asm/uaccess.h> | ||
35 | #include <asm/io.h> | ||
36 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
37 | #include <asm/irq.h> | ||
38 | #endif | ||
39 | |||
40 | MODULE_DESCRIPTION("Xircom Cardbus ethernet driver"); | ||
41 | MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); | ||
42 | MODULE_LICENSE("GPL"); | ||
43 | |||
44 | |||
45 | |||
46 | /* IO registers on the card, offsets */ | ||
47 | #define CSR0 0x00 | ||
48 | #define CSR1 0x08 | ||
49 | #define CSR2 0x10 | ||
50 | #define CSR3 0x18 | ||
51 | #define CSR4 0x20 | ||
52 | #define CSR5 0x28 | ||
53 | #define CSR6 0x30 | ||
54 | #define CSR7 0x38 | ||
55 | #define CSR8 0x40 | ||
56 | #define CSR9 0x48 | ||
57 | #define CSR10 0x50 | ||
58 | #define CSR11 0x58 | ||
59 | #define CSR12 0x60 | ||
60 | #define CSR13 0x68 | ||
61 | #define CSR14 0x70 | ||
62 | #define CSR15 0x78 | ||
63 | #define CSR16 0x80 | ||
64 | |||
65 | /* PCI registers */ | ||
66 | #define PCI_POWERMGMT 0x40 | ||
67 | |||
68 | /* Offsets of the buffers within the descriptor pages, in bytes */ | ||
69 | |||
70 | #define NUMDESCRIPTORS 4 | ||
71 | |||
72 | static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144}; | ||
73 | |||
74 | |||
75 | struct xircom_private { | ||
76 | /* Send and receive buffers, kernel-addressable and dma addressable forms */ | ||
77 | |||
78 | __le32 *rx_buffer; | ||
79 | __le32 *tx_buffer; | ||
80 | |||
81 | dma_addr_t rx_dma_handle; | ||
82 | dma_addr_t tx_dma_handle; | ||
83 | |||
84 | struct sk_buff *tx_skb[4]; | ||
85 | |||
86 | unsigned long io_port; | ||
87 | int open; | ||
88 | |||
89 | /* transmit_used is the rotating counter that indicates which transmit | ||
90 | descriptor has to be used next */ | ||
91 | int transmit_used; | ||
92 | |||
93 | /* Spinlock to serialize register operations. | ||
94 | It must be held while manipulating the following registers: | ||
95 | CSR0, CSR6, CSR7, CSR9, CSR10, CSR15 | ||
96 | */ | ||
97 | spinlock_t lock; | ||
98 | |||
99 | struct pci_dev *pdev; | ||
100 | struct net_device *dev; | ||
101 | }; | ||
102 | |||
103 | |||
104 | /* Function prototypes */ | ||
105 | static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id); | ||
106 | static void xircom_remove(struct pci_dev *pdev); | ||
107 | static irqreturn_t xircom_interrupt(int irq, void *dev_instance); | ||
108 | static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, | ||
109 | struct net_device *dev); | ||
110 | static int xircom_open(struct net_device *dev); | ||
111 | static int xircom_close(struct net_device *dev); | ||
112 | static void xircom_up(struct xircom_private *card); | ||
113 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
114 | static void xircom_poll_controller(struct net_device *dev); | ||
115 | #endif | ||
116 | |||
117 | static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset); | ||
118 | static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset); | ||
119 | static void read_mac_address(struct xircom_private *card); | ||
120 | static void transceiver_voodoo(struct xircom_private *card); | ||
121 | static void initialize_card(struct xircom_private *card); | ||
122 | static void trigger_transmit(struct xircom_private *card); | ||
123 | static void trigger_receive(struct xircom_private *card); | ||
124 | static void setup_descriptors(struct xircom_private *card); | ||
125 | static void remove_descriptors(struct xircom_private *card); | ||
126 | static int link_status_changed(struct xircom_private *card); | ||
127 | static void activate_receiver(struct xircom_private *card); | ||
128 | static void deactivate_receiver(struct xircom_private *card); | ||
129 | static void activate_transmitter(struct xircom_private *card); | ||
130 | static void deactivate_transmitter(struct xircom_private *card); | ||
131 | static void enable_transmit_interrupt(struct xircom_private *card); | ||
132 | static void enable_receive_interrupt(struct xircom_private *card); | ||
133 | static void enable_link_interrupt(struct xircom_private *card); | ||
134 | static void disable_all_interrupts(struct xircom_private *card); | ||
135 | static int link_status(struct xircom_private *card); | ||
136 | |||
137 | |||
138 | |||
139 | static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { | ||
140 | {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, | ||
141 | {0,}, | ||
142 | }; | ||
143 | MODULE_DEVICE_TABLE(pci, xircom_pci_table); | ||
144 | |||
145 | static struct pci_driver xircom_ops = { | ||
146 | .name = "xircom_cb", | ||
147 | .id_table = xircom_pci_table, | ||
148 | .probe = xircom_probe, | ||
149 | .remove = xircom_remove, | ||
150 | .suspend = NULL, | ||
151 | .resume = NULL | ||
152 | }; | ||
153 | |||
154 | |||
155 | #if defined DEBUG && DEBUG > 1 | ||
156 | static void print_binary(unsigned int number) | ||
157 | { | ||
158 | int i,i2; | ||
159 | char buffer[64]; | ||
160 | memset(buffer,0,64); | ||
161 | i2=0; | ||
162 | for (i=31;i>=0;i--) { | ||
163 | if (number & (1<<i)) | ||
164 | buffer[i2++]='1'; | ||
165 | else | ||
166 | buffer[i2++]='0'; | ||
167 | if ((i&3)==0) | ||
168 | buffer[i2++]=' '; | ||
169 | } | ||
170 | pr_debug("%s\n",buffer); | ||
171 | } | ||
172 | #endif | ||
173 | |||
174 | static const struct net_device_ops netdev_ops = { | ||
175 | .ndo_open = xircom_open, | ||
176 | .ndo_stop = xircom_close, | ||
177 | .ndo_start_xmit = xircom_start_xmit, | ||
178 | .ndo_change_mtu = eth_change_mtu, | ||
179 | .ndo_set_mac_address = eth_mac_addr, | ||
180 | .ndo_validate_addr = eth_validate_addr, | ||
181 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
182 | .ndo_poll_controller = xircom_poll_controller, | ||
183 | #endif | ||
184 | }; | ||
185 | |||
186 | /* xircom_probe is the code that gets called on device insertion. | ||
187 | It sets up the hardware and registers the device with the network layer. | ||
188 | |||
189 | TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the | ||
190 | first two packets that get sent, and pump hates that. | ||
191 | |||
192 | */ | ||
193 | static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
194 | { | ||
195 | struct net_device *dev = NULL; | ||
196 | struct xircom_private *private; | ||
197 | unsigned long flags; | ||
198 | unsigned short tmp16; | ||
199 | |||
200 | /* First do the PCI initialisation */ | ||
201 | |||
202 | if (pci_enable_device(pdev)) | ||
203 | return -ENODEV; | ||
204 | |||
205 | /* disable all powermanagement */ | ||
206 | pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); | ||
207 | |||
208 | pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/ | ||
209 | |||
210 | /* clear PCI status, if any */ | ||
211 | pci_read_config_word (pdev,PCI_STATUS, &tmp16); | ||
212 | pci_write_config_word (pdev, PCI_STATUS,tmp16); | ||
213 | |||
214 | if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { | ||
215 | pr_err("%s: failed to allocate io-region\n", __func__); | ||
216 | return -ENODEV; | ||
217 | } | ||
218 | |||
219 | /* | ||
220 | Before changing the hardware, allocate the memory. | ||
221 | This way, we can fail gracefully if not enough memory | ||
222 | is available. | ||
223 | */ | ||
224 | dev = alloc_etherdev(sizeof(struct xircom_private)); | ||
225 | if (!dev) { | ||
226 | pr_err("%s: failed to allocate etherdev\n", __func__); | ||
227 | goto device_fail; | ||
228 | } | ||
229 | private = netdev_priv(dev); | ||
230 | |||
231 | /* Allocate the send/receive buffers */ | ||
232 | private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); | ||
233 | if (private->rx_buffer == NULL) { | ||
234 | pr_err("%s: no memory for rx buffer\n", __func__); | ||
235 | goto rx_buf_fail; | ||
236 | } | ||
237 | private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); | ||
238 | if (private->tx_buffer == NULL) { | ||
239 | pr_err("%s: no memory for tx buffer\n", __func__); | ||
240 | goto tx_buf_fail; | ||
241 | } | ||
242 | |||
243 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
244 | |||
245 | |||
246 | private->dev = dev; | ||
247 | private->pdev = pdev; | ||
248 | private->io_port = pci_resource_start(pdev, 0); | ||
249 | spin_lock_init(&private->lock); | ||
250 | dev->irq = pdev->irq; | ||
251 | dev->base_addr = private->io_port; | ||
252 | |||
253 | initialize_card(private); | ||
254 | read_mac_address(private); | ||
255 | setup_descriptors(private); | ||
256 | |||
257 | dev->netdev_ops = &netdev_ops; | ||
258 | pci_set_drvdata(pdev, dev); | ||
259 | |||
260 | if (register_netdev(dev)) { | ||
261 | pr_err("%s: netdevice registration failed\n", __func__); | ||
262 | goto reg_fail; | ||
263 | } | ||
264 | |||
265 | netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", | ||
266 | pdev->revision, pdev->irq); | ||
267 | /* start the transmitter to get a heartbeat */ | ||
268 | /* TODO: send 2 dummy packets here */ | ||
269 | transceiver_voodoo(private); | ||
270 | |||
271 | spin_lock_irqsave(&private->lock,flags); | ||
272 | activate_transmitter(private); | ||
273 | activate_receiver(private); | ||
274 | spin_unlock_irqrestore(&private->lock,flags); | ||
275 | |||
276 | trigger_receive(private); | ||
277 | |||
278 | return 0; | ||
279 | |||
280 | reg_fail: | ||
281 | pci_free_consistent(pdev, 8192, private->tx_buffer, private->tx_dma_handle); | ||
282 | tx_buf_fail: | ||
283 | pci_free_consistent(pdev, 8192, private->rx_buffer, private->rx_dma_handle); | ||
284 | rx_buf_fail: | ||
285 | free_netdev(dev); | ||
286 | device_fail: | ||
287 | return -ENODEV; | ||
288 | } | ||
289 | |||
290 | |||
291 | /* | ||
292 | xircom_remove is called on module-unload or on device-eject. | ||
293 | It unregisters the irq, io-region and network device. | ||
294 | Interrupts and such are already stopped in the "ifconfig ethX down" | ||
295 | code. | ||
296 | */ | ||
297 | static void __devexit xircom_remove(struct pci_dev *pdev) | ||
298 | { | ||
299 | struct net_device *dev = pci_get_drvdata(pdev); | ||
300 | struct xircom_private *card = netdev_priv(dev); | ||
301 | |||
302 | pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle); | ||
303 | pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle); | ||
304 | |||
305 | release_region(dev->base_addr, 128); | ||
306 | unregister_netdev(dev); | ||
307 | free_netdev(dev); | ||
308 | pci_set_drvdata(pdev, NULL); | ||
309 | } | ||
310 | |||
311 | static irqreturn_t xircom_interrupt(int irq, void *dev_instance) | ||
312 | { | ||
313 | struct net_device *dev = (struct net_device *) dev_instance; | ||
314 | struct xircom_private *card = netdev_priv(dev); | ||
315 | unsigned int status; | ||
316 | int i; | ||
317 | |||
318 | spin_lock(&card->lock); | ||
319 | status = inl(card->io_port+CSR5); | ||
320 | |||
321 | #if defined DEBUG && DEBUG > 1 | ||
322 | print_binary(status); | ||
323 | pr_debug("tx status 0x%08x 0x%08x\n", | ||
324 | card->tx_buffer[0], card->tx_buffer[4]); | ||
325 | pr_debug("rx status 0x%08x 0x%08x\n", | ||
326 | card->rx_buffer[0], card->rx_buffer[4]); | ||
327 | #endif | ||
328 | /* Handle shared irq and hotplug */ | ||
329 | if (status == 0 || status == 0xffffffff) { | ||
330 | spin_unlock(&card->lock); | ||
331 | return IRQ_NONE; | ||
332 | } | ||
333 | |||
334 | if (link_status_changed(card)) { | ||
335 | int newlink; | ||
336 | netdev_dbg(dev, "Link status has changed\n"); | ||
337 | newlink = link_status(card); | ||
338 | netdev_info(dev, "Link is %d mbit\n", newlink); | ||
339 | if (newlink) | ||
340 | netif_carrier_on(dev); | ||
341 | else | ||
342 | netif_carrier_off(dev); | ||
343 | |||
344 | } | ||
345 | |||
346 | /* Clear all remaining interrupts */ | ||
347 | status |= 0xffffffff; /* FIXME: make this clear only the | ||
348 | real existing bits */ | ||
349 | outl(status,card->io_port+CSR5); | ||
350 | |||
351 | |||
352 | for (i=0;i<NUMDESCRIPTORS;i++) | ||
353 | investigate_write_descriptor(dev,card,i,bufferoffsets[i]); | ||
354 | for (i=0;i<NUMDESCRIPTORS;i++) | ||
355 | investigate_read_descriptor(dev,card,i,bufferoffsets[i]); | ||
356 | |||
357 | spin_unlock(&card->lock); | ||
358 | return IRQ_HANDLED; | ||
359 | } | ||
360 | |||
361 | static netdev_tx_t xircom_start_xmit(struct sk_buff *skb, | ||
362 | struct net_device *dev) | ||
363 | { | ||
364 | struct xircom_private *card; | ||
365 | unsigned long flags; | ||
366 | int nextdescriptor; | ||
367 | int desc; | ||
368 | |||
369 | card = netdev_priv(dev); | ||
370 | spin_lock_irqsave(&card->lock,flags); | ||
371 | |||
372 | /* First see if we can free some descriptors */ | ||
373 | for (desc=0;desc<NUMDESCRIPTORS;desc++) | ||
374 | investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]); | ||
375 | |||
376 | |||
377 | nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS); | ||
378 | desc = card->transmit_used; | ||
379 | |||
380 | /* only send the packet if the descriptor is free */ | ||
381 | if (card->tx_buffer[4*desc]==0) { | ||
382 | /* Copy the packet data; zero the memory first as the card | ||
383 | sometimes sends more than you ask it to. */ | ||
384 | |||
385 | memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536); | ||
386 | skb_copy_from_linear_data(skb, | ||
387 | &(card->tx_buffer[bufferoffsets[desc] / 4]), | ||
388 | skb->len); | ||
389 | /* FIXME: The specification tells us that the length we send HAS to be a multiple of | ||
390 | 4 bytes. */ | ||
391 | |||
392 | card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len); | ||
393 | if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */ | ||
394 | card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25); | ||
395 | |||
396 | card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000); | ||
397 | /* 0xF0... means want interrupts*/ | ||
398 | card->tx_skb[desc] = skb; | ||
399 | |||
400 | wmb(); | ||
401 | /* This gives the descriptor to the card */ | ||
402 | card->tx_buffer[4*desc] = cpu_to_le32(0x80000000); | ||
403 | trigger_transmit(card); | ||
404 | if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x8000000)) { | ||
405 | /* next descriptor is occupied... */ | ||
406 | netif_stop_queue(dev); | ||
407 | } | ||
408 | card->transmit_used = nextdescriptor; | ||
409 | spin_unlock_irqrestore(&card->lock,flags); | ||
410 | return NETDEV_TX_OK; | ||
411 | } | ||
412 | |||
413 | /* Uh oh... no free descriptor... drop the packet */ | ||
414 | netif_stop_queue(dev); | ||
415 | spin_unlock_irqrestore(&card->lock,flags); | ||
416 | trigger_transmit(card); | ||
417 | |||
418 | return NETDEV_TX_BUSY; | ||
419 | } | ||
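The FIXME in xircom_start_xmit() above notes that the length handed to the card has to be a multiple of four bytes. A standalone sketch of the rounding that would satisfy that requirement (a hypothetical helper, not something the driver currently does):

#include <stdio.h>

/* Round a frame length up to the next multiple of four bytes. */
static unsigned int pad_to_dword(unsigned int len)
{
	return (len + 3u) & ~3u;
}

int main(void)
{
	printf("%u -> %u\n", 61u, pad_to_dword(61u));	/* prints "61 -> 64" */
	printf("%u -> %u\n", 64u, pad_to_dword(64u));	/* already aligned: "64 -> 64" */
	return 0;
}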
420 | |||
421 | |||
422 | |||
423 | |||
424 | static int xircom_open(struct net_device *dev) | ||
425 | { | ||
426 | struct xircom_private *xp = netdev_priv(dev); | ||
427 | int retval; | ||
428 | |||
429 | netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", | ||
430 | dev->irq); | ||
431 | retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); | ||
432 | if (retval) | ||
433 | return retval; | ||
434 | |||
435 | xircom_up(xp); | ||
436 | xp->open = 1; | ||
437 | |||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | static int xircom_close(struct net_device *dev) | ||
442 | { | ||
443 | struct xircom_private *card; | ||
444 | unsigned long flags; | ||
445 | |||
446 | card = netdev_priv(dev); | ||
447 | netif_stop_queue(dev); /* we don't want new packets */ | ||
448 | |||
449 | |||
450 | spin_lock_irqsave(&card->lock,flags); | ||
451 | |||
452 | disable_all_interrupts(card); | ||
453 | #if 0 | ||
454 | /* We can enable this again once we send dummy packets on ifconfig ethX up */ | ||
455 | deactivate_receiver(card); | ||
456 | deactivate_transmitter(card); | ||
457 | #endif | ||
458 | remove_descriptors(card); | ||
459 | |||
460 | spin_unlock_irqrestore(&card->lock,flags); | ||
461 | |||
462 | card->open = 0; | ||
463 | free_irq(dev->irq,dev); | ||
464 | |||
465 | return 0; | ||
466 | |||
467 | } | ||
468 | |||
469 | |||
470 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
471 | static void xircom_poll_controller(struct net_device *dev) | ||
472 | { | ||
473 | disable_irq(dev->irq); | ||
474 | xircom_interrupt(dev->irq, dev); | ||
475 | enable_irq(dev->irq); | ||
476 | } | ||
477 | #endif | ||
478 | |||
479 | |||
480 | static void initialize_card(struct xircom_private *card) | ||
481 | { | ||
482 | unsigned int val; | ||
483 | unsigned long flags; | ||
484 | |||
485 | spin_lock_irqsave(&card->lock, flags); | ||
486 | |||
487 | /* First: reset the card */ | ||
488 | val = inl(card->io_port + CSR0); | ||
489 | val |= 0x01; /* Software reset */ | ||
490 | outl(val, card->io_port + CSR0); | ||
491 | |||
492 | udelay(100); /* give the card some time to reset */ | ||
493 | |||
494 | val = inl(card->io_port + CSR0); | ||
495 | val &= ~0x01; /* disable Software reset */ | ||
496 | outl(val, card->io_port + CSR0); | ||
497 | |||
498 | |||
499 | val = 0; /* Value 0x00 is a safe and conservative value | ||
500 | for the PCI configuration settings */ | ||
501 | outl(val, card->io_port + CSR0); | ||
502 | |||
503 | |||
504 | disable_all_interrupts(card); | ||
505 | deactivate_receiver(card); | ||
506 | deactivate_transmitter(card); | ||
507 | |||
508 | spin_unlock_irqrestore(&card->lock, flags); | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | trigger_transmit causes the card to check for frames to be transmitted. | ||
513 | This is accomplished by writing to the CSR1 port. The documentation | ||
514 | claims that the act of writing is sufficient and that the value is | ||
515 | ignored; I chose zero. | ||
516 | */ | ||
517 | static void trigger_transmit(struct xircom_private *card) | ||
518 | { | ||
519 | unsigned int val; | ||
520 | |||
521 | val = 0; | ||
522 | outl(val, card->io_port + CSR1); | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | trigger_receive causes the card to check for empty frames in the | ||
527 | descriptor list in which packets can be received. | ||
528 | This is accomplished by writing to the CSR2 port. The documentation | ||
529 | claims that the act of writing is sufficient and that the value is | ||
530 | ignored; I chose zero. | ||
531 | */ | ||
532 | static void trigger_receive(struct xircom_private *card) | ||
533 | { | ||
534 | unsigned int val; | ||
535 | |||
536 | val = 0; | ||
537 | outl(val, card->io_port + CSR2); | ||
538 | } | ||
539 | |||
540 | /* | ||
541 | setup_descriptors initializes the send and receive buffers to be valid | ||
542 | descriptors and programs the addresses into the card. | ||
543 | */ | ||
544 | static void setup_descriptors(struct xircom_private *card) | ||
545 | { | ||
546 | u32 address; | ||
547 | int i; | ||
548 | |||
549 | BUG_ON(card->rx_buffer == NULL); | ||
550 | BUG_ON(card->tx_buffer == NULL); | ||
551 | |||
552 | /* Receive descriptors */ | ||
553 | memset(card->rx_buffer, 0, 128); /* clear the descriptors */ | ||
554 | for (i=0;i<NUMDESCRIPTORS;i++ ) { | ||
555 | |||
556 | /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */ | ||
557 | card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000); | ||
558 | /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ | ||
559 | card->rx_buffer[i*4 + 1] = cpu_to_le32(1536); | ||
560 | if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ | ||
561 | card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); | ||
562 | |||
563 | /* Rx Descr2: address of the buffer | ||
564 | we store the buffer at the 2nd half of the page */ | ||
565 | |||
566 | address = card->rx_dma_handle; | ||
567 | card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); | ||
568 | /* Rx Desc3: address of 2nd buffer -> 0 */ | ||
569 | card->rx_buffer[i*4 + 3] = 0; | ||
570 | } | ||
571 | |||
572 | wmb(); | ||
573 | /* Write the receive descriptor ring address to the card */ | ||
574 | address = card->rx_dma_handle; | ||
575 | outl(address, card->io_port + CSR3); /* Receive descr list address */ | ||
576 | |||
577 | |||
578 | /* transmit descriptors */ | ||
579 | memset(card->tx_buffer, 0, 128); /* clear the descriptors */ | ||
580 | |||
581 | for (i=0;i<NUMDESCRIPTORS;i++ ) { | ||
582 | /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */ | ||
583 | card->tx_buffer[i*4 + 0] = 0x00000000; | ||
584 | /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */ | ||
585 | card->tx_buffer[i*4 + 1] = cpu_to_le32(1536); | ||
586 | if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */ | ||
587 | card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25); | ||
588 | |||
589 | /* Tx Descr2: address of the buffer | ||
590 | we store the buffer at the 2nd half of the page */ | ||
591 | address = card->tx_dma_handle; | ||
592 | card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]); | ||
593 | /* Tx Desc3: address of 2nd buffer -> 0 */ | ||
594 | card->tx_buffer[i*4 + 3] = 0; | ||
595 | } | ||
596 | |||
597 | wmb(); | ||
598 | /* Write the transmit descriptor ring address to the card */ | ||
599 | address = card->tx_dma_handle; | ||
600 | outl(address, card->io_port + CSR4); /* xmit descr list address */ | ||
601 | } | ||
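setup_descriptors() above fills each 16-byte descriptor as four raw __le32 words indexed as buffer[i*4 + 0..3]. A purely illustrative struct view of that layout (not used by the driver, which keeps the flat array):

/* Illustration only: one tulip-style descriptor as laid out above. */
struct xircom_desc_view {
	__le32 status;	/* word 0: bit 31 = owned by the card, rest = status bits */
	__le32 control;	/* word 1: buffer 1 length (1536), bit 25 = end of ring   */
	__le32 buffer1;	/* word 2: DMA address of the data buffer                 */
	__le32 buffer2;	/* word 3: second buffer address, unused here (0)         */
};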
602 | |||
603 | /* | ||
604 | remove_descriptors informs the card that the descriptors are no longer | ||
605 | valid by setting the address in the card to 0x00. | ||
606 | */ | ||
607 | static void remove_descriptors(struct xircom_private *card) | ||
608 | { | ||
609 | unsigned int val; | ||
610 | |||
611 | val = 0; | ||
612 | outl(val, card->io_port + CSR3); /* Receive descriptor address */ | ||
613 | outl(val, card->io_port + CSR4); /* Send descriptor address */ | ||
614 | } | ||
615 | |||
616 | /* | ||
617 | link_status_changed returns 1 if the card has indicated that | ||
618 | the link status has changed. The new link status has to be read from CSR12. | ||
619 | |||
620 | This function also clears the status-bit. | ||
621 | */ | ||
622 | static int link_status_changed(struct xircom_private *card) | ||
623 | { | ||
624 | unsigned int val; | ||
625 | |||
626 | val = inl(card->io_port + CSR5); /* Status register */ | ||
627 | |||
628 | if ((val & (1 << 27)) == 0) /* no change */ | ||
629 | return 0; | ||
630 | |||
631 | /* clear the event by writing a 1 to the bit in the | ||
632 | status register. */ | ||
633 | val = (1 << 27); | ||
634 | outl(val, card->io_port + CSR5); | ||
635 | |||
636 | return 1; | ||
637 | } | ||
638 | |||
639 | |||
640 | /* | ||
641 | transmit_active returns 1 if the transmitter on the card is | ||
642 | in a non-stopped state. | ||
643 | */ | ||
644 | static int transmit_active(struct xircom_private *card) | ||
645 | { | ||
646 | unsigned int val; | ||
647 | |||
648 | val = inl(card->io_port + CSR5); /* Status register */ | ||
649 | |||
650 | if ((val & (7 << 20)) == 0) /* transmitter disabled */ | ||
651 | return 0; | ||
652 | |||
653 | return 1; | ||
654 | } | ||
655 | |||
656 | /* | ||
657 | receive_active returns 1 if the receiver on the card is | ||
658 | in a non-stopped state. | ||
659 | */ | ||
660 | static int receive_active(struct xircom_private *card) | ||
661 | { | ||
662 | unsigned int val; | ||
663 | |||
664 | val = inl(card->io_port + CSR5); /* Status register */ | ||
665 | |||
666 | if ((val & (7 << 17)) == 0) /* receiver disabled */ | ||
667 | return 0; | ||
668 | |||
669 | return 1; | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | activate_receiver enables the receiver on the card. | ||
674 | Before being allowed to activate the receiver, the receiver | ||
675 | must be completely de-activated. To achieve this, | ||
676 | this code actually disables the receiver first; then it waits for the | ||
677 | receiver to become inactive, then it activates the receiver and then | ||
678 | it waits for the receiver to be active. | ||
679 | |||
680 | must be called with the lock held and interrupts disabled. | ||
681 | */ | ||
682 | static void activate_receiver(struct xircom_private *card) | ||
683 | { | ||
684 | unsigned int val; | ||
685 | int counter; | ||
686 | |||
687 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
688 | |||
689 | /* If the "active" bit is set and the receiver is already | ||
690 | active, no need to do the expensive thing */ | ||
691 | if ((val&2) && (receive_active(card))) | ||
692 | return; | ||
693 | |||
694 | |||
695 | val = val & ~2; /* disable the receiver */ | ||
696 | outl(val, card->io_port + CSR6); | ||
697 | |||
698 | counter = 10; | ||
699 | while (counter > 0) { | ||
700 | if (!receive_active(card)) | ||
701 | break; | ||
702 | /* wait a while */ | ||
703 | udelay(50); | ||
704 | counter--; | ||
705 | if (counter <= 0) | ||
706 | netdev_err(card->dev, "Receiver failed to deactivate\n"); | ||
707 | } | ||
708 | |||
709 | /* enable the receiver */ | ||
710 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
711 | val = val | 2; /* enable the receiver */ | ||
712 | outl(val, card->io_port + CSR6); | ||
713 | |||
714 | /* now wait for the card to activate again */ | ||
715 | counter = 10; | ||
716 | while (counter > 0) { | ||
717 | if (receive_active(card)) | ||
718 | break; | ||
719 | /* wait a while */ | ||
720 | udelay(50); | ||
721 | counter--; | ||
722 | if (counter <= 0) | ||
723 | netdev_err(card->dev, | ||
724 | "Receiver failed to re-activate\n"); | ||
725 | } | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | deactivate_receiver disables the receiver on the card. | ||
730 | To achieve this, the code disables the receiver first; | ||
731 | then it waits for the receiver to become inactive. | ||
732 | |||
733 | must be called with the lock held and interrupts disabled. | ||
734 | */ | ||
735 | static void deactivate_receiver(struct xircom_private *card) | ||
736 | { | ||
737 | unsigned int val; | ||
738 | int counter; | ||
739 | |||
740 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
741 | val = val & ~2; /* disable the receiver */ | ||
742 | outl(val, card->io_port + CSR6); | ||
743 | |||
744 | counter = 10; | ||
745 | while (counter > 0) { | ||
746 | if (!receive_active(card)) | ||
747 | break; | ||
748 | /* wait a while */ | ||
749 | udelay(50); | ||
750 | counter--; | ||
751 | if (counter <= 0) | ||
752 | netdev_err(card->dev, "Receiver failed to deactivate\n"); | ||
753 | } | ||
754 | } | ||
755 | |||
756 | |||
757 | /* | ||
758 | activate_transmitter enables the transmitter on the card. | ||
759 | Before being allowed to activate the transmitter, the transmitter | ||
760 | must be completely de-activated. To achieve this, | ||
761 | this code actually disables the transmitter first; then it waits for the | ||
762 | transmitter to become inactive, then it activates the transmitter and then | ||
763 | it waits for the transmitter to be active again. | ||
764 | |||
765 | must be called with the lock held and interrupts disabled. | ||
766 | */ | ||
767 | static void activate_transmitter(struct xircom_private *card) | ||
768 | { | ||
769 | unsigned int val; | ||
770 | int counter; | ||
771 | |||
772 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
773 | |||
774 | /* If the "active" bit is set and the transmitter is already | ||
775 | active, no need to do the expensive thing */ | ||
776 | if ((val&(1<<13)) && (transmit_active(card))) | ||
777 | return; | ||
778 | |||
779 | val = val & ~(1 << 13); /* disable the transmitter */ | ||
780 | outl(val, card->io_port + CSR6); | ||
781 | |||
782 | counter = 10; | ||
783 | while (counter > 0) { | ||
784 | if (!transmit_active(card)) | ||
785 | break; | ||
786 | /* wait a while */ | ||
787 | udelay(50); | ||
788 | counter--; | ||
789 | if (counter <= 0) | ||
790 | netdev_err(card->dev, | ||
791 | "Transmitter failed to deactivate\n"); | ||
792 | } | ||
793 | |||
794 | /* enable the transmitter */ | ||
795 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
796 | val = val | (1 << 13); /* enable the transmitter */ | ||
797 | outl(val, card->io_port + CSR6); | ||
798 | |||
799 | /* now wait for the card to activate again */ | ||
800 | counter = 10; | ||
801 | while (counter > 0) { | ||
802 | if (transmit_active(card)) | ||
803 | break; | ||
804 | /* wait a while */ | ||
805 | udelay(50); | ||
806 | counter--; | ||
807 | if (counter <= 0) | ||
808 | netdev_err(card->dev, | ||
809 | "Transmitter failed to re-activate\n"); | ||
810 | } | ||
811 | } | ||
812 | |||
813 | /* | ||
814 | deactivate_transmitter disables the transmitter on the card. | ||
815 | To achieve this, the code disables the transmitter first; | ||
816 | then it waits for the transmitter to become inactive. | ||
817 | |||
818 | must be called with the lock held and interrupts disabled. | ||
819 | */ | ||
820 | static void deactivate_transmitter(struct xircom_private *card) | ||
821 | { | ||
822 | unsigned int val; | ||
823 | int counter; | ||
824 | |||
825 | val = inl(card->io_port + CSR6); /* Operation mode */ | ||
826 | val = val & ~2; /* disable the transmitter */ | ||
827 | outl(val, card->io_port + CSR6); | ||
828 | |||
829 | counter = 20; | ||
830 | while (counter > 0) { | ||
831 | if (!transmit_active(card)) | ||
832 | break; | ||
833 | /* wait a while */ | ||
834 | udelay(50); | ||
835 | counter--; | ||
836 | if (counter <= 0) | ||
837 | netdev_err(card->dev, | ||
838 | "Transmitter failed to deactivate\n"); | ||
839 | } | ||
840 | } | ||
841 | |||
842 | |||
843 | /* | ||
844 | enable_transmit_interrupt enables the transmit interrupt | ||
845 | |||
846 | must be called with the lock held and interrupts disabled. | ||
847 | */ | ||
848 | static void enable_transmit_interrupt(struct xircom_private *card) | ||
849 | { | ||
850 | unsigned int val; | ||
851 | |||
852 | val = inl(card->io_port + CSR7); /* Interrupt enable register */ | ||
853 | val |= 1; /* enable the transmit interrupt */ | ||
854 | outl(val, card->io_port + CSR7); | ||
855 | } | ||
856 | |||
857 | |||
858 | /* | ||
859 | enable_receive_interrupt enables the receive interrupt | ||
860 | |||
861 | must be called with the lock held and interrupts disabled. | ||
862 | */ | ||
863 | static void enable_receive_interrupt(struct xircom_private *card) | ||
864 | { | ||
865 | unsigned int val; | ||
866 | |||
867 | val = inl(card->io_port + CSR7); /* Interrupt enable register */ | ||
868 | val = val | (1 << 6); /* enable the receive interrupt */ | ||
869 | outl(val, card->io_port + CSR7); | ||
870 | } | ||
871 | |||
872 | /* | ||
873 | enable_link_interrupt enables the link status change interrupt | ||
874 | |||
875 | must be called with the lock held and interrupts disabled. | ||
876 | */ | ||
877 | static void enable_link_interrupt(struct xircom_private *card) | ||
878 | { | ||
879 | unsigned int val; | ||
880 | |||
881 | val = inl(card->io_port + CSR7); /* Interrupt enable register */ | ||
882 | val = val | (1 << 27); /* enable the link status change interrupt */ | ||
883 | outl(val, card->io_port + CSR7); | ||
884 | } | ||
885 | |||
886 | |||
887 | |||
888 | /* | ||
889 | disable_all_interrupts disables all interrupts | ||
890 | |||
891 | must be called with the lock held and interrupts disabled. | ||
892 | */ | ||
893 | static void disable_all_interrupts(struct xircom_private *card) | ||
894 | { | ||
895 | unsigned int val; | ||
896 | |||
897 | val = 0; /* disable all interrupts */ | ||
898 | outl(val, card->io_port + CSR7); | ||
899 | } | ||
900 | |||
901 | /* | ||
902 | enable_common_interrupts enables several weird interrupts | ||
903 | |||
904 | must be called with the lock held and interrupts disabled. | ||
905 | */ | ||
906 | static void enable_common_interrupts(struct xircom_private *card) | ||
907 | { | ||
908 | unsigned int val; | ||
909 | |||
910 | val = inl(card->io_port + CSR7); /* Interrupt enable register */ | ||
911 | val |= (1<<16); /* Normal Interrupt Summary */ | ||
912 | val |= (1<<15); /* Abnormal Interrupt Summary */ | ||
913 | val |= (1<<13); /* Fatal bus error */ | ||
914 | val |= (1<<8); /* Receive Process Stopped */ | ||
915 | val |= (1<<7); /* Receive Buffer Unavailable */ | ||
916 | val |= (1<<5); /* Transmit Underflow */ | ||
917 | val |= (1<<2); /* Transmit Buffer Unavailable */ | ||
918 | val |= (1<<1); /* Transmit Process Stopped */ | ||
919 | outl(val, card->io_port + CSR7); | ||
920 | } | ||
921 | |||
922 | /* | ||
923 | enable_promisc starts promisc mode | ||
924 | |||
925 | must be called with the lock held and interrupts disabled. | ||
926 | */ | ||
927 | static int enable_promisc(struct xircom_private *card) | ||
928 | { | ||
929 | unsigned int val; | ||
930 | |||
931 | val = inl(card->io_port + CSR6); | ||
932 | val = val | (1 << 6); | ||
933 | outl(val, card->io_port + CSR6); | ||
934 | |||
935 | return 1; | ||
936 | } | ||
937 | |||
938 | |||
939 | |||
940 | |||
941 | /* | ||
942 | link_status() checks the link status and returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link. | ||
943 | |||
944 | Must be called in locked state with interrupts disabled | ||
945 | */ | ||
946 | static int link_status(struct xircom_private *card) | ||
947 | { | ||
948 | unsigned int val; | ||
949 | |||
950 | val = inb(card->io_port + CSR12); | ||
951 | |||
952 | if (!(val&(1<<2))) /* bit 2 is 0 for a 10mbit link, 1 for not a 10mbit link */ | ||
953 | return 10; | ||
954 | if (!(val&(1<<1))) /* bit 1 is 0 for a 100mbit link, 1 for not a 100mbit link */ | ||
955 | return 100; | ||
956 | |||
957 | /* If we get here -> no link at all */ | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | |||
963 | |||
964 | |||
965 | |||
966 | /* | ||
967 | read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure. | ||
968 | |||
969 | This function takes the spinlock itself and therefore must not be called with the lock held. | ||
970 | */ | ||
971 | static void read_mac_address(struct xircom_private *card) | ||
972 | { | ||
973 | unsigned char j, tuple, link, data_id, data_count; | ||
974 | unsigned long flags; | ||
975 | int i; | ||
976 | |||
977 | spin_lock_irqsave(&card->lock, flags); | ||
978 | |||
979 | outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ | ||
980 | for (i = 0x100; i < 0x1f7; i += link + 2) { | ||
981 | outl(i, card->io_port + CSR10); | ||
982 | tuple = inl(card->io_port + CSR9) & 0xff; | ||
983 | outl(i + 1, card->io_port + CSR10); | ||
984 | link = inl(card->io_port + CSR9) & 0xff; | ||
985 | outl(i + 2, card->io_port + CSR10); | ||
986 | data_id = inl(card->io_port + CSR9) & 0xff; | ||
987 | outl(i + 3, card->io_port + CSR10); | ||
988 | data_count = inl(card->io_port + CSR9) & 0xff; | ||
989 | if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { | ||
990 | /* | ||
991 | * This is it. We have the data we want. | ||
992 | */ | ||
993 | for (j = 0; j < 6; j++) { | ||
994 | outl(i + j + 4, card->io_port + CSR10); | ||
995 | card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; | ||
996 | } | ||
997 | break; | ||
998 | } else if (link == 0) { | ||
999 | break; | ||
1000 | } | ||
1001 | } | ||
1002 | spin_unlock_irqrestore(&card->lock, flags); | ||
1003 | pr_debug(" %pM\n", card->dev->dev_addr); | ||
1004 | } | ||
1005 | |||
1006 | |||
1007 | /* | ||
1008 | transceiver_voodoo() enables the external UTP plug thingy. | ||
1009 | It's called voodoo as I stole this code and cannot cross-reference | ||
1010 | it with the specification. | ||
1011 | */ | ||
1012 | static void transceiver_voodoo(struct xircom_private *card) | ||
1013 | { | ||
1014 | unsigned long flags; | ||
1015 | |||
1016 | /* disable all powermanagement */ | ||
1017 | pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); | ||
1018 | |||
1019 | setup_descriptors(card); | ||
1020 | |||
1021 | spin_lock_irqsave(&card->lock, flags); | ||
1022 | |||
1023 | outl(0x0008, card->io_port + CSR15); | ||
1024 | udelay(25); | ||
1025 | outl(0xa8050000, card->io_port + CSR15); | ||
1026 | udelay(25); | ||
1027 | outl(0xa00f0000, card->io_port + CSR15); | ||
1028 | udelay(25); | ||
1029 | |||
1030 | spin_unlock_irqrestore(&card->lock, flags); | ||
1031 | |||
1032 | netif_start_queue(card->dev); | ||
1033 | } | ||
1034 | |||
1035 | |||
1036 | static void xircom_up(struct xircom_private *card) | ||
1037 | { | ||
1038 | unsigned long flags; | ||
1039 | int i; | ||
1040 | |||
1041 | /* disable all powermanagement */ | ||
1042 | pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000); | ||
1043 | |||
1044 | setup_descriptors(card); | ||
1045 | |||
1046 | spin_lock_irqsave(&card->lock, flags); | ||
1047 | |||
1048 | |||
1049 | enable_link_interrupt(card); | ||
1050 | enable_transmit_interrupt(card); | ||
1051 | enable_receive_interrupt(card); | ||
1052 | enable_common_interrupts(card); | ||
1053 | enable_promisc(card); | ||
1054 | |||
1055 | /* The card may already have received packets; read them now */ | ||
1056 | for (i=0;i<NUMDESCRIPTORS;i++) | ||
1057 | investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]); | ||
1058 | |||
1059 | |||
1060 | spin_unlock_irqrestore(&card->lock, flags); | ||
1061 | trigger_receive(card); | ||
1062 | trigger_transmit(card); | ||
1063 | netif_start_queue(card->dev); | ||
1064 | } | ||
1065 | |||
1066 | /* Bufferoffset is in BYTES */ | ||
1067 | static void | ||
1068 | investigate_read_descriptor(struct net_device *dev, struct xircom_private *card, | ||
1069 | int descnr, unsigned int bufferoffset) | ||
1070 | { | ||
1071 | int status; | ||
1072 | |||
1073 | status = le32_to_cpu(card->rx_buffer[4*descnr]); | ||
1074 | |||
1075 | if (status > 0) { /* packet received */ | ||
1076 | |||
1077 | /* TODO: discard error packets */ | ||
1078 | |||
1079 | short pkt_len = ((status >> 16) & 0x7ff) - 4; | ||
1080 | /* minus 4, we don't want the CRC */ | ||
1081 | struct sk_buff *skb; | ||
1082 | |||
1083 | if (pkt_len > 1518) { | ||
1084 | netdev_err(dev, "Packet length %i is bogus\n", pkt_len); | ||
1085 | pkt_len = 1518; | ||
1086 | } | ||
1087 | |||
1088 | skb = dev_alloc_skb(pkt_len + 2); | ||
1089 | if (skb == NULL) { | ||
1090 | dev->stats.rx_dropped++; | ||
1091 | goto out; | ||
1092 | } | ||
1093 | skb_reserve(skb, 2); | ||
1094 | skb_copy_to_linear_data(skb, | ||
1095 | &card->rx_buffer[bufferoffset / 4], | ||
1096 | pkt_len); | ||
1097 | skb_put(skb, pkt_len); | ||
1098 | skb->protocol = eth_type_trans(skb, dev); | ||
1099 | netif_rx(skb); | ||
1100 | dev->stats.rx_packets++; | ||
1101 | dev->stats.rx_bytes += pkt_len; | ||
1102 | |||
1103 | out: | ||
1104 | /* give the buffer back to the card */ | ||
1105 | card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000); | ||
1106 | trigger_receive(card); | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | |||
1111 | /* Bufferoffset is in BYTES */ | ||
1112 | static void | ||
1113 | investigate_write_descriptor(struct net_device *dev, | ||
1114 | struct xircom_private *card, | ||
1115 | int descnr, unsigned int bufferoffset) | ||
1116 | { | ||
1117 | int status; | ||
1118 | |||
1119 | status = le32_to_cpu(card->tx_buffer[4*descnr]); | ||
1120 | #if 0 | ||
1121 | if (status & 0x8000) { /* Major error */ | ||
1122 | pr_err("Major transmit error status %x\n", status); | ||
1123 | card->tx_buffer[4*descnr] = 0; | ||
1124 | netif_wake_queue (dev); | ||
1125 | } | ||
1126 | #endif | ||
1127 | if (status > 0) { /* bit 31 is 0 when done */ | ||
1128 | if (card->tx_skb[descnr]!=NULL) { | ||
1129 | dev->stats.tx_bytes += card->tx_skb[descnr]->len; | ||
1130 | dev_kfree_skb_irq(card->tx_skb[descnr]); | ||
1131 | } | ||
1132 | card->tx_skb[descnr] = NULL; | ||
1133 | /* Bit 8 in the status field is 1 if there was a collision */ | ||
1134 | if (status & (1 << 8)) | ||
1135 | dev->stats.collisions++; | ||
1136 | card->tx_buffer[4*descnr] = 0; /* descriptor is free again */ | ||
1137 | netif_wake_queue (dev); | ||
1138 | dev->stats.tx_packets++; | ||
1139 | } | ||
1140 | } | ||
1141 | |||
1142 | static int __init xircom_init(void) | ||
1143 | { | ||
1144 | return pci_register_driver(&xircom_ops); | ||
1145 | } | ||
1146 | |||
1147 | static void __exit xircom_exit(void) | ||
1148 | { | ||
1149 | pci_unregister_driver(&xircom_ops); | ||
1150 | } | ||
1151 | |||
1152 | module_init(xircom_init) | ||
1153 | module_exit(xircom_exit) | ||
1154 | |||