Diffstat (limited to 'drivers/net/ethernet/dec/tulip')
-rw-r--r--  drivers/net/ethernet/dec/tulip/21142.c         257
-rw-r--r--  drivers/net/ethernet/dec/tulip/Kconfig         172
-rw-r--r--  drivers/net/ethernet/dec/tulip/Makefile         19
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c      2215
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c        5599
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.h        1017
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c         2253
-rw-r--r--  drivers/net/ethernet/dec/tulip/eeprom.c        383
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c     808
-rw-r--r--  drivers/net/ethernet/dec/tulip/media.c         553
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c          170
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic2.c         403
-rw-r--r--  drivers/net/ethernet/dec/tulip/timer.c         176
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h         570
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c   2008
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c      1850
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c  1670
-rw-r--r--  drivers/net/ethernet/dec/tulip/xircom_cb.c    1154
18 files changed, 21277 insertions, 0 deletions
diff --git a/drivers/net/ethernet/dec/tulip/21142.c b/drivers/net/ethernet/dec/tulip/21142.c
new file mode 100644
index 000000000000..25b8deedbef8
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/21142.c
@@ -0,0 +1,257 @@
1/*
2 drivers/net/tulip/21142.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 DC21143 manual "21143 PCI/CardBus 10/100Mb/s Ethernet LAN Controller
11 Hardware Reference Manual" is currently available at:
12 http://developer.intel.com/design/network/manuals/278074.htm
13
14 Please submit bugs to http://bugzilla.kernel.org/ .
15*/
16
17#include <linux/delay.h>
18#include "tulip.h"
19
20
21static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
22u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
23static u16 t21142_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
24
25
26/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
27 of available transceivers. */
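/* CSR12 (SIA status) conventions relied on below -- inferred from this
   file's own tests rather than quoted from the 21143 databook: the field
   (csr12 & 0x7000) reads 0x5000 once NWay negotiation has completed,
   bit 15 is set when the link partner is itself NWay-capable, and the
   upper 16 bits carry the partner's ability word (saved as tp->lpar). */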
28void t21142_media_task(struct work_struct *work)
29{
30 struct tulip_private *tp =
31 container_of(work, struct tulip_private, media_work);
32 struct net_device *dev = tp->dev;
33 void __iomem *ioaddr = tp->base_addr;
34 int csr12 = ioread32(ioaddr + CSR12);
35 int next_tick = 60*HZ;
36 int new_csr6 = 0;
37 int csr14 = ioread32(ioaddr + CSR14);
38
39 /* CSR12[LS10,LS100] are not reliable during autonegotiation */
40 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
41 csr12 |= 6;
42 if (tulip_debug > 2)
43 dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
44 csr12, medianame[dev->if_port]);
45 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
46 if (tulip_check_duplex(dev) < 0) {
47 netif_carrier_off(dev);
48 next_tick = 3*HZ;
49 } else {
50 netif_carrier_on(dev);
51 next_tick = 60*HZ;
52 }
53 } else if (tp->nwayset) {
54 /* Don't screw up a negotiated session! */
55 if (tulip_debug > 1)
56 dev_info(&dev->dev,
57 "Using NWay-set %s media, csr12 %08x\n",
58 medianame[dev->if_port], csr12);
59 } else if (tp->medialock) {
60 ;
61 } else if (dev->if_port == 3) {
62 if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
63 if (tulip_debug > 1)
64 dev_info(&dev->dev,
65 "No 21143 100baseTx link beat, %08x, trying NWay\n",
66 csr12);
67 t21142_start_nway(dev);
68 next_tick = 3*HZ;
69 }
70 } else if ((csr12 & 0x7000) != 0x5000) {
71 /* Negotiation failed. Search media types. */
72 if (tulip_debug > 1)
73 dev_info(&dev->dev,
74 "21143 negotiation failed, status %08x\n",
75 csr12);
76 if (!(csr12 & 4)) { /* 10mbps link beat good. */
77 new_csr6 = 0x82420000;
78 dev->if_port = 0;
79 iowrite32(0, ioaddr + CSR13);
80 iowrite32(0x0003FFFF, ioaddr + CSR14);
81 iowrite16(t21142_csr15[dev->if_port], ioaddr + CSR15);
82 iowrite32(t21142_csr13[dev->if_port], ioaddr + CSR13);
83 } else {
84 /* Select 100mbps port to check for link beat. */
85 new_csr6 = 0x83860000;
86 dev->if_port = 3;
87 iowrite32(0, ioaddr + CSR13);
88 iowrite32(0x0003FFFF, ioaddr + CSR14);
89 iowrite16(8, ioaddr + CSR15);
90 iowrite32(1, ioaddr + CSR13);
91 }
92 if (tulip_debug > 1)
93 dev_info(&dev->dev, "Testing new 21143 media %s\n",
94 medianame[dev->if_port]);
95 if (new_csr6 != (tp->csr6 & ~0x00D5)) {
96 tp->csr6 &= 0x00D5;
97 tp->csr6 |= new_csr6;
98 iowrite32(0x0301, ioaddr + CSR12);
99 tulip_restart_rxtx(tp);
100 }
101 next_tick = 3*HZ;
102 }
103
104 /* mod_timer synchronizes us with potential add_timer calls
105 * from interrupts.
106 */
107 mod_timer(&tp->timer, RUN_AT(next_tick));
108}
109
110
111void t21142_start_nway(struct net_device *dev)
112{
113 struct tulip_private *tp = netdev_priv(dev);
114 void __iomem *ioaddr = tp->base_addr;
115 int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
116 ((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
117
118 dev->if_port = 0;
119 tp->nway = tp->mediasense = 1;
120 tp->nwayset = tp->lpar = 0;
121 if (tulip_debug > 1)
122 netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n",
123 csr14);
124 iowrite32(0x0001, ioaddr + CSR13);
125 udelay(100);
126 iowrite32(csr14, ioaddr + CSR14);
127 tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
128 iowrite32(tp->csr6, ioaddr + CSR6);
129 if (tp->mtable && tp->mtable->csr15dir) {
130 iowrite32(tp->mtable->csr15dir, ioaddr + CSR15);
131 iowrite32(tp->mtable->csr15val, ioaddr + CSR15);
132 } else
133 iowrite16(0x0008, ioaddr + CSR15);
134 iowrite32(0x1301, ioaddr + CSR12); /* Trigger NWAY. */
135}
136
137
138
139void t21142_lnk_change(struct net_device *dev, int csr5)
140{
141 struct tulip_private *tp = netdev_priv(dev);
142 void __iomem *ioaddr = tp->base_addr;
143 int csr12 = ioread32(ioaddr + CSR12);
144 int csr14 = ioread32(ioaddr + CSR14);
145
146 /* CSR12[LS10,LS100] are not reliable during autonegotiation */
147 if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
148 csr12 |= 6;
149 if (tulip_debug > 1)
150 dev_info(&dev->dev,
151 "21143 link status interrupt %08x, CSR5 %x, %08x\n",
152 csr12, csr5, csr14);
153
154 /* If NWay finished and we have a negotiated partner capability. */
155 if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
156 int setup_done = 0;
157 int negotiated = tp->sym_advertise & (csr12 >> 16);
158 tp->lpar = csr12 >> 16;
159 tp->nwayset = 1;
160 /* If partner cannot negotiate, it is 10Mbps Half Duplex */
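 /* The masks below follow the 802.3 autonegotiation ability (ANAR)
 bit layout, matched against medianame[] indices -- an inference
 from this chain, not from the databook: 0x0020 10BASE-T,
 0x0040 10BASE-T-FD, 0x0080 100BASE-TX, 0x0100 100BASE-TX-FD. */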
161 if (!(csr12 & 0x8000)) dev->if_port = 0;
162 else if (negotiated & 0x0100) dev->if_port = 5;
163 else if (negotiated & 0x0080) dev->if_port = 3;
164 else if (negotiated & 0x0040) dev->if_port = 4;
165 else if (negotiated & 0x0020) dev->if_port = 0;
166 else {
167 tp->nwayset = 0;
168 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
169 dev->if_port = 3;
170 }
171 tp->full_duplex = (tulip_media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
172
173 if (tulip_debug > 1) {
174 if (tp->nwayset)
175 dev_info(&dev->dev,
176 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
177 medianame[dev->if_port],
178 tp->sym_advertise, tp->lpar,
179 negotiated);
180 else
181 dev_info(&dev->dev,
182 "Autonegotiation failed, using %s, link beat status %04x\n",
183 medianame[dev->if_port], csr12);
184 }
185
186 if (tp->mtable) {
187 int i;
188 for (i = 0; i < tp->mtable->leafcount; i++)
189 if (tp->mtable->mleaf[i].media == dev->if_port) {
190 int startup = ! ((tp->chip_id == DC21143 && (tp->revision == 48 || tp->revision == 65)));
191 tp->cur_index = i;
192 tulip_select_media(dev, startup);
193 setup_done = 1;
194 break;
195 }
196 }
197 if ( ! setup_done) {
198 tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000) | (tp->csr6 & 0x20ff);
199 if (tp->full_duplex)
200 tp->csr6 |= 0x0200;
201 iowrite32(1, ioaddr + CSR13);
202 }
203#if 0 /* Restart shouldn't be needed. */
204 iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
205 if (tulip_debug > 2)
206 netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n",
207 ioread32(ioaddr + CSR5));
208#endif
209 tulip_start_rxtx(tp);
210 if (tulip_debug > 2)
211 netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n",
212 tp->csr6, ioread32(ioaddr + CSR6),
213 ioread32(ioaddr + CSR12));
214 } else if ((tp->nwayset && (csr5 & 0x08000000) &&
215 (dev->if_port == 3 || dev->if_port == 5) &&
216 (csr12 & 2) == 2) ||
217 (tp->nway && (csr5 & (TPLnkFail)))) {
218 /* Link blew? Maybe restart NWay. */
219 del_timer_sync(&tp->timer);
220 t21142_start_nway(dev);
221 tp->timer.expires = RUN_AT(3*HZ);
222 add_timer(&tp->timer);
223 } else if (dev->if_port == 3 || dev->if_port == 5) {
224 if (tulip_debug > 1)
225 dev_info(&dev->dev, "21143 %s link beat %s\n",
226 medianame[dev->if_port],
227 (csr12 & 2) ? "failed" : "good");
228 if ((csr12 & 2) && ! tp->medialock) {
229 del_timer_sync(&tp->timer);
230 t21142_start_nway(dev);
231 tp->timer.expires = RUN_AT(3*HZ);
232 add_timer(&tp->timer);
233 } else if (dev->if_port == 5)
234 iowrite32(csr14 & ~0x080, ioaddr + CSR14);
235 } else if (dev->if_port == 0 || dev->if_port == 4) {
236 if ((csr12 & 4) == 0)
237 dev_info(&dev->dev, "21143 10baseT link beat good\n");
238 } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
239 if (tulip_debug)
240 dev_info(&dev->dev, "21143 10mbps sensed media\n");
241 dev->if_port = 0;
242 } else if (tp->nwayset) {
243 if (tulip_debug)
244 dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
245 medianame[dev->if_port], tp->csr6);
246 } else { /* 100mbps link beat good. */
247 if (tulip_debug)
248 dev_info(&dev->dev, "21143 100baseTx sensed media\n");
249 dev->if_port = 3;
250 tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
251 iowrite32(0x0003FF7F, ioaddr + CSR14);
252 iowrite32(0x0301, ioaddr + CSR12);
253 tulip_restart_rxtx(tp);
254 }
255}
256
257
diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig
new file mode 100644
index 000000000000..1203be0436e2
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/Kconfig
@@ -0,0 +1,172 @@
1#
2# Tulip family network device configuration
3#
4
5config NET_TULIP
6 bool "DEC - Tulip devices"
7 depends on (PCI || EISA || CARDBUS)
8 ---help---
9 This selects the "Tulip" family of EISA/PCI network cards.
10
11if NET_TULIP
12
13config DE2104X
14 tristate "Early DECchip Tulip (dc2104x) PCI support"
15 depends on PCI
16 select CRC32
17 ---help---
18 This driver is developed for the SMC EtherPower series Ethernet
19 cards and also works with cards based on the DECchip
20 21040 (Tulip series) chips. Some LinkSys PCI cards are
21 of this type. (If your card is NOT SMC EtherPower 10/100 PCI
22 (smc9332dst), you can also try the driver for "Generic DECchip"
23 cards, below. However, most people with a network card of this type
24 will say Y here.) Do read the Ethernet-HOWTO, available from
25 <http://www.tldp.org/docs.html#howto>.
26
27 To compile this driver as a module, choose M here. The module will
28 be called de2104x.
29
30config DE2104X_DSL
31 int "Descriptor Skip Length in 32 bit longwords"
32 depends on DE2104X
33 range 0 31
34 default 0
35 ---help---
36 Setting this value allows you to align ring buffer descriptors into their
37 own cache lines. A value of 4 corresponds to the typical 32 byte line
38 (the descriptor is 16 bytes). This is necessary on systems that lack
39 cache coherence; one example is the PowerMac 5500. Otherwise 0 is safe.
40 Default is 0, and range is 0 to 31.
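 For example, with the 16-byte descriptor a value of 4 pads each
 descriptor to 16 + 4*4 = 32 bytes, exactly one such cache line.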
41
42config TULIP
43 tristate "DECchip Tulip (dc2114x) PCI support"
44 depends on PCI
45 select CRC32
46 ---help---
47 This driver is developed for the SMC EtherPower series Ethernet
48 cards and also works with cards based on the DECchip
49 21140 (Tulip series) chips. Some LinkSys PCI cards are
50 of this type. (If your card is NOT SMC EtherPower 10/100 PCI
51 (smc9332dst), you can also try the driver for "Generic DECchip"
52 cards, below. However, most people with a network card of this type
53 will say Y here.) Do read the Ethernet-HOWTO, available from
54 <http://www.tldp.org/docs.html#howto>.
55
56 To compile this driver as a module, choose M here. The module will
57 be called tulip.
58
59config TULIP_MWI
60 bool "New bus configuration (EXPERIMENTAL)"
61 depends on TULIP && EXPERIMENTAL
62 ---help---
63 This configures your Tulip card specifically for the card and
64 system cache line size type you are using.
65
66 This is experimental code, not yet tested on many boards.
67
68 If unsure, say N.
69
70config TULIP_MMIO
71 bool "Use PCI shared mem for NIC registers"
72 depends on TULIP
73 ---help---
74 Use PCI shared memory for the NIC registers, rather than going through
75 the Tulip's PIO (programmed I/O ports). Faster, but could produce
76 obscure bugs if your mainboard has memory controller timing issues.
77 If in doubt, say N.
78
79config TULIP_NAPI
80 bool "Use RX polling (NAPI)"
81 depends on TULIP
82 ---help---
83 NAPI is a new driver API designed to reduce CPU and interrupt load
84 when the driver is receiving lots of packets from the card. It is
85 still somewhat experimental and thus not yet enabled by default.
86
87 If your estimated Rx load is 10kpps or more, or if the card will be
88 deployed on potentially unfriendly networks (e.g. in a firewall),
89 then say Y here.
90
91 If in doubt, say N.
92
93config TULIP_NAPI_HW_MITIGATION
94 bool "Use Interrupt Mitigation"
95 depends on TULIP_NAPI
96 ---help---
97 Use HW to reduce RX interrupts. Not strictly necessary since NAPI
98 reduces RX interrupts by itself. Interrupt mitigation reduces RX
99 interrupts even at low levels of traffic at the cost of a small
100 latency.
101
102 If in doubt, say Y.
103
104config TULIP_DM910X
105 def_bool y
106 depends on TULIP && SPARC
107
108config DE4X5
109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
110 depends on (PCI || EISA)
111 select CRC32
112 ---help---
113 This is support for the DIGITAL series of PCI/EISA Ethernet cards.
114 These include the DE425, DE434, DE435, DE450 and DE500 models. If
115 you have a network card of this type, say Y and read the
116 Ethernet-HOWTO, available from
117 <http://www.tldp.org/docs.html#howto>. More specific
118 information is contained in
119 <file:Documentation/networking/de4x5.txt>.
120
121 To compile this driver as a module, choose M here. The module will
122 be called de4x5.
123
124config WINBOND_840
125 tristate "Winbond W89c840 Ethernet support"
126 depends on PCI
127 select CRC32
128 select NET_CORE
129 select MII
130 ---help---
131 This driver is for the Winbond W89c840 chip. It also works with
132 the TX9882 chip on the Compex RL100-ATX board.
133 More specific information and updates are available from
134 <http://www.scyld.com/network/drivers.html>.
135
136config DM9102
137 tristate "Davicom DM910x/DM980x support"
138 depends on PCI
139 select CRC32
140 ---help---
141 This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
142 Davicom (<http://www.davicom.com.tw/>). If you have such a network
143 (Ethernet) card, say Y. Some information is contained in the file
144 <file:Documentation/networking/dmfe.txt>.
145
146 To compile this driver as a module, choose M here. The module will
147 be called dmfe.
148
149config ULI526X
150 tristate "ULi M526x controller support"
151 depends on PCI
152 select CRC32
153 ---help---
154 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
155 (<http://www.nvidia.com/page/uli_drivers.html>).
156
157 To compile this driver as a module, choose M here. The module will
158 be called uli526x.
159
160config PCMCIA_XIRCOM
161 tristate "Xircom CardBus support"
162 depends on CARDBUS
163 ---help---
164 This driver is for the Digital "Tulip" Ethernet CardBus adapters.
165 It should work with most DEC 21*4*-based chips/ethercards, as well
166 as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
167 ASIX.
168
169 To compile this driver as a module, choose M here. The module will
170 be called xircom_cb. If unsure, say N.
171
172endif # NET_TULIP
diff --git a/drivers/net/ethernet/dec/tulip/Makefile b/drivers/net/ethernet/dec/tulip/Makefile
new file mode 100644
index 000000000000..5e8be38b45bb
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/Makefile
@@ -0,0 +1,19 @@
1#
2# Makefile for the Linux "Tulip" family network device drivers.
3#
4
5ccflags-$(CONFIG_NET_TULIP) := -DDEBUG
6
7obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
8obj-$(CONFIG_DM9102) += dmfe.o
9obj-$(CONFIG_WINBOND_840) += winbond-840.o
10obj-$(CONFIG_DE2104X) += de2104x.o
11obj-$(CONFIG_TULIP) += tulip.o
12obj-$(CONFIG_DE4X5) += de4x5.o
13obj-$(CONFIG_ULI526X) += uli526x.o
14
15# Declare multi-part drivers.
16
17tulip-objs := eeprom.o interrupt.o media.o \
18 timer.o tulip_core.o \
19 21142.o pnic.o pnic2.o
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
new file mode 100644
index 000000000000..1427739d9a51
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -0,0 +1,2215 @@
1/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
2/*
3 Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>
4
5 Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
6 Written/copyright 1994-2001 by Donald Becker. [tulip.c]
7
8 This software may be used and distributed according to the terms of
9 the GNU General Public License (GPL), incorporated herein by reference.
10 Drivers based on or derived from this code fall under the GPL and must
11 retain the authorship, copyright and license notice. This file is not
12 a complete program and may only be used when the entire operating
13 system is licensed under the GPL.
14
15 See the file COPYING in this distribution for more information.
16
17 TODO, in rough priority order:
18 * Support forcing media type with a module parameter,
19 like dl2k.c/sundance.c
20 * Constants (module parms?) for Rx work limit
21 * Complete reset on PciErr
22 * Jumbo frames / dev->change_mtu
23 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
24 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
25 * Implement Tx software interrupt mitigation via
26 Tx descriptor bit
27
28 */
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#define DRV_NAME "de2104x"
33#define DRV_VERSION "0.7"
34#define DRV_RELDATE "Mar 17, 2004"
35
36#include <linux/module.h>
37#include <linux/kernel.h>
38#include <linux/netdevice.h>
39#include <linux/etherdevice.h>
40#include <linux/init.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/delay.h>
44#include <linux/ethtool.h>
45#include <linux/compiler.h>
46#include <linux/rtnetlink.h>
47#include <linux/crc32.h>
48#include <linux/slab.h>
49
50#include <asm/io.h>
51#include <asm/irq.h>
52#include <asm/uaccess.h>
53#include <asm/unaligned.h>
54
55/* These identify the driver base version and may not be removed. */
56static char version[] =
57"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
58
59MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
60MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
61MODULE_LICENSE("GPL");
62MODULE_VERSION(DRV_VERSION);
63
64static int debug = -1;
65module_param (debug, int, 0);
66MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");
67
68/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
69#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
70 defined(CONFIG_SPARC) || defined(__ia64__) || \
71 defined(__sh__) || defined(__mips__)
72static int rx_copybreak = 1518;
73#else
74static int rx_copybreak = 100;
75#endif
76module_param (rx_copybreak, int, 0);
77MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
78
79#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
80 NETIF_MSG_PROBE | \
81 NETIF_MSG_LINK | \
82 NETIF_MSG_IFDOWN | \
83 NETIF_MSG_IFUP | \
84 NETIF_MSG_RX_ERR | \
85 NETIF_MSG_TX_ERR)
86
87/* Descriptor skip length in 32 bit longwords. */
88#ifndef CONFIG_DE2104X_DSL
89#define DSL 0
90#else
91#define DSL CONFIG_DE2104X_DSL
92#endif
93
94#define DE_RX_RING_SIZE 64
95#define DE_TX_RING_SIZE 64
96#define DE_RING_BYTES \
97 ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \
98 (sizeof(struct de_desc) * DE_TX_RING_SIZE))
99#define NEXT_TX(N) (((N) + 1) & (DE_TX_RING_SIZE - 1))
100#define NEXT_RX(N) (((N) + 1) & (DE_RX_RING_SIZE - 1))
101#define TX_BUFFS_AVAIL(CP) \
102 (((CP)->tx_tail <= (CP)->tx_head) ? \
103 (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
104 (CP)->tx_tail - (CP)->tx_head - 1)
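/* Worked example of TX_BUFFS_AVAIL: with tx_head == tx_tail (empty ring)
 * it yields DE_TX_RING_SIZE - 1, and with tx_head one slot behind
 * tx_tail it yields 0 -- one entry is always kept unused so a full
 * ring can be told apart from an empty one. */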
105
106#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
107#define RX_OFFSET 2
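/* 2-byte pad used when copying small frames so the 14-byte Ethernet
 * header leaves the IP header 32-bit aligned (see de_rx()). */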
108
109#define DE_SETUP_SKB ((struct sk_buff *) 1)
110#define DE_DUMMY_SKB ((struct sk_buff *) 2)
111#define DE_SETUP_FRAME_WORDS 96
112#define DE_EEPROM_WORDS 256
113#define DE_EEPROM_SIZE (DE_EEPROM_WORDS * sizeof(u16))
114#define DE_MAX_MEDIA 5
115
116#define DE_MEDIA_TP_AUTO 0
117#define DE_MEDIA_BNC 1
118#define DE_MEDIA_AUI 2
119#define DE_MEDIA_TP 3
120#define DE_MEDIA_TP_FD 4
121#define DE_MEDIA_INVALID DE_MAX_MEDIA
122#define DE_MEDIA_FIRST 0
123#define DE_MEDIA_LAST (DE_MAX_MEDIA - 1)
124#define DE_AUI_BNC (SUPPORTED_AUI | SUPPORTED_BNC)
125
126#define DE_TIMER_LINK (60 * HZ)
127#define DE_TIMER_NO_LINK (5 * HZ)
128
129#define DE_NUM_REGS 16
130#define DE_REGS_SIZE (DE_NUM_REGS * sizeof(u32))
131#define DE_REGS_VER 1
132
133/* Time in jiffies before concluding the transmitter is hung. */
134#define TX_TIMEOUT (6*HZ)
135
136/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
137 to support a pre-NWay full-duplex signaling mechanism using short frames.
138 No one knows what it should be, but if left at its default value some
139 10base2(!) packets trigger a full-duplex-request interrupt. */
140#define FULL_DUPLEX_MAGIC 0x6969
141
142enum {
143 /* NIC registers */
144 BusMode = 0x00,
145 TxPoll = 0x08,
146 RxPoll = 0x10,
147 RxRingAddr = 0x18,
148 TxRingAddr = 0x20,
149 MacStatus = 0x28,
150 MacMode = 0x30,
151 IntrMask = 0x38,
152 RxMissed = 0x40,
153 ROMCmd = 0x48,
154 CSR11 = 0x58,
155 SIAStatus = 0x60,
156 CSR13 = 0x68,
157 CSR14 = 0x70,
158 CSR15 = 0x78,
159 PCIPM = 0x40,
160
161 /* BusMode bits */
162 CmdReset = (1 << 0),
163 CacheAlign16 = 0x00008000,
164 BurstLen4 = 0x00000400,
165 DescSkipLen = (DSL << 2),
166
167 /* Rx/TxPoll bits */
168 NormalTxPoll = (1 << 0),
169 NormalRxPoll = (1 << 0),
170
171 /* Tx/Rx descriptor status bits */
172 DescOwn = (1 << 31),
173 RxError = (1 << 15),
174 RxErrLong = (1 << 7),
175 RxErrCRC = (1 << 1),
176 RxErrFIFO = (1 << 0),
177 RxErrRunt = (1 << 11),
178 RxErrFrame = (1 << 14),
179 RingEnd = (1 << 25),
180 FirstFrag = (1 << 29),
181 LastFrag = (1 << 30),
182 TxError = (1 << 15),
183 TxFIFOUnder = (1 << 1),
184 TxLinkFail = (1 << 2) | (1 << 10) | (1 << 11),
185 TxMaxCol = (1 << 8),
186 TxOWC = (1 << 9),
187 TxJabber = (1 << 14),
188 SetupFrame = (1 << 27),
189 TxSwInt = (1 << 31),
190
191 /* MacStatus bits */
192 IntrOK = (1 << 16),
193 IntrErr = (1 << 15),
194 RxIntr = (1 << 6),
195 RxEmpty = (1 << 7),
196 TxIntr = (1 << 0),
197 TxEmpty = (1 << 2),
198 PciErr = (1 << 13),
199 TxState = (1 << 22) | (1 << 21) | (1 << 20),
200 RxState = (1 << 19) | (1 << 18) | (1 << 17),
201 LinkFail = (1 << 12),
202 LinkPass = (1 << 4),
203 RxStopped = (1 << 8),
204 TxStopped = (1 << 1),
205
206 /* MacMode bits */
207 TxEnable = (1 << 13),
208 RxEnable = (1 << 1),
209 RxTx = TxEnable | RxEnable,
210 FullDuplex = (1 << 9),
211 AcceptAllMulticast = (1 << 7),
212 AcceptAllPhys = (1 << 6),
213 BOCnt = (1 << 5),
214 MacModeClear = (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
215 RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,
216
217 /* ROMCmd bits */
218 EE_SHIFT_CLK = 0x02, /* EEPROM shift clock. */
219 EE_CS = 0x01, /* EEPROM chip select. */
220 EE_DATA_WRITE = 0x04, /* Data from the Tulip to EEPROM. */
221 EE_WRITE_0 = 0x01,
222 EE_WRITE_1 = 0x05,
223 EE_DATA_READ = 0x08, /* Data from the EEPROM chip. */
224 EE_ENB = (0x4800 | EE_CS),
225
226 /* The EEPROM commands include the always-set leading bit. */
227 EE_READ_CMD = 6,
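 /* i.e. 0b110: the always-set start bit followed by the two-bit
 READ opcode (10) of 93Cxx-style serial EEPROMs. */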
228
229 /* RxMissed bits */
230 RxMissedOver = (1 << 16),
231 RxMissedMask = 0xffff,
232
233 /* SROM-related bits */
234 SROMC0InfoLeaf = 27,
235 MediaBlockMask = 0x3f,
236 MediaCustomCSRs = (1 << 6),
237
238 /* PCIPM bits */
239 PM_Sleep = (1 << 31),
240 PM_Snooze = (1 << 30),
241 PM_Mask = PM_Sleep | PM_Snooze,
242
243 /* SIAStatus bits */
244 NWayState = (1 << 14) | (1 << 13) | (1 << 12),
245 NWayRestart = (1 << 12),
246 NonselPortActive = (1 << 9),
247 SelPortActive = (1 << 8),
248 LinkFailStatus = (1 << 2),
249 NetCxnErr = (1 << 1),
250};
251
252static const u32 de_intr_mask =
253 IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
254 LinkPass | LinkFail | PciErr;
255
256/*
257 * Set the programmable burst length to 4 longwords for all:
258 * DMA errors result without these values. Cache align 16 long.
259 */
260static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;
261
262struct de_srom_media_block {
263 u8 opts;
264 u16 csr13;
265 u16 csr14;
266 u16 csr15;
267} __packed;
268
269struct de_srom_info_leaf {
270 u16 default_media;
271 u8 n_blocks;
272 u8 unused;
273} __packed;
274
275struct de_desc {
276 __le32 opts1;
277 __le32 opts2;
278 __le32 addr1;
279 __le32 addr2;
280#if DSL
281 __le32 skip[DSL];
282#endif
283};
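/* sizeof(struct de_desc) is 16 bytes (four __le32) plus one longword per
 * CONFIG_DE2104X_DSL unit; the skip[] padding is what the DescSkipLen
 * field folded into de_bus_mode tells the chip to hop over between
 * descriptors. */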
284
285struct media_info {
286 u16 type; /* DE_MEDIA_xxx */
287 u16 csr13;
288 u16 csr14;
289 u16 csr15;
290};
291
292struct ring_info {
293 struct sk_buff *skb;
294 dma_addr_t mapping;
295};
296
297struct de_private {
298 unsigned tx_head;
299 unsigned tx_tail;
300 unsigned rx_tail;
301
302 void __iomem *regs;
303 struct net_device *dev;
304 spinlock_t lock;
305
306 struct de_desc *rx_ring;
307 struct de_desc *tx_ring;
308 struct ring_info tx_skb[DE_TX_RING_SIZE];
309 struct ring_info rx_skb[DE_RX_RING_SIZE];
310 unsigned rx_buf_sz;
311 dma_addr_t ring_dma;
312
313 u32 msg_enable;
314
315 struct net_device_stats net_stats;
316
317 struct pci_dev *pdev;
318
319 u16 setup_frame[DE_SETUP_FRAME_WORDS];
320
321 u32 media_type;
322 u32 media_supported;
323 u32 media_advertise;
324 struct media_info media[DE_MAX_MEDIA];
325 struct timer_list media_timer;
326
327 u8 *ee_data;
328 unsigned board_idx;
329 unsigned de21040 : 1;
330 unsigned media_lock : 1;
331};
332
333
334static void de_set_rx_mode (struct net_device *dev);
335static void de_tx (struct de_private *de);
336static void de_clean_rings (struct de_private *de);
337static void de_media_interrupt (struct de_private *de, u32 status);
338static void de21040_media_timer (unsigned long data);
339static void de21041_media_timer (unsigned long data);
340static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
341
342
343static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
344 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
345 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
346 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
347 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
348 { },
349};
350MODULE_DEVICE_TABLE(pci, de_pci_tbl);
351
352static const char * const media_name[DE_MAX_MEDIA] = {
353 "10baseT auto",
354 "BNC",
355 "AUI",
356 "10baseT-HD",
357 "10baseT-FD"
358};
359
360/* 21040 transceiver register settings:
361 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD */
362static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
363static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
364static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };
365
366/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
367static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
368static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
369/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
370static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
371static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
372
373
374#define dr32(reg) ioread32(de->regs + (reg))
375#define dw32(reg, val) iowrite32((val), de->regs + (reg))
376
377
378static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
379 u32 status, u32 len)
380{
381 netif_dbg(de, rx_err, de->dev,
382 "rx err, slot %d status 0x%x len %d\n",
383 rx_tail, status, len);
384
385 if ((status & 0x38000300) != 0x0300) {
386 /* Ignore earlier buffers. */
387 if ((status & 0xffff) != 0x7fff) {
388 netif_warn(de, rx_err, de->dev,
389 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
390 status);
391 de->net_stats.rx_length_errors++;
392 }
393 } else if (status & RxError) {
394 /* There was a fatal error. */
395 de->net_stats.rx_errors++; /* end of a packet.*/
396 if (status & 0x0890) de->net_stats.rx_length_errors++;
397 if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
398 if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
399 }
400}
401
402static void de_rx (struct de_private *de)
403{
404 unsigned rx_tail = de->rx_tail;
405 unsigned rx_work = DE_RX_RING_SIZE;
406 unsigned drop = 0;
407 int rc;
408
409 while (--rx_work) {
410 u32 status, len;
411 dma_addr_t mapping;
412 struct sk_buff *skb, *copy_skb;
413 unsigned copying_skb, buflen;
414
415 skb = de->rx_skb[rx_tail].skb;
416 BUG_ON(!skb);
417 rmb();
418 status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
419 if (status & DescOwn)
420 break;
421
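 /* The 0x7ff field is the chip's frame-length count, which
 includes the trailing 4-byte FCS -- hence the - 4 below. */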
422 len = ((status >> 16) & 0x7ff) - 4;
423 mapping = de->rx_skb[rx_tail].mapping;
424
425 if (unlikely(drop)) {
426 de->net_stats.rx_dropped++;
427 goto rx_next;
428 }
429
430 if (unlikely((status & 0x38008300) != 0x0300)) {
431 de_rx_err_acct(de, rx_tail, status, len);
432 goto rx_next;
433 }
434
435 copying_skb = (len <= rx_copybreak);
436
437 netif_dbg(de, rx_status, de->dev,
438 "rx slot %d status 0x%x len %d copying? %d\n",
439 rx_tail, status, len, copying_skb);
440
441 buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
442 copy_skb = dev_alloc_skb (buflen);
443 if (unlikely(!copy_skb)) {
444 de->net_stats.rx_dropped++;
445 drop = 1;
446 rx_work = 100;
447 goto rx_next;
448 }
449
450 if (!copying_skb) {
451 pci_unmap_single(de->pdev, mapping,
452 buflen, PCI_DMA_FROMDEVICE);
453 skb_put(skb, len);
454
455 mapping =
456 de->rx_skb[rx_tail].mapping =
457 pci_map_single(de->pdev, copy_skb->data,
458 buflen, PCI_DMA_FROMDEVICE);
459 de->rx_skb[rx_tail].skb = copy_skb;
460 } else {
461 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
462 skb_reserve(copy_skb, RX_OFFSET);
463 skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
464 len);
465 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
466
467 /* We'll reuse the original ring buffer. */
468 skb = copy_skb;
469 }
470
471 skb->protocol = eth_type_trans (skb, de->dev);
472
473 de->net_stats.rx_packets++;
474 de->net_stats.rx_bytes += skb->len;
475 rc = netif_rx (skb);
476 if (rc == NET_RX_DROP)
477 drop = 1;
478
479rx_next:
480 if (rx_tail == (DE_RX_RING_SIZE - 1))
481 de->rx_ring[rx_tail].opts2 =
482 cpu_to_le32(RingEnd | de->rx_buf_sz);
483 else
484 de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
485 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
486 wmb();
487 de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
488 rx_tail = NEXT_RX(rx_tail);
489 }
490
491 if (!rx_work)
492 netdev_warn(de->dev, "rx work limit reached\n");
493
494 de->rx_tail = rx_tail;
495}
496
497static irqreturn_t de_interrupt (int irq, void *dev_instance)
498{
499 struct net_device *dev = dev_instance;
500 struct de_private *de = netdev_priv(dev);
501 u32 status;
502
503 status = dr32(MacStatus);
504 if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
505 return IRQ_NONE;
506
507 netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
508 status, dr32(MacMode),
509 de->rx_tail, de->tx_head, de->tx_tail);
510
511 dw32(MacStatus, status);
512
513 if (status & (RxIntr | RxEmpty)) {
514 de_rx(de);
515 if (status & RxEmpty)
516 dw32(RxPoll, NormalRxPoll);
517 }
518
519 spin_lock(&de->lock);
520
521 if (status & (TxIntr | TxEmpty))
522 de_tx(de);
523
524 if (status & (LinkPass | LinkFail))
525 de_media_interrupt(de, status);
526
527 spin_unlock(&de->lock);
528
529 if (status & PciErr) {
530 u16 pci_status;
531
532 pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
533 pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
534 netdev_err(de->dev,
535 "PCI bus error, status=%08x, PCI status=%04x\n",
536 status, pci_status);
537 }
538
539 return IRQ_HANDLED;
540}
541
542static void de_tx (struct de_private *de)
543{
544 unsigned tx_head = de->tx_head;
545 unsigned tx_tail = de->tx_tail;
546
547 while (tx_tail != tx_head) {
548 struct sk_buff *skb;
549 u32 status;
550
551 rmb();
552 status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
553 if (status & DescOwn)
554 break;
555
556 skb = de->tx_skb[tx_tail].skb;
557 BUG_ON(!skb);
558 if (unlikely(skb == DE_DUMMY_SKB))
559 goto next;
560
561 if (unlikely(skb == DE_SETUP_SKB)) {
562 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
563 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
564 goto next;
565 }
566
567 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
568 skb->len, PCI_DMA_TODEVICE);
569
570 if (status & LastFrag) {
571 if (status & TxError) {
572 netif_dbg(de, tx_err, de->dev,
573 "tx err, status 0x%x\n",
574 status);
575 de->net_stats.tx_errors++;
576 if (status & TxOWC)
577 de->net_stats.tx_window_errors++;
578 if (status & TxMaxCol)
579 de->net_stats.tx_aborted_errors++;
580 if (status & TxLinkFail)
581 de->net_stats.tx_carrier_errors++;
582 if (status & TxFIFOUnder)
583 de->net_stats.tx_fifo_errors++;
584 } else {
585 de->net_stats.tx_packets++;
586 de->net_stats.tx_bytes += skb->len;
587 netif_dbg(de, tx_done, de->dev,
588 "tx done, slot %d\n", tx_tail);
589 }
590 dev_kfree_skb_irq(skb);
591 }
592
593next:
594 de->tx_skb[tx_tail].skb = NULL;
595
596 tx_tail = NEXT_TX(tx_tail);
597 }
598
599 de->tx_tail = tx_tail;
600
601 if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
602 netif_wake_queue(de->dev);
603}
604
605static netdev_tx_t de_start_xmit (struct sk_buff *skb,
606 struct net_device *dev)
607{
608 struct de_private *de = netdev_priv(dev);
609 unsigned int entry, tx_free;
610 u32 mapping, len, flags = FirstFrag | LastFrag;
611 struct de_desc *txd;
612
613 spin_lock_irq(&de->lock);
614
615 tx_free = TX_BUFFS_AVAIL(de);
616 if (tx_free == 0) {
617 netif_stop_queue(dev);
618 spin_unlock_irq(&de->lock);
619 return NETDEV_TX_BUSY;
620 }
621 tx_free--;
622
623 entry = de->tx_head;
624
625 txd = &de->tx_ring[entry];
626
627 len = skb->len;
628 mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
629 if (entry == (DE_TX_RING_SIZE - 1))
630 flags |= RingEnd;
631 if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
632 flags |= TxSwInt;
633 flags |= len;
634 txd->opts2 = cpu_to_le32(flags);
635 txd->addr1 = cpu_to_le32(mapping);
636
637 de->tx_skb[entry].skb = skb;
638 de->tx_skb[entry].mapping = mapping;
639 wmb();
640
641 txd->opts1 = cpu_to_le32(DescOwn);
642 wmb();
643
644 de->tx_head = NEXT_TX(entry);
645 netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
646 entry, skb->len);
647
648 if (tx_free == 0)
649 netif_stop_queue(dev);
650
651 spin_unlock_irq(&de->lock);
652
653 /* Trigger an immediate transmit demand. */
654 dw32(TxPoll, NormalTxPoll);
655
656 return NETDEV_TX_OK;
657}
658
659/* Set or clear the multicast filter for this adaptor.
660 Note that we only use exclusion around actually queueing the
661 new frame, not around filling de->setup_frame. This is non-deterministic
662 when re-entered but still correct. */
663
664#undef set_bit_le
665#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
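/* e.g. set_bit_le(255, hash_table) sets bit 7 of byte 31 -- the
 * broadcast entry of the 512-bin hash below (hence the & 0x1ff). */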
666
667static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
668{
669 struct de_private *de = netdev_priv(dev);
670 u16 hash_table[32];
671 struct netdev_hw_addr *ha;
672 int i;
673 u16 *eaddrs;
674
675 memset(hash_table, 0, sizeof(hash_table));
676 set_bit_le(255, hash_table); /* Broadcast entry */
677 /* This should work on big-endian machines as well. */
678 netdev_for_each_mc_addr(ha, dev) {
679 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
680
681 set_bit_le(index, hash_table);
682 }
683
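 /* Store each 16-bit hash word twice, filling both halves of the
 32-bit longword; the chip reads only the low-address shortword
 (see the note above __de_set_rx_mode()), so the layout works
 unchanged on big- and little-endian hosts. */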
684 for (i = 0; i < 32; i++) {
685 *setup_frm++ = hash_table[i];
686 *setup_frm++ = hash_table[i];
687 }
688 setup_frm = &de->setup_frame[13*6];
689
690 /* Fill the final entry with our physical address. */
691 eaddrs = (u16 *)dev->dev_addr;
692 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
693 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
694 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
695}
696
697static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
698{
699 struct de_private *de = netdev_priv(dev);
700 struct netdev_hw_addr *ha;
701 u16 *eaddrs;
702
703 /* We have <= 14 addresses so we can use the wonderful
704 16 address perfect filtering of the Tulip. */
705 netdev_for_each_mc_addr(ha, dev) {
706 eaddrs = (u16 *) ha->addr;
707 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
708 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
710 }
711 /* Fill the unused entries with the broadcast address. */
712 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
713 setup_frm = &de->setup_frame[15*6];
714
715 /* Fill the final entry with our physical address. */
716 eaddrs = (u16 *)dev->dev_addr;
717 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
718 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
719 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
720}
721
722
723static void __de_set_rx_mode (struct net_device *dev)
724{
725 struct de_private *de = netdev_priv(dev);
726 u32 macmode;
727 unsigned int entry;
728 u32 mapping;
729 struct de_desc *txd;
730 struct de_desc *dummy_txd = NULL;
731
732 macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);
733
734 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
735 macmode |= AcceptAllMulticast | AcceptAllPhys;
736 goto out;
737 }
738
739 if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
740 /* Too many to filter well -- accept all multicasts. */
741 macmode |= AcceptAllMulticast;
742 goto out;
743 }
744
745 /* Note that only the low-address shortword of setup_frame is valid!
746 The values are doubled for big-endian architectures. */
747 if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
748 build_setup_frame_hash (de->setup_frame, dev);
749 else
750 build_setup_frame_perfect (de->setup_frame, dev);
751
752 /*
753 * Now add this frame to the Tx list.
754 */
755
756 entry = de->tx_head;
757
758 /* Avoid a chip erratum by prefixing a dummy entry. */
759 if (entry != 0) {
760 de->tx_skb[entry].skb = DE_DUMMY_SKB;
761
762 dummy_txd = &de->tx_ring[entry];
763 dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
764 cpu_to_le32(RingEnd) : 0;
765 dummy_txd->addr1 = 0;
766
767 /* Must set DescOwned later to avoid race with chip */
768
769 entry = NEXT_TX(entry);
770 }
771
772 de->tx_skb[entry].skb = DE_SETUP_SKB;
773 de->tx_skb[entry].mapping = mapping =
774 pci_map_single (de->pdev, de->setup_frame,
775 sizeof (de->setup_frame), PCI_DMA_TODEVICE);
776
777 /* Put the setup frame on the Tx list. */
778 txd = &de->tx_ring[entry];
779 if (entry == (DE_TX_RING_SIZE - 1))
780 txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
781 else
782 txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
783 txd->addr1 = cpu_to_le32(mapping);
784 wmb();
785
786 txd->opts1 = cpu_to_le32(DescOwn);
787 wmb();
788
789 if (dummy_txd) {
790 dummy_txd->opts1 = cpu_to_le32(DescOwn);
791 wmb();
792 }
793
794 de->tx_head = NEXT_TX(entry);
795
796 if (TX_BUFFS_AVAIL(de) == 0)
797 netif_stop_queue(dev);
798
799 /* Trigger an immediate transmit demand. */
800 dw32(TxPoll, NormalTxPoll);
801
802out:
803 if (macmode != dr32(MacMode))
804 dw32(MacMode, macmode);
805}
806
807static void de_set_rx_mode (struct net_device *dev)
808{
809 unsigned long flags;
810 struct de_private *de = netdev_priv(dev);
811
812 spin_lock_irqsave (&de->lock, flags);
813 __de_set_rx_mode(dev);
814 spin_unlock_irqrestore (&de->lock, flags);
815}
816
817static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
818{
819 if (unlikely(rx_missed & RxMissedOver))
820 de->net_stats.rx_missed_errors += RxMissedMask;
821 else
822 de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
823}
824
825static void __de_get_stats(struct de_private *de)
826{
827 u32 tmp = dr32(RxMissed); /* self-clearing */
828
829 de_rx_missed(de, tmp);
830}
831
832static struct net_device_stats *de_get_stats(struct net_device *dev)
833{
834 struct de_private *de = netdev_priv(dev);
835
836 /* The chip need only report frames it silently dropped. */
837 spin_lock_irq(&de->lock);
838 if (netif_running(dev) && netif_device_present(dev))
839 __de_get_stats(de);
840 spin_unlock_irq(&de->lock);
841
842 return &de->net_stats;
843}
844
845static inline int de_is_running (struct de_private *de)
846{
847 return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
848}
849
850static void de_stop_rxtx (struct de_private *de)
851{
852 u32 macmode;
853 unsigned int i = 1300/100;
854
855 macmode = dr32(MacMode);
856 if (macmode & RxTx) {
857 dw32(MacMode, macmode & ~RxTx);
858 dr32(MacMode);
859 }
860
861 /* wait until in-flight frame completes.
862 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
863 * Typically expect this loop to end in < 50 us on 100BT.
864 */
865 while (--i) {
866 if (!de_is_running(de))
867 return;
868 udelay(100);
869 }
870
871 netdev_warn(de->dev, "timeout expired, stopping DMA\n");
872}
873
874static inline void de_start_rxtx (struct de_private *de)
875{
876 u32 macmode;
877
878 macmode = dr32(MacMode);
879 if ((macmode & RxTx) != RxTx) {
880 dw32(MacMode, macmode | RxTx);
881 dr32(MacMode);
882 }
883}
884
885static void de_stop_hw (struct de_private *de)
886{
887
888 udelay(5);
889 dw32(IntrMask, 0);
890
891 de_stop_rxtx(de);
892
893 dw32(MacStatus, dr32(MacStatus));
894
895 udelay(10);
896
897 de->rx_tail = 0;
898 de->tx_head = de->tx_tail = 0;
899}
900
901static void de_link_up(struct de_private *de)
902{
903 if (!netif_carrier_ok(de->dev)) {
904 netif_carrier_on(de->dev);
905 netif_info(de, link, de->dev, "link up, media %s\n",
906 media_name[de->media_type]);
907 }
908}
909
910static void de_link_down(struct de_private *de)
911{
912 if (netif_carrier_ok(de->dev)) {
913 netif_carrier_off(de->dev);
914 netif_info(de, link, de->dev, "link down\n");
915 }
916}
917
918static void de_set_media (struct de_private *de)
919{
920 unsigned media = de->media_type;
921 u32 macmode = dr32(MacMode);
922
923 if (de_is_running(de))
924 netdev_warn(de->dev, "chip is running while changing media!\n");
925
926 if (de->de21040)
927 dw32(CSR11, FULL_DUPLEX_MAGIC);
928 dw32(CSR13, 0); /* Reset phy */
929 dw32(CSR14, de->media[media].csr14);
930 dw32(CSR15, de->media[media].csr15);
931 dw32(CSR13, de->media[media].csr13);
932
933 /* must delay 10ms before writing to other registers,
934 * especially CSR6
935 */
936 mdelay(10);
937
938 if (media == DE_MEDIA_TP_FD)
939 macmode |= FullDuplex;
940 else
941 macmode &= ~FullDuplex;
942
943 netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
944 netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
945 dr32(MacMode), dr32(SIAStatus),
946 dr32(CSR13), dr32(CSR14), dr32(CSR15));
947 netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
948 macmode, de->media[media].csr13,
949 de->media[media].csr14, de->media[media].csr15);
950 if (macmode != dr32(MacMode))
951 dw32(MacMode, macmode);
952}
953
954static void de_next_media (struct de_private *de, const u32 *media,
955 unsigned int n_media)
956{
957 unsigned int i;
958
959 for (i = 0; i < n_media; i++) {
960 if (de_ok_to_advertise(de, media[i])) {
961 de->media_type = media[i];
962 return;
963 }
964 }
965}
966
967static void de21040_media_timer (unsigned long data)
968{
969 struct de_private *de = (struct de_private *) data;
970 struct net_device *dev = de->dev;
971 u32 status = dr32(SIAStatus);
972 unsigned int carrier;
973 unsigned long flags;
974
975 carrier = (status & NetCxnErr) ? 0 : 1;
976
977 if (carrier) {
978 if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
979 goto no_link_yet;
980
981 de->media_timer.expires = jiffies + DE_TIMER_LINK;
982 add_timer(&de->media_timer);
983 if (!netif_carrier_ok(dev))
984 de_link_up(de);
985 else
986 netif_info(de, timer, dev, "%s link ok, status %x\n",
987 media_name[de->media_type], status);
988 return;
989 }
990
991 de_link_down(de);
992
993 if (de->media_lock)
994 return;
995
996 if (de->media_type == DE_MEDIA_AUI) {
997 static const u32 next_state = DE_MEDIA_TP;
998 de_next_media(de, &next_state, 1);
999 } else {
1000 static const u32 next_state = DE_MEDIA_AUI;
1001 de_next_media(de, &next_state, 1);
1002 }
1003
1004 spin_lock_irqsave(&de->lock, flags);
1005 de_stop_rxtx(de);
1006 spin_unlock_irqrestore(&de->lock, flags);
1007 de_set_media(de);
1008 de_start_rxtx(de);
1009
1010no_link_yet:
1011 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1012 add_timer(&de->media_timer);
1013
1014 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1015 media_name[de->media_type], status);
1016}
1017
1018static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
1019{
1020 switch (new_media) {
1021 case DE_MEDIA_TP_AUTO:
1022 if (!(de->media_advertise & ADVERTISED_Autoneg))
1023 return 0;
1024 if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
1025 return 0;
1026 break;
1027 case DE_MEDIA_BNC:
1028 if (!(de->media_advertise & ADVERTISED_BNC))
1029 return 0;
1030 break;
1031 case DE_MEDIA_AUI:
1032 if (!(de->media_advertise & ADVERTISED_AUI))
1033 return 0;
1034 break;
1035 case DE_MEDIA_TP:
1036 if (!(de->media_advertise & ADVERTISED_10baseT_Half))
1037 return 0;
1038 break;
1039 case DE_MEDIA_TP_FD:
1040 if (!(de->media_advertise & ADVERTISED_10baseT_Full))
1041 return 0;
1042 break;
1043 }
1044
1045 return 1;
1046}
1047
1048static void de21041_media_timer (unsigned long data)
1049{
1050 struct de_private *de = (struct de_private *) data;
1051 struct net_device *dev = de->dev;
1052 u32 status = dr32(SIAStatus);
1053 unsigned int carrier;
1054 unsigned long flags;
1055
1056 /* clear port active bits */
1057 dw32(SIAStatus, NonselPortActive | SelPortActive);
1058
1059 carrier = (status & NetCxnErr) ? 0 : 1;
1060
1061 if (carrier) {
1062 if ((de->media_type == DE_MEDIA_TP_AUTO ||
1063 de->media_type == DE_MEDIA_TP ||
1064 de->media_type == DE_MEDIA_TP_FD) &&
1065 (status & LinkFailStatus))
1066 goto no_link_yet;
1067
1068 de->media_timer.expires = jiffies + DE_TIMER_LINK;
1069 add_timer(&de->media_timer);
1070 if (!netif_carrier_ok(dev))
1071 de_link_up(de);
1072 else
1073 netif_info(de, timer, dev,
1074 "%s link ok, mode %x status %x\n",
1075 media_name[de->media_type],
1076 dr32(MacMode), status);
1077 return;
1078 }
1079
1080 de_link_down(de);
1081
1082 /* if media type locked, don't switch media */
1083 if (de->media_lock)
1084 goto set_media;
1085
1086 /* if activity detected, use that as hint for new media type */
1087 if (status & NonselPortActive) {
1088 unsigned int have_media = 1;
1089
1090 /* if AUI/BNC selected, then activity is on TP port */
1091 if (de->media_type == DE_MEDIA_AUI ||
1092 de->media_type == DE_MEDIA_BNC) {
1093 if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
1094 de->media_type = DE_MEDIA_TP_AUTO;
1095 else
1096 have_media = 0;
1097 }
1098
1099 /* TP selected. If there is only TP and BNC, then it's BNC */
1100 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
1101 de_ok_to_advertise(de, DE_MEDIA_BNC))
1102 de->media_type = DE_MEDIA_BNC;
1103
1104 /* TP selected. If there is only TP and AUI, then it's AUI */
1105 else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
1106 de_ok_to_advertise(de, DE_MEDIA_AUI))
1107 de->media_type = DE_MEDIA_AUI;
1108
1109 /* otherwise, ignore the hint */
1110 else
1111 have_media = 0;
1112
1113 if (have_media)
1114 goto set_media;
1115 }
1116
1117 /*
1118 * Absent or ambiguous activity hint, move to next advertised
1119 * media state. If de->media_type is left unchanged, this
1120 * simply resets the PHY and reloads the current media settings.
1121 */
1122 if (de->media_type == DE_MEDIA_AUI) {
1123 static const u32 next_states[] = {
1124 DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1125 };
1126 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1127 } else if (de->media_type == DE_MEDIA_BNC) {
1128 static const u32 next_states[] = {
1129 DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
1130 };
1131 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1132 } else {
1133 static const u32 next_states[] = {
1134 DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1135 };
1136 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1137 }
1138
1139set_media:
1140 spin_lock_irqsave(&de->lock, flags);
1141 de_stop_rxtx(de);
1142 spin_unlock_irqrestore(&de->lock, flags);
1143 de_set_media(de);
1144 de_start_rxtx(de);
1145
1146no_link_yet:
1147 de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
1148 add_timer(&de->media_timer);
1149
1150 netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
1151 media_name[de->media_type], status);
1152}
1153
1154static void de_media_interrupt (struct de_private *de, u32 status)
1155{
1156 if (status & LinkPass) {
1157 /* Ignore if current media is AUI or BNC and we can't use TP */
1158 if ((de->media_type == DE_MEDIA_AUI ||
1159 de->media_type == DE_MEDIA_BNC) &&
1160 (de->media_lock ||
1161 !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
1162 return;
1163 /* If current media is not TP, change it to TP */
1164 if ((de->media_type == DE_MEDIA_AUI ||
1165 de->media_type == DE_MEDIA_BNC)) {
1166 de->media_type = DE_MEDIA_TP_AUTO;
1167 de_stop_rxtx(de);
1168 de_set_media(de);
1169 de_start_rxtx(de);
1170 }
1171 de_link_up(de);
1172 mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
1173 return;
1174 }
1175
1176 BUG_ON(!(status & LinkFail));
1177 /* Mark the link as down only if current media is TP */
1178 if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
1179 de->media_type != DE_MEDIA_BNC) {
1180 de_link_down(de);
1181 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1182 }
1183}
1184
1185static int de_reset_mac (struct de_private *de)
1186{
1187 u32 status, tmp;
1188
1189 /*
1190 * Reset MAC. de4x5.c and tulip.c examined for "advice"
1191 * in this area.
1192 */
1193
1194 if (dr32(BusMode) == 0xffffffff)
1195 return -EBUSY;
1196
1197 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
1198 dw32 (BusMode, CmdReset);
1199 mdelay (1);
1200
1201 dw32 (BusMode, de_bus_mode);
1202 mdelay (1);
1203
1204 for (tmp = 0; tmp < 5; tmp++) {
1205 dr32 (BusMode);
1206 mdelay (1);
1207 }
1208
1209 mdelay (1);
1210
1211 status = dr32(MacStatus);
1212 if (status & (RxState | TxState))
1213 return -EBUSY;
1214 if (status == 0xffffffff)
1215 return -ENODEV;
1216 return 0;
1217}
1218
1219static void de_adapter_wake (struct de_private *de)
1220{
1221 u32 pmctl;
1222
1223 if (de->de21040)
1224 return;
1225
1226 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1227 if (pmctl & PM_Mask) {
1228 pmctl &= ~PM_Mask;
1229 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1230
1231 /* de4x5.c delays, so we do too */
1232 msleep(10);
1233 }
1234}
1235
1236static void de_adapter_sleep (struct de_private *de)
1237{
1238 u32 pmctl;
1239
1240 if (de->de21040)
1241 return;
1242
1243 dw32(CSR13, 0); /* Reset phy */
1244 pci_read_config_dword(de->pdev, PCIPM, &pmctl);
1245 pmctl |= PM_Sleep;
1246 pci_write_config_dword(de->pdev, PCIPM, pmctl);
1247}
1248
1249static int de_init_hw (struct de_private *de)
1250{
1251 struct net_device *dev = de->dev;
1252 u32 macmode;
1253 int rc;
1254
1255 de_adapter_wake(de);
1256
1257 macmode = dr32(MacMode) & ~MacModeClear;
1258
1259 rc = de_reset_mac(de);
1260 if (rc)
1261 return rc;
1262
1263 de_set_media(de); /* reset phy */
1264
1265 dw32(RxRingAddr, de->ring_dma);
1266 dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));
1267
1268 dw32(MacMode, RxTx | macmode);
1269
1270 dr32(RxMissed); /* self-clearing */
1271
1272 dw32(IntrMask, de_intr_mask);
1273
1274 de_set_rx_mode(dev);
1275
1276 return 0;
1277}
1278
1279static int de_refill_rx (struct de_private *de)
1280{
1281 unsigned i;
1282
1283 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1284 struct sk_buff *skb;
1285
1286 skb = dev_alloc_skb(de->rx_buf_sz);
1287 if (!skb)
1288 goto err_out;
1289
1290 skb->dev = de->dev;
1291
1292 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1293 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1294 de->rx_skb[i].skb = skb;
1295
1296 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
1297 if (i == (DE_RX_RING_SIZE - 1))
1298 de->rx_ring[i].opts2 =
1299 cpu_to_le32(RingEnd | de->rx_buf_sz);
1300 else
1301 de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
1302 de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
1303 de->rx_ring[i].addr2 = 0;
1304 }
1305
1306 return 0;
1307
1308err_out:
1309 de_clean_rings(de);
1310 return -ENOMEM;
1311}
1312
1313static int de_init_rings (struct de_private *de)
1314{
1315 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1316 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1317
1318 de->rx_tail = 0;
1319 de->tx_head = de->tx_tail = 0;
1320
1321 return de_refill_rx (de);
1322}
1323
1324static int de_alloc_rings (struct de_private *de)
1325{
1326 de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
1327 if (!de->rx_ring)
1328 return -ENOMEM;
1329 de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
1330 return de_init_rings(de);
1331}
1332
1333static void de_clean_rings (struct de_private *de)
1334{
1335 unsigned i;
1336
1337 memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
1338 de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1339 wmb();
1340 memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
1341 de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
1342 wmb();
1343
1344 for (i = 0; i < DE_RX_RING_SIZE; i++) {
1345 if (de->rx_skb[i].skb) {
1346 pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
1347 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1348 dev_kfree_skb(de->rx_skb[i].skb);
1349 }
1350 }
1351
1352 for (i = 0; i < DE_TX_RING_SIZE; i++) {
1353 struct sk_buff *skb = de->tx_skb[i].skb;
1354 if ((skb) && (skb != DE_DUMMY_SKB)) {
1355 if (skb != DE_SETUP_SKB) {
1356 de->net_stats.tx_dropped++;
1357 pci_unmap_single(de->pdev,
1358 de->tx_skb[i].mapping,
1359 skb->len, PCI_DMA_TODEVICE);
1360 dev_kfree_skb(skb);
1361 } else {
1362 pci_unmap_single(de->pdev,
1363 de->tx_skb[i].mapping,
1364 sizeof(de->setup_frame),
1365 PCI_DMA_TODEVICE);
1366 }
1367 }
1368 }
1369
1370 memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
1371 memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
1372}
1373
1374static void de_free_rings (struct de_private *de)
1375{
1376 de_clean_rings(de);
1377 pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
1378 de->rx_ring = NULL;
1379 de->tx_ring = NULL;
1380}
1381
1382static int de_open (struct net_device *dev)
1383{
1384 struct de_private *de = netdev_priv(dev);
1385 int rc;
1386
1387 netif_dbg(de, ifup, dev, "enabling interface\n");
1388
1389 de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1390
1391 rc = de_alloc_rings(de);
1392 if (rc) {
1393 netdev_err(dev, "ring allocation failure, err=%d\n", rc);
1394 return rc;
1395 }
1396
1397 dw32(IntrMask, 0);
1398
1399 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1400 if (rc) {
1401 netdev_err(dev, "IRQ %d request failure, err=%d\n",
1402 dev->irq, rc);
1403 goto err_out_free;
1404 }
1405
1406 rc = de_init_hw(de);
1407 if (rc) {
1408 netdev_err(dev, "h/w init failure, err=%d\n", rc);
1409 goto err_out_free_irq;
1410 }
1411
1412 netif_start_queue(dev);
1413 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1414
1415 return 0;
1416
1417err_out_free_irq:
1418 free_irq(dev->irq, dev);
1419err_out_free:
1420 de_free_rings(de);
1421 return rc;
1422}
1423
1424static int de_close (struct net_device *dev)
1425{
1426 struct de_private *de = netdev_priv(dev);
1427 unsigned long flags;
1428
1429 netif_dbg(de, ifdown, dev, "disabling interface\n");
1430
1431 del_timer_sync(&de->media_timer);
1432
1433 spin_lock_irqsave(&de->lock, flags);
1434 de_stop_hw(de);
1435 netif_stop_queue(dev);
1436 netif_carrier_off(dev);
1437 spin_unlock_irqrestore(&de->lock, flags);
1438
1439 free_irq(dev->irq, dev);
1440
1441 de_free_rings(de);
1442 de_adapter_sleep(de);
1443 return 0;
1444}
1445
1446static void de_tx_timeout (struct net_device *dev)
1447{
1448 struct de_private *de = netdev_priv(dev);
1449
1450 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1451 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
1452 de->rx_tail, de->tx_head, de->tx_tail);
1453
1454 del_timer_sync(&de->media_timer);
1455
1456 disable_irq(dev->irq);
1457 spin_lock_irq(&de->lock);
1458
1459 de_stop_hw(de);
1460 netif_stop_queue(dev);
1461 netif_carrier_off(dev);
1462
1463 spin_unlock_irq(&de->lock);
1464 enable_irq(dev->irq);
1465
1466 /* Update the error counts. */
1467 __de_get_stats(de);
1468
1469 synchronize_irq(dev->irq);
1470 de_clean_rings(de);
1471
1472 de_init_rings(de);
1473
1474 de_init_hw(de);
1475
1476 netif_wake_queue(dev);
1477}
1478
1479static void __de_get_regs(struct de_private *de, u8 *buf)
1480{
1481 int i;
1482 u32 *rbuf = (u32 *)buf;
1483
1484 /* read all CSRs */
1485 for (i = 0; i < DE_NUM_REGS; i++)
1486 rbuf[i] = dr32(i * 8);
1487
1488 /* handle self-clearing RxMissed counter, CSR8 */
1489 de_rx_missed(de, rbuf[8]);
1490}
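/*
 * The Tulip CSRs are longwords spaced 8 bytes apart in the register
 * window, which is why the dump above indexes dr32(i * 8).  A
 * minimal sketch of the same access open-coded (dr32()/dw32() wrap
 * 32-bit MMIO accesses relative to de->regs):
 *
 *	u32 csr5 = ioread32(de->regs + 5 * 8);	// CSR5, chip status
 */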
1491
1492static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1493{
1494 ecmd->supported = de->media_supported;
1495 ecmd->transceiver = XCVR_INTERNAL;
1496 ecmd->phy_address = 0;
1497 ecmd->advertising = de->media_advertise;
1498
1499 switch (de->media_type) {
1500 case DE_MEDIA_AUI:
1501 ecmd->port = PORT_AUI;
1502 break;
1503 case DE_MEDIA_BNC:
1504 ecmd->port = PORT_BNC;
1505 break;
1506 default:
1507 ecmd->port = PORT_TP;
1508 break;
1509 }
1510
1511 ethtool_cmd_speed_set(ecmd, 10);
1512
1513 if (dr32(MacMode) & FullDuplex)
1514 ecmd->duplex = DUPLEX_FULL;
1515 else
1516 ecmd->duplex = DUPLEX_HALF;
1517
1518 if (de->media_lock)
1519 ecmd->autoneg = AUTONEG_DISABLE;
1520 else
1521 ecmd->autoneg = AUTONEG_ENABLE;
1522
1523 /* ignore maxtxpkt, maxrxpkt for now */
1524
1525 return 0;
1526}
1527
1528static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
1529{
1530 u32 new_media;
1531 unsigned int media_lock;
1532
1533 if (ethtool_cmd_speed(ecmd) != 10)
1534 return -EINVAL;
1535 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
1536 return -EINVAL;
1537 if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
1538 return -EINVAL;
1539 if (de->de21040 && ecmd->port == PORT_BNC)
1540 return -EINVAL;
1541 if (ecmd->transceiver != XCVR_INTERNAL)
1542 return -EINVAL;
1543 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
1544 return -EINVAL;
1545 if (ecmd->advertising & ~de->media_supported)
1546 return -EINVAL;
1547 if (ecmd->autoneg == AUTONEG_ENABLE &&
1548 (!(ecmd->advertising & ADVERTISED_Autoneg)))
1549 return -EINVAL;
1550
1551 switch (ecmd->port) {
1552 case PORT_AUI:
1553 new_media = DE_MEDIA_AUI;
1554 if (!(ecmd->advertising & ADVERTISED_AUI))
1555 return -EINVAL;
1556 break;
1557 case PORT_BNC:
1558 new_media = DE_MEDIA_BNC;
1559 if (!(ecmd->advertising & ADVERTISED_BNC))
1560 return -EINVAL;
1561 break;
1562 default:
1563 if (ecmd->autoneg == AUTONEG_ENABLE)
1564 new_media = DE_MEDIA_TP_AUTO;
1565 else if (ecmd->duplex == DUPLEX_FULL)
1566 new_media = DE_MEDIA_TP_FD;
1567 else
1568 new_media = DE_MEDIA_TP;
1569 if (!(ecmd->advertising & ADVERTISED_TP))
1570 return -EINVAL;
1571 if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
1572 return -EINVAL;
1573 break;
1574 }
1575
1576 media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
1577
1578 if ((new_media == de->media_type) &&
1579 (media_lock == de->media_lock) &&
1580 (ecmd->advertising == de->media_advertise))
1581 return 0; /* nothing to change */
1582
1583 de_link_down(de);
1584 mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
1585 de_stop_rxtx(de);
1586
1587 de->media_type = new_media;
1588 de->media_lock = media_lock;
1589 de->media_advertise = ecmd->advertising;
1590 de_set_media(de);
1591 if (netif_running(de->dev))
1592 de_start_rxtx(de);
1593
1594 return 0;
1595}
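/*
 * A minimal sketch of a forced-media request that passes the checks
 * in __de_set_settings() above -- roughly what "ethtool -s ethX
 * speed 10 duplex full port tp autoneg off" would hand us
 * (illustrative values only):
 *
 *	struct ethtool_cmd ecmd = {
 *		.port		= PORT_TP,
 *		.duplex		= DUPLEX_FULL,
 *		.transceiver	= XCVR_INTERNAL,
 *		.autoneg	= AUTONEG_DISABLE,
 *		.advertising	= ADVERTISED_TP | ADVERTISED_10baseT_Full,
 *	};
 *	ethtool_cmd_speed_set(&ecmd, 10);
 *	// __de_set_settings(de, &ecmd) then selects DE_MEDIA_TP_FD
 */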
1596
1597static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
1598{
1599 struct de_private *de = netdev_priv(dev);
1600
1601 strcpy (info->driver, DRV_NAME);
1602 strcpy (info->version, DRV_VERSION);
1603 strcpy (info->bus_info, pci_name(de->pdev));
1604 info->eedump_len = DE_EEPROM_SIZE;
1605}
1606
1607static int de_get_regs_len(struct net_device *dev)
1608{
1609 return DE_REGS_SIZE;
1610}
1611
1612static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1613{
1614 struct de_private *de = netdev_priv(dev);
1615 int rc;
1616
1617 spin_lock_irq(&de->lock);
1618 rc = __de_get_settings(de, ecmd);
1619 spin_unlock_irq(&de->lock);
1620
1621 return rc;
1622}
1623
1624static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1625{
1626 struct de_private *de = netdev_priv(dev);
1627 int rc;
1628
1629 spin_lock_irq(&de->lock);
1630 rc = __de_set_settings(de, ecmd);
1631 spin_unlock_irq(&de->lock);
1632
1633 return rc;
1634}
1635
1636static u32 de_get_msglevel(struct net_device *dev)
1637{
1638 struct de_private *de = netdev_priv(dev);
1639
1640 return de->msg_enable;
1641}
1642
1643static void de_set_msglevel(struct net_device *dev, u32 msglvl)
1644{
1645 struct de_private *de = netdev_priv(dev);
1646
1647 de->msg_enable = msglvl;
1648}
1649
1650static int de_get_eeprom(struct net_device *dev,
1651 struct ethtool_eeprom *eeprom, u8 *data)
1652{
1653 struct de_private *de = netdev_priv(dev);
1654
1655 if (!de->ee_data)
1656 return -EOPNOTSUPP;
1657 if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
1658 (eeprom->len != DE_EEPROM_SIZE))
1659 return -EINVAL;
1660 memcpy(data, de->ee_data, eeprom->len);
1661
1662 return 0;
1663}
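/*
 * Userspace reaches de_get_eeprom() via ETHTOOL_GEEPROM; the routine
 * only honours a whole-image read, i.e. a request shaped like this
 * (illustrative):
 *
 *	struct ethtool_eeprom req = {
 *		.cmd	= ETHTOOL_GEEPROM,
 *		.magic	= 0,
 *		.offset	= 0,
 *		.len	= DE_EEPROM_SIZE,
 *	};
 */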
1664
1665static int de_nway_reset(struct net_device *dev)
1666{
1667 struct de_private *de = netdev_priv(dev);
1668 u32 status;
1669
1670 if (de->media_type != DE_MEDIA_TP_AUTO)
1671 return -EINVAL;
1672 if (netif_carrier_ok(de->dev))
1673 de_link_down(de);
1674
1675 status = dr32(SIAStatus);
1676 dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
1677 netif_info(de, link, dev, "link nway restart, status %x,%x\n",
1678 status, dr32(SIAStatus));
1679 return 0;
1680}
1681
1682static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1683 void *data)
1684{
1685 struct de_private *de = netdev_priv(dev);
1686
1687 regs->version = (DE_REGS_VER << 2) | de->de21040;
1688
1689 spin_lock_irq(&de->lock);
1690 __de_get_regs(de, data);
1691 spin_unlock_irq(&de->lock);
1692}
1693
1694static const struct ethtool_ops de_ethtool_ops = {
1695 .get_link = ethtool_op_get_link,
1696 .get_drvinfo = de_get_drvinfo,
1697 .get_regs_len = de_get_regs_len,
1698 .get_settings = de_get_settings,
1699 .set_settings = de_set_settings,
1700 .get_msglevel = de_get_msglevel,
1701 .set_msglevel = de_set_msglevel,
1702 .get_eeprom = de_get_eeprom,
1703 .nway_reset = de_nway_reset,
1704 .get_regs = de_get_regs,
1705};
1706
1707static void __devinit de21040_get_mac_address (struct de_private *de)
1708{
1709 unsigned i;
1710
1711 dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
1712 udelay(5);
1713
1714 for (i = 0; i < 6; i++) {
1715 int value, boguscnt = 100000;
1716 do {
1717 value = dr32(ROMCmd);
1718 rmb();
1719 } while (value < 0 && --boguscnt > 0);
1720 de->dev->dev_addr[i] = value;
1721 udelay(1);
1722 if (boguscnt <= 0)
1723 pr_warn("timeout reading 21040 MAC address byte %u\n",
1724 i);
1725 }
1726}
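/*
 * The 21040 hands out its station address one byte per CSR9 read and
 * keeps bit 31 set while the byte is not yet valid -- hence the
 * signed "value < 0" poll above.  The same poll with an explicit
 * mask (a sketch, assuming bit 31 is the only busy indication):
 *
 *	u32 v;
 *	int tries = 100000;
 *	do
 *		v = dr32(ROMCmd);
 *	while ((v & 0x80000000) && --tries > 0);
 *	// low byte of v now carries the address octet
 */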
1727
1728static void __devinit de21040_get_media_info(struct de_private *de)
1729{
1730 unsigned int i;
1731
1732 de->media_type = DE_MEDIA_TP;
1733 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
1734 SUPPORTED_10baseT_Half | SUPPORTED_AUI;
1735 de->media_advertise = de->media_supported;
1736
1737 for (i = 0; i < DE_MAX_MEDIA; i++) {
1738 switch (i) {
1739 case DE_MEDIA_AUI:
1740 case DE_MEDIA_TP:
1741 case DE_MEDIA_TP_FD:
1742 de->media[i].type = i;
1743 de->media[i].csr13 = t21040_csr13[i];
1744 de->media[i].csr14 = t21040_csr14[i];
1745 de->media[i].csr15 = t21040_csr15[i];
1746 break;
1747 default:
1748 de->media[i].type = DE_MEDIA_INVALID;
1749 break;
1750 }
1751 }
1752}
1753
1754/* Note: this routine returns extra data bits for size detection. */
1755static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
1756{
1757 int i;
1758 unsigned retval = 0;
1759 void __iomem *ee_addr = regs + ROMCmd;
1760 int read_cmd = location | (EE_READ_CMD << addr_len);
1761
1762 writel(EE_ENB & ~EE_CS, ee_addr);
1763 writel(EE_ENB, ee_addr);
1764
1765 /* Shift the read command bits out. */
1766 for (i = 4 + addr_len; i >= 0; i--) {
1767 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1768 writel(EE_ENB | dataval, ee_addr);
1769 readl(ee_addr);
1770 writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1771 readl(ee_addr);
1772 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1773 }
1774 writel(EE_ENB, ee_addr);
1775 readl(ee_addr);
1776
1777 for (i = 16; i > 0; i--) {
1778 writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
1779 readl(ee_addr);
1780 retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
1781 writel(EE_ENB, ee_addr);
1782 readl(ee_addr);
1783 }
1784
1785 /* Terminate the EEPROM access. */
1786 writel(EE_ENB & ~EE_CS, ee_addr);
1787 return retval;
1788}
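/*
 * Those extra bits let the caller sense the EEPROM's address width:
 * read a high location assuming 8 address bits and test a bit above
 * the 16 data positions, exactly as de21041_get_srom_info() does
 * below:
 *
 *	ee_addr_size = tulip_read_eeprom(regs, 0xff, 8) & 0x40000 ? 8 : 6;
 */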
1789
1790static void __devinit de21041_get_srom_info (struct de_private *de)
1791{
1792 unsigned i, sa_offset = 0, ofs;
1793 u8 ee_data[DE_EEPROM_SIZE + 6] = {};
1794 unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
1795 struct de_srom_info_leaf *il;
1796 void *bufp;
1797
1798 /* download entire eeprom */
1799 for (i = 0; i < DE_EEPROM_WORDS; i++)
1800 ((__le16 *)ee_data)[i] =
1801 cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));
1802
1803 /* DEC now has a specification but early board makers
1804 just put the address in the first EEPROM locations. */
1805	/* This does memcmp(ee_data, ee_data+16, 8) */
1806
1807#ifndef CONFIG_MIPS_COBALT
1808
1809 for (i = 0; i < 8; i ++)
1810 if (ee_data[i] != ee_data[16+i])
1811 sa_offset = 20;
1812
1813#endif
1814
1815 /* store MAC address */
1816 for (i = 0; i < 6; i ++)
1817 de->dev->dev_addr[i] = ee_data[i + sa_offset];
1818
1819 /* get offset of controller 0 info leaf. ignore 2nd byte. */
1820 ofs = ee_data[SROMC0InfoLeaf];
1821 if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
1822 goto bad_srom;
1823
1824 /* get pointer to info leaf */
1825 il = (struct de_srom_info_leaf *) &ee_data[ofs];
1826
1827 /* paranoia checks */
1828 if (il->n_blocks == 0)
1829 goto bad_srom;
1830 if ((sizeof(ee_data) - ofs) <
1831 (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
1832 goto bad_srom;
1833
1834 /* get default media type */
1835 switch (get_unaligned(&il->default_media)) {
1836 case 0x0001: de->media_type = DE_MEDIA_BNC; break;
1837 case 0x0002: de->media_type = DE_MEDIA_AUI; break;
1838 case 0x0204: de->media_type = DE_MEDIA_TP_FD; break;
1839 default: de->media_type = DE_MEDIA_TP_AUTO; break;
1840 }
1841
1842 if (netif_msg_probe(de))
1843 pr_info("de%d: SROM leaf offset %u, default media %s\n",
1844 de->board_idx, ofs, media_name[de->media_type]);
1845
1846 /* init SIA register values to defaults */
1847 for (i = 0; i < DE_MAX_MEDIA; i++) {
1848 de->media[i].type = DE_MEDIA_INVALID;
1849 de->media[i].csr13 = 0xffff;
1850 de->media[i].csr14 = 0xffff;
1851 de->media[i].csr15 = 0xffff;
1852 }
1853
1854	/* parse media blocks to see which media types are supported,
1855 * and if any custom CSR values are provided
1856 */
1857 bufp = ((void *)il) + sizeof(*il);
1858 for (i = 0; i < il->n_blocks; i++) {
1859 struct de_srom_media_block *ib = bufp;
1860 unsigned idx;
1861
1862 /* index based on media type in media block */
1863 switch(ib->opts & MediaBlockMask) {
1864 case 0: /* 10baseT */
1865 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
1866 | SUPPORTED_Autoneg;
1867 idx = DE_MEDIA_TP;
1868 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1869 break;
1870 case 1: /* BNC */
1871 de->media_supported |= SUPPORTED_BNC;
1872 idx = DE_MEDIA_BNC;
1873 break;
1874 case 2: /* AUI */
1875 de->media_supported |= SUPPORTED_AUI;
1876 idx = DE_MEDIA_AUI;
1877 break;
1878 case 4: /* 10baseT-FD */
1879 de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
1880 | SUPPORTED_Autoneg;
1881 idx = DE_MEDIA_TP_FD;
1882 de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
1883 break;
1884 default:
1885 goto bad_srom;
1886 }
1887
1888 de->media[idx].type = idx;
1889
1890 if (netif_msg_probe(de))
1891 pr_info("de%d: media block #%u: %s",
1892 de->board_idx, i,
1893 media_name[de->media[idx].type]);
1894
1895 bufp += sizeof (ib->opts);
1896
1897 if (ib->opts & MediaCustomCSRs) {
1898 de->media[idx].csr13 = get_unaligned(&ib->csr13);
1899 de->media[idx].csr14 = get_unaligned(&ib->csr14);
1900 de->media[idx].csr15 = get_unaligned(&ib->csr15);
1901 bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
1902 sizeof(ib->csr15);
1903
1904 if (netif_msg_probe(de))
1905 pr_cont(" (%x,%x,%x)\n",
1906 de->media[idx].csr13,
1907 de->media[idx].csr14,
1908 de->media[idx].csr15);
1909
1910 } else {
1911 if (netif_msg_probe(de))
1912 pr_cont("\n");
1913 }
1914
1915 if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
1916 break;
1917 }
1918
1919 de->media_advertise = de->media_supported;
1920
1921fill_defaults:
1922	/* fill in defaults, for cases where custom CSRs are not used */
1923 for (i = 0; i < DE_MAX_MEDIA; i++) {
1924 if (de->media[i].csr13 == 0xffff)
1925 de->media[i].csr13 = t21041_csr13[i];
1926 if (de->media[i].csr14 == 0xffff) {
1927 /* autonegotiation is broken at least on some chip
1928 revisions - rev. 0x21 works, 0x11 does not */
1929 if (de->pdev->revision < 0x20)
1930 de->media[i].csr14 = t21041_csr14_brk[i];
1931 else
1932 de->media[i].csr14 = t21041_csr14[i];
1933 }
1934 if (de->media[i].csr15 == 0xffff)
1935 de->media[i].csr15 = t21041_csr15[i];
1936 }
1937
1938 de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);
1939
1940 return;
1941
1942bad_srom:
1943 /* for error cases, it's ok to assume we support all these */
1944 for (i = 0; i < DE_MAX_MEDIA; i++)
1945 de->media[i].type = i;
1946 de->media_supported =
1947 SUPPORTED_10baseT_Half |
1948 SUPPORTED_10baseT_Full |
1949 SUPPORTED_Autoneg |
1950 SUPPORTED_TP |
1951 SUPPORTED_AUI |
1952 SUPPORTED_BNC;
1953 goto fill_defaults;
1954}
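/*
 * Rough shape of the 21041 SROM region walked above (field names
 * follow the de_srom_info_leaf/de_srom_media_block structures used
 * by this driver; offsets are illustrative):
 *
 *	offset 0 or 20           6-byte station address (sa_offset)
 *	ee_data[SROMC0InfoLeaf]  byte offset of controller 0 info leaf
 *	info leaf                default_media, n_blocks, then
 *	                         n_blocks packed media blocks: an opts
 *	                         byte plus csr13/csr14/csr15 when the
 *	                         MediaCustomCSRs bit is set in opts
 */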
1955
1956static const struct net_device_ops de_netdev_ops = {
1957 .ndo_open = de_open,
1958 .ndo_stop = de_close,
1959 .ndo_set_rx_mode = de_set_rx_mode,
1960 .ndo_start_xmit = de_start_xmit,
1961 .ndo_get_stats = de_get_stats,
1962 .ndo_tx_timeout = de_tx_timeout,
1963 .ndo_change_mtu = eth_change_mtu,
1964 .ndo_set_mac_address = eth_mac_addr,
1965 .ndo_validate_addr = eth_validate_addr,
1966};
1967
1968static int __devinit de_init_one (struct pci_dev *pdev,
1969 const struct pci_device_id *ent)
1970{
1971 struct net_device *dev;
1972 struct de_private *de;
1973 int rc;
1974 void __iomem *regs;
1975 unsigned long pciaddr;
1976 static int board_idx = -1;
1977
1978 board_idx++;
1979
1980#ifndef MODULE
1981 if (board_idx == 0)
1982 pr_info("%s\n", version);
1983#endif
1984
1985 /* allocate a new ethernet device structure, and fill in defaults */
1986 dev = alloc_etherdev(sizeof(struct de_private));
1987 if (!dev)
1988 return -ENOMEM;
1989
1990 dev->netdev_ops = &de_netdev_ops;
1991 SET_NETDEV_DEV(dev, &pdev->dev);
1992 dev->ethtool_ops = &de_ethtool_ops;
1993 dev->watchdog_timeo = TX_TIMEOUT;
1994
1995 de = netdev_priv(dev);
1996 de->de21040 = ent->driver_data == 0 ? 1 : 0;
1997 de->pdev = pdev;
1998 de->dev = dev;
1999 de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
2000 de->board_idx = board_idx;
2001 spin_lock_init (&de->lock);
2002 init_timer(&de->media_timer);
2003 if (de->de21040)
2004 de->media_timer.function = de21040_media_timer;
2005 else
2006 de->media_timer.function = de21041_media_timer;
2007 de->media_timer.data = (unsigned long) de;
2008
2009 netif_carrier_off(dev);
2010
2011 /* wake up device, assign resources */
2012 rc = pci_enable_device(pdev);
2013 if (rc)
2014 goto err_out_free;
2015
2016 /* reserve PCI resources to ensure driver atomicity */
2017 rc = pci_request_regions(pdev, DRV_NAME);
2018 if (rc)
2019 goto err_out_disable;
2020
2021 /* check for invalid IRQ value */
2022 if (pdev->irq < 2) {
2023 rc = -EIO;
2024 pr_err("invalid irq (%d) for pci dev %s\n",
2025 pdev->irq, pci_name(pdev));
2026 goto err_out_res;
2027 }
2028
2029 dev->irq = pdev->irq;
2030
2031 /* obtain and check validity of PCI I/O address */
2032 pciaddr = pci_resource_start(pdev, 1);
2033 if (!pciaddr) {
2034 rc = -EIO;
2035 pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
2036 goto err_out_res;
2037 }
2038 if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
2039 rc = -EIO;
2040 pr_err("MMIO resource (%llx) too small on pci dev %s\n",
2041 (unsigned long long)pci_resource_len(pdev, 1),
2042 pci_name(pdev));
2043 goto err_out_res;
2044 }
2045
2046 /* remap CSR registers */
2047 regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
2048 if (!regs) {
2049 rc = -EIO;
2050 pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
2051 (unsigned long long)pci_resource_len(pdev, 1),
2052 pciaddr, pci_name(pdev));
2053 goto err_out_res;
2054 }
2055 dev->base_addr = (unsigned long) regs;
2056 de->regs = regs;
2057
2058 de_adapter_wake(de);
2059
2060 /* make sure hardware is not running */
2061 rc = de_reset_mac(de);
2062 if (rc) {
2063 pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
2064 goto err_out_iomap;
2065 }
2066
2067 /* get MAC address, initialize default media type and
2068 * get list of supported media
2069 */
2070 if (de->de21040) {
2071 de21040_get_mac_address(de);
2072 de21040_get_media_info(de);
2073 } else {
2074 de21041_get_srom_info(de);
2075 }
2076
2077 /* register new network interface with kernel */
2078 rc = register_netdev(dev);
2079 if (rc)
2080 goto err_out_iomap;
2081
2082 /* print info about board and interface just registered */
2083 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
2084 de->de21040 ? "21040" : "21041",
2085 dev->base_addr,
2086 dev->dev_addr,
2087 dev->irq);
2088
2089 pci_set_drvdata(pdev, dev);
2090
2091 /* enable busmastering */
2092 pci_set_master(pdev);
2093
2094 /* put adapter to sleep */
2095 de_adapter_sleep(de);
2096
2097 return 0;
2098
2099err_out_iomap:
2100 kfree(de->ee_data);
2101 iounmap(regs);
2102err_out_res:
2103 pci_release_regions(pdev);
2104err_out_disable:
2105 pci_disable_device(pdev);
2106err_out_free:
2107 free_netdev(dev);
2108 return rc;
2109}
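/*
 * Note that the unwind ladder above releases resources in strict
 * reverse order of acquisition (iomap -> regions -> device ->
 * netdev); each error label undoes exactly the steps that had
 * succeeded before the failure.
 */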
2110
2111static void __devexit de_remove_one (struct pci_dev *pdev)
2112{
2113 struct net_device *dev = pci_get_drvdata(pdev);
2114 struct de_private *de = netdev_priv(dev);
2115
2116 BUG_ON(!dev);
2117 unregister_netdev(dev);
2118 kfree(de->ee_data);
2119 iounmap(de->regs);
2120 pci_release_regions(pdev);
2121 pci_disable_device(pdev);
2122 pci_set_drvdata(pdev, NULL);
2123 free_netdev(dev);
2124}
2125
2126#ifdef CONFIG_PM
2127
2128static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2129{
2130 struct net_device *dev = pci_get_drvdata (pdev);
2131 struct de_private *de = netdev_priv(dev);
2132
2133 rtnl_lock();
2134 if (netif_running (dev)) {
2135 del_timer_sync(&de->media_timer);
2136
2137 disable_irq(dev->irq);
2138 spin_lock_irq(&de->lock);
2139
2140 de_stop_hw(de);
2141 netif_stop_queue(dev);
2142 netif_device_detach(dev);
2143 netif_carrier_off(dev);
2144
2145 spin_unlock_irq(&de->lock);
2146 enable_irq(dev->irq);
2147
2148 /* Update the error counts. */
2149 __de_get_stats(de);
2150
2151 synchronize_irq(dev->irq);
2152 de_clean_rings(de);
2153
2154 de_adapter_sleep(de);
2155 pci_disable_device(pdev);
2156 } else {
2157 netif_device_detach(dev);
2158 }
2159 rtnl_unlock();
2160 return 0;
2161}
2162
2163static int de_resume (struct pci_dev *pdev)
2164{
2165 struct net_device *dev = pci_get_drvdata (pdev);
2166 struct de_private *de = netdev_priv(dev);
2167 int retval = 0;
2168
2169 rtnl_lock();
2170 if (netif_device_present(dev))
2171 goto out;
2172 if (!netif_running(dev))
2173 goto out_attach;
2174 if ((retval = pci_enable_device(pdev))) {
2175 netdev_err(dev, "pci_enable_device failed in resume\n");
2176 goto out;
2177 }
2178 pci_set_master(pdev);
2179 de_init_rings(de);
2180 de_init_hw(de);
2181out_attach:
2182 netif_device_attach(dev);
2183out:
2184 rtnl_unlock();
2185	return retval;
2186}
2187
2188#endif /* CONFIG_PM */
2189
2190static struct pci_driver de_driver = {
2191 .name = DRV_NAME,
2192 .id_table = de_pci_tbl,
2193 .probe = de_init_one,
2194 .remove = __devexit_p(de_remove_one),
2195#ifdef CONFIG_PM
2196 .suspend = de_suspend,
2197 .resume = de_resume,
2198#endif
2199};
2200
2201static int __init de_init (void)
2202{
2203#ifdef MODULE
2204 pr_info("%s\n", version);
2205#endif
2206 return pci_register_driver(&de_driver);
2207}
2208
2209static void __exit de_exit (void)
2210{
2211 pci_unregister_driver (&de_driver);
2212}
2213
2214module_init(de_init);
2215module_exit(de_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
new file mode 100644
index 000000000000..871bcaa7068d
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -0,0 +1,5599 @@
1/* de4x5.c: A DIGITAL DC21x4x DECchip and DE425/DE434/DE435/DE450/DE500
2 ethernet driver for Linux.
3
4 Copyright 1994, 1995 Digital Equipment Corporation.
5
6 Testing resources for this driver have been made available
7 in part by NASA Ames Research Center (mjacob@nas.nasa.gov).
8
9 The author may be reached at davies@maniac.ultranet.com.
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published by the
13 Free Software Foundation; either version 2 of the License, or (at your
14 option) any later version.
15
16 THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
17 WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
18 MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
19 NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
22 USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
23 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 You should have received a copy of the GNU General Public License along
28 with this program; if not, write to the Free Software Foundation, Inc.,
29 675 Mass Ave, Cambridge, MA 02139, USA.
30
31 Originally, this driver was written for the Digital Equipment
32 Corporation series of EtherWORKS ethernet cards:
33
34 DE425 TP/COAX EISA
35 DE434 TP PCI
36 DE435 TP/COAX/AUI PCI
37 DE450 TP/COAX/AUI PCI
38 DE500 10/100 PCI Fasternet
39
40 but it will now attempt to support all cards which conform to the
41 Digital Semiconductor SROM Specification. The driver currently
42 recognises the following chips:
43
44 DC21040 (no SROM)
45 DC21041[A]
46 DC21140[A]
47 DC21142
48 DC21143
49
50 So far the driver is known to work with the following cards:
51
52 KINGSTON
53 Linksys
54 ZNYX342
55 SMC8432
56 SMC9332 (w/new SROM)
57 ZNYX31[45]
58 ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
59
60 The driver has been tested on a relatively busy network using the DE425,
61 DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
62 16M of data to a DECstation 5000/200 as follows:
63
64 TCP UDP
65 TX RX TX RX
66 DE425 1030k 997k 1170k 1128k
67 DE434 1063k 995k 1170k 1125k
68 DE435 1063k 995k 1170k 1125k
69 DE500 1063k 998k 1170k 1125k in 10Mb/s mode
70
71 All values are typical (in kBytes/sec) from a sample of 4 for each
72 measurement. Their error is +/-20k on a quiet (private) network and also
73 depend on what load the CPU has.
74
75 =========================================================================
76 This driver has been written substantially from scratch, although its
77 inheritance of style and stack interface from 'ewrk3.c' and in turn from
78 Donald Becker's 'lance.c' should be obvious. With the module autoload of
79 every usable DECchip board, I pinched Donald's 'next_module' field to
80 link my modules together.
81
82 Up to 15 EISA cards can be supported under this driver, limited primarily
83 by the available IRQ lines. I have checked different configurations of
84 multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
85 problem yet (provided you have at least depca.c v0.38) ...
86
87 PCI support has been added to allow the driver to work with the DE434,
88 DE435, DE450 and DE500 cards. The I/O accesses are a bit of a kludge due
89 to the differences in the EISA and PCI CSR address offsets from the base
90 address.
91
92 The ability to load this driver as a loadable module has been included
93 and used extensively during the driver development (to save those long
94 reboot sequences). Loadable module support under PCI and EISA has been
95 achieved by letting the driver autoprobe as if it were compiled into the
96 kernel. Do make sure you're not sharing interrupts with anything that
97 cannot accommodate interrupt sharing!
98
99 To utilise this ability, you have to do 8 things:
100
101 0) have a copy of the loadable modules code installed on your system.
102 1) copy de4x5.c from the /linux/drivers/net directory to your favourite
103 temporary directory.
104 2) for fixed autoprobes (not recommended), edit the source code near
105 line 5594 to reflect the I/O address you're using, or assign these when
106 loading by:
107
108 insmod de4x5 io=0xghh where g = bus number
109 hh = device number
110
111 NB: autoprobing for modules is now supported by default. You may just
112 use:
113
114 insmod de4x5
115
116 to load all available boards. For a specific board, still use
117 the 'io=?' above.
118 3) compile de4x5.c, but include -DMODULE in the command line to ensure
119 that the correct bits are compiled (see end of source code).
120      4) if you want to add a new card, go to 5. Otherwise, recompile a
121 kernel with the de4x5 configuration turned off and reboot.
122 5) insmod de4x5 [io=0xghh]
123 6) run the net startup bits for your new eth?? interface(s) manually
124 (usually /etc/rc.inet[12] at boot time).
125 7) enjoy!
126
127 To unload a module, turn off the associated interface(s)
128 'ifconfig eth?? down' then 'rmmod de4x5'.
129
130    Automedia detection is included so that in principle you can disconnect
131 from, e.g. TP, reconnect to BNC and things will still work (after a
132 pause whilst the driver figures out where its media went). My tests
133 using ping showed that it appears to work....
134
135 By default, the driver will now autodetect any DECchip based card.
136 Should you have a need to restrict the driver to DIGITAL only cards, you
137 can compile with a DEC_ONLY define, or if loading as a module, use the
138 'dec_only=1' parameter.
139
140 I've changed the timing routines to use the kernel timer and scheduling
141 functions so that the hangs and other assorted problems that occurred
142 while autosensing the media should be gone. A bonus for the DC21040
143 auto media sense algorithm is that it can now use one that is more in
144 line with the rest (the DC21040 chip doesn't have a hardware timer).
145 The downside is the 1 'jiffies' (10ms) resolution.
146
147 IEEE 802.3u MII interface code has been added in anticipation that some
148 products may use it in the future.
149
150 The SMC9332 card has a non-compliant SROM which needs fixing - I have
151 patched this driver to detect it because the SROM format used complies
152 to a previous DEC-STD format.
153
154 I have removed the buffer copies needed for receive on Intels. I cannot
155 remove them for Alphas since the Tulip hardware only does longword
156 aligned DMA transfers and the Alphas get alignment traps with non
157 longword aligned data copies (which makes them really slow). No comment.
158
159 I have added SROM decoding routines to make this driver work with any
160 card that supports the Digital Semiconductor SROM spec. This will help
161 all cards running the dc2114x series chips in particular. Cards using
162 the dc2104x chips should run correctly with the basic driver. I'm in
163 debt to <mjacob@feral.com> for the testing and feedback that helped get
164 this feature working. So far we have tested KINGSTON, SMC8432, SMC9332
165 (with the latest SROM complying with the SROM spec V3: their first was
166    broken), ZNYX342 and LinkSys. ZNYX314 (dual 21041 MAC) and ZNYX315
167 (quad 21041 MAC) cards also appear to work despite their incorrectly
168 wired IRQs.
169
170 I have added a temporary fix for interrupt problems when some SCSI cards
171 share the same interrupt as the DECchip based cards. The problem occurs
172 because the SCSI card wants to grab the interrupt as a fast interrupt
173 (runs the service routine with interrupts turned off) vs. this card
174 which really needs to run the service routine with interrupts turned on.
175 This driver will now add the interrupt service routine as a fast
176 interrupt if it is bounced from the slow interrupt. THIS IS NOT A
177 RECOMMENDED WAY TO RUN THE DRIVER and has been done for a limited time
178 until people sort out their compatibility issues and the kernel
179 interrupt service code is fixed. YOU SHOULD SEPARATE OUT THE FAST
180 INTERRUPT CARDS FROM THE SLOW INTERRUPT CARDS to ensure that they do not
181 run on the same interrupt. PCMCIA/CardBus is another can of worms...
182
183 Finally, I think I have really fixed the module loading problem with
184 more than one DECchip based card. As a side effect, I don't mess with
185 the device structure any more which means that if more than 1 card in
186 2.0.x is installed (4 in 2.1.x), the user will have to edit
187 linux/drivers/net/Space.c to make room for them. Hence, module loading
188 is the preferred way to use this driver, since it doesn't have this
189 limitation.
190
191 Where SROM media detection is used and full duplex is specified in the
192 SROM, the feature is ignored unless lp->params.fdx is set at compile
193 time OR during a module load (insmod de4x5 args='eth??:fdx' [see
194 below]). This is because there is no way to automatically detect full
195 duplex links except through autonegotiation. When I include the
196 autonegotiation feature in the SROM autoconf code, this detection will
197 occur automatically for that case.
198
199 Command line arguments are now allowed, similar to passing arguments
200 through LILO. This will allow a per adapter board set up of full duplex
201 and media. The only lexical constraints are: the board name (dev->name)
202 appears in the list before its parameters. The list of parameters ends
203 either at the end of the parameter list or with another board name. The
204 following parameters are allowed:
205
206 fdx for full duplex
207 autosense to set the media/speed; with the following
208 sub-parameters:
209 TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
210
211 Case sensitivity is important for the sub-parameters. They *must* be
212 upper case. Examples:
213
214 insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
215
216 For a compiled in driver, at or above line 548, place e.g.
217 #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
218
219 Yes, I know full duplex isn't permissible on BNC or AUI; they're just
220 examples. By default, full duplex is turned off and AUTO is the default
221 autosense setting. In reality, I expect only the full duplex option to
222 be used. Note the use of single quotes in the two examples above and the
223 lack of commas to separate items. ALSO, you must get the requested media
224 correct in relation to what the adapter SROM says it has. There's no way
225 to determine this in advance other than by trial and error and common
226 sense, e.g. call a BNC connectored port 'BNC', not '10Mb'.
227
228 Changed the bus probing. EISA used to be done first, followed by PCI.
229 Most people probably don't even know what a de425 is today and the EISA
230 probe has messed up some SCSI cards in the past, so now PCI is always
231 probed first followed by EISA if a) the architecture allows EISA and
232 either b) there have been no PCI cards detected or c) an EISA probe is
233 forced by the user. To force a probe include "force_eisa" in your
234 insmod "args" line; for built-in kernels either change the driver to do
235 this automatically or include #define DE4X5_FORCE_EISA on or before
236 line 1040 in the driver.
237
238 TO DO:
239 ------
240
241 Revision History
242 ----------------
243
244 Version Date Description
245
246 0.1 17-Nov-94 Initial writing. ALPHA code release.
247 0.2 13-Jan-95 Added PCI support for DE435's.
248 0.21 19-Jan-95 Added auto media detection.
249 0.22 10-Feb-95 Fix interrupt handler call <chris@cosy.sbg.ac.at>.
250 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
251 Add request/release_region code.
252 Add loadable modules support for PCI.
253 Clean up loadable modules support.
254 0.23 28-Feb-95 Added DC21041 and DC21140 support.
255 Fix missed frame counter value and initialisation.
256 Fixed EISA probe.
257 0.24 11-Apr-95 Change delay routine to use <linux/udelay>.
258 Change TX_BUFFS_AVAIL macro.
259 Change media autodetection to allow manual setting.
260 Completed DE500 (DC21140) support.
261 0.241 18-Apr-95 Interim release without DE500 Autosense Algorithm.
262 0.242 10-May-95 Minor changes.
263 0.30 12-Jun-95 Timer fix for DC21140.
264 Portability changes.
265 Add ALPHA changes from <jestabro@ant.tay1.dec.com>.
266 Add DE500 semi automatic autosense.
267 Add Link Fail interrupt TP failure detection.
268 Add timer based link change detection.
269 Plugged a memory leak in de4x5_queue_pkt().
270 0.31 13-Jun-95 Fixed PCI stuff for 1.3.1.
271 0.32 26-Jun-95 Added verify_area() calls in de4x5_ioctl() from a
272 suggestion by <heiko@colossus.escape.de>.
273 0.33 8-Aug-95 Add shared interrupt support (not released yet).
274 0.331 21-Aug-95 Fix de4x5_open() with fast CPUs.
275 Fix de4x5_interrupt().
276 Fix dc21140_autoconf() mess.
277 No shared interrupt support.
278 0.332 11-Sep-95 Added MII management interface routines.
279 0.40 5-Mar-96 Fix setup frame timeout <maartenb@hpkuipc.cern.ch>.
280 Add kernel timer code (h/w is too flaky).
281 Add MII based PHY autosense.
282 Add new multicasting code.
283 Add new autosense algorithms for media/mode
284 selection using kernel scheduling/timing.
285 Re-formatted.
286 Made changes suggested by <jeff@router.patch.net>:
287 Change driver to detect all DECchip based cards
288 with DEC_ONLY restriction a special case.
289 Changed driver to autoprobe as a module. No irq
290 checking is done now - assume BIOS is good!
291 Added SMC9332 detection <manabe@Roy.dsl.tutics.ac.jp>
292 0.41 21-Mar-96 Don't check for get_hw_addr checksum unless DEC card
293 only <niles@axp745gsfc.nasa.gov>
294 Fix for multiple PCI cards reported by <jos@xos.nl>
295 Duh, put the IRQF_SHARED flag into request_interrupt().
296 Fix SMC ethernet address in enet_det[].
297 Print chip name instead of "UNKNOWN" during boot.
298 0.42 26-Apr-96 Fix MII write TA bit error.
299 Fix bug in dc21040 and dc21041 autosense code.
300 Remove buffer copies on receive for Intels.
301 Change sk_buff handling during media disconnects to
302 eliminate DUP packets.
303 Add dynamic TX thresholding.
304 Change all chips to use perfect multicast filtering.
305 Fix alloc_device() bug <jari@markkus2.fimr.fi>
306 0.43 21-Jun-96 Fix unconnected media TX retry bug.
307 Add Accton to the list of broken cards.
308 Fix TX under-run bug for non DC21140 chips.
309 Fix boot command probe bug in alloc_device() as
310 reported by <koen.gadeyne@barco.com> and
311 <orava@nether.tky.hut.fi>.
312 Add cache locks to prevent a race condition as
313 reported by <csd@microplex.com> and
314 <baba@beckman.uiuc.edu>.
315 Upgraded alloc_device() code.
316 0.431 28-Jun-96 Fix potential bug in queue_pkt() from discussion
317 with <csd@microplex.com>
318 0.44 13-Aug-96 Fix RX overflow bug in 2114[023] chips.
319 Fix EISA probe bugs reported by <os2@kpi.kharkov.ua>
320 and <michael@compurex.com>.
321 0.441 9-Sep-96 Change dc21041_autoconf() to probe quiet BNC media
322 with a loopback packet.
323 0.442 9-Sep-96 Include AUI in dc21041 media printout. Bug reported
324 by <bhat@mundook.cs.mu.OZ.AU>
325 0.45 8-Dec-96 Include endian functions for PPC use, from work
326 by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
327 0.451 28-Dec-96 Added fix to allow autoprobe for modules after
328 suggestion from <mjacob@feral.com>.
329 0.5 30-Jan-97 Added SROM decoding functions.
330 Updated debug flags.
331 Fix sleep/wakeup calls for PCI cards, bug reported
332 by <cross@gweep.lkg.dec.com>.
333 Added multi-MAC, one SROM feature from discussion
334 with <mjacob@feral.com>.
335 Added full module autoprobe capability.
336 Added attempt to use an SMC9332 with broken SROM.
337 Added fix for ZYNX multi-mac cards that didn't
338 get their IRQs wired correctly.
339 0.51 13-Feb-97 Added endian fixes for the SROM accesses from
340 <paubert@iram.es>
341 Fix init_connection() to remove extra device reset.
342 Fix MAC/PHY reset ordering in dc21140m_autoconf().
343 Fix initialisation problem with lp->timeout in
344 typeX_infoblock() from <paubert@iram.es>.
345 Fix MII PHY reset problem from work done by
346 <paubert@iram.es>.
347 0.52 26-Apr-97 Some changes may not credit the right people -
348 a disk crash meant I lost some mail.
349 Change RX interrupt routine to drop rather than
350 defer packets to avoid hang reported by
351 <g.thomas@opengroup.org>.
352 Fix srom_exec() to return for COMPACT and type 1
353 infoblocks.
354 Added DC21142 and DC21143 functions.
355 Added byte counters from <phil@tazenda.demon.co.uk>
356 Added IRQF_DISABLED temporary fix from
357 <mjacob@feral.com>.
358 0.53 12-Nov-97 Fix the *_probe() to include 'eth??' name during
359 module load: bug reported by
360 <Piete.Brooks@cl.cam.ac.uk>
361 Fix multi-MAC, one SROM, to work with 2114x chips:
362 bug reported by <cmetz@inner.net>.
363 Make above search independent of BIOS device scan
364 direction.
365 Completed DC2114[23] autosense functions.
366 0.531 21-Dec-97 Fix DE500-XA 100Mb/s bug reported by
367			<robin@intercore.com>
368 Fix type1_infoblock() bug introduced in 0.53, from
369 problem reports by
370 <parmee@postecss.ncrfran.france.ncr.com> and
371 <jo@ice.dillingen.baynet.de>.
372 Added argument list to set up each board from either
373 a module's command line or a compiled in #define.
374 Added generic MII PHY functionality to deal with
375 newer PHY chips.
376 Fix the mess in 2.1.67.
377 0.532 5-Jan-98 Fix bug in mii_get_phy() reported by
378 <redhat@cococo.net>.
379 Fix bug in pci_probe() for 64 bit systems reported
380 by <belliott@accessone.com>.
381 0.533 9-Jan-98 Fix more 64 bit bugs reported by <jal@cs.brown.edu>.
382 0.534 24-Jan-98 Fix last (?) endian bug from <geert@linux-m68k.org>
383 0.535 21-Feb-98 Fix Ethernet Address PROM reset bug for DC21040.
384 0.536 21-Mar-98 Change pci_probe() to use the pci_dev structure.
385 **Incompatible with 2.0.x from here.**
386 0.540 5-Jul-98 Atomicize assertion of dev->interrupt for SMP
387 from <lma@varesearch.com>
388 Add TP, AUI and BNC cases to 21140m_autoconf() for
389 case where a 21140 under SROM control uses, e.g. AUI
390 from problem report by <delchini@lpnp09.in2p3.fr>
391 Add MII parallel detection to 2114x_autoconf() for
392 case where no autonegotiation partner exists from
393 problem report by <mlapsley@ndirect.co.uk>.
394 Add ability to force connection type directly even
395 when using SROM control from problem report by
396 <earl@exis.net>.
397 Updated the PCI interface to conform with the latest
398 version. I hope nothing is broken...
399 Add TX done interrupt modification from suggestion
400 by <Austin.Donnelly@cl.cam.ac.uk>.
401 Fix is_anc_capable() bug reported by
402 <Austin.Donnelly@cl.cam.ac.uk>.
403 Fix type[13]_infoblock() bug: during MII search, PHY
404 lp->rst not run because lp->ibn not initialised -
405 from report & fix by <paubert@iram.es>.
406 Fix probe bug with EISA & PCI cards present from
407 report by <eirik@netcom.com>.
408 0.541 24-Aug-98 Fix compiler problems associated with i386-string
409 ops from multiple bug reports and temporary fix
410 from <paubert@iram.es>.
411 Fix pci_probe() to correctly emulate the old
412 pcibios_find_class() function.
413			Add an_exception() for old ZNYX346 and fix compile
414 warning on PPC & SPARC, from <ecd@skynet.be>.
415 Fix lastPCI to correctly work with compiled in
416 kernels and modules from bug report by
417 <Zlatko.Calusic@CARNet.hr> et al.
418 0.542 15-Sep-98 Fix dc2114x_autoconf() to stop multiple messages
419 when media is unconnected.
420 Change dev->interrupt to lp->interrupt to ensure
421 alignment for Alpha's and avoid their unaligned
422 access traps. This flag is merely for log messages:
423 should do something more definitive though...
424 0.543 30-Dec-98 Add SMP spin locking.
425 0.544 8-May-99 Fix for buggy SROM in Motorola embedded boards using
426 a 21143 by <mmporter@home.com>.
427 Change PCI/EISA bus probing order.
428 0.545 28-Nov-99 Further Moto SROM bug fix from
429 <mporter@eng.mcd.mot.com>
430 Remove double checking for DEBUG_RX in de4x5_dbg_rx()
431 from report by <geert@linux-m68k.org>
432 0.546 22-Feb-01 Fixes Alpha XP1000 oops. The srom_search function
433 was causing a page fault when initializing the
434 variable 'pb', on a non de4x5 PCI device, in this
435 case a PCI bridge (DEC chip 21152). The value of
436 'pb' is now only initialized if a de4x5 chip is
437 present.
438 <france@handhelds.org>
439 0.547 08-Nov-01 Use library crc32 functions by <Matt_Domsch@dell.com>
440 0.548 30-Aug-03 Big 2.6 cleanup. Ported to PCI/EISA probing and
441 generic DMA APIs. Fixed DE425 support on Alpha.
442 <maz@wild-wind.fr.eu.org>
443 =========================================================================
444*/
445
446#include <linux/module.h>
447#include <linux/kernel.h>
448#include <linux/string.h>
449#include <linux/interrupt.h>
450#include <linux/ptrace.h>
451#include <linux/errno.h>
452#include <linux/ioport.h>
453#include <linux/pci.h>
454#include <linux/eisa.h>
455#include <linux/delay.h>
456#include <linux/init.h>
457#include <linux/spinlock.h>
458#include <linux/crc32.h>
459#include <linux/netdevice.h>
460#include <linux/etherdevice.h>
461#include <linux/skbuff.h>
462#include <linux/time.h>
463#include <linux/types.h>
464#include <linux/unistd.h>
465#include <linux/ctype.h>
466#include <linux/dma-mapping.h>
467#include <linux/moduleparam.h>
468#include <linux/bitops.h>
469#include <linux/gfp.h>
470
471#include <asm/io.h>
472#include <asm/dma.h>
473#include <asm/byteorder.h>
474#include <asm/unaligned.h>
475#include <asm/uaccess.h>
476#ifdef CONFIG_PPC_PMAC
477#include <asm/machdep.h>
478#endif /* CONFIG_PPC_PMAC */
479
480#include "de4x5.h"
481
482static const char version[] __devinitconst =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485#define c_char const char
486
487/*
488** MII Information
489*/
490struct phy_table {
491 int reset; /* Hard reset required? */
492 int id; /* IEEE OUI */
493 int ta; /* One cycle TA time - 802.3u is confusing here */
494 struct { /* Non autonegotiation (parallel) speed det. */
495 int reg;
496 int mask;
497 int value;
498 } spd;
499};
500
501struct mii_phy {
502 int reset; /* Hard reset required? */
503 int id; /* IEEE OUI */
504 int ta; /* One cycle TA time */
505 struct { /* Non autonegotiation (parallel) speed det. */
506 int reg;
507 int mask;
508 int value;
509 } spd;
510 int addr; /* MII address for the PHY */
511 u_char *gep; /* Start of GEP sequence block in SROM */
512 u_char *rst; /* Start of reset sequence in SROM */
513 u_int mc; /* Media Capabilities */
514 u_int ana; /* NWay Advertisement */
515 u_int fdx; /* Full DupleX capabilities for each media */
516 u_int ttm; /* Transmit Threshold Mode for each media */
517 u_int mci; /* 21142 MII Connector Interrupt info */
518};
519
520#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
521
522struct sia_phy {
523 u_char mc; /* Media Code */
524 u_char ext; /* csr13-15 valid when set */
525 int csr13; /* SIA Connectivity Register */
526 int csr14; /* SIA TX/RX Register */
527 int csr15; /* SIA General Register */
528 int gepc; /* SIA GEP Control Information */
529 int gep; /* SIA GEP Data */
530};
531
532/*
533** Define the known universe of PHY devices that can be
534** recognised by this driver.
535*/
536static struct phy_table phy_info[] = {
537 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}}, /* National TX */
538 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}}, /* Broadcom T4 */
539 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}}, /* SEEQ T4 */
540 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}}, /* Cypress T4 */
541 {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}} /* Level One LTX970 */
542};
543
544/*
545** These GENERIC values assume that the PHY devices follow 802.3u and
546** allow parallel detection to set the link partner ability register.
547** Detection of 100Base-TX [H/F Duplex] and 100Base-T4 is supported.
548*/
549#define GENERIC_REG 0x05 /* Autoneg. Link Partner Advertisement Reg. */
550#define GENERIC_MASK MII_ANLPA_100M /* All 100Mb/s Technologies */
551#define GENERIC_VALUE MII_ANLPA_100M /* 100B-TX, 100B-TX FDX, 100B-T4 */
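/*
** With the table above, parallel detection reduces to one masked
** MII read per PHY; a sketch assuming a hypothetical mii_read(addr,
** reg) accessor (the driver's real MII helpers appear later in this
** file):
**
**	struct phy_table *p = &phy_info[k];
**	int spd100 = (mii_read(phyaddr, p->spd.reg) & p->spd.mask)
**		      == p->spd.value;
*/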
552
553/*
554** Define special SROM detection cases
555*/
556static c_char enet_det[][ETH_ALEN] = {
557 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
558 {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
559};
560
561#define SMC 1
562#define ACCTON 2
563
564/*
565** SROM Repair definitions. If a broken SROM is detected a card may
566** use this information to help figure out what to do. This is a
567** "stab in the dark" and so far for SMC9332's only.
568*/
569static c_char srom_repair_info[][100] = {
570 {0x00,0x1e,0x00,0x00,0x00,0x08, /* SMC9332 */
571 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
572 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
573 0x00,0x18,}
574};
575
576
577#ifdef DE4X5_DEBUG
578static int de4x5_debug = DE4X5_DEBUG;
579#else
580/*static int de4x5_debug = (DEBUG_MII | DEBUG_SROM | DEBUG_PCICFG | DEBUG_MEDIA | DEBUG_VERSION);*/
581static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582#endif
583
584/*
585** Allow per adapter set up. For modules this is simply a command line
586** parameter, e.g.:
587** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
588**
589** For a compiled in driver, place e.g.
590** #define DE4X5_PARM "eth0:fdx autosense=AUI eth2:autosense=TP"
591** here
592*/
593#ifdef DE4X5_PARM
594static char *args = DE4X5_PARM;
595#else
596static char *args;
597#endif
598
599struct parameters {
600 bool fdx;
601 int autosense;
602};
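/*
** Illustration of how one "args" token set maps onto the structure
** above (the parsing itself happens later in this file; BNC is the
** media constant from de4x5.h):
**
**	args = "eth1:fdx autosense=BNC";
**	- for eth1: params.fdx = true; params.autosense = BNC;
*/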
603
604#define DE4X5_AUTOSENSE_MS 250 /* msec autosense tick (DE500) */
605
606#define DE4X5_NDA 0xffe0 /* No Device (I/O) Address */
607
608/*
609** Ethernet PROM defines
610*/
611#define PROBE_LENGTH 32
612#define ETH_PROM_SIG 0xAA5500FFUL
613
614/*
615** Ethernet Info
616*/
617#define PKT_BUF_SZ 1536 /* Buffer size for each Tx/Rx buffer */
618#define IEEE802_3_SZ 1518 /* Packet + CRC */
619#define MAX_PKT_SZ 1514 /* Maximum ethernet packet length */
620#define MAX_DAT_SZ 1500 /* Maximum ethernet data length */
621#define MIN_DAT_SZ 1 /* Minimum ethernet data length */
622#define PKT_HDR_LEN 14 /* Addresses and data length info */
623#define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624#define QUEUE_PKT_TIMEOUT (3*HZ) /* 3 second timeout */
625
626
627/*
628** EISA bus defines
629*/
630#define DE4X5_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
631#define DE4X5_EISA_TOTAL_SIZE 0x100 /* I/O address extent */
632
633#define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635#define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636#define DE4X5_NAME_LENGTH 8
637
638static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
639
640/*
641** Ethernet PROM defines for DC21040
642*/
643#define PROBE_LENGTH 32
644#define ETH_PROM_SIG 0xAA5500FFUL
645
646/*
647** PCI Bus defines
648*/
649#define PCI_MAX_BUS_NUM 8
650#define DE4X5_PCI_TOTAL_SIZE 0x80 /* I/O address extent */
651#define DE4X5_CLASS_CODE 0x00020000 /* Network controller, Ethernet */
652
653/*
654** Memory Alignment. Each descriptor is 4 longwords long. To force a
655** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
656** DESC_ALIGN. ALIGN aligns the start address of the private memory area
657** and hence the RX descriptor ring's first entry.
658*/
659#define DE4X5_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
660#define DE4X5_ALIGN8 ((u_long)8 - 1) /* 2 longword align */
661#define DE4X5_ALIGN16 ((u_long)16 - 1) /* 4 longword align */
662#define DE4X5_ALIGN32 ((u_long)32 - 1) /* 8 longword align */
663#define DE4X5_ALIGN64 ((u_long)64 - 1) /* 16 longword align */
664#define DE4X5_ALIGN128 ((u_long)128 - 1) /* 32 longword align */
665
666#define DE4X5_ALIGN DE4X5_ALIGN32 /* Keep the DC21040 happy... */
667#define DE4X5_CACHE_ALIGN CAL_16LONG
668#define DESC_SKIP_LEN DSL_0 /* Must agree with DESC_ALIGN */
669/*#define DESC_ALIGN u32 dummy[4]; / * Must agree with DESC_SKIP_LEN */
670#define DESC_ALIGN
671
672#ifndef DEC_ONLY /* See README.de4x5 for using this */
673static int dec_only;
674#else
675static int dec_only = 1;
676#endif
677
678/*
679** DE4X5 IRQ ENABLE/DISABLE
680*/
681#define ENABLE_IRQs { \
682 imr |= lp->irq_en;\
683 outl(imr, DE4X5_IMR); /* Enable the IRQs */\
684}
685
686#define DISABLE_IRQs {\
687 imr = inl(DE4X5_IMR);\
688 imr &= ~lp->irq_en;\
689 outl(imr, DE4X5_IMR); /* Disable the IRQs */\
690}
691
692#define UNMASK_IRQs {\
693 imr |= lp->irq_mask;\
694 outl(imr, DE4X5_IMR); /* Unmask the IRQs */\
695}
696
697#define MASK_IRQs {\
698 imr = inl(DE4X5_IMR);\
699 imr &= ~lp->irq_mask;\
700 outl(imr, DE4X5_IMR); /* Mask the IRQs */\
701}
702
703/*
704** DE4X5 START/STOP
705*/
706#define START_DE4X5 {\
707 omr = inl(DE4X5_OMR);\
708 omr |= OMR_ST | OMR_SR;\
709 outl(omr, DE4X5_OMR); /* Enable the TX and/or RX */\
710}
711
712#define STOP_DE4X5 {\
713 omr = inl(DE4X5_OMR);\
714 omr &= ~(OMR_ST|OMR_SR);\
715 outl(omr, DE4X5_OMR); /* Disable the TX and/or RX */ \
716}
717
718/*
719** DE4X5 SIA RESET
720*/
721#define RESET_SIA outl(0, DE4X5_SICR); /* Reset SIA connectivity regs */
722
723/*
724** DE500 AUTOSENSE TIMER INTERVAL (MILLISECS)
725*/
726#define DE4X5_AUTOSENSE_MS 250
727
728/*
729** SROM Structure
730*/
731struct de4x5_srom {
732 char sub_vendor_id[2];
733 char sub_system_id[2];
734 char reserved[12];
735 char id_block_crc;
736 char reserved2;
737 char version;
738 char num_controllers;
739 char ieee_addr[6];
740 char info[100];
741 short chksum;
742};
743#define SUB_VENDOR_ID 0x500a
744
745/*
746** DE4X5 Descriptors. Make sure that all the RX buffers are contiguous
747** and have sizes of both a power of 2 and a multiple of 4.
748** A size of 256 bytes for each buffer could be chosen because over 90% of
749** all packets in our network are <256 bytes long and 64 longword alignment
750** is possible. 1536 showed better 'ttcp' performance. Take your pick. 32 TX
751** descriptors are needed for machines with an ALPHA CPU.
752*/
753#define NUM_RX_DESC 8 /* Number of RX descriptors */
754#define NUM_TX_DESC 32 /* Number of TX descriptors */
755#define RX_BUFF_SZ 1536 /* Power of 2 for kmalloc and */
756 /* Multiple of 4 for DC21040 */
757 /* Allows 512 byte alignment */
758struct de4x5_desc {
759 volatile __le32 status;
760 __le32 des1;
761 __le32 buf;
762 __le32 next;
763 DESC_ALIGN
764};
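/*
** A hedged sketch of how one TX descriptor gets filled in (the flag
** names TD_FS/TD_LS/TD_IC/T_OWN follow de4x5.h; treat the exact
** combination as illustrative rather than normative):
**
**	lp->tx_ring[i].buf  = cpu_to_le32(buf_dma);
**	lp->tx_ring[i].des1 |= cpu_to_le32(TD_FS | TD_LS | TD_IC | len);
**	lp->tx_ring[i].status = cpu_to_le32(T_OWN);	- hand to chip
*/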
765
766/*
767** The DE4X5 private structure
768*/
769#define DE4X5_PKT_STAT_SZ 16
770#define DE4X5_PKT_BIN_SZ 128 /* Should be >=100 unless you
771 increase DE4X5_PKT_STAT_SZ */
772
773struct pkt_stats {
774 u_int bins[DE4X5_PKT_STAT_SZ]; /* Private stats counters */
775 u_int unicast;
776 u_int multicast;
777 u_int broadcast;
778 u_int excessive_collisions;
779 u_int tx_underruns;
780 u_int excessive_underruns;
781 u_int rx_runt_frames;
782 u_int rx_collision;
783 u_int rx_dribble;
784 u_int rx_overflow;
785};
786
787struct de4x5_private {
788 char adapter_name[80]; /* Adapter name */
789 u_long interrupt; /* Aligned ISR flag */
790 struct de4x5_desc *rx_ring; /* RX descriptor ring */
791 struct de4x5_desc *tx_ring; /* TX descriptor ring */
792 struct sk_buff *tx_skb[NUM_TX_DESC]; /* TX skb for freeing when sent */
793 struct sk_buff *rx_skb[NUM_RX_DESC]; /* RX skb's */
794 int rx_new, rx_old; /* RX descriptor ring pointers */
795 int tx_new, tx_old; /* TX descriptor ring pointers */
796 char setup_frame[SETUP_FRAME_LEN]; /* Holds MCA and PA info. */
797 char frame[64]; /* Min sized packet for loopback*/
798 spinlock_t lock; /* Adapter specific spinlock */
799 struct net_device_stats stats; /* Public stats */
800 struct pkt_stats pktStats; /* Private stats counters */
801 char rxRingSize;
802 char txRingSize;
803 int bus; /* EISA or PCI */
804 int bus_num; /* PCI Bus number */
805 int device; /* Device number on PCI bus */
806 int state; /* Adapter OPENED or CLOSED */
807 int chipset; /* DC21040, DC21041 or DC21140 */
808 s32 irq_mask; /* Interrupt Mask (Enable) bits */
809 s32 irq_en; /* Summary interrupt bits */
810 int media; /* Media (eg TP), mode (eg 100B)*/
811 int c_media; /* Remember the last media conn */
812 bool fdx; /* media full duplex flag */
813 int linkOK; /* Link is OK */
814 int autosense; /* Allow/disallow autosensing */
815 bool tx_enable; /* Enable descriptor polling */
816 int setup_f; /* Setup frame filtering type */
817 int local_state; /* State within a 'media' state */
818 struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
819 struct sia_phy sia; /* SIA PHY Information */
820 int active; /* Index to active PHY device */
821 int mii_cnt; /* Number of attached PHY's */
822 int timeout; /* Scheduling counter */
823 struct timer_list timer; /* Timer info for kernel */
824 int tmp; /* Temporary global per card */
825 struct {
826 u_long lock; /* Lock the cache accesses */
827 s32 csr0; /* Saved Bus Mode Register */
828 s32 csr6; /* Saved Operating Mode Reg. */
829 s32 csr7; /* Saved IRQ Mask Register */
830 s32 gep; /* Saved General Purpose Reg. */
831 s32 gepc; /* Control info for GEP */
832 s32 csr13; /* Saved SIA Connectivity Reg. */
833 s32 csr14; /* Saved SIA TX/RX Register */
834 s32 csr15; /* Saved SIA General Register */
835 int save_cnt; /* Flag if state already saved */
836 struct sk_buff_head queue; /* Save the (re-ordered) skb's */
837 } cache;
838 struct de4x5_srom srom; /* A copy of the SROM */
839 int cfrv; /* Card CFRV copy */
840 int rx_ovf; /* Check for 'RX overflow' tag */
841 bool useSROM; /* For non-DEC card use SROM */
842 bool useMII; /* Infoblock using the MII */
843 int asBitValid; /* Autosense bits in GEP? */
844 int asPolarity; /* 0 => asserted high */
845 int asBit; /* Autosense bit number in GEP */
846 int defMedium; /* SROM default medium */
847 int tcount; /* Last infoblock number */
848 int infoblock_init; /* Initialised this infoblock? */
849 int infoleaf_offset; /* SROM infoleaf for controller */
850 s32 infoblock_csr6; /* csr6 value in SROM infoblock */
851 int infoblock_media; /* infoblock media */
852 int (*infoleaf_fn)(struct net_device *); /* Pointer to infoleaf function */
853 u_char *rst; /* Pointer to Type 5 reset info */
854 u_char ibn; /* Infoblock number */
855 struct parameters params; /* Command line/ #defined params */
856 struct device *gendev; /* Generic device */
857 dma_addr_t dma_rings; /* DMA handle for rings */
858 int dma_size; /* Size of the DMA area */
859 char *rx_bufs; /* rx bufs on alpha, sparc, ... */
860};
861
862/*
863** To get around certain poxy cards that don't provide an SROM
864** for the second and more DECchip, I have to key off the first
865** chip's address. I'll assume there's not a bad SROM iff:
866**
867** o the chipset is the same
868** o the bus number is the same and > 0
869** o the sum of all the returned hw address bytes is 0 or 0x5fa
870**
871** Also have to save the irq for those cards whose hardware designers
872** can't follow the PCI to PCI Bridge Architecture spec.
873*/
874static struct {
875 int chipset;
876 int bus;
877 int irq;
878 u_char addr[ETH_ALEN];
879} last = {0,};
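/*
** A sketch of that heuristic (the real check lives in the probe
** path later in this file):
**
**	int i, sum = 0;
**	for (i = 0; i < ETH_ALEN; i++)
**		sum += (u_char)dev->dev_addr[i];
**	use_last = (lp->chipset == last.chipset &&
**		    lp->bus_num == last.bus && lp->bus_num > 0 &&
**		    (sum == 0 || sum == 0x5fa));
*/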
880
881/*
882** The transmit ring full condition is described by the tx_old and tx_new
883** pointers by:
884** tx_old = tx_new Empty ring
885** tx_old = tx_new+1 Full ring
886** tx_old+txRingSize = tx_new+1 Full ring (wrapped condition)
887*/
888#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
889 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
890 lp->tx_old-lp->tx_new-1)
891
892#define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
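
/*
** A worked example (hypothetical helper, equivalent to TX_BUFFS_AVAIL
** above): with txRingSize = 8, tx_old = 2 and tx_new = 6 there are 3
** free slots; one slot always stays unused so that a full ring remains
** distinguishable from an empty one.
*/
static inline int de4x5_tx_avail_example(int tx_old, int tx_new, int size)
{
    return (tx_old <= tx_new) ? tx_old + size - tx_new - 1
                              : tx_old - tx_new - 1;
}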
893
894/*
895** Public Functions
896*/
897static int de4x5_open(struct net_device *dev);
898static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901static int de4x5_close(struct net_device *dev);
902static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904static void set_multicast_list(struct net_device *dev);
905static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907/*
908** Private functions
909*/
910static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911static int de4x5_init(struct net_device *dev);
912static int de4x5_sw_reset(struct net_device *dev);
913static int de4x5_rx(struct net_device *dev);
914static int de4x5_tx(struct net_device *dev);
915static void de4x5_ast(struct net_device *dev);
916static int de4x5_txur(struct net_device *dev);
917static int de4x5_rx_ovfc(struct net_device *dev);
918
919static int autoconf_media(struct net_device *dev);
920static void create_packet(struct net_device *dev, char *frame, int len);
921static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922static int dc21040_autoconf(struct net_device *dev);
923static int dc21041_autoconf(struct net_device *dev);
924static int dc21140m_autoconf(struct net_device *dev);
925static int dc2114x_autoconf(struct net_device *dev);
926static int srom_autoconf(struct net_device *dev);
927static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930static int test_for_100Mb(struct net_device *dev, int msec);
931static int wait_for_link(struct net_device *dev);
932static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933static int is_spd_100(struct net_device *dev);
934static int is_100_up(struct net_device *dev);
935static int is_10_up(struct net_device *dev);
936static int is_anc_capable(struct net_device *dev);
937static int ping_media(struct net_device *dev, int msec);
938static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939static void de4x5_free_rx_buffs(struct net_device *dev);
940static void de4x5_free_tx_buffs(struct net_device *dev);
941static void de4x5_save_skbs(struct net_device *dev);
942static void de4x5_rst_desc_ring(struct net_device *dev);
943static void de4x5_cache_state(struct net_device *dev, int flag);
944static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947static void de4x5_setup_intr(struct net_device *dev);
948static void de4x5_init_connection(struct net_device *dev);
949static int de4x5_reset_phy(struct net_device *dev);
950static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952static int test_tp(struct net_device *dev, s32 msec);
953static int EISA_signature(char *name, struct device *device);
954static int PCI_signature(char *name, struct de4x5_private *lp);
955static void DevicePresent(struct net_device *dev, u_long iobase);
956static void enet_addr_rst(u_long aprom_addr);
957static int de4x5_bad_srom(struct de4x5_private *lp);
958static short srom_rd(u_long address, u_char offset);
959static void srom_latch(u_int command, u_long address);
960static void srom_command(u_int command, u_long address);
961static void srom_address(u_int command, u_long address, u_char offset);
962static short srom_data(u_int command, u_long address);
963/*static void srom_busy(u_int command, u_long address);*/
964static void sendto_srom(u_int command, u_long addr);
965static int getfrom_srom(u_long addr);
966static int srom_map_media(struct net_device *dev);
967static int srom_infoleaf_info(struct net_device *dev);
968static void srom_init(struct net_device *dev);
969static void srom_exec(struct net_device *dev, u_char *p);
970static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972static int mii_rdata(u_long ioaddr);
973static void mii_wdata(int data, int len, u_long ioaddr);
974static void mii_ta(u_long rw, u_long ioaddr);
975static int mii_swap(int data, int len);
976static void mii_address(u_char addr, u_long ioaddr);
977static void sendto_mii(u32 command, int data, u_long ioaddr);
978static int getfrom_mii(u32 command, u_long ioaddr);
979static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980static int mii_get_phy(struct net_device *dev);
981static void SetMulticastFilter(struct net_device *dev);
982static int get_hw_addr(struct net_device *dev);
983static void srom_repair(struct net_device *dev, int card);
984static int test_bad_enet(struct net_device *dev, int status);
985static int an_exception(struct de4x5_private *lp);
986static char *build_setup_frame(struct net_device *dev, int mode);
987static void disable_ast(struct net_device *dev);
988static long de4x5_switch_mac_port(struct net_device *dev);
989static int gep_rd(struct net_device *dev);
990static void gep_wr(s32 data, struct net_device *dev);
991static void yawn(struct net_device *dev, int state);
992static void de4x5_parse_params(struct net_device *dev);
993static void de4x5_dbg_open(struct net_device *dev);
994static void de4x5_dbg_mii(struct net_device *dev, int k);
995static void de4x5_dbg_media(struct net_device *dev);
996static void de4x5_dbg_srom(struct de4x5_srom *p);
997static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998static int de4x5_strncmp(char *a, char *b, int n);
999static int dc21041_infoleaf(struct net_device *dev);
1000static int dc21140_infoleaf(struct net_device *dev);
1001static int dc21142_infoleaf(struct net_device *dev);
1002static int dc21143_infoleaf(struct net_device *dev);
1003static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1004static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1005static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1006static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1007static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1008static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1009static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1010
1011/*
1012** Note now that module autoprobing is allowed under EISA and PCI. The
1013** IRQ lines will not be auto-detected; instead I'll rely on the BIOSes
1014** to "do the right thing".
1015*/
1016
1017static int io=0x0;/* EDIT THIS LINE FOR YOUR CONFIGURATION IF NEEDED */
1018
1019module_param(io, int, 0);
1020module_param(de4x5_debug, int, 0);
1021module_param(dec_only, int, 0);
1022module_param(args, charp, 0);
1023
1024MODULE_PARM_DESC(io, "de4x5 I/O base address");
1025MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1026MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1027MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1028MODULE_LICENSE("GPL");
1029
1030/*
1031** List the SROM infoleaf functions and chipsets
1032*/
1033struct InfoLeaf {
1034 int chipset;
1035 int (*fn)(struct net_device *);
1036};
1037static struct InfoLeaf infoleaf_array[] = {
1038 {DC21041, dc21041_infoleaf},
1039 {DC21140, dc21140_infoleaf},
1040 {DC21142, dc21142_infoleaf},
1041 {DC21143, dc21143_infoleaf}
1042};
1043#define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1044
1045/*
1046** List the SROM info block functions
1047*/
1048static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1049 type0_infoblock,
1050 type1_infoblock,
1051 type2_infoblock,
1052 type3_infoblock,
1053 type4_infoblock,
1054 type5_infoblock,
1055 compact_infoblock
1056};
1057
1058#define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
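
/*
** A hedged usage sketch (illustrative only; the driver's own detection
** of compact blocks differs): an SROM info block can be dispatched
** through the table above by its type, with out-of-range types clamped
** to the compact handler.
*/
static inline int de4x5_infoblock_dispatch_example(struct net_device *dev,
                                                   u_char count, u_char *p,
                                                   u_char type)
{
    return dc_infoblock[(type < COMPACT) ? type : COMPACT](dev, count, p);
}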
1059
1060/*
1061** Miscellaneous defines...
1062*/
1063#define RESET_DE4X5 {\
1064 int i;\
1065 i=inl(DE4X5_BMR);\
1066 mdelay(1);\
1067 outl(i | BMR_SWR, DE4X5_BMR);\
1068 mdelay(1);\
1069 outl(i, DE4X5_BMR);\
1070 mdelay(1);\
1071 for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
1072 mdelay(1);\
1073}
1074
1075#define PHY_HARD_RESET {\
1076 outl(GEP_HRST, DE4X5_GEP); /* Hard RESET the PHY dev. */\
1077 mdelay(1); /* Assert for 1ms */\
1078 outl(0x00, DE4X5_GEP);\
1079 mdelay(2); /* Wait for 2ms */\
1080}
1081
1082static const struct net_device_ops de4x5_netdev_ops = {
1083 .ndo_open = de4x5_open,
1084 .ndo_stop = de4x5_close,
1085 .ndo_start_xmit = de4x5_queue_pkt,
1086 .ndo_get_stats = de4x5_get_stats,
1087 .ndo_set_rx_mode = set_multicast_list,
1088 .ndo_do_ioctl = de4x5_ioctl,
1089 .ndo_change_mtu = eth_change_mtu,
1090 .ndo_set_mac_address= eth_mac_addr,
1091 .ndo_validate_addr = eth_validate_addr,
1092};
1093
1094
1095static int __devinit
1096de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1097{
1098 char name[DE4X5_NAME_LENGTH + 1];
1099 struct de4x5_private *lp = netdev_priv(dev);
1100 struct pci_dev *pdev = NULL;
1101 int i, status=0;
1102
1103 dev_set_drvdata(gendev, dev);
1104
1105 /* Ensure we're not sleeping */
1106 if (lp->bus == EISA) {
1107 outb(WAKEUP, PCI_CFPM);
1108 } else {
1109 pdev = to_pci_dev (gendev);
1110 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1111 }
1112 mdelay(10);
1113
1114 RESET_DE4X5;
1115
1116 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1117 return -ENXIO; /* Hardware could not reset */
1118 }
1119
1120 /*
1121 ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
1122 */
1123 lp->useSROM = false;
1124 if (lp->bus == PCI) {
1125 PCI_signature(name, lp);
1126 } else {
1127 EISA_signature(name, gendev);
1128 }
1129
1130 if (*name == '\0') { /* Not found a board signature */
1131 return -ENXIO;
1132 }
1133
1134 dev->base_addr = iobase;
1135 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1136
1137 status = get_hw_addr(dev);
1138 printk(", h/w address %pM\n", dev->dev_addr);
1139
1140 if (status != 0) {
1141 printk(" which has an Ethernet PROM CRC error.\n");
1142 return -ENXIO;
1143 } else {
1144 skb_queue_head_init(&lp->cache.queue);
1145 lp->cache.gepc = GEP_INIT;
1146 lp->asBit = GEP_SLNK;
1147 lp->asPolarity = GEP_SLNK;
1148 lp->asBitValid = ~0;
1149 lp->timeout = -1;
1150 lp->gendev = gendev;
1151 spin_lock_init(&lp->lock);
1152 init_timer(&lp->timer);
1153 lp->timer.function = (void (*)(unsigned long))de4x5_ast;
1154 lp->timer.data = (unsigned long)dev;
1155 de4x5_parse_params(dev);
1156
1157 /*
1158 ** Choose correct autosensing in case someone messed up
1159 */
1160 lp->autosense = lp->params.autosense;
1161 if (lp->chipset != DC21140) {
1162 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1163 lp->params.autosense = TP;
1164 }
1165 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1166 lp->params.autosense = BNC;
1167 }
1168 }
1169 lp->fdx = lp->params.fdx;
1170 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1171
1172 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1173#if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1174 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1175#endif
1176 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1177 &lp->dma_rings, GFP_ATOMIC);
1178 if (lp->rx_ring == NULL) {
1179 return -ENOMEM;
1180 }
1181
1182 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1183
1184 /*
1185 ** Set up the RX descriptor ring (Intels)
1186 ** Allocate contiguous receive buffers, long word aligned (Alphas)
1187 */
1188#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1189 for (i=0; i<NUM_RX_DESC; i++) {
1190 lp->rx_ring[i].status = 0;
1191 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1192 lp->rx_ring[i].buf = 0;
1193 lp->rx_ring[i].next = 0;
1194 lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
1195 }
1196
1197#else
1198 {
1199 dma_addr_t dma_rx_bufs;
1200
1201 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1202 * sizeof(struct de4x5_desc);
1203 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1204 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1205 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1206 for (i=0; i<NUM_RX_DESC; i++) {
1207 lp->rx_ring[i].status = 0;
1208 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1209 lp->rx_ring[i].buf =
1210 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1211 lp->rx_ring[i].next = 0;
1212 lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry */
1213 }
1214
1215 }
1216#endif
1217
1218 barrier();
1219
1220 lp->rxRingSize = NUM_RX_DESC;
1221 lp->txRingSize = NUM_TX_DESC;
1222
1223 /* Write the end of list marker to the descriptor lists */
1224 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1225 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1226
1227 /* Tell the adapter where the TX/RX rings are located. */
1228 outl(lp->dma_rings, DE4X5_RRBA);
1229 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1230 DE4X5_TRBA);
1231
1232 /* Initialise the IRQ mask and Enable/Disable */
1233 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1234 lp->irq_en = IMR_NIM | IMR_AIM;
1235
1236 /* Create a loopback packet frame for later media probing */
1237 create_packet(dev, lp->frame, sizeof(lp->frame));
1238
1239 /* Check if the RX overflow bug needs testing for */
1240 i = lp->cfrv & 0x000000fe;
1241 if ((lp->chipset == DC21140) && (i == 0x20)) {
1242 lp->rx_ovf = 1;
1243 }
1244
1245 /* Initialise the SROM pointers if possible */
1246 if (lp->useSROM) {
1247 lp->state = INITIALISED;
1248 if (srom_infoleaf_info(dev)) {
1249 dma_free_coherent (gendev, lp->dma_size,
1250 lp->rx_ring, lp->dma_rings);
1251 return -ENXIO;
1252 }
1253 srom_init(dev);
1254 }
1255
1256 lp->state = CLOSED;
1257
1258 /*
1259 ** Check for an MII interface
1260 */
1261 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1262 mii_get_phy(dev);
1263 }
1264
1265 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1266 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1267 }
1268
1269 if (de4x5_debug & DEBUG_VERSION) {
1270 printk(version);
1271 }
1272
1273 /* The DE4X5-specific entries in the device structure. */
1274 SET_NETDEV_DEV(dev, gendev);
1275 dev->netdev_ops = &de4x5_netdev_ops;
1276 dev->mem_start = 0;
1277
1278 /* Fill in the generic fields of the device structure. */
1279 if ((status = register_netdev (dev))) {
1280 dma_free_coherent (gendev, lp->dma_size,
1281 lp->rx_ring, lp->dma_rings);
1282 return status;
1283 }
1284
1285 /* Let the adapter sleep to save power */
1286 yawn(dev, SLEEP);
1287
1288 return status;
1289}
1290
1291
1292static int
1293de4x5_open(struct net_device *dev)
1294{
1295 struct de4x5_private *lp = netdev_priv(dev);
1296 u_long iobase = dev->base_addr;
1297 int i, status = 0;
1298 s32 omr;
1299
1300 /* Allocate the RX buffers */
1301 for (i=0; i<lp->rxRingSize; i++) {
1302 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1303 de4x5_free_rx_buffs(dev);
1304 return -EAGAIN;
1305 }
1306 }
1307
1308 /*
1309 ** Wake up the adapter
1310 */
1311 yawn(dev, WAKEUP);
1312
1313 /*
1314 ** Re-initialize the DE4X5...
1315 */
1316 status = de4x5_init(dev);
1317 spin_lock_init(&lp->lock);
1318 lp->state = OPEN;
1319 de4x5_dbg_open(dev);
1320
1321 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1322 lp->adapter_name, dev)) {
1323 printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
1324 if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
1325 lp->adapter_name, dev)) {
1326 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1327 disable_ast(dev);
1328 de4x5_free_rx_buffs(dev);
1329 de4x5_free_tx_buffs(dev);
1330 yawn(dev, SLEEP);
1331 lp->state = CLOSED;
1332 return -EAGAIN;
1333 } else {
1334 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1335 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1336 }
1337 }
1338
1339 lp->interrupt = UNMASK_INTERRUPTS;
1340 dev->trans_start = jiffies; /* prevent tx timeout */
1341
1342 START_DE4X5;
1343
1344 de4x5_setup_intr(dev);
1345
1346 if (de4x5_debug & DEBUG_OPEN) {
1347 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1348 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1349 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1350 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1351 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1352 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1353 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1354 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1355 }
1356
1357 return status;
1358}
1359
1360/*
1361** Initialize the DE4X5 operating conditions. NB: a chip problem with the
1362** DC21140 requires using perfect filtering mode for that chip. Since I can't
1363** see why I'd want > 14 multicast addresses, I have changed all chips to use
1364** the perfect filtering mode. Keep the DMA burst length at 8: there seem
1365** to be data corruption problems if it is larger (UDP errors seen from a
1366** ttcp source).
1367*/
1368static int
1369de4x5_init(struct net_device *dev)
1370{
1371 /* Lock out other processes whilst setting up the hardware */
1372 netif_stop_queue(dev);
1373
1374 de4x5_sw_reset(dev);
1375
1376 /* Autoconfigure the connected port */
1377 autoconf_media(dev);
1378
1379 return 0;
1380}
1381
1382static int
1383de4x5_sw_reset(struct net_device *dev)
1384{
1385 struct de4x5_private *lp = netdev_priv(dev);
1386 u_long iobase = dev->base_addr;
1387 int i, j, status = 0;
1388 s32 bmr, omr;
1389
1390 /* Select the MII or SRL port now and RESET the MAC */
1391 if (!lp->useSROM) {
1392 if (lp->phy[lp->active].id != 0) {
1393 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1394 } else {
1395 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1396 }
1397 de4x5_switch_mac_port(dev);
1398 }
1399
1400 /*
1401 ** Set the programmable burst length to 8 longwords for all the DC21140
1402 ** Fasternet chips and 4 longwords for all others: DMA errors result
1403 ** without these values. Cache align to 16 longwords.
1404 */
1405 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1406 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1407 outl(bmr, DE4X5_BMR);
1408
1409 omr = inl(DE4X5_OMR) & ~OMR_PR; /* Turn off promiscuous mode */
1410 if (lp->chipset == DC21140) {
1411 omr |= (OMR_SDP | OMR_SB);
1412 }
1413 lp->setup_f = PERFECT;
1414 outl(lp->dma_rings, DE4X5_RRBA);
1415 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1416 DE4X5_TRBA);
1417
1418 lp->rx_new = lp->rx_old = 0;
1419 lp->tx_new = lp->tx_old = 0;
1420
1421 for (i = 0; i < lp->rxRingSize; i++) {
1422 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1423 }
1424
1425 for (i = 0; i < lp->txRingSize; i++) {
1426 lp->tx_ring[i].status = cpu_to_le32(0);
1427 }
1428
1429 barrier();
1430
1431 /* Build the setup frame depending on filtering mode */
1432 SetMulticastFilter(dev);
1433
1434 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1435 outl(omr|OMR_ST, DE4X5_OMR);
1436
1437 /* Poll for setup frame completion (adapter interrupts are disabled now) */
1438
1439 for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
1440 mdelay(1);
1441 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1442 }
1443 outl(omr, DE4X5_OMR); /* Stop everything! */
1444
1445 if (j == 0) {
1446 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1447 inl(DE4X5_STS));
1448 status = -EIO;
1449 }
1450
1451 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1452 lp->tx_old = lp->tx_new;
1453
1454 return status;
1455}
1456
1457/*
1458** Writes a socket buffer address to the next available transmit descriptor.
1459*/
1460static netdev_tx_t
1461de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1462{
1463 struct de4x5_private *lp = netdev_priv(dev);
1464 u_long iobase = dev->base_addr;
1465 u_long flags = 0;
1466
1467 netif_stop_queue(dev);
1468 if (!lp->tx_enable) /* Cannot send for now */
1469 return NETDEV_TX_LOCKED;
1470
1471 /*
1472 ** Clean out the TX ring asynchronously to interrupts - sometimes the
1473 ** interrupts are lost by delayed descriptor status updates relative to
1474 ** the irq assertion, especially with a busy PCI bus.
1475 */
1476 spin_lock_irqsave(&lp->lock, flags);
1477 de4x5_tx(dev);
1478 spin_unlock_irqrestore(&lp->lock, flags);
1479
1480 /* Test if cache is already locked - requeue skb if so */
1481 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1482 return NETDEV_TX_LOCKED;
1483
1484 /* Transmit descriptor ring full or stale skb */
1485 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1486 if (lp->interrupt) {
1487 de4x5_putb_cache(dev, skb); /* Requeue the buffer */
1488 } else {
1489 de4x5_put_cache(dev, skb);
1490 }
1491 if (de4x5_debug & DEBUG_TX) {
1492 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1493 }
1494 } else if (skb->len > 0) {
1495 /* If we already have stuff queued locally, use that first */
1496 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1497 de4x5_put_cache(dev, skb);
1498 skb = de4x5_get_cache(dev);
1499 }
1500
1501 while (skb && !netif_queue_stopped(dev) &&
1502 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1503 spin_lock_irqsave(&lp->lock, flags);
1504 netif_stop_queue(dev);
1505 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1506 lp->stats.tx_bytes += skb->len;
1507 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
1508
1509 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1510
1511 if (TX_BUFFS_AVAIL) {
1512 netif_start_queue(dev); /* Another pkt may be queued */
1513 }
1514 skb = de4x5_get_cache(dev);
1515 spin_unlock_irqrestore(&lp->lock, flags);
1516 }
1517 if (skb) de4x5_putb_cache(dev, skb);
1518 }
1519
1520 lp->cache.lock = 0;
1521
1522 return NETDEV_TX_OK;
1523}
1524
1525/*
1526** The DE4X5 interrupt handler.
1527**
1528** I/O Read/Writes through intermediate PCI bridges are never 'posted',
1529** so that the asserted interrupt always has some real data to work with -
1530** if these I/O accesses are ever changed to memory accesses, ensure the
1531** STS write is read back immediately to complete the transaction if the
1532** adapter is not on bus 0 (a hedged sketch follows this handler). Lost
1533** interrupts can still occur when the PCI bus load is high and descriptor
1534** status bits cannot be set before the interrupt asserts and this routine runs.
1535*/
1536static irqreturn_t
1537de4x5_interrupt(int irq, void *dev_id)
1538{
1539 struct net_device *dev = dev_id;
1540 struct de4x5_private *lp;
1541 s32 imr, omr, sts, limit;
1542 u_long iobase;
1543 unsigned int handled = 0;
1544
1545 lp = netdev_priv(dev);
1546 spin_lock(&lp->lock);
1547 iobase = dev->base_addr;
1548
1549 DISABLE_IRQs; /* Ensure non re-entrancy */
1550
1551 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1552 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1553
1554 synchronize_irq(dev->irq);
1555
1556 for (limit=0; limit<8; limit++) {
1557 sts = inl(DE4X5_STS); /* Read IRQ status */
1558 outl(sts, DE4X5_STS); /* Reset the board interrupts */
1559
1560 if (!(sts & lp->irq_mask)) break;/* All done */
1561 handled = 1;
1562
1563 if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
1564 de4x5_rx(dev);
1565
1566 if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
1567 de4x5_tx(dev);
1568
1569 if (sts & STS_LNF) { /* TP Link has failed */
1570 lp->irq_mask &= ~IMR_LFM;
1571 }
1572
1573 if (sts & STS_UNF) { /* Transmit underrun */
1574 de4x5_txur(dev);
1575 }
1576
1577 if (sts & STS_SE) { /* Bus Error */
1578 STOP_DE4X5;
1579 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1580 dev->name, sts);
1581 spin_unlock(&lp->lock);
1582 return IRQ_HANDLED;
1583 }
1584 }
1585
1586 /* Load the TX ring with any locally stored packets */
1587 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1588 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1589 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1590 }
1591 lp->cache.lock = 0;
1592 }
1593
1594 lp->interrupt = UNMASK_INTERRUPTS;
1595 ENABLE_IRQs;
1596 spin_unlock(&lp->lock);
1597
1598 return IRQ_RETVAL(handled);
1599}
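
/*
** A hedged sketch of the read-back mentioned above, should the port
** I/O ever be converted to MMIO (the 0x28 CSR5/STS offset and the
** accessors are assumptions, not taken from this driver): reading the
** status register back after acknowledging it forces the posted write
** to complete before the handler continues.
*/
static inline void de4x5_ack_sts_mmio_example(void __iomem *ioaddr, u32 sts)
{
    writel(sts, ioaddr + 0x28);       /* ack the interrupt sources */
    readl(ioaddr + 0x28);             /* read back to flush the post */
}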
1600
1601static int
1602de4x5_rx(struct net_device *dev)
1603{
1604 struct de4x5_private *lp = netdev_priv(dev);
1605 u_long iobase = dev->base_addr;
1606 int entry;
1607 s32 status;
1608
1609 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1610 entry=lp->rx_new) {
1611 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1612
1613 if (lp->rx_ovf) {
1614 if (inl(DE4X5_MFC) & MFC_FOCM) {
1615 de4x5_rx_ovfc(dev);
1616 break;
1617 }
1618 }
1619
1620 if (status & RD_FS) { /* Remember the start of frame */
1621 lp->rx_old = entry;
1622 }
1623
1624 if (status & RD_LS) { /* Valid frame status */
1625 if (lp->tx_enable) lp->linkOK++;
1626 if (status & RD_ES) { /* There was an error. */
1627 lp->stats.rx_errors++; /* Update the error stats. */
1628 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1629 if (status & RD_CE) lp->stats.rx_crc_errors++;
1630 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1631 if (status & RD_TL) lp->stats.rx_length_errors++;
1632 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1633 if (status & RD_CS) lp->pktStats.rx_collision++;
1634 if (status & RD_DB) lp->pktStats.rx_dribble++;
1635 if (status & RD_OF) lp->pktStats.rx_overflow++;
1636 } else { /* A valid frame received */
1637 struct sk_buff *skb;
1638 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1639 >> 16) - 4;
1640
1641 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1642 printk("%s: Insufficient memory; nuking packet.\n",
1643 dev->name);
1644 lp->stats.rx_dropped++;
1645 } else {
1646 de4x5_dbg_rx(skb, pkt_len);
1647
1648 /* Push up the protocol stack */
1649 skb->protocol=eth_type_trans(skb,dev);
1650 de4x5_local_stats(dev, skb->data, pkt_len);
1651 netif_rx(skb);
1652
1653 /* Update stats */
1654 lp->stats.rx_packets++;
1655 lp->stats.rx_bytes += pkt_len;
1656 }
1657 }
1658
1659 /* Change buffer ownership for this frame, back to the adapter */
1660 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1661 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1662 barrier();
1663 }
1664 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1665 barrier();
1666 }
1667
1668 /*
1669 ** Update entry information
1670 */
1671 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1672 }
1673
1674 return 0;
1675}
1676
1677static inline void
1678de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1679{
1680 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1681 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1682 DMA_TO_DEVICE);
1683 if ((u_long) lp->tx_skb[entry] > 1)
1684 dev_kfree_skb_irq(lp->tx_skb[entry]);
1685 lp->tx_skb[entry] = NULL;
1686}
1687
1688/*
1689** Buffer sent - check for TX buffer errors.
1690*/
1691static int
1692de4x5_tx(struct net_device *dev)
1693{
1694 struct de4x5_private *lp = netdev_priv(dev);
1695 u_long iobase = dev->base_addr;
1696 int entry;
1697 s32 status;
1698
1699 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1700 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1701 if (status < 0) { /* Buffer not sent yet */
1702 break;
1703 } else if (status != 0x7fffffff) { /* Not setup frame */
1704 if (status & TD_ES) { /* An error happened */
1705 lp->stats.tx_errors++;
1706 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1707 if (status & TD_LC) lp->stats.tx_window_errors++;
1708 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1709 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1710 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1711
1712 if (TX_PKT_PENDING) {
1713 outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
1714 }
1715 } else { /* Packet sent */
1716 lp->stats.tx_packets++;
1717 if (lp->tx_enable) lp->linkOK++;
1718 }
1719 /* Update the collision counter */
1720 lp->stats.collisions += ((status & TD_EC) ? 16 :
1721 ((status & TD_CC) >> 3));
1722
1723 /* Free the buffer. */
1724 if (lp->tx_skb[entry] != NULL)
1725 de4x5_free_tx_buff(lp, entry);
1726 }
1727
1728 /* Update all the pointers */
1729 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1730 }
1731
1732 /* Any resources available? */
1733 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1734 if (lp->interrupt)
1735 netif_wake_queue(dev);
1736 else
1737 netif_start_queue(dev);
1738 }
1739
1740 return 0;
1741}
1742
1743static void
1744de4x5_ast(struct net_device *dev)
1745{
1746 struct de4x5_private *lp = netdev_priv(dev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766}
1767
1768static int
1769de4x5_txur(struct net_device *dev)
1770{
1771 struct de4x5_private *lp = netdev_priv(dev);
1772 u_long iobase = dev->base_addr;
1773 int omr;
1774
1775 omr = inl(DE4X5_OMR);
1776 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1777 omr &= ~(OMR_ST|OMR_SR);
1778 outl(omr, DE4X5_OMR);
1779 while (inl(DE4X5_STS) & STS_TS);
1780 if ((omr & OMR_TR) < OMR_TR) {
1781 omr += 0x4000;
1782 } else {
1783 omr |= OMR_SF;
1784 }
1785 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1786 }
1787
1788 return 0;
1789}
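
/*
** A hedged sketch of the underrun escalation above: OMR_TR is the
** transmit threshold field, adding 0x4000 selects the next higher
** threshold, and once the field saturates the driver switches to
** store-and-forward mode instead.
*/
static inline s32 de4x5_bump_tx_threshold_example(s32 omr)
{
    if ((omr & OMR_TR) < OMR_TR)
        return omr + 0x4000;          /* next threshold step */

    return omr | OMR_SF;              /* store-and-forward */
}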
1790
1791static int
1792de4x5_rx_ovfc(struct net_device *dev)
1793{
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810}
1811
1812static int
1813de4x5_close(struct net_device *dev)
1814{
1815 struct de4x5_private *lp = netdev_priv(dev);
1816 u_long iobase = dev->base_addr;
1817 s32 imr, omr;
1818
1819 disable_ast(dev);
1820
1821 netif_stop_queue(dev);
1822
1823 if (de4x5_debug & DEBUG_CLOSE) {
1824 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1825 dev->name, inl(DE4X5_STS));
1826 }
1827
1828 /*
1829 ** We stop the DE4X5 here... mask interrupts and stop TX & RX
1830 */
1831 DISABLE_IRQs;
1832 STOP_DE4X5;
1833
1834 /* Free the associated irq */
1835 free_irq(dev->irq, dev);
1836 lp->state = CLOSED;
1837
1838 /* Free any socket buffers */
1839 de4x5_free_rx_buffs(dev);
1840 de4x5_free_tx_buffs(dev);
1841
1842 /* Put the adapter to sleep to save power */
1843 yawn(dev, SLEEP);
1844
1845 return 0;
1846}
1847
1848static struct net_device_stats *
1849de4x5_get_stats(struct net_device *dev)
1850{
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857}
1858
1859static void
1860de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861{
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
1882 if (lp->pktStats.bins[0] == 0) { /* Reset counters */
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885}
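
/*
** A hedged closed form of the binning loop above (illustrative helper,
** not used by the driver): packets land in DE4X5_PKT_BIN_SZ-wide bins,
** bins[0] mirrors the packet total and oversize frames are not binned.
*/
static inline void de4x5_bin_pkt_example(u_int *bins, int pkt_len)
{
    int bin = pkt_len / DE4X5_PKT_BIN_SZ + 1;

    if (bin < DE4X5_PKT_STAT_SZ - 1)
        bins[bin]++;
}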
1886
1887/*
1888** Removes the TD_IC flag from previous descriptor to improve TX performance.
1889** If the flag is changed on a descriptor that is being read by the hardware,
1890** I assume PCI transaction ordering will mean you are either successful or
1891** just miss asserting the change to the hardware. Anyway you're messing with
1892** a descriptor you don't own, but this shouldn't kill the chip provided
1893** the descriptor register is read only to the hardware.
1894*/
1895static void
1896load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1897{
1898 struct de4x5_private *lp = netdev_priv(dev);
1899 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1900 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1901
1902 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1903 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1904 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1905 lp->tx_skb[lp->tx_new] = skb;
1906 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1907 barrier();
1908
1909 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1910 barrier();
1911}
1912
1913/*
1914** Set or clear the multicast filter for this adaptor.
1915*/
1916static void
1917set_multicast_list(struct net_device *dev)
1918{
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922 /* First, double check that the adapter is open */
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
1936 dev->trans_start = jiffies; /* prevent tx timeout */
1937 }
1938 }
1939}
1940
1941/*
1942** Calculate the hash code and update the logical address filter
1943** from a list of ethernet multicast addresses.
1944** Little endian crc one liner from Matt Thomas, DEC.
1945*/
1946static void
1947SetMulticastFilter(struct net_device *dev)
1948{
1949 struct de4x5_private *lp = netdev_priv(dev);
1950 struct netdev_hw_addr *ha;
1951 u_long iobase = dev->base_addr;
1952 int i, bit, byte;
1953 u16 hashcode;
1954 u32 omr, crc;
1955 char *pa;
1956 unsigned char *addrs;
1957
1958 omr = inl(DE4X5_OMR);
1959 omr &= ~(OMR_PR | OMR_PM);
1960 pa = build_setup_frame(dev, ALL); /* Build the basic frame */
1961
1962 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1963 omr |= OMR_PM; /* Pass all multicasts */
1964 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1965 netdev_for_each_mc_addr(ha, dev) {
1966 crc = ether_crc_le(ETH_ALEN, ha->addr);
1967 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
1968
1969 byte = hashcode >> 3; /* bit[3-8] -> byte in filter */
1970 bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
1971
1972 byte <<= 1; /* calc offset into setup frame */
1973 if (byte & 0x02) {
1974 byte -= 1;
1975 }
1976 lp->setup_frame[byte] |= bit;
1977 }
1978 } else { /* Perfect filtering */
1979 netdev_for_each_mc_addr(ha, dev) {
1980 addrs = ha->addr;
1981 for (i=0; i<ETH_ALEN; i++) {
1982 *(pa + (i&1)) = *addrs++;
1983 if (i & 0x01) pa += 4;
1984 }
1985 }
1986 }
1987 outl(omr, DE4X5_OMR);
1988}
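
/*
** A hedged sketch of the hash placement above (hypothetical helper):
** the 9 LSBs of the little-endian CRC select one of 512 filter bits,
** and the byte offset is then adjusted for the way 16-bit words are
** interleaved into the setup frame.
*/
static inline void de4x5_hash_bit_example(u_char *setup_frame, u32 crc)
{
    u16 hashcode = crc & HASH_BITS;     /* 9 LSBs of the CRC */
    int byte = hashcode >> 3;           /* bits [3:8] -> byte index */
    int bit = 1 << (hashcode & 0x07);   /* bits [0:2] -> bit in byte */

    byte <<= 1;                         /* setup frame interleaving */
    if (byte & 0x02)
        byte -= 1;
    setup_frame[byte] |= bit;
}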
1989
1990#ifdef CONFIG_EISA
1991
1992static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
1994static int __init de4x5_eisa_probe (struct device *gendev)
1995{
1996 struct eisa_device *edev;
1997 u_long iobase;
1998 u_char irq, regval;
1999 u_short vendor;
2000 u32 cfid;
2001 int status, device;
2002 struct net_device *dev;
2003 struct de4x5_private *lp;
2004
2005 edev = to_eisa_device (gendev);
2006 iobase = edev->base_addr;
2007
2008 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2009 return -EBUSY;
2010
2011 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2012 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2013 status = -EBUSY;
2014 goto release_reg_1;
2015 }
2016
2017 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2018 status = -ENOMEM;
2019 goto release_reg_2;
2020 }
2021 lp = netdev_priv(dev);
2022
2023 cfid = (u32) inl(PCI_CFID);
2024 lp->cfrv = (u_short) inl(PCI_CFRV);
2025 device = (cfid >> 8) & 0x00ffff00;
2026 vendor = (u_short) cfid;
2027
2028 /* Read the EISA Configuration Registers */
2029 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2030#ifdef CONFIG_ALPHA
2031 /* Looks like the Jensen firmware (rev 2.2) doesn't really
2032 * care about the EISA configuration, and thus doesn't
2033 * configure the PLX bridge properly. Oh well... Simply mimic
2034 * the EISA config file to sort it out. */
2035
2036 /* EISA REG1: Assert DecChip 21040 HW Reset */
2037 outb (ER1_IAM | 1, EISA_REG1);
2038 mdelay (1);
2039
2040 /* EISA REG1: Deassert DecChip 21040 HW Reset */
2041 outb (ER1_IAM, EISA_REG1);
2042 mdelay (1);
2043
2044 /* EISA REG3: R/W Burst Transfer Enable */
2045 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2046
2047 /* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
2048 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2049#endif
2050 irq = de4x5_irq[(regval >> 1) & 0x03];
2051
2052 if (is_DC2114x) {
2053 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2054 }
2055 lp->chipset = device;
2056 lp->bus = EISA;
2057
2058 /* Write the PCI Configuration Registers */
2059 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2060 outl(0x00006000, PCI_CFLT);
2061 outl(iobase, PCI_CBIO);
2062
2063 DevicePresent(dev, EISA_APROM);
2064
2065 dev->irq = irq;
2066
2067 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2068 return 0;
2069 }
2070
2071 free_netdev (dev);
2072 release_reg_2:
2073 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2074 release_reg_1:
2075 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2076
2077 return status;
2078}
2079
2080static int __devexit de4x5_eisa_remove (struct device *device)
2081{
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094}
2095
2096static struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 }, /* 0 is the board name index... */
2098 { "" }
2099};
2100MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = __devexit_p (de4x5_eisa_remove),
2108 }
2109};
2111#endif
2112
2113#ifdef CONFIG_PCI
2114
2115/*
2116** This function searches the current bus (which is >0) for a DECchip with an
2117** SROM, so that in multiport cards that have one SROM shared between multiple
2118** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
2119** For single port cards this is a time waster...
2120*/
2121static void __devinit
2122srom_search(struct net_device *dev, struct pci_dev *pdev)
2123{
2124 u_char pb;
2125 u_short vendor, status;
2126 u_int irq = 0, device;
2127 u_long iobase = 0; /* Clear upper 32 bits in Alphas */
2128 int i, j;
2129 struct de4x5_private *lp = netdev_priv(dev);
2130 struct list_head *walk;
2131
2132 list_for_each(walk, &pdev->bus_list) {
2133 struct pci_dev *this_dev = pci_dev_b(walk);
2134
2135 /* Skip the pci_bus list entry */
2136 if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
2137
2138 vendor = this_dev->vendor;
2139 device = this_dev->device << 8;
2140 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2141
2142 /* Get the chip configuration revision register */
2143 pb = this_dev->bus->number;
2144
2145 /* Set the device number information */
2146 lp->device = PCI_SLOT(this_dev->devfn);
2147 lp->bus_num = pb;
2148
2149 /* Set the chipset information */
2150 if (is_DC2114x) {
2151 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2152 ? DC21142 : DC21143);
2153 }
2154 lp->chipset = device;
2155
2156 /* Get the board I/O address (64 bits on sparc64) */
2157 iobase = pci_resource_start(this_dev, 0);
2158
2159 /* Fetch the IRQ to be used */
2160 irq = this_dev->irq;
2161 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2162
2163 /* Check if I/O accesses are enabled */
2164 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2165 if (!(status & PCI_COMMAND_IO)) continue;
2166
2167 /* Search for a valid SROM attached to this DECchip */
2168 DevicePresent(dev, DE4X5_APROM);
2169 for (j=0, i=0; i<ETH_ALEN; i++) {
2170 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2171 }
2172 if (j != 0 && j != 6 * 0xff) {
2173 last.chipset = device;
2174 last.bus = pb;
2175 last.irq = irq;
2176 for (i=0; i<ETH_ALEN; i++) {
2177 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2178 }
2179 return;
2180 }
2181 }
2182}
2183
2184/*
2185** PCI bus I/O device probe
2186** NB: PCI I/O accesses and Bus Mastering are enabled by the PCI BIOS, not
2187** the driver. Some PCI BIOS's, pre V2.1, need the slot + features to be
2188** enabled by the user first in the set up utility. Hence we just check for
2189** enabled features and silently ignore the card if they're not.
2190**
2191** STOP PRESS: Some BIOS's __require__ the driver to enable the bus mastering
2192** bit. Here, check for I/O accesses and then set BM. If you put the card in
2193** a non BM slot, you're on your own (and complain to the PC vendor that your
2194** PC doesn't conform to the PCI standard)!
2195**
2196** This function is only compatible with the *latest* 2.1.x kernels. For 2.0.x
2197** kernels use the V0.535[n] drivers.
2198*/
2199
2200static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
2201 const struct pci_device_id *ent)
2202{
2203 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2204 u_short vendor, status;
2205 u_int irq = 0, device;
2206 u_long iobase = 0; /* Clear upper 32 bits in Alphas */
2207 int error;
2208 struct net_device *dev;
2209 struct de4x5_private *lp;
2210
2211 dev_num = PCI_SLOT(pdev->devfn);
2212 pb = pdev->bus->number;
2213
2214 if (io) { /* probe a single PCI device */
2215 pbus = (u_short)(io >> 8);
2216 dnum = (u_short)(io & 0xff);
2217 if ((pbus != pb) || (dnum != dev_num))
2218 return -ENODEV;
2219 }
2220
2221 vendor = pdev->vendor;
2222 device = pdev->device << 8;
2223 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2224 return -ENODEV;
2225
2226 /* Ok, the device seems to be for us. */
2227 if ((error = pci_enable_device (pdev)))
2228 return error;
2229
2230 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2231 error = -ENOMEM;
2232 goto disable_dev;
2233 }
2234
2235 lp = netdev_priv(dev);
2236 lp->bus = PCI;
2237 lp->bus_num = 0;
2238
2239 /* Search for an SROM on this bus */
2240 if (lp->bus_num != pb) {
2241 lp->bus_num = pb;
2242 srom_search(dev, pdev);
2243 }
2244
2245 /* Get the chip configuration revision register */
2246 lp->cfrv = pdev->revision;
2247
2248 /* Set the device number information */
2249 lp->device = dev_num;
2250 lp->bus_num = pb;
2251
2252 /* Set the chipset information */
2253 if (is_DC2114x) {
2254 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2255 }
2256 lp->chipset = device;
2257
2258 /* Get the board I/O address (64 bits on sparc64) */
2259 iobase = pci_resource_start(pdev, 0);
2260
2261 /* Fetch the IRQ to be used */
2262 irq = pdev->irq;
2263 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2264 error = -ENODEV;
2265 goto free_dev;
2266 }
2267
2268 /* Check if I/O accesses and Bus Mastering are enabled */
2269 pci_read_config_word(pdev, PCI_COMMAND, &status);
2270#ifdef __powerpc__
2271 if (!(status & PCI_COMMAND_IO)) {
2272 status |= PCI_COMMAND_IO;
2273 pci_write_config_word(pdev, PCI_COMMAND, status);
2274 pci_read_config_word(pdev, PCI_COMMAND, &status);
2275 }
2276#endif /* __powerpc__ */
2277 if (!(status & PCI_COMMAND_IO)) {
2278 error = -ENODEV;
2279 goto free_dev;
2280 }
2281
2282 if (!(status & PCI_COMMAND_MASTER)) {
2283 status |= PCI_COMMAND_MASTER;
2284 pci_write_config_word(pdev, PCI_COMMAND, status);
2285 pci_read_config_word(pdev, PCI_COMMAND, &status);
2286 }
2287 if (!(status & PCI_COMMAND_MASTER)) {
2288 error = -ENODEV;
2289 goto free_dev;
2290 }
2291
2292 /* Check the latency timer for values >= 0x60 */
2293 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2294 if (timer < 0x60) {
2295 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2296 }
2297
2298 DevicePresent(dev, DE4X5_APROM);
2299
2300 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2301 error = -EBUSY;
2302 goto free_dev;
2303 }
2304
2305 dev->irq = irq;
2306
2307 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2308 goto release;
2309 }
2310
2311 return 0;
2312
2313 release:
2314 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2315 free_dev:
2316 free_netdev (dev);
2317 disable_dev:
2318 pci_disable_device (pdev);
2319 return error;
2320}
2321
2322static void __devexit de4x5_pci_remove (struct pci_dev *pdev)
2323{
2324 struct net_device *dev;
2325 u_long iobase;
2326
2327 dev = dev_get_drvdata(&pdev->dev);
2328 iobase = dev->base_addr;
2329
2330 unregister_netdev (dev);
2331 free_netdev (dev);
2332 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2333 pci_disable_device (pdev);
2334}
2335
2336static struct pci_device_id de4x5_pci_tbl[] = {
2337 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2338 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2339 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2340 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2341 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2342 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2343 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2344 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2345 { },
2346};
2347
2348static struct pci_driver de4x5_pci_driver = {
2349 .name = "de4x5",
2350 .id_table = de4x5_pci_tbl,
2351 .probe = de4x5_pci_probe,
2352 .remove = __devexit_p (de4x5_pci_remove),
2353};
2354
2355#endif
2356
2357/*
2358** Auto configure the media here rather than setting the port at compile
2359** time. This routine is called by de4x5_init() and when a loss of media is
2360** detected (excessive collisions, loss of carrier, no carrier or link fail
2361** [TP] or no recent receive activity) to check whether the user has been
2362** sneaky and changed the port on us.
2363*/
2364static int
2365autoconf_media(struct net_device *dev)
2366{
2367 struct de4x5_private *lp = netdev_priv(dev);
2368 u_long iobase = dev->base_addr;
2369
2370 disable_ast(dev);
2371
2372 lp->c_media = AUTO; /* Bogus last media */
2373 inl(DE4X5_MFC); /* Zero the lost frames counter */
2374 lp->media = INIT;
2375 lp->tcount = 0;
2376
2377 de4x5_ast(dev);
2378
2379 return lp->media;
2380}
2381
2382/*
2383** Autoconfigure the media when using the DC21040. AUI cannot be distinguished
2384** from BNC as the port has a jumper to set thick or thin wire. When set for
2385** BNC, the BNC port will indicate activity if it's not terminated correctly.
2386** The only way to test for that is to place a loopback packet onto the
2387** network and watch for errors. Since we're messing with the interrupt mask
2388** register, disable the board interrupts and do not allow any more packets to
2389** be queued to the hardware. Re-enable everything only when the media is
2390** found.
2391** I may have to "age out" locally queued packets so that the higher layer
2392** timeouts don't effectively duplicate packets on the network.
2393*/
2394static int
2395dc21040_autoconf(struct net_device *dev)
2396{
2397 struct de4x5_private *lp = netdev_priv(dev);
2398 u_long iobase = dev->base_addr;
2399 int next_tick = DE4X5_AUTOSENSE_MS;
2400 s32 imr;
2401
2402 switch (lp->media) {
2403 case INIT:
2404 DISABLE_IRQs;
2405 lp->tx_enable = false;
2406 lp->timeout = -1;
2407 de4x5_save_skbs(dev);
2408 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2409 lp->media = TP;
2410 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2411 lp->media = BNC_AUI;
2412 } else if (lp->autosense == EXT_SIA) {
2413 lp->media = EXT_SIA;
2414 } else {
2415 lp->media = NC;
2416 }
2417 lp->local_state = 0;
2418 next_tick = dc21040_autoconf(dev);
2419 break;
2420
2421 case TP:
2422 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2423 TP_SUSPECT, test_tp);
2424 break;
2425
2426 case TP_SUSPECT:
2427 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2428 break;
2429
2430 case BNC:
2431 case AUI:
2432 case BNC_AUI:
2433 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2434 BNC_AUI_SUSPECT, ping_media);
2435 break;
2436
2437 case BNC_AUI_SUSPECT:
2438 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2439 break;
2440
2441 case EXT_SIA:
2442 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2443 NC, EXT_SIA_SUSPECT, ping_media);
2444 break;
2445
2446 case EXT_SIA_SUSPECT:
2447 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2448 break;
2449
2450 case NC:
2451 /* default to TP for all */
2452 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2453 if (lp->media != lp->c_media) {
2454 de4x5_dbg_media(dev);
2455 lp->c_media = lp->media;
2456 }
2457 lp->media = INIT;
2458 lp->tx_enable = false;
2459 break;
2460 }
2461
2462 return next_tick;
2463}
2464
2465static int
2466dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2467 int next_state, int suspect_state,
2468 int (*fn)(struct net_device *, int))
2469{
2470 struct de4x5_private *lp = netdev_priv(dev);
2471 int next_tick = DE4X5_AUTOSENSE_MS;
2472 int linkBad;
2473
2474 switch (lp->local_state) {
2475 case 0:
2476 reset_init_sia(dev, csr13, csr14, csr15);
2477 lp->local_state++;
2478 next_tick = 500;
2479 break;
2480
2481 case 1:
2482 if (!lp->tx_enable) {
2483 linkBad = fn(dev, timeout);
2484 if (linkBad < 0) {
2485 next_tick = linkBad & ~TIMER_CB;
2486 } else {
2487 if (linkBad && (lp->autosense == AUTO)) {
2488 lp->local_state = 0;
2489 lp->media = next_state;
2490 } else {
2491 de4x5_init_connection(dev);
2492 }
2493 }
2494 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2495 lp->media = suspect_state;
2496 next_tick = 3000;
2497 }
2498 break;
2499 }
2500
2501 return next_tick;
2502}
2503
2504static int
2505de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2506 int (*fn)(struct net_device *, int),
2507 int (*asfn)(struct net_device *))
2508{
2509 struct de4x5_private *lp = netdev_priv(dev);
2510 int next_tick = DE4X5_AUTOSENSE_MS;
2511 int linkBad;
2512
2513 switch (lp->local_state) {
2514 case 1:
2515 if (lp->linkOK) {
2516 lp->media = prev_state;
2517 } else {
2518 lp->local_state++;
2519 next_tick = asfn(dev);
2520 }
2521 break;
2522
2523 case 2:
2524 linkBad = fn(dev, timeout);
2525 if (linkBad < 0) {
2526 next_tick = linkBad & ~TIMER_CB;
2527 } else if (!linkBad) {
2528 lp->local_state--;
2529 lp->media = prev_state;
2530 } else {
2531 lp->media = INIT;
2532 lp->tcount++;
2533 }
2534 }
2535
2536 return next_tick;
2537}
2538
2539/*
2540** Autoconfigure the media when using the DC21041. AUI needs to be tested
2541** before BNC, because the BNC port will indicate activity if it's not
2542** terminated correctly. The only way to test for that is to place a loopback
2543** packet onto the network and watch for errors. Since we're messing with
2544** the interrupt mask register, disable the board interrupts and do not allow
2545** any more packets to be queued to the hardware. Re-enable everything only
2546** when the media is found.
2547*/
2548static int
2549dc21041_autoconf(struct net_device *dev)
2550{
2551 struct de4x5_private *lp = netdev_priv(dev);
2552 u_long iobase = dev->base_addr;
2553 s32 sts, irqs, irq_mask, imr, omr;
2554 int next_tick = DE4X5_AUTOSENSE_MS;
2555
2556 switch (lp->media) {
2557 case INIT:
2558 DISABLE_IRQs;
2559 lp->tx_enable = false;
2560 lp->timeout = -1;
2561 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2562 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2563 lp->media = TP; /* On chip auto negotiation is broken */
2564 } else if (lp->autosense == TP) {
2565 lp->media = TP;
2566 } else if (lp->autosense == BNC) {
2567 lp->media = BNC;
2568 } else if (lp->autosense == AUI) {
2569 lp->media = AUI;
2570 } else {
2571 lp->media = NC;
2572 }
2573 lp->local_state = 0;
2574 next_tick = dc21041_autoconf(dev);
2575 break;
2576
2577 case TP_NW:
2578 if (lp->timeout < 0) {
2579 omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
2580 outl(omr | OMR_FDX, DE4X5_OMR);
2581 }
2582 irqs = STS_LNF | STS_LNP;
2583 irq_mask = IMR_LFM | IMR_LPM;
2584 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2585 if (sts < 0) {
2586 next_tick = sts & ~TIMER_CB;
2587 } else {
2588 if (sts & STS_LNP) {
2589 lp->media = ANS;
2590 } else {
2591 lp->media = AUI;
2592 }
2593 next_tick = dc21041_autoconf(dev);
2594 }
2595 break;
2596
2597 case ANS:
2598 if (!lp->tx_enable) {
2599 irqs = STS_LNP;
2600 irq_mask = IMR_LPM;
2601 sts = test_ans(dev, irqs, irq_mask, 3000);
2602 if (sts < 0) {
2603 next_tick = sts & ~TIMER_CB;
2604 } else {
2605 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2606 lp->media = TP;
2607 next_tick = dc21041_autoconf(dev);
2608 } else {
2609 lp->local_state = 1;
2610 de4x5_init_connection(dev);
2611 }
2612 }
2613 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2614 lp->media = ANS_SUSPECT;
2615 next_tick = 3000;
2616 }
2617 break;
2618
2619 case ANS_SUSPECT:
2620 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2621 break;
2622
2623 case TP:
2624 if (!lp->tx_enable) {
2625 if (lp->timeout < 0) {
2626 omr = inl(DE4X5_OMR); /* Set up half duplex for TP */
2627 outl(omr & ~OMR_FDX, DE4X5_OMR);
2628 }
2629 irqs = STS_LNF | STS_LNP;
2630 irq_mask = IMR_LFM | IMR_LPM;
2631 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2632 if (sts < 0) {
2633 next_tick = sts & ~TIMER_CB;
2634 } else {
2635 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2636 if (inl(DE4X5_SISR) & SISR_NRA) {
2637 lp->media = AUI; /* Non selected port activity */
2638 } else {
2639 lp->media = BNC;
2640 }
2641 next_tick = dc21041_autoconf(dev);
2642 } else {
2643 lp->local_state = 1;
2644 de4x5_init_connection(dev);
2645 }
2646 }
2647 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2648 lp->media = TP_SUSPECT;
2649 next_tick = 3000;
2650 }
2651 break;
2652
2653 case TP_SUSPECT:
2654 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2655 break;
2656
2657 case AUI:
2658 if (!lp->tx_enable) {
2659 if (lp->timeout < 0) {
2660 omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
2661 outl(omr & ~OMR_FDX, DE4X5_OMR);
2662 }
2663 irqs = 0;
2664 irq_mask = 0;
2665 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2666 if (sts < 0) {
2667 next_tick = sts & ~TIMER_CB;
2668 } else {
2669 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2670 lp->media = BNC;
2671 next_tick = dc21041_autoconf(dev);
2672 } else {
2673 lp->local_state = 1;
2674 de4x5_init_connection(dev);
2675 }
2676 }
2677 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2678 lp->media = AUI_SUSPECT;
2679 next_tick = 3000;
2680 }
2681 break;
2682
2683 case AUI_SUSPECT:
2684 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2685 break;
2686
2687 case BNC:
2688 switch (lp->local_state) {
2689 case 0:
2690 if (lp->timeout < 0) {
2691 omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
2692 outl(omr & ~OMR_FDX, DE4X5_OMR);
2693 }
2694 irqs = 0;
2695 irq_mask = 0;
2696 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2697 if (sts < 0) {
2698 next_tick = sts & ~TIMER_CB;
2699 } else {
2700 lp->local_state++; /* Ensure media connected */
2701 next_tick = dc21041_autoconf(dev);
2702 }
2703 break;
2704
2705 case 1:
2706 if (!lp->tx_enable) {
2707 if ((sts = ping_media(dev, 3000)) < 0) {
2708 next_tick = sts & ~TIMER_CB;
2709 } else {
2710 if (sts) {
2711 lp->local_state = 0;
2712 lp->media = NC;
2713 } else {
2714 de4x5_init_connection(dev);
2715 }
2716 }
2717 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2718 lp->media = BNC_SUSPECT;
2719 next_tick = 3000;
2720 }
2721 break;
2722 }
2723 break;
2724
2725 case BNC_SUSPECT:
2726 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2727 break;
2728
2729 case NC:
2730 omr = inl(DE4X5_OMR); /* Set up full duplex for the autonegotiate */
2731 outl(omr | OMR_FDX, DE4X5_OMR);
2732 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);/* Initialise the SIA */
2733 if (lp->media != lp->c_media) {
2734 de4x5_dbg_media(dev);
2735 lp->c_media = lp->media;
2736 }
2737 lp->media = INIT;
2738 lp->tx_enable = false;
2739 break;
2740 }
2741
2742 return next_tick;
2743}
2744
2745/*
2746** Some autonegotiation chips are broken in that they do not return the
2747** acknowledge bit (anlpa & MII_ANLPA_ACK) in the link partner advertisement
2748** register, except at the first power up negotiation.
2749*/
2750static int
2751dc21140m_autoconf(struct net_device *dev)
2752{
2753 struct de4x5_private *lp = netdev_priv(dev);
2754 int ana, anlpa, cap, cr, slnk, sr;
2755 int next_tick = DE4X5_AUTOSENSE_MS;
2756 u_long imr, omr, iobase = dev->base_addr;
2757
2758 switch(lp->media) {
2759 case INIT:
2760 if (lp->timeout < 0) {
2761 DISABLE_IRQs;
2762 lp->tx_enable = false;
2763 lp->linkOK = 0;
2764 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2765 }
2766 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2767 next_tick &= ~TIMER_CB;
2768 } else {
2769 if (lp->useSROM) {
2770 if (srom_map_media(dev) < 0) {
2771 lp->tcount++;
2772 return next_tick;
2773 }
2774 srom_exec(dev, lp->phy[lp->active].gep);
2775 if (lp->infoblock_media == ANS) {
2776 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2777 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2778 }
2779 } else {
2780 lp->tmp = MII_SR_ASSC; /* Fake out the MII speed set */
2781 SET_10Mb;
2782 if (lp->autosense == _100Mb) {
2783 lp->media = _100Mb;
2784 } else if (lp->autosense == _10Mb) {
2785 lp->media = _10Mb;
2786 } else if ((lp->autosense == AUTO) &&
2787 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2788 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2789 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2790 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2791 lp->media = ANS;
2792 } else if (lp->autosense == AUTO) {
2793 lp->media = SPD_DET;
2794 } else if (is_spd_100(dev) && is_100_up(dev)) {
2795 lp->media = _100Mb;
2796 } else {
2797 lp->media = NC;
2798 }
2799 }
2800 lp->local_state = 0;
2801 next_tick = dc21140m_autoconf(dev);
2802 }
2803 break;
2804
2805 case ANS:
2806 switch (lp->local_state) {
2807 case 0:
2808 if (lp->timeout < 0) {
2809 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2810 }
2811 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2812 if (cr < 0) {
2813 next_tick = cr & ~TIMER_CB;
2814 } else {
2815 if (cr) {
2816 lp->local_state = 0;
2817 lp->media = SPD_DET;
2818 } else {
2819 lp->local_state++;
2820 }
2821 next_tick = dc21140m_autoconf(dev);
2822 }
2823 break;
2824
2825 case 1:
2826 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2827 next_tick = sr & ~TIMER_CB;
2828 } else {
2829 lp->media = SPD_DET;
2830 lp->local_state = 0;
2831 if (sr) { /* Success! */
2832 lp->tmp = MII_SR_ASSC;
2833 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2834 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2835 if (!(anlpa & MII_ANLPA_RF) &&
2836 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2837 if (cap & MII_ANA_100M) {
2838 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2839 lp->media = _100Mb;
2840 } else if (cap & MII_ANA_10M) {
2841 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2843 lp->media = _10Mb;
2844 }
2845 }
2846 } /* Auto Negotiation failed to finish */
2847 next_tick = dc21140m_autoconf(dev);
2848 } /* Auto Negotiation failed to start */
2849 break;
2850 }
2851 break;
2852
2853 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
2854 if (lp->timeout < 0) {
2855 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2856 (~gep_rd(dev) & GEP_LNP));
2857 SET_100Mb_PDET;
2858 }
2859 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2860 next_tick = slnk & ~TIMER_CB;
2861 } else {
2862 if (is_spd_100(dev) && is_100_up(dev)) {
2863 lp->media = _100Mb;
2864 } else if ((!is_spd_100(dev) && (is_10_up(dev) & lp->tmp))) {
2865 lp->media = _10Mb;
2866 } else {
2867 lp->media = NC;
2868 }
2869 next_tick = dc21140m_autoconf(dev);
2870 }
2871 break;
2872
2873 case _100Mb: /* Set 100Mb/s */
2874 next_tick = 3000;
2875 if (!lp->tx_enable) {
2876 SET_100Mb;
2877 de4x5_init_connection(dev);
2878 } else {
2879 if (!lp->linkOK && (lp->autosense == AUTO)) {
2880 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2881 lp->media = INIT;
2882 lp->tcount++;
2883 next_tick = DE4X5_AUTOSENSE_MS;
2884 }
2885 }
2886 }
2887 break;
2888
2889 case BNC:
2890 case AUI:
2891 case _10Mb: /* Set 10Mb/s */
2892 next_tick = 3000;
2893 if (!lp->tx_enable) {
2894 SET_10Mb;
2895 de4x5_init_connection(dev);
2896 } else {
2897 if (!lp->linkOK && (lp->autosense == AUTO)) {
2898 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2899 lp->media = INIT;
2900 lp->tcount++;
2901 next_tick = DE4X5_AUTOSENSE_MS;
2902 }
2903 }
2904 }
2905 break;
2906
2907 case NC:
2908 if (lp->media != lp->c_media) {
2909 de4x5_dbg_media(dev);
2910 lp->c_media = lp->media;
2911 }
2912 lp->media = INIT;
2913 lp->tx_enable = false;
2914 break;
2915 }
2916
2917 return next_tick;
2918}
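
/*
** [Illustrative sketch - not driver code] The ANS resolution above boils
** down to AND-ing our advertisement (ANA) with the link partner's
** advertisement (ANLPA) and picking the highest common ability. The bit
** positions below are the standard IEEE 802.3 clause 28 ones, which the
** MII_ANA_* masks are assumed to mirror.
*/
static int demo_resolve_common_media(int ana, int anlpa, int *fdx)
{
    int cap = ana & anlpa;			/* abilities both ends advertise */

    if (cap & 0x0100) { *fdx = 1; return 100; }	/* 100BASE-TX full duplex */
    if (cap & 0x0080) { *fdx = 0; return 100; }	/* 100BASE-TX half duplex */
    if (cap & 0x0040) { *fdx = 1; return 10; }	/* 10BASE-T full duplex */
    if (cap & 0x0020) { *fdx = 0; return 10; }	/* 10BASE-T half duplex */
    return 0;					/* no common medium */
}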
2919
2920/*
2921** This routine may be merged into dc21140m_autoconf() sometime as I'm
2922** changing how I figure out the media - but trying to keep it backwards
2923** compatible with the de500-xa and de500-aa.
2924** Whether it's BNC, AUI, SYM or MII is sorted out in the infoblock
2925** functions and set during de4x5_mac_port() and/or de4x5_reset_phy().
2926** This routine just has to figure out whether 10Mb/s or 100Mb/s is
2927** active.
2928** When autonegotiation is working, the ANS part searches the SROM for
2929** the highest common speed (TP) link that both ends can run, and checks
2930** whether that link can be full duplex. That infoblock is executed and the link speed is then set.
2931**
2932** Only _10Mb and _100Mb are tested here.
2933*/
2934static int
2935dc2114x_autoconf(struct net_device *dev)
2936{
2937 struct de4x5_private *lp = netdev_priv(dev);
2938 u_long iobase = dev->base_addr;
2939 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2940 int next_tick = DE4X5_AUTOSENSE_MS;
2941
2942 switch (lp->media) {
2943 case INIT:
2944 if (lp->timeout < 0) {
2945 DISABLE_IRQs;
2946 lp->tx_enable = false;
2947 lp->linkOK = 0;
2948 lp->timeout = -1;
2949 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2950 if (lp->params.autosense & ~AUTO) {
2951 srom_map_media(dev); /* Fixed media requested */
2952 if (lp->media != lp->params.autosense) {
2953 lp->tcount++;
2954 lp->media = INIT;
2955 return next_tick;
2956 }
2957 lp->media = INIT;
2958 }
2959 }
2960 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2961 next_tick &= ~TIMER_CB;
2962 } else {
2963 if (lp->autosense == _100Mb) {
2964 lp->media = _100Mb;
2965 } else if (lp->autosense == _10Mb) {
2966 lp->media = _10Mb;
2967 } else if (lp->autosense == TP) {
2968 lp->media = TP;
2969 } else if (lp->autosense == BNC) {
2970 lp->media = BNC;
2971 } else if (lp->autosense == AUI) {
2972 lp->media = AUI;
2973 } else {
2974 lp->media = SPD_DET;
2975 if ((lp->infoblock_media == ANS) &&
2976 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2977 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2978 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2979 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2980 lp->media = ANS;
2981 }
2982 }
2983 lp->local_state = 0;
2984 next_tick = dc2114x_autoconf(dev);
2985 }
2986 break;
2987
2988 case ANS:
2989 switch (lp->local_state) {
2990 case 0:
2991 if (lp->timeout < 0) {
2992 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2993 }
2994 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2995 if (cr < 0) {
2996 next_tick = cr & ~TIMER_CB;
2997 } else {
2998 if (cr) {
2999 lp->local_state = 0;
3000 lp->media = SPD_DET;
3001 } else {
3002 lp->local_state++;
3003 }
3004 next_tick = dc2114x_autoconf(dev);
3005 }
3006 break;
3007
3008 case 1:
3009 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3010 if (sr < 0) {
3011 next_tick = sr & ~TIMER_CB;
3012 } else {
3013 lp->media = SPD_DET;
3014 lp->local_state = 0;
3015 if (sr) { /* Success! */
3016 lp->tmp = MII_SR_ASSC;
3017 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3018 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3019 if (!(anlpa & MII_ANLPA_RF) &&
3020 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3021 if (cap & MII_ANA_100M) {
3022 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3023 lp->media = _100Mb;
3024 } else if (cap & MII_ANA_10M) {
3025 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3026 lp->media = _10Mb;
3027 }
3028 }
3029 } /* Auto Negotiation failed to finish */
3030 next_tick = dc2114x_autoconf(dev);
3031 } /* Auto Negotiation failed to start */
3032 break;
3033 }
3034 break;
3035
3036 case AUI:
3037 if (!lp->tx_enable) {
3038 if (lp->timeout < 0) {
3039 omr = inl(DE4X5_OMR); /* Set up half duplex for AUI */
3040 outl(omr & ~OMR_FDX, DE4X5_OMR);
3041 }
3042 irqs = 0;
3043 irq_mask = 0;
3044 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3045 if (sts < 0) {
3046 next_tick = sts & ~TIMER_CB;
3047 } else {
3048 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3049 lp->media = BNC;
3050 next_tick = dc2114x_autoconf(dev);
3051 } else {
3052 lp->local_state = 1;
3053 de4x5_init_connection(dev);
3054 }
3055 }
3056 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3057 lp->media = AUI_SUSPECT;
3058 next_tick = 3000;
3059 }
3060 break;
3061
3062 case AUI_SUSPECT:
3063 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3064 break;
3065
3066 case BNC:
3067 switch (lp->local_state) {
3068 case 0:
3069 if (lp->timeout < 0) {
3070 omr = inl(DE4X5_OMR); /* Set up half duplex for BNC */
3071 outl(omr & ~OMR_FDX, DE4X5_OMR);
3072 }
3073 irqs = 0;
3074 irq_mask = 0;
3075 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3076 if (sts < 0) {
3077 next_tick = sts & ~TIMER_CB;
3078 } else {
3079 lp->local_state++; /* Ensure media connected */
3080 next_tick = dc2114x_autoconf(dev);
3081 }
3082 break;
3083
3084 case 1:
3085 if (!lp->tx_enable) {
3086 if ((sts = ping_media(dev, 3000)) < 0) {
3087 next_tick = sts & ~TIMER_CB;
3088 } else {
3089 if (sts) {
3090 lp->local_state = 0;
3091 lp->tcount++;
3092 lp->media = INIT;
3093 } else {
3094 de4x5_init_connection(dev);
3095 }
3096 }
3097 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3098 lp->media = BNC_SUSPECT;
3099 next_tick = 3000;
3100 }
3101 break;
3102 }
3103 break;
3104
3105 case BNC_SUSPECT:
3106 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3107 break;
3108
3109 case SPD_DET: /* Choose 10Mb/s or 100Mb/s */
3110 if (srom_map_media(dev) < 0) {
3111 lp->tcount++;
3112 lp->media = INIT;
3113 return next_tick;
3114 }
3115 if (lp->media == _100Mb) {
3116 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3117 lp->media = SPD_DET;
3118 return slnk & ~TIMER_CB;
3119 }
3120 } else {
3121 if (wait_for_link(dev) < 0) {
3122 lp->media = SPD_DET;
3123 return PDET_LINK_WAIT;
3124 }
3125 }
3126 if (lp->media == ANS) { /* Do MII parallel detection */
3127 if (is_spd_100(dev)) {
3128 lp->media = _100Mb;
3129 } else {
3130 lp->media = _10Mb;
3131 }
3132 next_tick = dc2114x_autoconf(dev);
3133 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3134 (((lp->media == _10Mb) || (lp->media == TP) ||
3135 (lp->media == BNC) || (lp->media == AUI)) &&
3136 is_10_up(dev))) {
3137 next_tick = dc2114x_autoconf(dev);
3138 } else {
3139 lp->tcount++;
3140 lp->media = INIT;
3141 }
3142 break;
3143
3144 case _10Mb:
3145 next_tick = 3000;
3146 if (!lp->tx_enable) {
3147 SET_10Mb;
3148 de4x5_init_connection(dev);
3149 } else {
3150 if (!lp->linkOK && (lp->autosense == AUTO)) {
3151 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3152 lp->media = INIT;
3153 lp->tcount++;
3154 next_tick = DE4X5_AUTOSENSE_MS;
3155 }
3156 }
3157 }
3158 break;
3159
3160 case _100Mb:
3161 next_tick = 3000;
3162 if (!lp->tx_enable) {
3163 SET_100Mb;
3164 de4x5_init_connection(dev);
3165 } else {
3166 if (!lp->linkOK && (lp->autosense == AUTO)) {
3167 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3168 lp->media = INIT;
3169 lp->tcount++;
3170 next_tick = DE4X5_AUTOSENSE_MS;
3171 }
3172 }
3173 }
3174 break;
3175
3176 default:
3177 lp->tcount++;
3178	printk("%s: unexpected media state %02x\n", dev->name, lp->media);
3179 lp->media = INIT;
3180 break;
3181 }
3182
3183 return next_tick;
3184}
3185
3186static int
3187srom_autoconf(struct net_device *dev)
3188{
3189 struct de4x5_private *lp = netdev_priv(dev);
3190
3191 return lp->infoleaf_fn(dev);
3192}
3193
3194/*
3195** This mapping keeps the original media codes and FDX flag unchanged.
3196** While it isn't strictly necessary, it helps me for the moment...
3197** The early return avoids a media state / SROM media space clash.
3198*/
3199static int
3200srom_map_media(struct net_device *dev)
3201{
3202 struct de4x5_private *lp = netdev_priv(dev);
3203
3204 lp->fdx = false;
3205 if (lp->infoblock_media == lp->media)
3206 return 0;
3207
3208 switch(lp->infoblock_media) {
3209 case SROM_10BASETF:
3210 if (!lp->params.fdx) return -1;
3211	lp->fdx = true;		/* fall through */
3212 case SROM_10BASET:
3213 if (lp->params.fdx && !lp->fdx) return -1;
3214 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3215 lp->media = _10Mb;
3216 } else {
3217 lp->media = TP;
3218 }
3219 break;
3220
3221 case SROM_10BASE2:
3222 lp->media = BNC;
3223 break;
3224
3225 case SROM_10BASE5:
3226 lp->media = AUI;
3227 break;
3228
3229 case SROM_100BASETF:
3230 if (!lp->params.fdx) return -1;
3231	lp->fdx = true;		/* fall through */
3232 case SROM_100BASET:
3233 if (lp->params.fdx && !lp->fdx) return -1;
3234 lp->media = _100Mb;
3235 break;
3236
3237 case SROM_100BASET4:
3238 lp->media = _100Mb;
3239 break;
3240
3241 case SROM_100BASEFF:
3242 if (!lp->params.fdx) return -1;
3243	lp->fdx = true;		/* fall through */
3244 case SROM_100BASEF:
3245 if (lp->params.fdx && !lp->fdx) return -1;
3246 lp->media = _100Mb;
3247 break;
3248
3249 case ANS:
3250 lp->media = ANS;
3251 lp->fdx = lp->params.fdx;
3252 break;
3253
3254 default:
3255 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3256 lp->infoblock_media);
3257	return -1;
3259 }
3260
3261 return 0;
3262}
3263
3264static void
3265de4x5_init_connection(struct net_device *dev)
3266{
3267 struct de4x5_private *lp = netdev_priv(dev);
3268 u_long iobase = dev->base_addr;
3269 u_long flags = 0;
3270
3271 if (lp->media != lp->c_media) {
3272 de4x5_dbg_media(dev);
3273 lp->c_media = lp->media; /* Stop scrolling media messages */
3274 }
3275
3276 spin_lock_irqsave(&lp->lock, flags);
3277 de4x5_rst_desc_ring(dev);
3278 de4x5_setup_intr(dev);
3279 lp->tx_enable = true;
3280 spin_unlock_irqrestore(&lp->lock, flags);
3281 outl(POLL_DEMAND, DE4X5_TPD);
3282
3283 netif_wake_queue(dev);
3284}
3285
3286/*
3287** General PHY reset function. Some MII devices don't reset correctly
3288** since their MII address pins can float at voltages that are dependent
3289** on the signal pin use. Do a double reset to make sure the PHY really resets.
3290*/
3291static int
3292de4x5_reset_phy(struct net_device *dev)
3293{
3294 struct de4x5_private *lp = netdev_priv(dev);
3295 u_long iobase = dev->base_addr;
3296 int next_tick = 0;
3297
3298 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3299 if (lp->timeout < 0) {
3300 if (lp->useSROM) {
3301 if (lp->phy[lp->active].rst) {
3302 srom_exec(dev, lp->phy[lp->active].rst);
3303 srom_exec(dev, lp->phy[lp->active].rst);
3304 } else if (lp->rst) { /* Type 5 infoblock reset */
3305 srom_exec(dev, lp->rst);
3306 srom_exec(dev, lp->rst);
3307 }
3308 } else {
3309 PHY_HARD_RESET;
3310 }
3311 if (lp->useMII) {
3312 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3313 }
3314 }
3315 if (lp->useMII) {
3316 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3317 }
3318 } else if (lp->chipset == DC21140) {
3319 PHY_HARD_RESET;
3320 }
3321
3322 return next_tick;
3323}
3324
3325static int
3326test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3327{
3328 struct de4x5_private *lp = netdev_priv(dev);
3329 u_long iobase = dev->base_addr;
3330 s32 sts, csr12;
3331
3332 if (lp->timeout < 0) {
3333 lp->timeout = msec/100;
3334 if (!lp->useSROM) { /* Already done if by SROM, else dc2104[01] */
3335 reset_init_sia(dev, csr13, csr14, csr15);
3336 }
3337
3338 /* set up the interrupt mask */
3339 outl(irq_mask, DE4X5_IMR);
3340
3341 /* clear all pending interrupts */
3342 sts = inl(DE4X5_STS);
3343 outl(sts, DE4X5_STS);
3344
3345 /* clear csr12 NRA and SRA bits */
3346 if ((lp->chipset == DC21041) || lp->useSROM) {
3347 csr12 = inl(DE4X5_SISR);
3348 outl(csr12, DE4X5_SISR);
3349 }
3350 }
3351
3352 sts = inl(DE4X5_STS) & ~TIMER_CB;
3353
3354 if (!(sts & irqs) && --lp->timeout) {
3355 sts = 100 | TIMER_CB;
3356 } else {
3357 lp->timeout = -1;
3358 }
3359
3360 return sts;
3361}
3362
3363static int
3364test_tp(struct net_device *dev, s32 msec)
3365{
3366 struct de4x5_private *lp = netdev_priv(dev);
3367 u_long iobase = dev->base_addr;
3368 int sisr;
3369
3370 if (lp->timeout < 0) {
3371 lp->timeout = msec/100;
3372 }
3373
3374 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3375
3376 if (sisr && --lp->timeout) {
3377 sisr = 100 | TIMER_CB;
3378 } else {
3379 lp->timeout = -1;
3380 }
3381
3382 return sisr;
3383}
3384
3385/*
3386** Samples the 100Mb Link State Signal. The sample interval is important
3387** because too fast a rate can give erroneous results and confuse the
3388** speed sense algorithm.
3389*/
3390#define SAMPLE_INTERVAL 500 /* ms */
3391#define SAMPLE_DELAY 2000 /* ms */
3392static int
3393test_for_100Mb(struct net_device *dev, int msec)
3394{
3395 struct de4x5_private *lp = netdev_priv(dev);
3396 int gep = 0, ret = ((lp->chipset & ~0x00ff)==DC2114x? -1 :GEP_SLNK);
3397
3398 if (lp->timeout < 0) {
3399 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3400 if (msec > SAMPLE_DELAY) {
3401 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3402 gep = SAMPLE_DELAY | TIMER_CB;
3403 return gep;
3404 } else {
3405 lp->timeout = msec/SAMPLE_INTERVAL;
3406 }
3407 }
3408
3409 if (lp->phy[lp->active].id || lp->useSROM) {
3410 gep = is_100_up(dev) | is_spd_100(dev);
3411 } else {
3412 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3413 }
3414 if (!(gep & ret) && --lp->timeout) {
3415 gep = SAMPLE_INTERVAL | TIMER_CB;
3416 } else {
3417 lp->timeout = -1;
3418 }
3419
3420 return gep;
3421}
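
/*
** [Worked example] With msec = 6500 as used by the callers above, the
** budget is: wait SAMPLE_DELAY (2000ms) first, then take
** (6500 - 2000) / SAMPLE_INTERVAL = 9 samples at 500ms intervals.
*/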
3422
3423static int
3424wait_for_link(struct net_device *dev)
3425{
3426 struct de4x5_private *lp = netdev_priv(dev);
3427
3428 if (lp->timeout < 0) {
3429 lp->timeout = 1;
3430 }
3431
3432 if (lp->timeout--) {
3433 return TIMER_CB;
3434 } else {
3435 lp->timeout = -1;
3436 }
3437
3438 return 0;
3439}
3440
3441/*
3442** Poll an MII register until the bits selected by 'mask' are all set
3443** (pol == true) or all clear (pol == false), or the timer expires.
3444*/
3445static int
3446test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3447{
3448 struct de4x5_private *lp = netdev_priv(dev);
3449 int test;
3450 u_long iobase = dev->base_addr;
3451
3452 if (lp->timeout < 0) {
3453 lp->timeout = msec/100;
3454 }
3455
3456 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3457 test = (reg ^ (pol ? ~0 : 0)) & mask;
3458
3459 if (test && --lp->timeout) {
3460 reg = 100 | TIMER_CB;
3461 } else {
3462 lp->timeout = -1;
3463 }
3464
3465 return reg;
3466}
3467
3468static int
3469is_spd_100(struct net_device *dev)
3470{
3471 struct de4x5_private *lp = netdev_priv(dev);
3472 u_long iobase = dev->base_addr;
3473 int spd;
3474
3475 if (lp->useMII) {
3476 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3477 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3478 spd &= lp->phy[lp->active].spd.mask;
3479 } else if (!lp->useSROM) { /* de500-xa */
3480 spd = ((~gep_rd(dev)) & GEP_SLNK);
3481 } else {
3482 if ((lp->ibn == 2) || !lp->asBitValid)
3483 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3484
3485 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3486 (lp->linkOK & ~lp->asBitValid);
3487 }
3488
3489 return spd;
3490}
3491
3492static int
3493is_100_up(struct net_device *dev)
3494{
3495 struct de4x5_private *lp = netdev_priv(dev);
3496 u_long iobase = dev->base_addr;
3497
3498 if (lp->useMII) {
3499 /* Double read for sticky bits & temporary drops */
3500 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3501 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3502 } else if (!lp->useSROM) { /* de500-xa */
3503 return (~gep_rd(dev)) & GEP_SLNK;
3504 } else {
3505 if ((lp->ibn == 2) || !lp->asBitValid)
3506 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3507
3508 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3509 (lp->linkOK & ~lp->asBitValid);
3510 }
3511}
3512
3513static int
3514is_10_up(struct net_device *dev)
3515{
3516 struct de4x5_private *lp = netdev_priv(dev);
3517 u_long iobase = dev->base_addr;
3518
3519 if (lp->useMII) {
3520 /* Double read for sticky bits & temporary drops */
3521 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3522 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3523 } else if (!lp->useSROM) { /* de500-xa */
3524 return (~gep_rd(dev)) & GEP_LNP;
3525 } else {
3526 if ((lp->ibn == 2) || !lp->asBitValid)
3527 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3528 (~inl(DE4X5_SISR)&SISR_LS10):
3529 0;
3530
3531 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3532 (lp->linkOK & ~lp->asBitValid);
3533 }
3534}
3535
3536static int
3537is_anc_capable(struct net_device *dev)
3538{
3539 struct de4x5_private *lp = netdev_priv(dev);
3540 u_long iobase = dev->base_addr;
3541
3542 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3543 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3544 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3545 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3546 } else {
3547 return 0;
3548 }
3549}
3550
3551/*
3552** Send a packet onto the media and watch for send errors that indicate the
3553** media is bad or unconnected.
3554*/
3555static int
3556ping_media(struct net_device *dev, int msec)
3557{
3558 struct de4x5_private *lp = netdev_priv(dev);
3559 u_long iobase = dev->base_addr;
3560 int sisr;
3561
3562 if (lp->timeout < 0) {
3563 lp->timeout = msec/100;
3564
3565 lp->tmp = lp->tx_new; /* Remember the ring position */
3566 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3567 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3568 outl(POLL_DEMAND, DE4X5_TPD);
3569 }
3570
3571 sisr = inl(DE4X5_SISR);
3572
3573 if ((!(sisr & SISR_NCR)) &&
3574 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3575 (--lp->timeout)) {
3576 sisr = 100 | TIMER_CB;
3577 } else {
3578 if ((!(sisr & SISR_NCR)) &&
3579 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3580 lp->timeout) {
3581 sisr = 0;
3582 } else {
3583 sisr = 1;
3584 }
3585 lp->timeout = -1;
3586 }
3587
3588 return sisr;
3589}
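
/*
** [Illustrative sketch - not driver code] ping_media() above uses the
** usual loopback-probe pattern: queue one frame, poll the descriptor
** until the chip releases it, then inspect the error bits to decide
** whether the medium is alive. The descriptor layout and bit names here
** are hypothetical stand-ins for the real tx_ring entries.
*/
struct demo_desc { volatile unsigned int status; };
#define DEMO_OWN 0x80000000u	/* descriptor still owned by the chip */
#define DEMO_ES  0x00008000u	/* error summary bit */

static int demo_probe_done(struct demo_desc *d, int *media_ok)
{
    if (d->status & DEMO_OWN)
	return 0;			/* chip hasn't finished transmitting */
    *media_ok = !(d->status & DEMO_ES);	/* no error summary => medium alive */
    return 1;
}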
3590
3591/*
3592** This function does two things: on platforms that can use the buffer in
3593** place, it allocates a fresh skb to replace the one about to be passed up;
3594** on copy-only platforms (Alpha, PowerPC, SPARC, or DE4X5_DO_MEMCPY) it allocates an skb into which the packet is copied.
3595*/
3596static struct sk_buff *
3597de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3598{
3599 struct de4x5_private *lp = netdev_priv(dev);
3600 struct sk_buff *p;
3601
3602#if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3603 struct sk_buff *ret;
3604 u_long i=0, tmp;
3605
3606 p = dev_alloc_skb(IEEE802_3_SZ + DE4X5_ALIGN + 2);
3607 if (!p) return NULL;
3608
3609 tmp = virt_to_bus(p->data);
3610 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3611 skb_reserve(p, i);
3612 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3613
3614 ret = lp->rx_skb[index];
3615 lp->rx_skb[index] = p;
3616
3617 if ((u_long) ret > 1) {
3618 skb_put(ret, len);
3619 }
3620
3621 return ret;
3622
3623#else
3624 if (lp->state != OPEN) return (struct sk_buff *)1; /* Fake out the open */
3625
3626 p = dev_alloc_skb(len + 2);
3627 if (!p) return NULL;
3628
3629 skb_reserve(p, 2); /* Align */
3630 if (index < lp->rx_old) { /* Wrapped buffer */
3631 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3632 memcpy(skb_put(p,tlen),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,tlen);
3633 memcpy(skb_put(p,len-tlen),lp->rx_bufs,len-tlen);
3634 } else { /* Linear buffer */
3635 memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
3636 }
3637
3638 return p;
3639#endif
3640}
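
/*
** [Illustrative sketch - not driver code] The skb_reserve() arithmetic
** above is the usual round-up-to-boundary idiom: with a mask equal to
** (alignment - 1), the padding needed to align an address is
** ((addr + mask) & ~mask) - addr. DE4X5_ALIGN is assumed to be such a
** mask (e.g. 31 for 32-byte alignment).
*/
static unsigned long demo_align_pad(unsigned long addr, unsigned long mask)
{
    return ((addr + mask) & ~mask) - addr;	/* 0..mask bytes of padding */
}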
3641
3642static void
3643de4x5_free_rx_buffs(struct net_device *dev)
3644{
3645 struct de4x5_private *lp = netdev_priv(dev);
3646 int i;
3647
3648 for (i=0; i<lp->rxRingSize; i++) {
3649 if ((u_long) lp->rx_skb[i] > 1) {
3650 dev_kfree_skb(lp->rx_skb[i]);
3651 }
3652 lp->rx_ring[i].status = 0;
3653 lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
3654 }
3655}
3656
3657static void
3658de4x5_free_tx_buffs(struct net_device *dev)
3659{
3660 struct de4x5_private *lp = netdev_priv(dev);
3661 int i;
3662
3663 for (i=0; i<lp->txRingSize; i++) {
3664 if (lp->tx_skb[i])
3665 de4x5_free_tx_buff(lp, i);
3666 lp->tx_ring[i].status = 0;
3667 }
3668
3669 /* Unload the locally queued packets */
3670 __skb_queue_purge(&lp->cache.queue);
3671}
3672
3673/*
3674** When a user pulls a connection, the DECchip can end up in a
3675** 'running - waiting for end of transmission' state. This means that we
3676** have to perform a chip soft reset to ensure that we can synchronize
3677** the hardware and software and make any media probes using a loopback
3678** packet meaningful.
3679*/
3680static void
3681de4x5_save_skbs(struct net_device *dev)
3682{
3683 struct de4x5_private *lp = netdev_priv(dev);
3684 u_long iobase = dev->base_addr;
3685 s32 omr;
3686
3687 if (!lp->cache.save_cnt) {
3688 STOP_DE4X5;
3689 de4x5_tx(dev); /* Flush any sent skb's */
3690 de4x5_free_tx_buffs(dev);
3691 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3692 de4x5_sw_reset(dev);
3693 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3694 lp->cache.save_cnt++;
3695 START_DE4X5;
3696 }
3697}
3698
3699static void
3700de4x5_rst_desc_ring(struct net_device *dev)
3701{
3702 struct de4x5_private *lp = netdev_priv(dev);
3703 u_long iobase = dev->base_addr;
3704 int i;
3705 s32 omr;
3706
3707 if (lp->cache.save_cnt) {
3708 STOP_DE4X5;
3709 outl(lp->dma_rings, DE4X5_RRBA);
3710 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3711 DE4X5_TRBA);
3712
3713 lp->rx_new = lp->rx_old = 0;
3714 lp->tx_new = lp->tx_old = 0;
3715
3716 for (i = 0; i < lp->rxRingSize; i++) {
3717 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3718 }
3719
3720 for (i = 0; i < lp->txRingSize; i++) {
3721 lp->tx_ring[i].status = cpu_to_le32(0);
3722 }
3723
3724 barrier();
3725 lp->cache.save_cnt--;
3726 START_DE4X5;
3727 }
3728}
3729
3730static void
3731de4x5_cache_state(struct net_device *dev, int flag)
3732{
3733 struct de4x5_private *lp = netdev_priv(dev);
3734 u_long iobase = dev->base_addr;
3735
3736 switch(flag) {
3737 case DE4X5_SAVE_STATE:
3738 lp->cache.csr0 = inl(DE4X5_BMR);
3739 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3740 lp->cache.csr7 = inl(DE4X5_IMR);
3741 break;
3742
3743 case DE4X5_RESTORE_STATE:
3744 outl(lp->cache.csr0, DE4X5_BMR);
3745 outl(lp->cache.csr6, DE4X5_OMR);
3746 outl(lp->cache.csr7, DE4X5_IMR);
3747 if (lp->chipset == DC21140) {
3748 gep_wr(lp->cache.gepc, dev);
3749 gep_wr(lp->cache.gep, dev);
3750 } else {
3751 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3752 lp->cache.csr15);
3753 }
3754 break;
3755 }
3756}
3757
3758static void
3759de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3760{
3761 struct de4x5_private *lp = netdev_priv(dev);
3762
3763 __skb_queue_tail(&lp->cache.queue, skb);
3764}
3765
3766static void
3767de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3768{
3769 struct de4x5_private *lp = netdev_priv(dev);
3770
3771 __skb_queue_head(&lp->cache.queue, skb);
3772}
3773
3774static struct sk_buff *
3775de4x5_get_cache(struct net_device *dev)
3776{
3777 struct de4x5_private *lp = netdev_priv(dev);
3778
3779 return __skb_dequeue(&lp->cache.queue);
3780}
3781
3782/*
3783** Check the Auto Negotiation State. Return OK when a link pass interrupt
3784** is received and the auto-negotiation status is NWAY OK.
3785*/
3786static int
3787test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3788{
3789 struct de4x5_private *lp = netdev_priv(dev);
3790 u_long iobase = dev->base_addr;
3791 s32 sts, ans;
3792
3793 if (lp->timeout < 0) {
3794 lp->timeout = msec/100;
3795 outl(irq_mask, DE4X5_IMR);
3796
3797 /* clear all pending interrupts */
3798 sts = inl(DE4X5_STS);
3799 outl(sts, DE4X5_STS);
3800 }
3801
3802 ans = inl(DE4X5_SISR) & SISR_ANS;
3803 sts = inl(DE4X5_STS) & ~TIMER_CB;
3804
3805 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3806 sts = 100 | TIMER_CB;
3807 } else {
3808 lp->timeout = -1;
3809 }
3810
3811 return sts;
3812}
3813
3814static void
3815de4x5_setup_intr(struct net_device *dev)
3816{
3817 struct de4x5_private *lp = netdev_priv(dev);
3818 u_long iobase = dev->base_addr;
3819 s32 imr, sts;
3820
3821 if (inl(DE4X5_OMR) & OMR_SR) { /* Only unmask if TX/RX is enabled */
3822 imr = 0;
3823 UNMASK_IRQs;
3824 sts = inl(DE4X5_STS); /* Reset any pending (stale) interrupts */
3825 outl(sts, DE4X5_STS);
3826 ENABLE_IRQs;
3827 }
3828}
3829
3830/*
3831** Reset the SIA and (re)load CSR13-15 from the SROM cache when in use, else from the values passed in.
3832*/
3833static void
3834reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3835{
3836 struct de4x5_private *lp = netdev_priv(dev);
3837 u_long iobase = dev->base_addr;
3838
3839 RESET_SIA;
3840 if (lp->useSROM) {
3841 if (lp->ibn == 3) {
3842 srom_exec(dev, lp->phy[lp->active].rst);
3843 srom_exec(dev, lp->phy[lp->active].gep);
3844 outl(1, DE4X5_SICR);
3845 return;
3846 } else {
3847 csr15 = lp->cache.csr15;
3848 csr14 = lp->cache.csr14;
3849 csr13 = lp->cache.csr13;
3850 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3851 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3852 }
3853 } else {
3854 outl(csr15, DE4X5_SIGR);
3855 }
3856 outl(csr14, DE4X5_STRR);
3857 outl(csr13, DE4X5_SICR);
3858
3859 mdelay(10);
3860}
3861
3862/*
3863** Create a loopback ethernet packet
3864*/
3865static void
3866create_packet(struct net_device *dev, char *frame, int len)
3867{
3868 int i;
3869 char *buf = frame;
3870
3871 for (i=0; i<ETH_ALEN; i++) { /* Use this source address */
3872 *buf++ = dev->dev_addr[i];
3873 }
3874 for (i=0; i<ETH_ALEN; i++) { /* Use this destination address */
3875 *buf++ = dev->dev_addr[i];
3876 }
3877
3878 *buf++ = 0; /* Packet length (2 bytes) */
3879 *buf++ = 1;
3880}
3881
3882/*
3883** Look for a particular board name in the EISA configuration space
3884*/
3885static int
3886EISA_signature(char *name, struct device *device)
3887{
3888 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3889 struct eisa_device *edev;
3890
3891 *name = '\0';
3892 edev = to_eisa_device (device);
3893 i = edev->id.driver_data;
3894
3895 if (i >= 0 && i < siglen) {
3896 strcpy (name, de4x5_signatures[i]);
3897 status = 1;
3898 }
3899
3900 return status; /* return the device name string */
3901}
3902
3903/*
3904** Look for a particular board name in the PCI configuration space
3905*/
3906static int
3907PCI_signature(char *name, struct de4x5_private *lp)
3908{
3909 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3910
3911 if (lp->chipset == DC21040) {
3912 strcpy(name, "DE434/5");
3913 return status;
3914 } else { /* Search for a DEC name in the SROM */
3915 int tmp = *((char *)&lp->srom + 19) * 3;
3916 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3917 }
3918 name[8] = '\0';
3919 for (i=0; i<siglen; i++) {
3920 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3921 }
3922 if (i == siglen) {
3923 if (dec_only) {
3924 *name = '\0';
3925 } else { /* Use chip name to avoid confusion */
3926 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3927 ((lp->chipset == DC21041) ? "DC21041" :
3928 ((lp->chipset == DC21140) ? "DC21140" :
3929 ((lp->chipset == DC21142) ? "DC21142" :
3930 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3931 )))))));
3932 }
3933 if (lp->chipset != DC21041) {
3934 lp->useSROM = true; /* card is not recognisably DEC */
3935 }
3936 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3937 lp->useSROM = true;
3938 }
3939
3940 return status;
3941}
3942
3943/*
3944** Set up the Ethernet PROM counter to the start of the Ethernet address on
3945** the DC21040, else read the SROM for the other chips.
3946** The SROM may not be present in a multi-MAC card, so first read the
3947** MAC address and check for a bad address. If there is a bad one then exit
3948** immediately with the prior srom contents intact (the h/w address will
3949** be fixed up later).
3950*/
3951static void
3952DevicePresent(struct net_device *dev, u_long aprom_addr)
3953{
3954 int i, j=0;
3955 struct de4x5_private *lp = netdev_priv(dev);
3956
3957 if (lp->chipset == DC21040) {
3958 if (lp->bus == EISA) {
3959 enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
3960 } else {
3961 outl(0, aprom_addr); /* Reset Ethernet Address ROM Pointer */
3962 }
3963 } else { /* Read new srom */
3964 u_short tmp;
3965 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3966 for (i=0; i<(ETH_ALEN>>1); i++) {
3967 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3968 j += tmp; /* for check for 0:0:0:0:0:0 or ff:ff:ff:ff:ff:ff */
3969 *p = cpu_to_le16(tmp);
3970 }
3971 if (j == 0 || j == 3 * 0xffff) {
3972 /* could get 0 only from all-0 and 3 * 0xffff only from all-1 */
3973 return;
3974 }
3975
3976 p = (__le16 *)&lp->srom;
3977 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3978 tmp = srom_rd(aprom_addr, i);
3979 *p++ = cpu_to_le16(tmp);
3980 }
3981 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
3982 }
3983}
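
/*
** [Illustrative sketch - not driver code] The j == 0 / j == 3 * 0xffff
** test above works because j is the sum of the three address halfwords:
** only 00:00:00:00:00:00 can sum to 0 and only ff:ff:ff:ff:ff:ff can sum
** to 3 * 0xffff, so both bogus patterns are caught with one comparison.
*/
static int demo_enet_addr_plausible(const unsigned short hw[3])
{
    unsigned int j = hw[0] + hw[1] + hw[2];	/* cannot overflow an int */

    return (j != 0) && (j != 3 * 0xffff);
}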
3984
3985/*
3986** Since the write on the Enet PROM register doesn't seem to reset the PROM
3987** pointer correctly (at least on my DE425 EISA card), this routine should do
3988** it...from depca.c.
3989*/
3990static void
3991enet_addr_rst(u_long aprom_addr)
3992{
3993 union {
3994 struct {
3995 u32 a;
3996 u32 b;
3997 } llsig;
3998 char Sig[sizeof(u32) << 1];
3999 } dev;
4000 short sigLength=0;
4001 s8 data;
4002 int i, j;
4003
4004 dev.llsig.a = ETH_PROM_SIG;
4005 dev.llsig.b = ETH_PROM_SIG;
4006 sigLength = sizeof(u32) << 1;
4007
4008 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4009 data = inb(aprom_addr);
4010 if (dev.Sig[j] == data) { /* track signature */
4011 j++;
4012 } else { /* lost signature; begin search again */
4013 if (data == dev.Sig[0]) { /* rare case.... */
4014 j=1;
4015 } else {
4016 j=0;
4017 }
4018 }
4019 }
4020}
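
/*
** [Illustrative sketch - not driver code] enet_addr_rst() above is a
** small signature-matching state machine: 'j' counts how much of the
** signature has been seen so far, advances on a match, and on a mismatch
** falls back to 1 (if the byte restarts the signature) or 0. Like the
** original, this simple fallback is not a full KMP matcher.
*/
static int demo_match_sig(const char *stream, int n, const char *sig, int m)
{
    int i, j = 0;

    for (i = 0; i < n && j < m; i++) {
	if (stream[i] == sig[j])
	    j++;				/* extend the match */
	else
	    j = (stream[i] == sig[0]) ? 1 : 0;	/* restart or reset */
    }

    return j == m;				/* full signature seen? */
}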
4021
4022/*
4023** For the bad status case with no SROM, add one to the previous
4024** address. The increment must propagate backwards from the last byte in
4025** case one or more of the bytes is 0xff. Only the last 3 bytes should be
4026** changed, since the first three are invariant - assigned to an organisation.
4027*/
4028static int
4029get_hw_addr(struct net_device *dev)
4030{
4031 u_long iobase = dev->base_addr;
4032 int broken, i, k, tmp, status = 0;
4033 u_short j,chksum;
4034 struct de4x5_private *lp = netdev_priv(dev);
4035
4036 broken = de4x5_bad_srom(lp);
4037
4038 for (i=0,k=0,j=0;j<3;j++) {
4039 k <<= 1;
4040 if (k > 0xffff) k-=0xffff;
4041
4042 if (lp->bus == PCI) {
4043 if (lp->chipset == DC21040) {
4044 while ((tmp = inl(DE4X5_APROM)) < 0);
4045 k += (u_char) tmp;
4046 dev->dev_addr[i++] = (u_char) tmp;
4047 while ((tmp = inl(DE4X5_APROM)) < 0);
4048 k += (u_short) (tmp << 8);
4049 dev->dev_addr[i++] = (u_char) tmp;
4050 } else if (!broken) {
4051 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4052 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4053 } else if ((broken == SMC) || (broken == ACCTON)) {
4054 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4055 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4056 }
4057 } else {
4058 k += (u_char) (tmp = inb(EISA_APROM));
4059 dev->dev_addr[i++] = (u_char) tmp;
4060 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4061 dev->dev_addr[i++] = (u_char) tmp;
4062 }
4063
4064 if (k > 0xffff) k-=0xffff;
4065 }
4066 if (k == 0xffff) k=0;
4067
4068 if (lp->bus == PCI) {
4069 if (lp->chipset == DC21040) {
4070 while ((tmp = inl(DE4X5_APROM)) < 0);
4071 chksum = (u_char) tmp;
4072 while ((tmp = inl(DE4X5_APROM)) < 0);
4073 chksum |= (u_short) (tmp << 8);
4074 if ((k != chksum) && (dec_only)) status = -1;
4075 }
4076 } else {
4077 chksum = (u_char) inb(EISA_APROM);
4078 chksum |= (u_short) (inb(EISA_APROM) << 8);
4079 if ((k != chksum) && (dec_only)) status = -1;
4080 }
4081
4082 /* If possible, try to fix a broken card - SMC only so far */
4083 srom_repair(dev, broken);
4084
4085#ifdef CONFIG_PPC_PMAC
4086 /*
4087 ** If the address starts with 00 a0, we have to bit-reverse
4088 ** each byte of the address.
4089 */
4090 if ( machine_is(powermac) &&
4091 (dev->dev_addr[0] == 0) &&
4092 (dev->dev_addr[1] == 0xa0) )
4093 {
4094 for (i = 0; i < ETH_ALEN; ++i)
4095 {
4096 int x = dev->dev_addr[i];
4097 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4098 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4099 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4100 }
4101 }
4102#endif /* CONFIG_PPC_PMAC */
4103
4104 /* Test for a bad enet address */
4105 status = test_bad_enet(dev, status);
4106
4107 return status;
4108}
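
/*
** [Illustrative sketch - not driver code] The k accumulator above is, in
** style, a rotating checksum over the address halfwords: rotate left one
** bit, folding bit 16 back into bit 0 via the k -= 0xffff correction,
** then add the next value and wrap the same way, mirroring the final
** 0xffff -> 0 fixup in the code above.
*/
static unsigned int demo_rot_csum(const unsigned short *w, int n)
{
    unsigned int k = 0;
    int i;

    for (i = 0; i < n; i++) {
	k <<= 1;
	if (k > 0xffff) k -= 0xffff;	/* 16-bit rotate: fold bit 16 in */
	k += w[i];
	if (k > 0xffff) k -= 0xffff;	/* wrap the addition too */
    }

    return (k == 0xffff) ? 0 : k;	/* the driver's final fixup */
}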
4109
4110/*
4111** Test for enet addresses in the first 32 bytes. The built-in strncmp
4112** didn't seem to work here...?
4113*/
4114static int
4115de4x5_bad_srom(struct de4x5_private *lp)
4116{
4117 int i, status = 0;
4118
4119 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4120 if (!de4x5_strncmp((char *)&lp->srom, (char *)&enet_det[i], 3) &&
4121 !de4x5_strncmp((char *)&lp->srom+0x10, (char *)&enet_det[i], 3)) {
4122 if (i == 0) {
4123 status = SMC;
4124 } else if (i == 1) {
4125 status = ACCTON;
4126 }
4127 break;
4128 }
4129 }
4130
4131 return status;
4132}
4133
4134static int
4135de4x5_strncmp(char *a, char *b, int n)
4136{
4137 int ret=0;
4138
4139 for (;n && !ret; n--) {
4140 ret = *a++ - *b++;
4141 }
4142
4143 return ret;
4144}
4145
4146static void
4147srom_repair(struct net_device *dev, int card)
4148{
4149 struct de4x5_private *lp = netdev_priv(dev);
4150
4151 switch(card) {
4152 case SMC:
4153 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4154 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4155 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4156 lp->useSROM = true;
4157 break;
4158 }
4159}
4160
4161/*
4162** Assume that the IRQs do not follow the PCI spec - this seems
4163** to be true so far (2 for 2).
4164*/
4165static int
4166test_bad_enet(struct net_device *dev, int status)
4167{
4168 struct de4x5_private *lp = netdev_priv(dev);
4169 int i, tmp;
4170
4171 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4172 if ((tmp == 0) || (tmp == 0x5fa)) {
4173 if ((lp->chipset == last.chipset) &&
4174 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4175 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4176 for (i=ETH_ALEN-1; i>2; --i) {
4177 dev->dev_addr[i] += 1;
4178 if (dev->dev_addr[i] != 0) break;
4179 }
4180 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4181 if (!an_exception(lp)) {
4182 dev->irq = last.irq;
4183 }
4184
4185 status = 0;
4186 }
4187 } else if (!status) {
4188 last.chipset = lp->chipset;
4189 last.bus = lp->bus_num;
4190 last.irq = dev->irq;
4191 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4192 }
4193
4194 return status;
4195}
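
/*
** [Illustrative sketch - not driver code] The address bump above
** increments only the NIC-specific last three octets, propagating the
** carry backwards so an 0xff octet wraps to 0x00 and the next octet up
** is incremented; the first three octets (the OUI) are never touched.
*/
static void demo_bump_mac(unsigned char addr[6])
{
    int i;

    for (i = 5; i > 2; --i) {		/* octets 5, 4, 3 only */
	addr[i] += 1;
	if (addr[i] != 0)		/* no wrap => carry stops here */
	    break;
    }
}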
4196
4197/*
4198** List of board exceptions with correctly wired IRQs
4199*/
4200static int
4201an_exception(struct de4x5_private *lp)
4202{
4203 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4204 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4205 return -1;
4206 }
4207
4208 return 0;
4209}
4210
4211/*
4212** SROM Read
4213*/
4214static short
4215srom_rd(u_long addr, u_char offset)
4216{
4217 sendto_srom(SROM_RD | SROM_SR, addr);
4218
4219 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4220 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4221 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4222
4223 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4224}
4225
4226static void
4227srom_latch(u_int command, u_long addr)
4228{
4229 sendto_srom(command, addr);
4230 sendto_srom(command | DT_CLK, addr);
4231 sendto_srom(command, addr);
4232}
4233
4234static void
4235srom_command(u_int command, u_long addr)
4236{
4237 srom_latch(command, addr);
4238 srom_latch(command, addr);
4239 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4240}
4241
4242static void
4243srom_address(u_int command, u_long addr, u_char offset)
4244{
4245 int i, a;
4246
4247 a = offset << 2;
4248 for (i=0; i<6; i++, a <<= 1) {
4249 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4250 }
4251 udelay(1);
4252
4253    i = (getfrom_srom(addr) >> 3) & 0x01;	/* clock in the dummy 0 bit the EEPROM outputs once the address is accepted */
4254}
4255
4256static short
4257srom_data(u_int command, u_long addr)
4258{
4259 int i;
4260 short word = 0;
4261 s32 tmp;
4262
4263 for (i=0; i<16; i++) {
4264 sendto_srom(command | DT_CLK, addr);
4265 tmp = getfrom_srom(addr);
4266 sendto_srom(command, addr);
4267
4268 word = (word << 1) | ((tmp >> 3) & 0x01);
4269 }
4270
4271 sendto_srom(command & 0x0000ff00, addr);
4272
4273 return word;
4274}
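
/*
** [Illustrative sketch - not driver code] Condensed, the transaction
** driven by srom_rd()/srom_latch()/srom_address()/srom_data() above is
** the usual microwire (93C46-style) read: a start bit, the 2-bit READ
** opcode (10), a 6-bit word address MSB first, then 16 data bits clocked
** in MSB first. The clk_out/clk_in callbacks stand in for the CSR9
** bit-banging helpers.
*/
static unsigned short demo_microwire_read(unsigned char addr6,
					  void (*clk_out)(int bit),
					  int (*clk_in)(void))
{
    unsigned short word = 0;
    int i;

    clk_out(1);				/* start bit */
    clk_out(1); clk_out(0);		/* READ opcode: 10 */
    for (i = 5; i >= 0; i--)
	clk_out((addr6 >> i) & 1);	/* 6-bit address, MSB first */
    for (i = 0; i < 16; i++)
	word = (word << 1) | (clk_in() & 1);	/* 16 data bits, MSB first */

    return word;
}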
4275
4276/*
4277static void
4278srom_busy(u_int command, u_long addr)
4279{
4280 sendto_srom((command & 0x0000ff00) | DT_CS, addr);
4281
4282 while (!((getfrom_srom(addr) >> 3) & 0x01)) {
4283 mdelay(1);
4284 }
4285
4286 sendto_srom(command & 0x0000ff00, addr);
4287}
4288*/
4289
4290static void
4291sendto_srom(u_int command, u_long addr)
4292{
4293 outl(command, addr);
4294 udelay(1);
4295}
4296
4297static int
4298getfrom_srom(u_long addr)
4299{
4300 s32 tmp;
4301
4302 tmp = inl(addr);
4303 udelay(1);
4304
4305 return tmp;
4306}
4307
4308static int
4309srom_infoleaf_info(struct net_device *dev)
4310{
4311 struct de4x5_private *lp = netdev_priv(dev);
4312 int i, count;
4313 u_char *p;
4314
4315 /* Find the infoleaf decoder function that matches this chipset */
4316 for (i=0; i<INFOLEAF_SIZE; i++) {
4317 if (lp->chipset == infoleaf_array[i].chipset) break;
4318 }
4319 if (i == INFOLEAF_SIZE) {
4320 lp->useSROM = false;
4321 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4322 dev->name);
4323 return -ENXIO;
4324 }
4325
4326 lp->infoleaf_fn = infoleaf_array[i].fn;
4327
4328 /* Find the information offset that this function should use */
4329 count = *((u_char *)&lp->srom + 19);
4330 p = (u_char *)&lp->srom + 26;
4331
4332 if (count > 1) {
4333 for (i=count; i; --i, p+=3) {
4334 if (lp->device == *p) break;
4335 }
4336 if (i == 0) {
4337 lp->useSROM = false;
4338 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4339 dev->name, lp->device);
4340 return -ENXIO;
4341 }
4342 }
4343
4344 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4345
4346 return 0;
4347}
4348
4349/*
4350** This routine loads any type 1 or 3 MII info into the mii device
4351** struct and executes any type 5 code to reset PHY devices for this
4352** controller.
4353** The info for the MII devices will be valid since the index used
4354** will follow the discovery process from MII address 1-31 then 0.
4355*/
4356static void
4357srom_init(struct net_device *dev)
4358{
4359 struct de4x5_private *lp = netdev_priv(dev);
4360 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4361 u_char count;
4362
4363 p+=2;
4364 if (lp->chipset == DC21140) {
4365 lp->cache.gepc = (*p++ | GEP_CTRL);
4366 gep_wr(lp->cache.gepc, dev);
4367 }
4368
4369 /* Block count */
4370 count = *p++;
4371
4372 /* Jump the infoblocks to find types */
4373 for (;count; --count) {
4374 if (*p < 128) {
4375 p += COMPACT_LEN;
4376 } else if (*(p+1) == 5) {
4377 type5_infoblock(dev, 1, p);
4378 p += ((*p & BLOCK_LEN) + 1);
4379 } else if (*(p+1) == 4) {
4380 p += ((*p & BLOCK_LEN) + 1);
4381 } else if (*(p+1) == 3) {
4382 type3_infoblock(dev, 1, p);
4383 p += ((*p & BLOCK_LEN) + 1);
4384 } else if (*(p+1) == 2) {
4385 p += ((*p & BLOCK_LEN) + 1);
4386 } else if (*(p+1) == 1) {
4387 type1_infoblock(dev, 1, p);
4388 p += ((*p & BLOCK_LEN) + 1);
4389 } else {
4390 p += ((*p & BLOCK_LEN) + 1);
4391 }
4392 }
4393}
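
/*
** [Illustrative sketch - not driver code] SROM infoblocks are walked as
** above: a block whose first byte is < 128 is a fixed-size compact
** block; otherwise byte 0, masked with BLOCK_LEN, holds the block length
** and byte 1 the block type. The constants here (COMPACT_LEN == 4,
** BLOCK_LEN == 0x7f) are assumptions mirroring the driver's macros.
*/
static const unsigned char *demo_next_infoblock(const unsigned char *p)
{
    if (*p < 128)
	return p + 4;			/* compact block: fixed length */

    return p + (*p & 0x7f) + 1;		/* extended block: length + 1 */
}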
4394
4395/*
4396** A generic routine that writes GEP control, data and reset information
4397** to the GEP register (21140) or csr15 GEP portion (2114[23]).
4398*/
4399static void
4400srom_exec(struct net_device *dev, u_char *p)
4401{
4402 struct de4x5_private *lp = netdev_priv(dev);
4403 u_long iobase = dev->base_addr;
4404 u_char count = (p ? *p++ : 0);
4405 u_short *w = (u_short *)p;
4406
4407 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4408
4409 if (lp->chipset != DC21140) RESET_SIA;
4410
4411 while (count--) {
4412 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4413 *p++ : get_unaligned_le16(w++)), dev);
4414 mdelay(2); /* 2ms per action */
4415 }
4416
4417 if (lp->chipset != DC21140) {
4418 outl(lp->cache.csr14, DE4X5_STRR);
4419 outl(lp->cache.csr13, DE4X5_SICR);
4420 }
4421}
4422
4423/*
4424** Basically this function is a NOP since it will never be called,
4425** unless I implement the DC21041 SROM functions. There's no need
4426** since the existing code will be satisfactory for all boards.
4427*/
4428static int
4429dc21041_infoleaf(struct net_device *dev)
4430{
4431 return DE4X5_AUTOSENSE_MS;
4432}
4433
4434static int
4435dc21140_infoleaf(struct net_device *dev)
4436{
4437 struct de4x5_private *lp = netdev_priv(dev);
4438 u_char count = 0;
4439 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4440 int next_tick = DE4X5_AUTOSENSE_MS;
4441
4442 /* Read the connection type */
4443 p+=2;
4444
4445 /* GEP control */
4446 lp->cache.gepc = (*p++ | GEP_CTRL);
4447
4448 /* Block count */
4449 count = *p++;
4450
4451 /* Recursively figure out the info blocks */
4452 if (*p < 128) {
4453 next_tick = dc_infoblock[COMPACT](dev, count, p);
4454 } else {
4455 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4456 }
4457
4458 if (lp->tcount == count) {
4459 lp->media = NC;
4460 if (lp->media != lp->c_media) {
4461 de4x5_dbg_media(dev);
4462 lp->c_media = lp->media;
4463 }
4464 lp->media = INIT;
4465 lp->tcount = 0;
4466 lp->tx_enable = false;
4467 }
4468
4469 return next_tick & ~TIMER_CB;
4470}
4471
4472static int
4473dc21142_infoleaf(struct net_device *dev)
4474{
4475 struct de4x5_private *lp = netdev_priv(dev);
4476 u_char count = 0;
4477 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4478 int next_tick = DE4X5_AUTOSENSE_MS;
4479
4480 /* Read the connection type */
4481 p+=2;
4482
4483 /* Block count */
4484 count = *p++;
4485
4486 /* Recursively figure out the info blocks */
4487 if (*p < 128) {
4488 next_tick = dc_infoblock[COMPACT](dev, count, p);
4489 } else {
4490 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4491 }
4492
4493 if (lp->tcount == count) {
4494 lp->media = NC;
4495 if (lp->media != lp->c_media) {
4496 de4x5_dbg_media(dev);
4497 lp->c_media = lp->media;
4498 }
4499 lp->media = INIT;
4500 lp->tcount = 0;
4501 lp->tx_enable = false;
4502 }
4503
4504 return next_tick & ~TIMER_CB;
4505}
4506
4507static int
4508dc21143_infoleaf(struct net_device *dev)
4509{
4510 struct de4x5_private *lp = netdev_priv(dev);
4511 u_char count = 0;
4512 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4513 int next_tick = DE4X5_AUTOSENSE_MS;
4514
4515 /* Read the connection type */
4516 p+=2;
4517
4518 /* Block count */
4519 count = *p++;
4520
4521 /* Recursively figure out the info blocks */
4522 if (*p < 128) {
4523 next_tick = dc_infoblock[COMPACT](dev, count, p);
4524 } else {
4525 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4526 }
4527 if (lp->tcount == count) {
4528 lp->media = NC;
4529 if (lp->media != lp->c_media) {
4530 de4x5_dbg_media(dev);
4531 lp->c_media = lp->media;
4532 }
4533 lp->media = INIT;
4534 lp->tcount = 0;
4535 lp->tx_enable = false;
4536 }
4537
4538 return next_tick & ~TIMER_CB;
4539}
4540
4541/*
4542** The compact infoblock is only designed for DC21140[A] chips, so
4543** we'll reuse the dc21140m_autoconf function. Non MII media only.
4544*/
4545static int
4546compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4547{
4548 struct de4x5_private *lp = netdev_priv(dev);
4549 u_char flags, csr6;
4550
4551 /* Recursively figure out the info blocks */
4552 if (--count > lp->tcount) {
4553 if (*(p+COMPACT_LEN) < 128) {
4554 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4555 } else {
4556 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4557 }
4558 }
4559
4560 if ((lp->media == INIT) && (lp->timeout < 0)) {
4561 lp->ibn = COMPACT;
4562 lp->active = 0;
4563 gep_wr(lp->cache.gepc, dev);
4564 lp->infoblock_media = (*p++) & COMPACT_MC;
4565 lp->cache.gep = *p++;
4566 csr6 = *p++;
4567 flags = *p++;
4568
4569 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4570 lp->defMedium = (flags & 0x40) ? -1 : 0;
4571 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4572 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4573 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4574 lp->useMII = false;
4575
4576 de4x5_switch_mac_port(dev);
4577 }
4578
4579 return dc21140m_autoconf(dev);
4580}
4581
4582/*
4583** This block describes non MII media for the DC21140[A] only.
4584*/
4585static int
4586type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4587{
4588 struct de4x5_private *lp = netdev_priv(dev);
4589 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4590
4591 /* Recursively figure out the info blocks */
4592 if (--count > lp->tcount) {
4593 if (*(p+len) < 128) {
4594 return dc_infoblock[COMPACT](dev, count, p+len);
4595 } else {
4596 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4597 }
4598 }
4599
4600 if ((lp->media == INIT) && (lp->timeout < 0)) {
4601 lp->ibn = 0;
4602 lp->active = 0;
4603 gep_wr(lp->cache.gepc, dev);
4604 p+=2;
4605 lp->infoblock_media = (*p++) & BLOCK0_MC;
4606 lp->cache.gep = *p++;
4607 csr6 = *p++;
4608 flags = *p++;
4609
4610 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4611 lp->defMedium = (flags & 0x40) ? -1 : 0;
4612 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4613 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4614 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4615 lp->useMII = false;
4616
4617 de4x5_switch_mac_port(dev);
4618 }
4619
4620 return dc21140m_autoconf(dev);
4621}
4622
4623/* These functions are under construction! */
4624
4625static int
4626type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4627{
4628 struct de4x5_private *lp = netdev_priv(dev);
4629 u_char len = (*p & BLOCK_LEN)+1;
4630
4631 /* Recursively figure out the info blocks */
4632 if (--count > lp->tcount) {
4633 if (*(p+len) < 128) {
4634 return dc_infoblock[COMPACT](dev, count, p+len);
4635 } else {
4636 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4637 }
4638 }
4639
4640 p += 2;
4641 if (lp->state == INITIALISED) {
4642 lp->ibn = 1;
4643 lp->active = *p++;
4644 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4645 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4646 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4647 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4648 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4649 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4650 return 0;
4651 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4652 lp->ibn = 1;
4653 lp->active = *p;
4654 lp->infoblock_csr6 = OMR_MII_100;
4655 lp->useMII = true;
4656 lp->infoblock_media = ANS;
4657
4658 de4x5_switch_mac_port(dev);
4659 }
4660
4661 return dc21140m_autoconf(dev);
4662}
4663
4664static int
4665type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4666{
4667 struct de4x5_private *lp = netdev_priv(dev);
4668 u_char len = (*p & BLOCK_LEN)+1;
4669
4670 /* Recursively figure out the info blocks */
4671 if (--count > lp->tcount) {
4672 if (*(p+len) < 128) {
4673 return dc_infoblock[COMPACT](dev, count, p+len);
4674 } else {
4675 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4676 }
4677 }
4678
4679 if ((lp->media == INIT) && (lp->timeout < 0)) {
4680 lp->ibn = 2;
4681 lp->active = 0;
4682 p += 2;
4683 lp->infoblock_media = (*p) & MEDIA_CODE;
4684
4685 if ((*p++) & EXT_FIELD) {
4686 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4687 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4688 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4689 } else {
4690 lp->cache.csr13 = CSR13;
4691 lp->cache.csr14 = CSR14;
4692 lp->cache.csr15 = CSR15;
4693 }
4694 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4695 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4696 lp->infoblock_csr6 = OMR_SIA;
4697 lp->useMII = false;
4698
4699 de4x5_switch_mac_port(dev);
4700 }
4701
4702 return dc2114x_autoconf(dev);
4703}
4704
4705static int
4706type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4707{
4708 struct de4x5_private *lp = netdev_priv(dev);
4709 u_char len = (*p & BLOCK_LEN)+1;
4710
4711 /* Recursively figure out the info blocks */
4712 if (--count > lp->tcount) {
4713 if (*(p+len) < 128) {
4714 return dc_infoblock[COMPACT](dev, count, p+len);
4715 } else {
4716 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4717 }
4718 }
4719
4720 p += 2;
4721 if (lp->state == INITIALISED) {
4722 lp->ibn = 3;
4723 lp->active = *p++;
4724 if (MOTO_SROM_BUG) lp->active = 0;
4725 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4726 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4727 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4728 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4729 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4730 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4731 lp->phy[lp->active].mci = *p;
4732 return 0;
4733 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4734 lp->ibn = 3;
4735 lp->active = *p;
4736 if (MOTO_SROM_BUG) lp->active = 0;
4737 lp->infoblock_csr6 = OMR_MII_100;
4738 lp->useMII = true;
4739 lp->infoblock_media = ANS;
4740
4741 de4x5_switch_mac_port(dev);
4742 }
4743
4744 return dc2114x_autoconf(dev);
4745}
4746
4747static int
4748type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4749{
4750 struct de4x5_private *lp = netdev_priv(dev);
4751 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4752
4753 /* Recursively figure out the info blocks */
4754 if (--count > lp->tcount) {
4755 if (*(p+len) < 128) {
4756 return dc_infoblock[COMPACT](dev, count, p+len);
4757 } else {
4758 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4759 }
4760 }
4761
4762 if ((lp->media == INIT) && (lp->timeout < 0)) {
4763 lp->ibn = 4;
4764 lp->active = 0;
4765 p+=2;
4766 lp->infoblock_media = (*p++) & MEDIA_CODE;
4767 lp->cache.csr13 = CSR13; /* Hard coded defaults */
4768 lp->cache.csr14 = CSR14;
4769 lp->cache.csr15 = CSR15;
4770 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4771 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4772 csr6 = *p++;
4773 flags = *p++;
4774
4775 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4776 lp->defMedium = (flags & 0x40) ? -1 : 0;
4777 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4778 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4779 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4780 lp->useMII = false;
4781
4782 de4x5_switch_mac_port(dev);
4783 }
4784
4785 return dc2114x_autoconf(dev);
4786}
4787
4788/*
4789** This block type provides information for resetting external devices
4790** (chips) through the General Purpose Register.
4791*/
4792static int
4793type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4794{
4795 struct de4x5_private *lp = netdev_priv(dev);
4796 u_char len = (*p & BLOCK_LEN)+1;
4797
4798 /* Recursively figure out the info blocks */
4799 if (--count > lp->tcount) {
4800 if (*(p+len) < 128) {
4801 return dc_infoblock[COMPACT](dev, count, p+len);
4802 } else {
4803 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4804 }
4805 }
4806
4807 /* Must be initializing to run this code */
4808 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4809 p+=2;
4810 lp->rst = p;
4811 srom_exec(dev, lp->rst);
4812 }
4813
4814 return DE4X5_AUTOSENSE_MS;
4815}
4816
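Each of the infoblock handlers above shares the same chain-walking preamble: the low seven bits of a block's first byte give its length, and the first byte of the following block selects the next handler, COMPACT if it is below 128, otherwise the extended type held in that block's second byte. A minimal standalone sketch of the dispatch, with a hypothetical byte buffer purely for illustration:

#include <stdio.h>

#define BLOCK_LEN 0x7f			/* length mask, as in de4x5.h */

/* Mirror of the recursion in the type[0-5]_infoblock() handlers above:
** step over the current block, then classify whatever follows it.
*/
static void next_infoblock(const unsigned char *p)
{
    unsigned char len = (*p & BLOCK_LEN) + 1;

    if (*(p + len) < 128)
	printf("next block: COMPACT\n");
    else
	printf("next block: extended type %u\n", *(p + len + 1));
}

int main(void)
{
    /* Hypothetical SROM bytes: a 3-byte extended block (0x82) followed
    ** by the start of an extended type 3 block (0x83 0x03).
    */
    unsigned char srom[] = { 0x82, 0x01, 0x00, 0x83, 0x03, 0x00 };

    next_infoblock(srom);
    return 0;
}
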
4817/*
4818** MII Read/Write
4819*/
4820
4821static int
4822mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4823{
4824 mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
4825 mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
4826 mii_wdata(MII_STRD, 4, ioaddr); /* SFD and Read operation */
4827 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
4828 mii_address(phyreg, ioaddr); /* PHY Register to read */
4829 mii_ta(MII_STRD, ioaddr); /* Turn around time - 2 MDC */
4830
4831 return mii_rdata(ioaddr); /* Read data */
4832}
4833
4834static void
4835mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4836{
4837 mii_wdata(MII_PREAMBLE, 2, ioaddr); /* Start of 34 bit preamble... */
4838 mii_wdata(MII_PREAMBLE, 32, ioaddr); /* ...continued */
4839 mii_wdata(MII_STWR, 4, ioaddr); /* SFD and Write operation */
4840 mii_address(phyaddr, ioaddr); /* PHY address to be accessed */
4841 mii_address(phyreg, ioaddr); /* PHY Register to write */
4842 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
4843 data = mii_swap(data, 16); /* Swap data bit ordering */
4844 mii_wdata(data, 16, ioaddr); /* Write data */
4845}
4846
4847static int
4848mii_rdata(u_long ioaddr)
4849{
4850 int i;
4851 s32 tmp = 0;
4852
4853 for (i=0; i<16; i++) {
4854 tmp <<= 1;
4855 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4856 }
4857
4858 return tmp;
4859}
4860
4861static void
4862mii_wdata(int data, int len, u_long ioaddr)
4863{
4864 int i;
4865
4866 for (i=0; i<len; i++) {
4867 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4868 data >>= 1;
4869 }
4870}
4871
4872static void
4873mii_address(u_char addr, u_long ioaddr)
4874{
4875 int i;
4876
4877 addr = mii_swap(addr, 5);
4878 for (i=0; i<5; i++) {
4879 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4880 addr >>= 1;
4881 }
4882}
4883
4884static void
4885mii_ta(u_long rw, u_long ioaddr)
4886{
4887 if (rw == MII_STWR) {
4888 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4889 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4890 } else {
4891 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
4892 }
4893}
4894
4895static int
4896mii_swap(int data, int len)
4897{
4898 int i, tmp = 0;
4899
4900 for (i=0; i<len; i++) {
4901 tmp <<= 1;
4902 tmp |= (data & 1);
4903 data >>= 1;
4904 }
4905
4906 return tmp;
4907}
4908
4909static void
4910sendto_mii(u32 command, int data, u_long ioaddr)
4911{
4912 u32 j;
4913
4914 j = (data & 1) << 17;
4915 outl(command | j, ioaddr);
4916 udelay(1);
4917 outl(command | MII_MDC | j, ioaddr);
4918 udelay(1);
4919}
4920
4921static int
4922getfrom_mii(u32 command, u_long ioaddr)
4923{
4924 outl(command, ioaddr);
4925 udelay(1);
4926 outl(command | MII_MDC, ioaddr);
4927 udelay(1);
4928
4929 return (inl(ioaddr) >> 19) & 1;
4930}
4931
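Taken together, these helpers bit-bang an IEEE 802.3 clause 22 management frame through the SROM/MII register: a long run of preamble ones, a start-of-frame/opcode nibble, two 5-bit address fields (sent MSB-first, since mii_address() bit-reverses them with mii_swap() before the LSB-first shift), a turnaround, and sixteen data bits. A host-side sketch of the bit stream mii_rd() produces, assuming PHY address 1 and register MII_ID0; it only prints the sequence and touches no hardware:

#include <stdio.h>

#define MII_STRD 0x06	/* start + read opcode nibble, as in de4x5.h */

/* Shift bits out LSB-first, like mii_wdata() */
static void emit_lsb(unsigned int data, int len)
{
    int i;

    for (i = 0; i < len; i++) {
	putchar('0' + (data & 1));
	data >>= 1;
    }
}

/* A 5-bit address goes out MSB-first (mii_swap() + LSB-first shift) */
static void emit_addr(unsigned int addr)
{
    int i;

    for (i = 4; i >= 0; i--)
	putchar('0' + ((addr >> i) & 1));
}

int main(void)
{
    emit_lsb(0xffffffff, 32);	/* preamble (the driver clocks out 34 bits) */
    emit_lsb(MII_STRD, 4);	/* "0110" on the wire: ST=01, OP=10 (read) */
    emit_addr(1);		/* PHY address 1 (assumed for the example) */
    emit_addr(2);		/* register 2, i.e. MII_ID0 */
    printf("\n");		/* then 2 turnaround clocks + 16 data bits */
    return 0;
}
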
4932/*
4933** Here are three ways to calculate the OUI from the ID registers.
4934*/
4935static int
4936mii_get_oui(u_char phyaddr, u_long ioaddr)
4937{
4938/*
4939 union {
4940 u_short reg;
4941 u_char breg[2];
4942 } a;
4943 int i, r2, r3, ret=0;*/
4944 int r2, r3;
4945
4946 /* Read r2 and r3 */
4947 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4948 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
4949#if 0	/* SEEQ and Cypress way: kept for reference; uses the declarations commented out above */
4950 /* Shuffle r2 and r3 */
4951 a.reg=0;
4952 r3 = ((r3>>10)|(r2<<6))&0x0ff;
4953 r2 = ((r2>>2)&0x3fff);
4954
4955 /* Bit reverse r3 */
4956 for (i=0;i<8;i++) {
4957 ret<<=1;
4958 ret |= (r3&1);
4959 r3>>=1;
4960 }
4961
4962 /* Bit reverse r2 */
4963 for (i=0;i<16;i++) {
4964 a.reg<<=1;
4965 a.reg |= (r2&1);
4966 r2>>=1;
4967 }
4968
4969 /* Swap r2 bytes */
4970 i=a.breg[0];
4971 a.breg[0]=a.breg[1];
4972 a.breg[1]=i;
4973 return (a.reg<<8)|ret; /* SEEQ and Cypress way */
4974#endif
4975/* return (r2<<6)|(u_int)(r3>>10); */ /* NATIONAL and BROADCOM way */
4976 return r2; /* (I did it) My way */
4977}
4978
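For reference, the standard clause 22 layout behind the commented-out alternatives: MII_ID0 carries OUI bits 3-18 and the top six bits of MII_ID1 carry bits 19-24, with the model and revision in the remaining ID1 bits; the "NATIONAL and BROADCOM way" above reconstructs exactly that 22-bit value. A hedged sketch of the full decomposition (the driver itself simply matches on the raw r2 value):

/* Split the two PHY identifier registers per IEEE 802.3 clause 22 */
struct phy_ident {
    unsigned int  oui;		/* the 22 recoverable OUI bits */
    unsigned char model;	/* 6-bit model number */
    unsigned char rev;		/* 4-bit revision */
};

static struct phy_ident decode_phy_id(unsigned short r2, unsigned short r3)
{
    struct phy_ident id;

    id.oui   = ((unsigned int)r2 << 6) | (r3 >> 10);	/* as in the comment above */
    id.model = (r3 >> 4) & 0x3f;
    id.rev   = r3 & 0x0f;
    return id;
}
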
4979/*
4980** The SROM spec forces us to search the MII addresses in the order [1..31, 0]. Bummer.
4981*/
4982static int
4983mii_get_phy(struct net_device *dev)
4984{
4985 struct de4x5_private *lp = netdev_priv(dev);
4986 u_long iobase = dev->base_addr;
4987 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4988 int id;
4989
4990 lp->active = 0;
4991 lp->useMII = true;
4992
4993 /* Search the MII address space for possible PHY devices */
4994 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4995 lp->phy[lp->active].addr = i;
4996 if (i==0) n++; /* Count cycles */
4997 while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
4998 id = mii_get_oui(i, DE4X5_MII);
4999 if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
5000 for (j=0; j<limit; j++) { /* Search PHY table */
5001 if (id != phy_info[j].id) continue; /* ID match? */
5002 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5003 if (k < DE4X5_MAX_PHY) {
5004 memcpy((char *)&lp->phy[k],
5005 (char *)&phy_info[j], sizeof(struct phy_table));
5006 lp->phy[k].addr = i;
5007 lp->mii_cnt++;
5008 lp->active++;
5009 } else {
5010 goto purgatory; /* Stop the search */
5011 }
5012 break;
5013 }
5014 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5015 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5016 lp->phy[k].addr = i;
5017 lp->phy[k].id = id;
5018 lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
5019 lp->phy[k].spd.mask = GENERIC_MASK; /* 100Mb/s technologies */
5020 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
5021 lp->mii_cnt++;
5022 lp->active++;
5023 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5024 j = de4x5_debug;
5025 de4x5_debug |= DEBUG_MII;
5026 de4x5_dbg_mii(dev, k);
5027 de4x5_debug = j;
5028 printk("\n");
5029 }
5030 }
5031 purgatory:
5032 lp->active = 0;
5033 if (lp->phy[0].id) { /* Reset the PHY devices */
5034 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
5035 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5036 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5037
5038 de4x5_dbg_mii(dev, k);
5039 }
5040 }
5041 if (!lp->mii_cnt) lp->useMII = false;
5042
5043 return lp->mii_cnt;
5044}
5045
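The loop header above is what produces the order the comment grumbles about: i starts at 1, wraps through 0, and the cycle counter n only satisfies the exit condition once i comes back around to 1. A standalone sketch that just prints the visiting order:

#include <stdio.h>

#define DE4X5_MAX_MII 32	/* as in de4x5.h */

int main(void)
{
    int i, n;

    /* Same loop header as mii_get_phy(): visits 1..31, then 0, then stops */
    for (n = 0, i = 1; !((i == 1) && (n == 1)); i = (i + 1) % DE4X5_MAX_MII) {
	if (i == 0) n++;	/* count passes through address 0 */
	printf("%d ", i);
    }
    printf("\n");		/* prints: 1 2 ... 31 0 */
    return 0;
}
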
5046static char *
5047build_setup_frame(struct net_device *dev, int mode)
5048{
5049 struct de4x5_private *lp = netdev_priv(dev);
5050 int i;
5051 char *pa = lp->setup_frame;
5052
5053 /* Initialise the setup frame */
5054 if (mode == ALL) {
5055 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5056 }
5057
5058 if (lp->setup_f == HASH_PERF) {
5059 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5060 *(pa + i) = dev->dev_addr[i]; /* Host address */
5061 if (i & 0x01) pa += 2;
5062 }
5063 *(lp->setup_frame + (HASH_TABLE_LEN >> 3) - 3) = 0x80;
5064 } else {
5065 for (i=0; i<ETH_ALEN; i++) { /* Host address */
5066 *(pa + (i&1)) = dev->dev_addr[i];
5067 if (i & 0x01) pa += 4;
5068 }
5069 for (i=0; i<ETH_ALEN; i++) { /* Broadcast address */
5070 *(pa + (i&1)) = (char) 0xff;
5071 if (i & 0x01) pa += 4;
5072 }
5073 }
5074
5075 return pa; /* Points to the next entry */
5076}
5077
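In the perfect-filtering branch, each address occupies three longwords with two address bytes in the low half of each, which is why the loop writes two bytes and then advances the pointer by four. A standalone sketch of where one entry's bytes land (the MAC address is an arbitrary example):

#include <stdio.h>
#include <string.h>

#define SETUP_FRAME_LEN 192	/* 16 entries x 3 longwords, as in de4x5.h */

int main(void)
{
    unsigned char frame[SETUP_FRAME_LEN];
    unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    unsigned char *pa = frame;
    int i;

    memset(frame, 0, sizeof(frame));
    for (i = 0; i < 6; i++) {		/* same walk as build_setup_frame() */
	*(pa + (i & 1)) = addr[i];
	if (i & 0x01) pa += 4;
    }
    for (i = 0; i < 12; i++)		/* bytes land at offsets 0,1 4,5 8,9 */
	printf("%02x ", frame[i]);
    printf("\n");
    return 0;
}
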
5078static void
5079disable_ast(struct net_device *dev)
5080{
5081 struct de4x5_private *lp = netdev_priv(dev);
5082 del_timer_sync(&lp->timer);
5083}
5084
5085static long
5086de4x5_switch_mac_port(struct net_device *dev)
5087{
5088 struct de4x5_private *lp = netdev_priv(dev);
5089 u_long iobase = dev->base_addr;
5090 s32 omr;
5091
5092 STOP_DE4X5;
5093
5094 /* Clear the port/media select bits in CSR6, then apply the infoblock value */
5095 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5096 OMR_FDX));
5097 omr |= lp->infoblock_csr6;
5098 if (omr & OMR_PS) omr |= OMR_HBD;
5099 outl(omr, DE4X5_OMR);
5100
5101 /* Soft Reset */
5102 RESET_DE4X5;
5103
5104 /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
5105 if (lp->chipset == DC21140) {
5106 gep_wr(lp->cache.gepc, dev);
5107 gep_wr(lp->cache.gep, dev);
5108 } else if ((lp->chipset & ~0x0ff) == DC2114x) {
5109 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5110 }
5111
5112 /* Restore CSR6 */
5113 outl(omr, DE4X5_OMR);
5114
5115 /* Reset CSR8 */
5116 inl(DE4X5_MFC);
5117
5118 return omr;
5119}
5120
5121static void
5122gep_wr(s32 data, struct net_device *dev)
5123{
5124 struct de4x5_private *lp = netdev_priv(dev);
5125 u_long iobase = dev->base_addr;
5126
5127 if (lp->chipset == DC21140) {
5128 outl(data, DE4X5_GEP);
5129 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5130 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5131 }
5132}
5133
5134static int
5135gep_rd(struct net_device *dev)
5136{
5137 struct de4x5_private *lp = netdev_priv(dev);
5138 u_long iobase = dev->base_addr;
5139
5140 if (lp->chipset == DC21140) {
5141 return inl(DE4X5_GEP);
5142 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5143 return inl(DE4X5_SIGR) & 0x000fffff;
5144 }
5145
5146 return 0;
5147}
5148
5149static void
5150yawn(struct net_device *dev, int state)
5151{
5152 struct de4x5_private *lp = netdev_priv(dev);
5153 u_long iobase = dev->base_addr;
5154
5155 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5156
5157 if(lp->bus == EISA) {
5158 switch(state) {
5159 case WAKEUP:
5160 outb(WAKEUP, PCI_CFPM);
5161 mdelay(10);
5162 break;
5163
5164 case SNOOZE:
5165 outb(SNOOZE, PCI_CFPM);
5166 break;
5167
5168 case SLEEP:
5169 outl(0, DE4X5_SICR);
5170 outb(SLEEP, PCI_CFPM);
5171 break;
5172 }
5173 } else {
5174 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5175 switch(state) {
5176 case WAKEUP:
5177 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5178 mdelay(10);
5179 break;
5180
5181 case SNOOZE:
5182 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5183 break;
5184
5185 case SLEEP:
5186 outl(0, DE4X5_SICR);
5187 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5188 break;
5189 }
5190 }
5191}
5192
5193static void
5194de4x5_parse_params(struct net_device *dev)
5195{
5196 struct de4x5_private *lp = netdev_priv(dev);
5197 char *p, *q, t;
5198
5199 lp->params.fdx = 0;
5200 lp->params.autosense = AUTO;
5201
5202 if (args == NULL) return;
5203
5204 if ((p = strstr(args, dev->name))) {
5205 if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
5206 t = *q;
5207 *q = '\0';
5208
5209 if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
5210
5211 if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
5212 if (strstr(p, "TP")) {
5213 lp->params.autosense = TP;
5214 } else if (strstr(p, "TP_NW")) {
5215 lp->params.autosense = TP_NW;
5216 } else if (strstr(p, "BNC")) {
5217 lp->params.autosense = BNC;
5218 } else if (strstr(p, "AUI")) {
5219 lp->params.autosense = AUI;
5220 } else if (strstr(p, "BNC_AUI")) {
5221 lp->params.autosense = BNC;
5222 } else if (strstr(p, "10Mb")) {
5223 lp->params.autosense = _10Mb;
5224 } else if (strstr(p, "100Mb")) {
5225 lp->params.autosense = _100Mb;
5226 } else if (strstr(p, "AUTO")) {
5227 lp->params.autosense = AUTO;
5228 }
5229 }
5230 *q = t;
5231 }
5232}
5233
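Judging from the strstr() matching above, the options are given as one string holding the interface name followed by its tokens, terminated by the next "eth" substring: for example, loading the module with args='eth0:fdx autosense=TP_NW' requests full duplex and 10BASE-T with NWay for eth0, and sections for several boards can simply be concatenated in the one string.
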
5234static void
5235de4x5_dbg_open(struct net_device *dev)
5236{
5237 struct de4x5_private *lp = netdev_priv(dev);
5238 int i;
5239
5240 if (de4x5_debug & DEBUG_OPEN) {
5241 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
5242 printk("\tphysical address: ");
5243 for (i=0;i<6;i++) {
5244 printk("%2.2x:",(short)dev->dev_addr[i]);
5245 }
5246 printk("\n");
5247 printk("Descriptor head addresses:\n");
5248 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
5249 printk("Descriptor addresses:\nRX: ");
5250 for (i=0;i<lp->rxRingSize-1;i++){
5251 if (i < 3) {
5252 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
5253 }
5254 }
5255 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
5256 printk("TX: ");
5257 for (i=0;i<lp->txRingSize-1;i++){
5258 if (i < 3) {
5259 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
5260 }
5261 }
5262 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
5263 printk("Descriptor buffers:\nRX: ");
5264 for (i=0;i<lp->rxRingSize-1;i++){
5265 if (i < 3) {
5266 printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
5267 }
5268 }
5269 printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
5270 printk("TX: ");
5271 for (i=0;i<lp->txRingSize-1;i++){
5272 if (i < 3) {
5273 printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
5274 }
5275 }
5276 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5277 printk("Ring size:\nRX: %d\nTX: %d\n",
5278 (short)lp->rxRingSize,
5279 (short)lp->txRingSize);
5280 }
5281}
5282
5283static void
5284de4x5_dbg_mii(struct net_device *dev, int k)
5285{
5286 struct de4x5_private *lp = netdev_priv(dev);
5287 u_long iobase = dev->base_addr;
5288
5289 if (de4x5_debug & DEBUG_MII) {
5290 printk("\nMII device address: %d\n", lp->phy[k].addr);
5291 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
5292 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
5293 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
5294 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
5295 if (lp->phy[k].id != BROADCOM_T4) {
5296 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
5297 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
5298 }
5299 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
5300 if (lp->phy[k].id != BROADCOM_T4) {
5301 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
5302 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
5303 } else {
5304 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5305 }
5306 }
5307}
5308
5309static void
5310de4x5_dbg_media(struct net_device *dev)
5311{
5312 struct de4x5_private *lp = netdev_priv(dev);
5313
5314 if (lp->media != lp->c_media) {
5315 if (de4x5_debug & DEBUG_MEDIA) {
5316 printk("%s: media is %s%s\n", dev->name,
5317 (lp->media == NC ? "unconnected, link down or incompatible connection" :
5318 (lp->media == TP ? "TP" :
5319 (lp->media == ANS ? "TP/Nway" :
5320 (lp->media == BNC ? "BNC" :
5321 (lp->media == AUI ? "AUI" :
5322 (lp->media == BNC_AUI ? "BNC/AUI" :
5323 (lp->media == EXT_SIA ? "EXT SIA" :
5324 (lp->media == _100Mb ? "100Mb/s" :
5325 (lp->media == _10Mb ? "10Mb/s" :
5326 "???"
5327 ))))))))), (lp->fdx?" full duplex.":"."));
5328 }
5329 lp->c_media = lp->media;
5330 }
5331}
5332
5333static void
5334de4x5_dbg_srom(struct de4x5_srom *p)
5335{
5336 int i;
5337
5338 if (de4x5_debug & DEBUG_SROM) {
5339 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
5340 printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
5341 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
5342 printk("SROM version: %02x\n", (u_char)(p->version));
5343 printk("# controllers: %02x\n", (u_char)(p->num_controllers));
5344
5345 printk("Hardware Address: %pM\n", p->ieee_addr);
5346 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
5347 for (i=0; i<64; i++) {
5348 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
5349 }
5350 }
5351}
5352
5353static void
5354de4x5_dbg_rx(struct sk_buff *skb, int len)
5355{
5356 int i, j;
5357
5358 if (de4x5_debug & DEBUG_RX) {
5359 printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
5360 skb->data, &skb->data[6],
5361 (u_char)skb->data[12],
5362 (u_char)skb->data[13],
5363 len);
5364 for (j=0; len>0;j+=16, len-=16) {
5365 printk(" %03x: ",j);
5366 for (i=0; i<16 && i<len; i++) {
5367 printk("%02x ",(u_char)skb->data[i+j]);
5368 }
5369 printk("\n");
5370 }
5371 }
5372}
5373
5374/*
5375** Perform IOCTL call functions here. Some are privileged operations and the
5376** CAP_NET_ADMIN capability is checked in those cases. In the normal course of events
5377** this function is only used for my testing.
5378*/
5379static int
5380de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5381{
5382 struct de4x5_private *lp = netdev_priv(dev);
5383 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
5384 u_long iobase = dev->base_addr;
5385 int i, j, status = 0;
5386 s32 omr;
5387 union {
5388 u8 addr[144];
5389 u16 sval[72];
5390 u32 lval[36];
5391 } tmp;
5392 u_long flags = 0;
5393
5394 switch(ioc->cmd) {
5395 case DE4X5_GET_HWADDR: /* Get the hardware address */
5396 ioc->len = ETH_ALEN;
5397 for (i=0; i<ETH_ALEN; i++) {
5398 tmp.addr[i] = dev->dev_addr[i];
5399 }
5400 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5401 break;
5402
5403 case DE4X5_SET_HWADDR: /* Set the hardware address */
5404 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5405 if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
5406 if (netif_queue_stopped(dev))
5407 return -EBUSY;
5408 netif_stop_queue(dev);
5409 for (i=0; i<ETH_ALEN; i++) {
5410 dev->dev_addr[i] = tmp.addr[i];
5411 }
5412 build_setup_frame(dev, PHYS_ADDR_ONLY);
5413 /* Set up the descriptor and give ownership to the card */
5414 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
5415 SETUP_FRAME_LEN, (struct sk_buff *)1);
5416 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
5417 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
5418 netif_wake_queue(dev); /* Unlock the TX ring */
5419 break;
5420
5421 case DE4X5_SAY_BOO: /* Say "Boo!" to the kernel log file */
5422 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5423 printk("%s: Boo!\n", dev->name);
5424 break;
5425
5426 case DE4X5_MCA_EN: /* Enable pass all multicast addressing */
5427 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5428 omr = inl(DE4X5_OMR);
5429 omr |= OMR_PM;
5430 outl(omr, DE4X5_OMR);
5431 break;
5432
5433 case DE4X5_GET_STATS: /* Get the driver statistics */
5434 {
5435 struct pkt_stats statbuf;
5436 ioc->len = sizeof(statbuf);
5437 spin_lock_irqsave(&lp->lock, flags);
5438 memcpy(&statbuf, &lp->pktStats, ioc->len);
5439 spin_unlock_irqrestore(&lp->lock, flags);
5440 if (copy_to_user(ioc->data, &statbuf, ioc->len))
5441 return -EFAULT;
5442 break;
5443 }
5444 case DE4X5_CLR_STATS: /* Zero out the driver statistics */
5445 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5446 spin_lock_irqsave(&lp->lock, flags);
5447 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
5448 spin_unlock_irqrestore(&lp->lock, flags);
5449 break;
5450
5451 case DE4X5_GET_OMR: /* Get the OMR Register contents */
5452 tmp.addr[0] = inl(DE4X5_OMR);
5453 if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
5454 break;
5455
5456 case DE4X5_SET_OMR: /* Set the OMR Register contents */
5457 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5458 if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
5459 outl(tmp.addr[0], DE4X5_OMR);
5460 break;
5461
5462 case DE4X5_GET_REG: /* Get the DE4X5 Registers */
5463 j = 0;
5464 tmp.lval[0] = inl(DE4X5_STS); j+=4;
5465 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
5466 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
5467 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
5468 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
5469 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
5470 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
5471 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
5472 ioc->len = j;
5473 if (copy_to_user(ioc->data, tmp.lval, ioc->len))
5474 return -EFAULT;
5475 break;
5476
5477#define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
5478/*
5479 case DE4X5_DUMP:
5480 j = 0;
5481 tmp.addr[j++] = dev->irq;
5482 for (i=0; i<ETH_ALEN; i++) {
5483 tmp.addr[j++] = dev->dev_addr[i];
5484 }
5485 tmp.addr[j++] = lp->rxRingSize;
5486 tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
5487 tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
5488
5489 for (i=0;i<lp->rxRingSize-1;i++){
5490 if (i < 3) {
5491 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
5492 }
5493 }
5494 tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
5495 for (i=0;i<lp->txRingSize-1;i++){
5496 if (i < 3) {
5497 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
5498 }
5499 }
5500 tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
5501
5502 for (i=0;i<lp->rxRingSize-1;i++){
5503 if (i < 3) {
5504 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
5505 }
5506 }
5507 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
5508 for (i=0;i<lp->txRingSize-1;i++){
5509 if (i < 3) {
5510 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
5511 }
5512 }
5513 tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
5514
5515 for (i=0;i<lp->rxRingSize;i++){
5516 tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
5517 }
5518 for (i=0;i<lp->txRingSize;i++){
5519 tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
5520 }
5521
5522 tmp.lval[j>>2] = inl(DE4X5_BMR); j+=4;
5523 tmp.lval[j>>2] = inl(DE4X5_TPD); j+=4;
5524 tmp.lval[j>>2] = inl(DE4X5_RPD); j+=4;
5525 tmp.lval[j>>2] = inl(DE4X5_RRBA); j+=4;
5526 tmp.lval[j>>2] = inl(DE4X5_TRBA); j+=4;
5527 tmp.lval[j>>2] = inl(DE4X5_STS); j+=4;
5528 tmp.lval[j>>2] = inl(DE4X5_OMR); j+=4;
5529 tmp.lval[j>>2] = inl(DE4X5_IMR); j+=4;
5530 tmp.lval[j>>2] = lp->chipset; j+=4;
5531 if (lp->chipset == DC21140) {
5532 tmp.lval[j>>2] = gep_rd(dev); j+=4;
5533 } else {
5534 tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
5535 tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
5536 tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
5537 tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
5538 }
5539 tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
5540 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
5541 tmp.lval[j>>2] = lp->active; j+=4;
5542 tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5543 tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5544 tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5545 tmp.lval[j>>2]=mii_rd(MII_ID1,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5546 if (lp->phy[lp->active].id != BROADCOM_T4) {
5547 tmp.lval[j>>2]=mii_rd(MII_ANA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5548 tmp.lval[j>>2]=mii_rd(MII_ANLPA,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5549 }
5550 tmp.lval[j>>2]=mii_rd(0x10,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5551 if (lp->phy[lp->active].id != BROADCOM_T4) {
5552 tmp.lval[j>>2]=mii_rd(0x11,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5553 tmp.lval[j>>2]=mii_rd(0x12,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5554 } else {
5555 tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
5556 }
5557 }
5558
5559 tmp.addr[j++] = lp->txRingSize;
5560 tmp.addr[j++] = netif_queue_stopped(dev);
5561
5562 ioc->len = j;
5563 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5564 break;
5565
5566*/
5567 default:
5568 return -EOPNOTSUPP;
5569 }
5570
5571 return status;
5572}
5573
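A hedged userspace sketch of issuing one of these private commands, assuming the struct de4x5_ioctl layout declared in de4x5.h (cmd, len, data pointer, overlaid on ifr_ifru) and that the driver's ioctl hook is reached through a SIOCDEVPRIVATE request:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/sockios.h>
#include <net/if.h>

#define DE4X5_GET_HWADDR 0x01		/* command value from de4x5.h */

struct de4x5_ioctl {			/* mirrors the struct in de4x5.h */
    unsigned short cmd;
    unsigned short len;
    unsigned char *data;
};

int main(void)
{
    struct ifreq ifr;
    struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
    unsigned char hw[6];
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0)
	return 1;
    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
    ioc->cmd = DE4X5_GET_HWADDR;
    ioc->len = sizeof(hw);
    ioc->data = hw;

    if (ioctl(fd, SIOCDEVPRIVATE, &ifr) == 0)
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       hw[0], hw[1], hw[2], hw[3], hw[4], hw[5]);
    close(fd);
    return 0;
}
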
5574static int __init de4x5_module_init (void)
5575{
5576 int err = 0;
5577
5578#ifdef CONFIG_PCI
5579 err = pci_register_driver(&de4x5_pci_driver);
5580#endif
5581#ifdef CONFIG_EISA
5582 err |= eisa_driver_register (&de4x5_eisa_driver);
5583#endif
5584
5585 return err;
5586}
5587
5588static void __exit de4x5_module_exit (void)
5589{
5590#ifdef CONFIG_PCI
5591 pci_unregister_driver (&de4x5_pci_driver);
5592#endif
5593#ifdef CONFIG_EISA
5594 eisa_driver_unregister (&de4x5_eisa_driver);
5595#endif
5596}
5597
5598module_init (de4x5_module_init);
5599module_exit (de4x5_module_exit);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.h b/drivers/net/ethernet/dec/tulip/de4x5.h
new file mode 100644
index 000000000000..ec756eba397b
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/de4x5.h
@@ -0,0 +1,1017 @@
1/*
2 Copyright 1994 Digital Equipment Corporation.
3
4 This software may be used and distributed according to the terms of the
5 GNU General Public License, incorporated herein by reference.
6
7 The author may be reached as davies@wanton.lkg.dec.com or Digital
8 Equipment Corporation, 550 King Street, Littleton MA 01460.
9
10 =========================================================================
11*/
12
13/*
14** DC21040 CSR<1..15> Register Address Map
15*/
16#define DE4X5_BMR iobase+(0x000 << lp->bus) /* Bus Mode Register */
17#define DE4X5_TPD iobase+(0x008 << lp->bus) /* Transmit Poll Demand Reg */
18#define DE4X5_RPD iobase+(0x010 << lp->bus) /* Receive Poll Demand Reg */
19#define DE4X5_RRBA iobase+(0x018 << lp->bus) /* RX Ring Base Address Reg */
20#define DE4X5_TRBA iobase+(0x020 << lp->bus) /* TX Ring Base Address Reg */
21#define DE4X5_STS iobase+(0x028 << lp->bus) /* Status Register */
22#define DE4X5_OMR iobase+(0x030 << lp->bus) /* Operation Mode Register */
23#define DE4X5_IMR iobase+(0x038 << lp->bus) /* Interrupt Mask Register */
24#define DE4X5_MFC iobase+(0x040 << lp->bus) /* Missed Frame Counter */
25#define DE4X5_APROM iobase+(0x048 << lp->bus) /* Ethernet Address PROM */
26#define DE4X5_BROM iobase+(0x048 << lp->bus) /* Boot ROM Register */
27#define DE4X5_SROM iobase+(0x048 << lp->bus) /* Serial ROM Register */
28#define DE4X5_MII iobase+(0x048 << lp->bus) /* MII Interface Register */
29#define DE4X5_DDR iobase+(0x050 << lp->bus) /* Data Diagnostic Register */
30#define DE4X5_FDR iobase+(0x058 << lp->bus) /* Full Duplex Register */
31#define DE4X5_GPT iobase+(0x058 << lp->bus) /* General Purpose Timer Reg.*/
32#define DE4X5_GEP iobase+(0x060 << lp->bus) /* General Purpose Register */
33#define DE4X5_SISR iobase+(0x060 << lp->bus) /* SIA Status Register */
34#define DE4X5_SICR iobase+(0x068 << lp->bus) /* SIA Connectivity Register */
35#define DE4X5_STRR iobase+(0x070 << lp->bus) /* SIA TX/RX Register */
36#define DE4X5_SIGR iobase+(0x078 << lp->bus) /* SIA General Register */
37
38/*
39** EISA Register Address Map
40*/
41#define EISA_ID iobase+0x0c80 /* EISA ID Registers */
42#define EISA_ID0 iobase+0x0c80 /* EISA ID Register 0 */
43#define EISA_ID1 iobase+0x0c81 /* EISA ID Register 1 */
44#define EISA_ID2 iobase+0x0c82 /* EISA ID Register 2 */
45#define EISA_ID3 iobase+0x0c83 /* EISA ID Register 3 */
46#define EISA_CR iobase+0x0c84 /* EISA Control Register */
47#define EISA_REG0 iobase+0x0c88 /* EISA Configuration Register 0 */
48#define EISA_REG1 iobase+0x0c89 /* EISA Configuration Register 1 */
49#define EISA_REG2 iobase+0x0c8a /* EISA Configuration Register 2 */
50#define EISA_REG3 iobase+0x0c8f /* EISA Configuration Register 3 */
51#define EISA_APROM iobase+0x0c90 /* Ethernet Address PROM */
52
53/*
54** PCI/EISA Configuration Registers Address Map
55*/
56#define PCI_CFID iobase+0x0008 /* PCI Configuration ID Register */
57#define PCI_CFCS iobase+0x000c /* PCI Command/Status Register */
58#define PCI_CFRV iobase+0x0018 /* PCI Revision Register */
59#define PCI_CFLT iobase+0x001c /* PCI Latency Timer Register */
60#define PCI_CBIO iobase+0x0028 /* PCI Base I/O Register */
61#define PCI_CBMA iobase+0x002c /* PCI Base Memory Address Register */
62#define PCI_CBER iobase+0x0030 /* PCI Expansion ROM Base Address Reg. */
63#define PCI_CFIT iobase+0x003c /* PCI Configuration Interrupt Register */
64#define PCI_CFDA iobase+0x0040 /* PCI Driver Area Register */
65#define PCI_CFDD iobase+0x0041 /* PCI Driver Dependent Area Register */
66#define PCI_CFPM iobase+0x0043 /* PCI Power Management Area Register */
67
68/*
69** EISA Configuration Register 0 bit definitions
70*/
71#define ER0_BSW 0x80 /* EISA Bus Slave Width, 1: 32 bits */
72#define ER0_BMW 0x40 /* EISA Bus Master Width, 1: 32 bits */
73#define ER0_EPT 0x20 /* EISA PREEMPT Time, 0: 23 BCLKs */
74#define ER0_ISTS 0x10 /* Interrupt Status (X) */
75#define ER0_LI 0x08 /* Latch Interrupts */
76#define ER0_INTL 0x06 /* INTerrupt Level */
77#define ER0_INTT 0x01 /* INTerrupt Type, 0: Level, 1: Edge */
78
79/*
80** EISA Configuration Register 1 bit definitions
81*/
82#define ER1_IAM 0xe0 /* ISA Address Mode */
83#define ER1_IAE 0x10 /* ISA Addressing Enable */
84#define ER1_UPIN 0x0f /* User Pins */
85
86/*
87** EISA Configuration Register 2 bit definitions
88*/
89#define ER2_BRS 0xc0 /* Boot ROM Size */
90#define ER2_BRA 0x3c /* Boot ROM Address <16:13> */
91
92/*
93** EISA Configuration Register 3 bit definitions
94*/
95#define ER3_BWE 0x40 /* Burst Write Enable */
96#define ER3_BRE 0x04 /* Burst Read Enable */
97#define ER3_LSR 0x02 /* Local Software Reset */
98
99/*
100** PCI Configuration ID Register (PCI_CFID). The Device IDs are left
101** shifted 8 bits to allow detection of DC21142 and DC21143 variants with
102** the configuration revision register step number.
103*/
104#define CFID_DID 0xff00 /* Device ID */
105#define CFID_VID 0x00ff /* Vendor ID */
106#define DC21040_DID 0x0200 /* Unique Device ID # */
107#define DC21040_VID 0x1011 /* DC21040 Manufacturer */
108#define DC21041_DID 0x1400 /* Unique Device ID # */
109#define DC21041_VID 0x1011 /* DC21041 Manufacturer */
110#define DC21140_DID 0x0900 /* Unique Device ID # */
111#define DC21140_VID 0x1011 /* DC21140 Manufacturer */
112#define DC2114x_DID 0x1900 /* Unique Device ID # */
113#define DC2114x_VID 0x1011 /* DC2114[23] Manufacturer */
114
115/*
116** Chipset defines
117*/
118#define DC21040 DC21040_DID
119#define DC21041 DC21041_DID
120#define DC21140 DC21140_DID
121#define DC2114x DC2114x_DID
122#define DC21142 (DC2114x_DID | 0x0010)
123#define DC21143 (DC2114x_DID | 0x0030)
124#define DC2114x_BRK 0x0020 /* CFRV break between DC21142 & DC21143 */
125
126#define is_DC21040 ((vendor == DC21040_VID) && (device == DC21040_DID))
127#define is_DC21041 ((vendor == DC21041_VID) && (device == DC21041_DID))
128#define is_DC21140 ((vendor == DC21140_VID) && (device == DC21140_DID))
129#define is_DC2114x ((vendor == DC2114x_VID) && (device == DC2114x_DID))
130#define is_DC21142 ((vendor == DC2114x_VID) && (device == DC21142))
131#define is_DC21143 ((vendor == DC2114x_VID) && (device == DC21143))
132
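Since the DC21142 and DC21143 share the DC2114x PCI device ID, the family is split on the configuration revision step; a sketch of that test, repeating the values above so it compiles on its own (the equivalent check lives in the probe path of de4x5.c):

#include <stdio.h>

#define DC2114x_DID	0x1900		/* values repeated from above */
#define DC21142		(DC2114x_DID | 0x0010)
#define DC21143		(DC2114x_DID | 0x0030)
#define DC2114x_BRK	0x0020
#define CFRV_RN		0x000000f0

static unsigned int dc2114x_variant(unsigned int cfrv)
{
    /* Revision steps below the break are DC21142, at or above it DC21143 */
    return ((cfrv & CFRV_RN) < DC2114x_BRK) ? DC21142 : DC21143;
}

int main(void)
{
    printf("rev 0x11 -> %#x, rev 0x41 -> %#x\n",
	   dc2114x_variant(0x11), dc2114x_variant(0x41));
    return 0;
}
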
133/*
134** PCI Configuration Command/Status Register (PCI_CFCS)
135*/
136#define CFCS_DPE 0x80000000 /* Detected Parity Error (S) */
137#define CFCS_SSE 0x40000000 /* Signal System Error (S) */
138#define CFCS_RMA 0x20000000 /* Receive Master Abort (S) */
139#define CFCS_RTA 0x10000000 /* Receive Target Abort (S) */
140#define CFCS_DST 0x06000000 /* DEVSEL Timing (S) */
141#define CFCS_DPR 0x01000000 /* Data Parity Report (S) */
142#define CFCS_FBB 0x00800000 /* Fast Back-To-Back (S) */
143#define CFCS_SEE 0x00000100 /* System Error Enable (C) */
144#define CFCS_PER 0x00000040 /* Parity Error Response (C) */
145#define CFCS_MO 0x00000004 /* Master Operation (C) */
146#define CFCS_MSA 0x00000002 /* Memory Space Access (C) */
147#define CFCS_IOSA 0x00000001 /* I/O Space Access (C) */
148
149/*
150** PCI Configuration Revision Register (PCI_CFRV)
151*/
152#define CFRV_BC 0xff000000 /* Base Class */
153#define CFRV_SC 0x00ff0000 /* Subclass */
154#define CFRV_RN 0x000000f0 /* Revision Number */
155#define CFRV_SN 0x0000000f /* Step Number */
156#define BASE_CLASS 0x02000000 /* Indicates Network Controller */
157#define SUB_CLASS 0x00000000 /* Indicates Ethernet Controller */
158#define STEP_NUMBER 0x00000020 /* Increments for future chips */
159#define REV_NUMBER 0x00000003 /* 0x00, 0x01, 0x02, 0x03: Rev in Step */
160#define CFRV_MASK 0xffff0000 /* Register mask */
161
162/*
163** PCI Configuration Latency Timer Register (PCI_CFLT)
164*/
165#define CFLT_BC 0x0000ff00 /* Latency Timer bits */
166
167/*
168** PCI Configuration Base I/O Address Register (PCI_CBIO)
169*/
170#define CBIO_MASK -128 /* Base I/O Address Mask */
171#define CBIO_IOSI 0x00000001 /* I/O Space Indicator (RO, value is 1) */
172
173/*
174** PCI Configuration Card Information Structure Register (PCI_CCIS)
175*/
176#define CCIS_ROMI 0xf0000000 /* ROM Image */
177#define CCIS_ASO 0x0ffffff8 /* Address Space Offset */
178#define CCIS_ASI 0x00000007 /* Address Space Indicator */
179
180/*
181** PCI Configuration Subsystem ID Register (PCI_SSID)
182*/
183#define SSID_SSID 0xffff0000 /* Subsystem ID */
184#define SSID_SVID 0x0000ffff /* Subsystem Vendor ID */
185
186/*
187** PCI Configuration Expansion ROM Base Address Register (PCI_CBER)
188*/
189#define CBER_MASK 0xfffffc00 /* Expansion ROM Base Address Mask */
190#define CBER_ROME 0x00000001 /* ROM Enable */
191
192/*
193** PCI Configuration Interrupt Register (PCI_CFIT)
194*/
195#define CFIT_MXLT 0xff000000 /* MAX_LAT Value (0.25us periods) */
196#define CFIT_MNGT 0x00ff0000 /* MIN_GNT Value (0.25us periods) */
197#define CFIT_IRQP 0x0000ff00 /* Interrupt Pin */
198#define CFIT_IRQL 0x000000ff /* Interrupt Line */
199
200/*
201** PCI Configuration Power Management Area Register (PCI_CFPM)
202*/
203#define SLEEP 0x80 /* Power Saving Sleep Mode */
204#define SNOOZE 0x40 /* Power Saving Snooze Mode */
205#define WAKEUP 0x00 /* Power Saving Wakeup */
206
207#define PCI_CFDA_DSU 0x41 /* 8 bit Configuration Space Address */
208#define PCI_CFDA_PSM 0x43 /* 8 bit Configuration Space Address */
209
210/*
211** DC21040 Bus Mode Register (DE4X5_BMR)
212*/
213#define BMR_RML 0x00200000 /* [Memory] Read Multiple */
214#define BMR_DBO 0x00100000 /* Descriptor Byte Ordering (Endian) */
215#define BMR_TAP 0x000e0000 /* Transmit Automatic Polling */
216#define BMR_DAS 0x00010000 /* Diagnostic Address Space */
217#define BMR_CAL 0x0000c000 /* Cache Alignment */
218#define BMR_PBL 0x00003f00 /* Programmable Burst Length */
219#define BMR_BLE 0x00000080 /* Big/Little Endian */
220#define BMR_DSL 0x0000007c /* Descriptor Skip Length */
221#define BMR_BAR 0x00000002 /* Bus ARbitration */
222#define BMR_SWR 0x00000001 /* Software Reset */
223
224 /* Timings here are for 10BASE-T/AUI only*/
225#define TAP_NOPOLL 0x00000000 /* No automatic polling */
226#define TAP_200US 0x00020000 /* TX automatic polling every 200us */
227#define TAP_800US 0x00040000 /* TX automatic polling every 800us */
228#define TAP_1_6MS 0x00060000 /* TX automatic polling every 1.6ms */
229#define TAP_12_8US 0x00080000 /* TX automatic polling every 12.8us */
230#define TAP_25_6US 0x000a0000 /* TX automatic polling every 25.6us */
231#define TAP_51_2US 0x000c0000 /* TX automatic polling every 51.2us */
232#define TAP_102_4US 0x000e0000 /* TX automatic polling every 102.4us */
233
234#define CAL_NOUSE 0x00000000 /* Not used */
235#define CAL_8LONG 0x00004000 /* 8-longword alignment */
236#define CAL_16LONG 0x00008000 /* 16-longword alignment */
237#define CAL_32LONG 0x0000c000 /* 32-longword alignment */
238
239#define PBL_0 0x00000000 /* DMA burst length = amount in RX FIFO */
240#define PBL_1 0x00000100 /* 1 longword DMA burst length */
241#define PBL_2 0x00000200 /* 2 longwords DMA burst length */
242#define PBL_4 0x00000400 /* 4 longwords DMA burst length */
243#define PBL_8 0x00000800 /* 8 longwords DMA burst length */
244#define PBL_16 0x00001000 /* 16 longwords DMA burst length */
245#define PBL_32 0x00002000 /* 32 longwords DMA burst length */
246
247#define DSL_0 0x00000000 /* 0 longword / descriptor */
248#define DSL_1 0x00000004 /* 1 longword / descriptor */
249#define DSL_2 0x00000008 /* 2 longwords / descriptor */
250#define DSL_4 0x00000010 /* 4 longwords / descriptor */
251#define DSL_8 0x00000020 /* 8 longwords / descriptor */
252#define DSL_16 0x00000040 /* 16 longwords / descriptor */
253#define DSL_32 0x00000080 /* 32 longwords / descriptor */
254
255/*
256** DC21040 Transmit Poll Demand Register (DE4X5_TPD)
257*/
258#define TPD 0x00000001 /* Transmit Poll Demand */
259
260/*
261** DC21040 Receive Poll Demand Register (DE4X5_RPD)
262*/
263#define RPD 0x00000001 /* Receive Poll Demand */
264
265/*
266** DC21040 Receive Ring Base Address Register (DE4X5_RRBA)
267*/
268#define RRBA 0xfffffffc /* RX Descriptor List Start Address */
269
270/*
271** DC21040 Transmit Ring Base Address Register (DE4X5_TRBA)
272*/
273#define TRBA 0xfffffffc /* TX Descriptor List Start Address */
274
275/*
276** Status Register (DE4X5_STS)
277*/
278#define STS_GPI 0x04000000 /* General Purpose Port Interrupt */
279#define STS_BE 0x03800000 /* Bus Error Bits */
280#define STS_TS 0x00700000 /* Transmit Process State */
281#define STS_RS 0x000e0000 /* Receive Process State */
282#define STS_NIS 0x00010000 /* Normal Interrupt Summary */
283#define STS_AIS 0x00008000 /* Abnormal Interrupt Summary */
284#define STS_ER 0x00004000 /* Early Receive */
285#define STS_FBE 0x00002000 /* Fatal Bus Error */
286#define STS_SE 0x00002000 /* System Error */
287#define STS_LNF 0x00001000 /* Link Fail */
288#define STS_FD 0x00000800 /* Full-Duplex Short Frame Received */
289#define STS_TM 0x00000800 /* Timer Expired (DC21041) */
290#define STS_ETI 0x00000400 /* Early Transmit Interrupt */
291#define STS_AT 0x00000400 /* AUI/TP Pin */
292#define STS_RWT 0x00000200 /* Receive Watchdog Time-Out */
293#define STS_RPS 0x00000100 /* Receive Process Stopped */
294#define STS_RU 0x00000080 /* Receive Buffer Unavailable */
295#define STS_RI 0x00000040 /* Receive Interrupt */
296#define STS_UNF 0x00000020 /* Transmit Underflow */
297#define STS_LNP 0x00000010 /* Link Pass */
298#define STS_ANC 0x00000010 /* Autonegotiation Complete */
299#define STS_TJT 0x00000008 /* Transmit Jabber Time-Out */
300#define STS_TU 0x00000004 /* Transmit Buffer Unavailable */
301#define STS_TPS 0x00000002 /* Transmit Process Stopped */
302#define STS_TI 0x00000001 /* Transmit Interrupt */
303
304#define EB_PAR 0x00000000 /* Parity Error */
305#define EB_MA 0x00800000 /* Master Abort */
306#define EB_TA 0x01000000 /* Target Abort */
307#define EB_RES0 0x01800000 /* Reserved */
308#define EB_RES1 0x02000000 /* Reserved */
309
310#define TS_STOP 0x00000000 /* Stopped */
311#define TS_FTD 0x00100000 /* Fetch Transmit Descriptor */
312#define TS_WEOT 0x00200000 /* Wait for End Of Transmission */
313#define TS_QDAT 0x00300000 /* Queue skb data into TX FIFO */
314#define TS_RES 0x00400000 /* Reserved */
315#define TS_SPKT 0x00500000 /* Setup Packet */
316#define TS_SUSP 0x00600000 /* Suspended */
317#define TS_CLTD 0x00700000 /* Close Transmit Descriptor */
318
319#define RS_STOP 0x00000000 /* Stopped */
320#define RS_FRD 0x00020000 /* Fetch Receive Descriptor */
321#define RS_CEOR 0x00040000 /* Check for End of Receive Packet */
322#define RS_WFRP 0x00060000 /* Wait for Receive Packet */
323#define RS_SUSP 0x00080000 /* Suspended */
324#define RS_CLRD 0x000a0000 /* Close Receive Descriptor */
325#define RS_FLUSH 0x000c0000 /* Flush RX FIFO */
326#define RS_QRFS 0x000e0000 /* Queue RX FIFO into RX Skb */
327
328#define INT_CANCEL 0x0001ffff /* For zeroing all interrupt sources */
329
330/*
331** Operation Mode Register (DE4X5_OMR)
332*/
333#define OMR_SC 0x80000000 /* Special Capture Effect Enable */
334#define OMR_RA 0x40000000 /* Receive All */
335#define OMR_SDP 0x02000000 /* SD Polarity - MUST BE ASSERTED */
336#define OMR_SCR 0x01000000 /* Scrambler Mode */
337#define OMR_PCS 0x00800000 /* PCS Function */
338#define OMR_TTM 0x00400000 /* Transmit Threshold Mode */
339#define OMR_SF 0x00200000 /* Store and Forward */
340#define OMR_HBD 0x00080000 /* HeartBeat Disable */
341#define OMR_PS 0x00040000 /* Port Select */
342#define OMR_CA 0x00020000 /* Capture Effect Enable */
343#define OMR_BP 0x00010000 /* Back Pressure */
344#define OMR_TR 0x0000c000 /* Threshold Control Bits */
345#define OMR_ST 0x00002000 /* Start/Stop Transmission Command */
346#define OMR_FC 0x00001000 /* Force Collision Mode */
347#define OMR_OM 0x00000c00 /* Operating Mode */
348#define OMR_FDX 0x00000200 /* Full Duplex Mode */
349#define OMR_FKD 0x00000100 /* Flaky Oscillator Disable */
350#define OMR_PM 0x00000080 /* Pass All Multicast */
351#define OMR_PR 0x00000040 /* Promiscuous Mode */
352#define OMR_SB 0x00000020 /* Start/Stop Backoff Counter */
353#define OMR_IF 0x00000010 /* Inverse Filtering */
354#define OMR_PB 0x00000008 /* Pass Bad Frames */
355#define OMR_HO 0x00000004 /* Hash Only Filtering Mode */
356#define OMR_SR 0x00000002 /* Start/Stop Receive */
357#define OMR_HP 0x00000001 /* Hash/Perfect Receive Filtering Mode */
358
359#define TR_72 0x00000000 /* Threshold set to 72 (128) bytes */
360#define TR_96 0x00004000 /* Threshold set to 96 (256) bytes */
361#define TR_128 0x00008000 /* Threshold set to 128 (512) bytes */
362#define TR_160 0x0000c000 /* Threshold set to 160 (1024) bytes */
363
364#define OMR_DEF (OMR_SDP)
365#define OMR_SIA (OMR_SDP | OMR_TTM)
366#define OMR_SYM (OMR_SDP | OMR_SCR | OMR_PCS | OMR_HBD | OMR_PS)
367#define OMR_MII_10 (OMR_SDP | OMR_TTM | OMR_PS)
368#define OMR_MII_100 (OMR_SDP | OMR_HBD | OMR_PS)
369
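Read against the OMR bit definitions above: OMR_SIA drives the on-chip serial interface with the transmit threshold mode bit for 10Mb/s media, OMR_SYM selects the 100Mb/s symbol port with the scrambler and PCS on and the heartbeat off, and the two MII composites differ only in OMR_TTM, the bit that separates 10Mb/s from 100Mb/s operation.
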
370/*
371** DC21040 Interrupt Mask Register (DE4X5_IMR)
372*/
373#define IMR_GPM 0x04000000 /* General Purpose Port Mask */
374#define IMR_NIM 0x00010000 /* Normal Interrupt Summary Mask */
375#define IMR_AIM 0x00008000 /* Abnormal Interrupt Summary Mask */
376#define IMR_ERM 0x00004000 /* Early Receive Mask */
377#define IMR_FBM 0x00002000 /* Fatal Bus Error Mask */
378#define IMR_SEM 0x00002000 /* System Error Mask */
379#define IMR_LFM 0x00001000 /* Link Fail Mask */
380#define IMR_FDM 0x00000800 /* Full-Duplex (Short Frame) Mask */
381#define IMR_TMM 0x00000800 /* Timer Expired Mask (DC21041) */
382#define IMR_ETM 0x00000400 /* Early Transmit Interrupt Mask */
383#define IMR_ATM 0x00000400 /* AUI/TP Switch Mask */
384#define IMR_RWM 0x00000200 /* Receive Watchdog Time-Out Mask */
385#define IMR_RSM 0x00000100 /* Receive Stopped Mask */
386#define IMR_RUM 0x00000080 /* Receive Buffer Unavailable Mask */
387#define IMR_RIM 0x00000040 /* Receive Interrupt Mask */
388#define IMR_UNM 0x00000020 /* Underflow Interrupt Mask */
389#define IMR_ANM 0x00000010 /* Autonegotiation Complete Mask */
390#define IMR_LPM 0x00000010 /* Link Pass */
391#define IMR_TJM 0x00000008 /* Transmit Time-Out Jabber Mask */
392#define IMR_TUM 0x00000004 /* Transmit Buffer Unavailable Mask */
393#define IMR_TSM 0x00000002 /* Transmission Stopped Mask */
394#define IMR_TIM 0x00000001 /* Transmit Interrupt Mask */
395
396/*
397** Missed Frames and FIFO Overflow Counters (DE4X5_MFC)
398*/
399#define MFC_FOCO 0x10000000 /* FIFO Overflow Counter Overflow Bit */
400#define MFC_FOC 0x0ffe0000 /* FIFO Overflow Counter Bits */
401#define MFC_OVFL 0x00010000 /* Missed Frames Counter Overflow Bit */
402#define MFC_CNTR 0x0000ffff /* Missed Frames Counter Bits */
403#define MFC_FOCM 0x1ffe0000 /* FIFO Overflow Counter Mask */
404
405/*
406** DC21040 Ethernet Address PROM (DE4X5_APROM)
407*/
408#define APROM_DN 0x80000000 /* Data Not Valid */
409#define APROM_DT 0x000000ff /* Address Byte */
410
411/*
412** DC21041 Boot/Ethernet Address ROM (DE4X5_BROM)
413*/
414#define BROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
415#define BROM_RD 0x00004000 /* Read from Boot ROM */
416#define BROM_WR 0x00002000 /* Write to Boot ROM */
417#define BROM_BR 0x00001000 /* Select Boot ROM when set */
418#define BROM_SR 0x00000800 /* Select Serial ROM when set */
419#define BROM_REG 0x00000400 /* External Register Select */
420#define BROM_DT 0x000000ff /* Data Byte */
421
422/*
423** DC21041 Serial/Ethernet Address ROM (DE4X5_SROM, DE4X5_MII)
424*/
425#define MII_MDI 0x00080000 /* MII Management Data In */
426#define MII_MDO 0x00060000 /* MII Management Mode/Data Out */
427#define MII_MRD 0x00040000 /* MII Management Define Read Mode */
428#define MII_MWR 0x00000000 /* MII Management Define Write Mode */
429#define MII_MDT 0x00020000 /* MII Management Data Out */
430#define MII_MDC 0x00010000 /* MII Management Clock */
431#define MII_RD 0x00004000 /* Read from MII */
432#define MII_WR 0x00002000 /* Write to MII */
433#define MII_SEL 0x00000800 /* Select MII when RESET */
434
435#define SROM_MODE 0x00008000 /* MODE_1: 0, MODE_0: 1 (read only) */
436#define SROM_RD 0x00004000 /* Read from Boot ROM */
437#define SROM_WR 0x00002000 /* Write to Boot ROM */
438#define SROM_BR 0x00001000 /* Select Boot ROM when set */
439#define SROM_SR 0x00000800 /* Select Serial ROM when set */
440#define SROM_REG 0x00000400 /* External Register Select */
441#define SROM_DT 0x000000ff /* Data Byte */
442
443#define DT_OUT 0x00000008 /* Serial Data Out */
444#define DT_IN 0x00000004 /* Serial Data In */
445#define DT_CLK 0x00000002 /* Serial ROM Clock */
446#define DT_CS 0x00000001 /* Serial ROM Chip Select */
447
448#define MII_PREAMBLE 0xffffffff /* MII Management Preamble */
449#define MII_TEST 0xaaaaaaaa /* MII Test Signal */
450#define MII_STRD 0x06 /* Start of Frame+Op Code: use low nibble */
451#define MII_STWR 0x0a /* Start of Frame+Op Code: use low nibble */
452
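Because mii_wdata() shifts these nibbles out LSB-first, 0x06 appears on the wire as ST=01, OP=10 (a clause 22 read) and 0x0a as ST=01, OP=01 (a write).
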
453#define MII_CR 0x00 /* MII Management Control Register */
454#define MII_SR 0x01 /* MII Management Status Register */
455#define MII_ID0 0x02 /* PHY Identifier Register 0 */
456#define MII_ID1 0x03 /* PHY Identifier Register 1 */
457#define MII_ANA 0x04 /* Auto Negotiation Advertisement */
458#define MII_ANLPA 0x05 /* Auto Negotiation Link Partner Ability */
459#define MII_ANE 0x06 /* Auto Negotiation Expansion */
460#define MII_ANP 0x07 /* Auto Negotiation Next Page TX */
461
462#define DE4X5_MAX_MII 32 /* Maximum address of MII PHY devices */
463
464/*
465** MII Management Control Register
466*/
467#define MII_CR_RST 0x8000 /* RESET the PHY chip */
468#define MII_CR_LPBK 0x4000 /* Loopback enable */
469#define MII_CR_SPD 0x2000 /* 0: 10Mb/s; 1: 100Mb/s */
470#define MII_CR_10 0x0000 /* Set 10Mb/s */
471#define MII_CR_100 0x2000 /* Set 100Mb/s */
472#define MII_CR_ASSE 0x1000 /* Auto Speed Select Enable */
473#define MII_CR_PD 0x0800 /* Power Down */
474#define MII_CR_ISOL 0x0400 /* Isolate Mode */
475#define MII_CR_RAN 0x0200 /* Restart Auto Negotiation */
476#define MII_CR_FDM 0x0100 /* Full Duplex Mode */
477#define MII_CR_CTE 0x0080 /* Collision Test Enable */
478
479/*
480** MII Management Status Register
481*/
482#define MII_SR_T4C 0x8000 /* 100BASE-T4 capable */
483#define MII_SR_TXFD 0x4000 /* 100BASE-TX Full Duplex capable */
484#define MII_SR_TXHD 0x2000 /* 100BASE-TX Half Duplex capable */
485#define MII_SR_TFD 0x1000 /* 10BASE-T Full Duplex capable */
486#define MII_SR_THD 0x0800 /* 10BASE-T Half Duplex capable */
487#define MII_SR_ASSC 0x0020 /* Auto Speed Selection Complete*/
488#define MII_SR_RFD 0x0010 /* Remote Fault Detected */
489#define MII_SR_ANC 0x0008 /* Auto Negotiation capable */
490#define MII_SR_LKS 0x0004 /* Link Status */
491#define MII_SR_JABD 0x0002 /* Jabber Detect */
492#define MII_SR_XC 0x0001 /* Extended Capabilities */
493
494/*
495** MII Management Auto Negotiation Advertisement Register
496*/
497#define MII_ANA_TAF 0x03e0 /* Technology Ability Field */
498#define MII_ANA_T4AM 0x0200 /* T4 Technology Ability Mask */
499#define MII_ANA_TXAM 0x0180 /* TX Technology Ability Mask */
500#define MII_ANA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
501#define MII_ANA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
502#define MII_ANA_100M 0x0380 /* 100Mb Technology Ability Mask */
503#define MII_ANA_10M 0x0060 /* 10Mb Technology Ability Mask */
504#define MII_ANA_CSMA 0x0001 /* CSMA-CD Capable */
505
506/*
507** MII Management Auto Negotiation Remote End Register
508*/
509#define MII_ANLPA_NP 0x8000 /* Next Page (Enable) */
510#define MII_ANLPA_ACK 0x4000 /* Remote Acknowledge */
511#define MII_ANLPA_RF 0x2000 /* Remote Fault */
512#define MII_ANLPA_TAF 0x03e0 /* Technology Ability Field */
513#define MII_ANLPA_T4AM 0x0200 /* T4 Technology Ability Mask */
514#define MII_ANLPA_TXAM 0x0180 /* TX Technology Ability Mask */
515#define MII_ANLPA_FDAM 0x0140 /* Full Duplex Technology Ability Mask */
516#define MII_ANLPA_HDAM 0x02a0 /* Half Duplex Technology Ability Mask */
517#define MII_ANLPA_100M 0x0380 /* 100Mb Technology Ability Mask */
518#define MII_ANLPA_10M 0x0060 /* 10Mb Technology Ability Mask */
519#define MII_ANLPA_CSMA 0x0001 /* CSMA-CD Capable */
520
521/*
522** SROM Media Definitions (ABG SROM Section)
523*/
524#define MEDIA_NWAY 0x0080 /* Nway (Auto Negotiation) on PHY */
525#define MEDIA_MII 0x0040 /* MII Present on the adapter */
526#define MEDIA_FIBRE 0x0008 /* Fibre Media present */
527#define MEDIA_AUI 0x0004 /* AUI Media present */
528#define MEDIA_TP 0x0002 /* TP Media present */
529#define MEDIA_BNC 0x0001 /* BNC Media present */
530
531/*
532** SROM Definitions (Digital Semiconductor Format)
533*/
534#define SROM_SSVID 0x0000 /* Sub-system Vendor ID offset */
535#define SROM_SSID 0x0002 /* Sub-system ID offset */
536#define SROM_CISPL 0x0004 /* CardBus CIS Pointer low offset */
537#define SROM_CISPH 0x0006 /* CardBus CIS Pointer high offset */
538#define SROM_IDCRC 0x0010 /* ID Block CRC offset*/
539#define SROM_RSVD2 0x0011 /* ID Reserved 2 offset */
540#define SROM_SFV 0x0012 /* SROM Format Version offset */
541#define SROM_CCNT 0x0013 /* Controller Count offset */
542#define SROM_HWADD 0x0014 /* Hardware Address offset */
543#define SROM_MRSVD 0x007c /* Manufacturer Reserved offset*/
544#define SROM_CRC 0x007e /* SROM CRC offset */
545
546/*
547** SROM Media Connection Definitions
548*/
549#define SROM_10BT 0x0000 /* 10BASE-T half duplex */
550#define SROM_10BTN 0x0100 /* 10BASE-T with Nway */
551#define SROM_10BTF 0x0204 /* 10BASE-T full duplex */
552#define SROM_10BTNLP 0x0400 /* 10BASE-T without Link Pass test */
553#define SROM_10B2 0x0001 /* 10BASE-2 (BNC) */
554#define SROM_10B5 0x0002 /* 10BASE-5 (AUI) */
555#define SROM_100BTH 0x0003 /* 100BASE-T half duplex */
556#define SROM_100BTF 0x0205 /* 100BASE-T full duplex */
557#define SROM_100BT4 0x0006 /* 100BASE-T4 */
558#define SROM_100BFX 0x0007 /* 100BASE-FX half duplex (Fiber) */
559#define SROM_M10BT 0x0009 /* MII 10BASE-T half duplex */
560#define SROM_M10BTF 0x020a /* MII 10BASE-T full duplex */
561#define SROM_M100BT 0x000d /* MII 100BASE-T half duplex */
562#define SROM_M100BTF 0x020e /* MII 100BASE-T full duplex */
563#define SROM_M100BT4 0x000f /* MII 100BASE-T4 */
564#define SROM_M100BF 0x0010 /* MII 100BASE-FX half duplex */
565#define SROM_M100BFF 0x0211 /* MII 100BASE-FX full duplex */
566#define SROM_PDA 0x0800 /* Powerup & Dynamic Autosense */
567#define SROM_PAO 0x8800 /* Powerup Autosense Only */
568#define SROM_NSMI 0xffff /* No Selected Media Information */
569
570/*
571** SROM Media Definitions
572*/
573#define SROM_10BASET 0x0000 /* 10BASE-T half duplex */
574#define SROM_10BASE2 0x0001 /* 10BASE-2 (BNC) */
575#define SROM_10BASE5 0x0002 /* 10BASE-5 (AUI) */
576#define SROM_100BASET 0x0003 /* 100BASE-T half duplex */
577#define SROM_10BASETF 0x0004 /* 10BASE-T full duplex */
578#define SROM_100BASETF 0x0005 /* 100BASE-T full duplex */
579#define SROM_100BASET4 0x0006 /* 100BASE-T4 */
580#define SROM_100BASEF 0x0007 /* 100BASE-FX half duplex */
581#define SROM_100BASEFF 0x0008 /* 100BASE-FX full duplex */
582
583#define BLOCK_LEN 0x7f /* Extended blocks length mask */
584#define EXT_FIELD 0x40 /* Extended blocks extension field bit */
585#define MEDIA_CODE 0x3f /* Extended blocks media code mask */
586
587/*
588** SROM Compact Format Block Masks
589*/
590#define COMPACT_FI 0x80 /* Format Indicator */
591#define COMPACT_LEN 0x04 /* Length */
592#define COMPACT_MC 0x3f /* Media Code */
593
594/*
595** SROM Extended Format Block Type 0 Masks
596*/
597#define BLOCK0_FI 0x80 /* Format Indicator */
598#define BLOCK0_MCS 0x80 /* Media Code byte Sign */
599#define BLOCK0_MC 0x3f /* Media Code */
600
601/*
602** DC21040 Full Duplex Register (DE4X5_FDR)
603*/
604#define FDR_FDACV 0x0000ffff /* Full Duplex Auto Configuration Value */
605
606/*
607** DC21041 General Purpose Timer Register (DE4X5_GPT)
608*/
609#define GPT_CON 0x00010000 /* One shot: 0, Continuous: 1 */
610#define GPT_VAL 0x0000ffff /* Timer Value */
611
612/*
613** DC21140 General Purpose Register (DE4X5_GEP) (hardware dependent bits)
614*/
615/* Valid ONLY for DE500 hardware */
616#define GEP_LNP 0x00000080 /* Link Pass (input) */
617#define GEP_SLNK 0x00000040 /* SYM LINK (input) */
618#define GEP_SDET 0x00000020 /* Signal Detect (input) */
619#define GEP_HRST 0x00000010 /* Hard RESET (to PHY) (output) */
620#define GEP_FDXD 0x00000008 /* Full Duplex Disable (output) */
621#define GEP_PHYL 0x00000004 /* PHY Loopback (output) */
622#define GEP_FLED 0x00000002 /* Force Activity LED on (output) */
623#define GEP_MODE 0x00000001 /* 0: 10Mb/s, 1: 100Mb/s */
624#define GEP_INIT 0x0000011f /* Setup inputs (0) and outputs (1) */
625#define GEP_CTRL 0x00000100 /* GEP control bit */
626
627/*
628** SIA Register Defaults
629*/
630#define CSR13 0x00000001
631#define CSR14 0x0003ff7f /* Autonegotiation disabled */
632#define CSR15 0x00000008
633
634/*
635** SIA Status Register (DE4X5_SISR)
636*/
637#define SISR_LPC 0xffff0000 /* Link Partner's Code Word */
638#define SISR_LPN 0x00008000 /* Link Partner Negotiable */
639#define SISR_ANS 0x00007000 /* Auto Negotiation Arbitration State */
640#define SISR_NSN 0x00000800 /* Non Stable NLPs Detected (DC21041) */
641#define SISR_TRF 0x00000800 /* Transmit Remote Fault */
642#define SISR_NSND 0x00000400 /* Non Stable NLPs Detected (DC21142) */
643#define SISR_ANR_FDS 0x00000400 /* Auto Negotiate Restart/Full Duplex Sel.*/
644#define SISR_TRA 0x00000200 /* 10BASE-T Receive Port Activity */
645#define SISR_NRA 0x00000200 /* Non Selected Port Receive Activity */
646#define SISR_ARA 0x00000100 /* AUI Receive Port Activity */
647#define SISR_SRA 0x00000100 /* Selected Port Receive Activity */
648#define SISR_DAO 0x00000080 /* PLL All One */
649#define SISR_DAZ 0x00000040 /* PLL All Zero */
650#define SISR_DSP 0x00000020 /* PLL Self-Test Pass */
651#define SISR_DSD 0x00000010 /* PLL Self-Test Done */
652#define SISR_APS 0x00000008 /* Auto Polarity State */
653#define SISR_LKF 0x00000004 /* Link Fail Status */
654#define SISR_LS10 0x00000004 /* 10Mb/s Link Fail Status */
655#define SISR_NCR 0x00000002 /* Network Connection Error */
656#define SISR_LS100 0x00000002 /* 100Mb/s Link Fail Status */
657#define SISR_PAUI 0x00000001 /* AUI_TP Indication */
658#define SISR_MRA 0x00000001 /* MII Receive Port Activity */
659
660#define ANS_NDIS 0x00000000 /* Nway disable */
661#define ANS_TDIS 0x00001000 /* Transmit Disable */
662#define ANS_ADET 0x00002000 /* Ability Detect */
663#define ANS_ACK 0x00003000 /* Acknowledge */
664#define ANS_CACK 0x00004000 /* Complete Acknowledge */
665#define ANS_NWOK 0x00005000 /* Nway OK - FLP Link Good */
666#define ANS_LCHK 0x00006000 /* Link Check */
667
668#define SISR_RST 0x00000301 /* CSR12 reset */
669#define SISR_ANR 0x00001301 /* Autonegotiation restart */
670
671/*
672** SIA Connectivity Register (DE4X5_SICR)
673*/
674#define SICR_SDM 0xffff0000 /* SIA Diagnostics Mode */
675#define SICR_OE57 0x00008000 /* Output Enable 5 6 7 */
676#define SICR_OE24 0x00004000 /* Output Enable 2 4 */
677#define SICR_OE13 0x00002000 /* Output Enable 1 3 */
678#define SICR_IE 0x00001000 /* Input Enable */
679#define SICR_EXT 0x00000000 /* SIA MUX Select External SIA Mode */
680#define SICR_D_SIA 0x00000400 /* SIA MUX Select Diagnostics - SIA Sigs */
681#define SICR_DPLL 0x00000800 /* SIA MUX Select Diagnostics - DPLL Sigs*/
682#define SICR_APLL 0x00000a00 /* SIA MUX Select Diagnostics - DPLL Sigs*/
683#define SICR_D_RxM 0x00000c00 /* SIA MUX Select Diagnostics - RxM Sigs */
684#define SICR_M_RxM 0x00000d00 /* SIA MUX Select Diagnostics - RxM Sigs */
685#define SICR_LNKT 0x00000e00 /* SIA MUX Select Diagnostics - Link Test*/
686#define SICR_SEL 0x00000f00 /* SIA MUX Select AUI or TP with LEDs */
687#define SICR_ASE 0x00000080 /* APLL Start Enable*/
688#define SICR_SIM 0x00000040 /* Serial Interface Input Multiplexer */
689#define SICR_ENI 0x00000020 /* Encoder Input Multiplexer */
690#define SICR_EDP 0x00000010 /* SIA PLL External Input Enable */
691#define SICR_AUI 0x00000008 /* 10Base-T (0) or AUI (1) */
692#define SICR_CAC 0x00000004 /* CSR Auto Configuration */
693#define SICR_PS 0x00000002 /* Pin AUI/TP Selection */
694#define SICR_SRL 0x00000001 /* SIA Reset */
695#define SIA_RESET 0x00000000 /* SIA Reset Value */
696
697/*
698** SIA Transmit and Receive Register (DE4X5_STRR)
699*/
700#define STRR_TAS 0x00008000 /* 10Base-T/AUI Autosensing Enable */
701#define STRR_SPP 0x00004000 /* Set Polarity Plus */
702#define STRR_APE 0x00002000 /* Auto Polarity Enable */
703#define STRR_LTE 0x00001000 /* Link Test Enable */
704#define STRR_SQE 0x00000800 /* Signal Quality Enable */
705#define STRR_CLD 0x00000400 /* Collision Detect Enable */
706#define STRR_CSQ 0x00000200 /* Collision Squelch Enable */
707#define STRR_RSQ 0x00000100 /* Receive Squelch Enable */
708#define STRR_ANE 0x00000080 /* Auto Negotiate Enable */
709#define STRR_HDE 0x00000040 /* Half Duplex Enable */
710#define STRR_CPEN 0x00000030 /* Compensation Enable */
711#define STRR_LSE 0x00000008 /* Link Pulse Send Enable */
712#define STRR_DREN 0x00000004 /* Driver Enable */
713#define STRR_LBK 0x00000002 /* Loopback Enable */
714#define STRR_ECEN 0x00000001 /* Encoder Enable */
715#define STRR_RESET 0xffffffff /* Reset value for STRR */
716
717/*
718** SIA General Register (DE4X5_SIGR)
719*/
720#define SIGR_RMI 0x40000000 /* Receive Match Interrupt */
721#define SIGR_GI1 0x20000000 /* General Port Interrupt 1 */
722#define SIGR_GI0 0x10000000 /* General Port Interrupt 0 */
723#define SIGR_CWE 0x08000000 /* Control Write Enable */
724#define SIGR_RME 0x04000000 /* Receive Match Enable */
725#define SIGR_GEI1 0x02000000 /* GEP Interrupt Enable on Port 1 */
726#define SIGR_GEI0 0x01000000 /* GEP Interrupt Enable on Port 0 */
727#define SIGR_LGS3 0x00800000 /* LED/GEP3 Select */
728#define SIGR_LGS2 0x00400000 /* LED/GEP2 Select */
729#define SIGR_LGS1 0x00200000 /* LED/GEP1 Select */
730#define SIGR_LGS0 0x00100000 /* LED/GEP0 Select */
731#define SIGR_MD 0x000f0000 /* General Purpose Mode and Data */
732#define SIGR_LV2 0x00008000 /* General Purpose LED2 value */
733#define SIGR_LE2 0x00004000 /* General Purpose LED2 enable */
734#define SIGR_FRL 0x00002000 /* Force Receiver Low */
735#define SIGR_DPST 0x00001000 /* PLL Self Test Start */
736#define SIGR_LSD 0x00000800 /* LED Stretch Disable */
737#define SIGR_FLF 0x00000400 /* Force Link Fail */
738#define SIGR_FUSQ 0x00000200 /* Force Unsquelch */
739#define SIGR_TSCK 0x00000100 /* Test Clock */
740#define SIGR_LV1 0x00000080 /* General Purpose LED1 value */
741#define SIGR_LE1 0x00000040 /* General Purpose LED1 enable */
742#define SIGR_RWR 0x00000020 /* Receive Watchdog Release */
743#define SIGR_RWD 0x00000010 /* Receive Watchdog Disable */
744#define SIGR_ABM 0x00000008 /* BNC: 0, AUI:1 */
745#define SIGR_JCK 0x00000004 /* Jabber Clock */
746#define SIGR_HUJ 0x00000002 /* Host Unjab */
747#define SIGR_JBD 0x00000001 /* Jabber Disable */
748#define SIGR_RESET 0xffff0000 /* Reset value for SIGR */
749
750/*
751** Receive Descriptor Bit Summary
752*/
753#define R_OWN 0x80000000 /* Own Bit */
754#define RD_FF 0x40000000 /* Filtering Fail */
755#define RD_FL 0x3fff0000 /* Frame Length */
756#define RD_ES 0x00008000 /* Error Summary */
757#define RD_LE 0x00004000 /* Length Error */
758#define RD_DT 0x00003000 /* Data Type */
759#define RD_RF 0x00000800 /* Runt Frame */
760#define RD_MF 0x00000400 /* Multicast Frame */
761#define RD_FS 0x00000200 /* First Descriptor */
762#define RD_LS 0x00000100 /* Last Descriptor */
763#define RD_TL 0x00000080 /* Frame Too Long */
764#define RD_CS 0x00000040 /* Collision Seen */
765#define RD_FT 0x00000020 /* Frame Type */
766#define RD_RJ 0x00000010 /* Receive Watchdog */
767#define RD_RE 0x00000008 /* Report on MII Error */
768#define RD_DB 0x00000004 /* Dribbling Bit */
769#define RD_CE 0x00000002 /* CRC Error */
770#define RD_OF 0x00000001 /* Overflow */
771
772#define RD_RER 0x02000000 /* Receive End Of Ring */
773#define RD_RCH 0x01000000 /* Second Address Chained */
774#define RD_RBS2 0x003ff800 /* Buffer 2 Size */
775#define RD_RBS1 0x000007ff /* Buffer 1 Size */
776
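/*
** A minimal sketch (not driver code; kernel u32 from <linux/types.h>
** assumed) of decoding a completed receive descriptor status word with
** the masks above. The helper name is illustrative, and rdes0 is assumed
** to be already byte-swapped into host order.
*/
static inline int rx_frame_len(u32 rdes0)
{
	if (rdes0 & R_OWN)		/* still owned by the chip */
		return -1;
	if (rdes0 & RD_ES)		/* error summary flagged */
		return -1;
	return (rdes0 & RD_FL) >> 16;	/* frame length, bits 29:16 */
}
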
777/*
778** Transmit Descriptor Bit Summary
779*/
780#define T_OWN 0x80000000 /* Own Bit */
781#define TD_ES 0x00008000 /* Error Summary */
782#define TD_TO 0x00004000 /* Transmit Jabber Time-Out */
783#define TD_LO 0x00000800 /* Loss Of Carrier */
784#define TD_NC 0x00000400 /* No Carrier */
785#define TD_LC 0x00000200 /* Late Collision */
786#define TD_EC 0x00000100 /* Excessive Collisions */
787#define TD_HF 0x00000080 /* Heartbeat Fail */
788#define TD_CC 0x00000078 /* Collision Counter */
789#define TD_LF 0x00000004 /* Link Fail */
790#define TD_UF 0x00000002 /* Underflow Error */
791#define TD_DE 0x00000001 /* Deferred */
792
793#define TD_IC 0x80000000 /* Interrupt On Completion */
794#define TD_LS 0x40000000 /* Last Segment */
795#define TD_FS 0x20000000 /* First Segment */
796#define TD_FT1 0x10000000 /* Filtering Type */
797#define TD_SET 0x08000000 /* Setup Packet */
798#define TD_AC 0x04000000 /* Add CRC Disable */
799#define TD_TER 0x02000000 /* Transmit End Of Ring */
800#define TD_TCH 0x01000000 /* Second Address Chained */
801#define TD_DPD 0x00800000 /* Disabled Padding */
802#define TD_FT0 0x00400000 /* Filtering Type */
803#define TD_TBS2 0x003ff800 /* Buffer 2 Size */
804#define TD_TBS1 0x000007ff /* Buffer 1 Size */
805
806#define PERFECT_F 0x00000000
807#define HASH_F TD_FT0
808#define INVERSE_F TD_FT1
809#define HASH_O_F (TD_FT1 | TD_FT0)
810
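/*
** A matching sketch for the transmit side: composing a TDES1 control
** word for a single-buffer frame on a chained descriptor ring. The
** helper name and the choice of chaining (TD_TCH) are illustrative
** assumptions, not the driver's actual code; len must fit in TD_TBS1.
*/
static inline u32 tx_ctrl_word(u32 len)
{
	/* interrupt on completion, first and last segment, chained */
	return TD_IC | TD_LS | TD_FS | TD_TCH | (len & TD_TBS1);
}
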
811/*
812** Media / mode state machine definitions
813** User selectable:
814*/
815#define TP 0x0040 /* 10Base-T (now equiv to _10Mb) */
816#define TP_NW 0x0002 /* 10Base-T with Nway */
817#define BNC 0x0004 /* Thinwire */
818#define AUI 0x0008 /* Thickwire */
819#define BNC_AUI 0x0010 /* BNC/AUI on DC21040 indistinguishable */
820#define _10Mb 0x0040 /* 10Mb/s Ethernet */
821#define _100Mb 0x0080 /* 100Mb/s Ethernet */
822#define AUTO 0x4000 /* Auto sense the media or speed */
823
824/*
825** Internal states
826*/
827#define NC 0x0000 /* No Connection */
828#define ANS 0x0020 /* Intermediate AutoNegotiation State */
829#define SPD_DET 0x0100 /* Parallel speed detection */
830#define INIT 0x0200 /* Initial state */
831#define EXT_SIA 0x0400 /* External SIA for motherboard chip */
832#define ANS_SUSPECT 0x0802 /* Suspect the ANS (TP) port is down */
833#define TP_SUSPECT 0x0803 /* Suspect the TP port is down */
834#define BNC_AUI_SUSPECT 0x0804 /* Suspect the BNC or AUI port is down */
835#define EXT_SIA_SUSPECT 0x0805 /* Suspect the EXT SIA port is down */
836#define BNC_SUSPECT 0x0806 /* Suspect the BNC port is down */
837#define AUI_SUSPECT 0x0807 /* Suspect the AUI port is down */
838#define MII 0x1000 /* MII on the 21143 */
839
840#define TIMER_CB 0x80000000 /* Timer callback detection */
841
842/*
843** DE4X5 DEBUG Options
844*/
845#define DEBUG_NONE 0x0000 /* No DEBUG messages */
846#define DEBUG_VERSION 0x0001 /* Print version message */
847#define DEBUG_MEDIA 0x0002 /* Print media messages */
848#define DEBUG_TX 0x0004 /* Print TX (queue_pkt) messages */
849#define DEBUG_RX 0x0008 /* Print RX (de4x5_rx) messages */
850#define DEBUG_SROM 0x0010 /* Print SROM messages */
851#define DEBUG_MII 0x0020 /* Print MII messages */
852#define DEBUG_OPEN 0x0040 /* Print de4x5_open() messages */
853#define DEBUG_CLOSE 0x0080 /* Print de4x5_close() messages */
854#define DEBUG_PCICFG 0x0100
855#define DEBUG_ALL 0x01ff
856
857/*
858** Miscellaneous
859*/
860#define PCI 0
861#define EISA 1
862
863#define HASH_TABLE_LEN 512 /* Bits */
864#define HASH_BITS 0x01ff /* 9 LS bits */
865
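/*
** A sketch of the hash computation these constants imply: keep the
** HASH_BITS least significant bits of the Ethernet CRC of a multicast
** address as the bin index into the 512-bit table. ether_crc_le() from
** <linux/crc32.h> is assumed here to match the chip's bit ordering.
*/
static inline u16 hash_filter_index(const u8 *mc_addr)
{
	return ether_crc_le(6, mc_addr) & HASH_BITS;	/* 9 LS bits */
}
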
866#define SETUP_FRAME_LEN 192 /* Bytes */
867#define IMPERF_PA_OFFSET 156 /* Bytes */
868
869#define POLL_DEMAND 1
870
871#define LOST_MEDIA_THRESHOLD 3
872
873#define MASK_INTERRUPTS 1
874#define UNMASK_INTERRUPTS 0
875
876#define DE4X5_STRLEN 8
877
878#define DE4X5_INIT 0 /* Initialisation time */
879#define DE4X5_RUN 1 /* Run time */
880
881#define DE4X5_SAVE_STATE 0
882#define DE4X5_RESTORE_STATE 1
883
884/*
885** Address Filtering Modes
886*/
887#define PERFECT 0 /* 16 perfect physical addresses */
888#define HASH_PERF 1 /* 1 perfect, 512 multicast addresses */
889#define PERFECT_REJ 2 /* Reject 16 perfect physical addresses */
890#define ALL_HASH 3 /* Hashes all physical & multicast addrs */
891
892#define ALL 0 /* Clear out all the setup frame */
893#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
894
895/*
896** Adapter state
897*/
898#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
899#define CLOSED 1 /* Ready for opening */
900#define OPEN 2 /* Running */
901
902/*
903** Various wait times
904*/
905#define PDET_LINK_WAIT 1200 /* msecs to wait for link detect bits */
906#define ANS_FINISH_WAIT 1000 /* msecs to wait for link detect bits */
907
908/*
909** IEEE OUIs for various PHY vendor/chip combos - Reg 2 values only. Since
910** the vendors seem split 50-50 on how to calculate the OUI register values
911** anyway, just reading Reg2 seems reasonable for now [see de4x5_get_oui()].
912*/
913#define NATIONAL_TX 0x2000
914#define BROADCOM_T4 0x03e0
915#define SEEQ_T4 0x0016
916#define CYPRESS_T4 0x0014
917
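/*
** A sketch of matching a PHY against these values, per the note above
** (compare de4x5_get_oui() in de4x5.c). mii_rd() is the driver's MII
** read helper; the helper name and passing the MII register address
** directly are illustrative assumptions.
*/
static int phy_vendor_known(u_char phyaddr, u_long mii_ioaddr)
{
	switch (mii_rd(2, phyaddr, mii_ioaddr)) {	/* MII register 2 */
	case NATIONAL_TX:
	case BROADCOM_T4:
	case SEEQ_T4:
	case CYPRESS_T4:
		return 1;
	default:
		return 0;	/* unknown vendor/chip */
	}
}
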
918/*
919** Speed Selection stuff
920*/
921#define SET_10Mb {\
922 if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
923 omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
924 if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
925 mii_wr(MII_CR_10|(lp->fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
926 }\
927 omr |= ((lp->fdx ? OMR_FDX : 0) | OMR_TTM);\
928 outl(omr, DE4X5_OMR);\
929 if (!lp->useSROM) lp->cache.gep = 0;\
930 } else if (lp->useSROM && !lp->useMII) {\
931 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
932 omr |= (lp->fdx ? OMR_FDX : 0);\
933 outl(omr | (lp->infoblock_csr6 & ~(OMR_SCR | OMR_HBD)), DE4X5_OMR);\
934 } else {\
935 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
936 omr |= (lp->fdx ? OMR_FDX : 0);\
937 outl(omr | OMR_SDP | OMR_TTM, DE4X5_OMR);\
938 lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\
939 gep_wr(lp->cache.gep, dev);\
940 }\
941}
942
943#define SET_100Mb {\
944 if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
945 int fdx=0;\
946 if (lp->phy[lp->active].id == NATIONAL_TX) {\
947 mii_wr(mii_rd(0x18, lp->phy[lp->active].addr, DE4X5_MII) & ~0x2000,\
948 0x18, lp->phy[lp->active].addr, DE4X5_MII);\
949 }\
950 omr = inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX);\
951 sr = mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);\
952 if (!(sr & MII_ANA_T4AM) && lp->fdx) fdx=1;\
953 if ((lp->tmp != MII_SR_ASSC) || (lp->autosense != AUTO)) {\
954 mii_wr(MII_CR_100|(fdx?MII_CR_FDM:0), MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
955 }\
956 if (fdx) omr |= OMR_FDX;\
957 outl(omr, DE4X5_OMR);\
958 if (!lp->useSROM) lp->cache.gep = 0;\
959 } else if (lp->useSROM && !lp->useMII) {\
960 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
961 omr |= (lp->fdx ? OMR_FDX : 0);\
962 outl(omr | lp->infoblock_csr6, DE4X5_OMR);\
963 } else {\
964 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
965 omr |= (lp->fdx ? OMR_FDX : 0);\
966 outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS | OMR_SCR, DE4X5_OMR);\
967 lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD) | GEP_MODE;\
968 gep_wr(lp->cache.gep, dev);\
969 }\
970}
971
972/* FIX ME so I don't jam 10Mb networks */
973#define SET_100Mb_PDET {\
974 if ((lp->phy[lp->active].id) && (!lp->useSROM || lp->useMII)) {\
975 mii_wr(MII_CR_100|MII_CR_ASSE, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);\
976 omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
977 outl(omr, DE4X5_OMR);\
978 } else if (lp->useSROM && !lp->useMII) {\
979 omr = (inl(DE4X5_OMR) & ~(OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
980 outl(omr, DE4X5_OMR);\
981 } else {\
982 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR | OMR_FDX));\
983 outl(omr | OMR_SDP | OMR_PS | OMR_HBD | OMR_PCS, DE4X5_OMR);\
984 lp->cache.gep = (GEP_FDXD | GEP_MODE);\
985 gep_wr(lp->cache.gep, dev);\
986 }\
987}
988
989/*
990** Include the IOCTL stuff
991*/
992#include <linux/sockios.h>
993
994struct de4x5_ioctl {
995 unsigned short cmd; /* Command to run */
996 unsigned short len; /* Length of the data buffer */
997 unsigned char __user *data; /* Pointer to the data buffer */
998};
999
1000/*
1001** Recognised commands for the driver
1002*/
1003#define DE4X5_GET_HWADDR 0x01 /* Get the hardware address */
1004#define DE4X5_SET_HWADDR 0x02 /* Set the hardware address */
1005/* 0x03 and 0x04 were used before and are obsoleted now. Don't use them. */
1006#define DE4X5_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
1007#define DE4X5_GET_MCA 0x06 /* Get a multicast address */
1008#define DE4X5_SET_MCA 0x07 /* Set a multicast address */
1009#define DE4X5_CLR_MCA 0x08 /* Clear a multicast address */
1010#define DE4X5_MCA_EN 0x09 /* Enable a multicast address group */
1011#define DE4X5_GET_STATS 0x0a /* Get the driver statistics */
1012#define DE4X5_CLR_STATS 0x0b /* Zero out the driver statistics */
1013#define DE4X5_GET_OMR 0x0c /* Get the OMR Register contents */
1014#define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */
1015#define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */
1016
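/*
** A hedged userspace sketch of issuing one of these commands. The
** request block riding inside the ifreq union and the use of
** SIOCDEVPRIVATE are assumptions about the private-ioctl handler in
** de4x5.c; treat the marshalling below as illustrative only.
*/
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/sockios.h>	/* SIOCDEVPRIVATE */

static int de4x5_say_boo_example(int sock, const char *ifname)
{
	struct ifreq ifr;
	struct de4x5_ioctl {	/* userspace mirror, __user dropped */
		unsigned short cmd, len;
		unsigned char *data;
	} *ioc = (void *)&ifr.ifr_ifru;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ioc->cmd = DE4X5_SAY_BOO;	/* logs "Boo!" in the kernel log */
	ioc->len = 0;
	ioc->data = NULL;
	return ioctl(sock, SIOCDEVPRIVATE, &ifr);
}
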
1017#define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
new file mode 100644
index 000000000000..17b11ee1745a
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -0,0 +1,2253 @@
1/*
2 A Davicom DM9102/DM9102A/DM9102A+DM9801/DM9102A+DM9802 NIC fast
3 ethernet driver for Linux.
4 Copyright (C) 1997 Sten Wang
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License
8 as published by the Free Software Foundation; either version 2
9 of the License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 DAVICOM Web-Site: www.davicom.com.tw
17
18 Author: Sten Wang, 886-3-5798797-8517, E-mail: sten_wang@davicom.com.tw
19 Maintainer: Tobias Ringstrom <tori@unhappy.mine.nu>
20
21 (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved.
22
23 Marcelo Tosatti <marcelo@conectiva.com.br> :
24 Made it compile in 2.3 (device to net_device)
25
26 Alan Cox <alan@lxorguk.ukuu.org.uk> :
27 Cleaned up for kernel merge.
28 Removed the back compatibility support
29 Reformatted, fixing spelling etc as I went
30 Removed IRQ 0-15 assumption
31
32 Jeff Garzik <jgarzik@pobox.com> :
33 Updated to use new PCI driver API.
34 Resource usage cleanups.
35 Report driver version to user.
36
37 Tobias Ringstrom <tori@unhappy.mine.nu> :
38 Cleaned up and added SMP safety. Thanks go to Jeff Garzik,
39 Andrew Morton and Frank Davis for the SMP safety fixes.
40
41 Vojtech Pavlik <vojtech@suse.cz> :
42 Cleaned up pointer arithmetics.
43 Fixed a lot of 64bit issues.
44 Cleaned up printk()s a bit.
45 Fixed some obvious big endian problems.
46
47 Tobias Ringstrom <tori@unhappy.mine.nu> :
48 Use time_after for jiffies calculation. Added ethtool
49 support. Updated PCI resource allocation. Do not
50 forget to unmap PCI mapped skbs.
51
52 Alan Cox <alan@lxorguk.ukuu.org.uk>
53 Added new PCI identifiers provided by Clear Zhang at ALi
54 for their 1563 ethernet device.
55
56 TODO
57
58 Check on 64 bit boxes.
59 Check and fix on big endian boxes.
60
61 Test and make sure PCI latency is now correct for all cases.
62*/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
66#define DRV_NAME "dmfe"
67#define DRV_VERSION "1.36.4"
68#define DRV_RELDATE "2002-01-17"
69
70#include <linux/module.h>
71#include <linux/kernel.h>
72#include <linux/string.h>
73#include <linux/timer.h>
74#include <linux/ptrace.h>
75#include <linux/errno.h>
76#include <linux/ioport.h>
77#include <linux/interrupt.h>
78#include <linux/pci.h>
79#include <linux/dma-mapping.h>
80#include <linux/init.h>
81#include <linux/netdevice.h>
82#include <linux/etherdevice.h>
83#include <linux/ethtool.h>
84#include <linux/skbuff.h>
85#include <linux/delay.h>
86#include <linux/spinlock.h>
87#include <linux/crc32.h>
88#include <linux/bitops.h>
89
90#include <asm/processor.h>
91#include <asm/io.h>
92#include <asm/dma.h>
93#include <asm/uaccess.h>
94#include <asm/irq.h>
95
96#ifdef CONFIG_TULIP_DM910X
97#include <linux/of.h>
98#endif
99
100
101/* Board/System/Debug information/definition ---------------- */
102#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
103#define PCI_DM9102_ID 0x91021282 /* Davicom DM9102 ID */
104#define PCI_DM9100_ID 0x91001282 /* Davicom DM9100 ID */
105#define PCI_DM9009_ID 0x90091282 /* Davicom DM9009 ID */
106
107#define DM9102_IO_SIZE 0x80
108#define DM9102A_IO_SIZE 0x100
109#define TX_MAX_SEND_CNT 0x1 /* Maximum tx packets in flight at a time */
110#define TX_DESC_CNT 0x10 /* Allocated Tx descriptors */
111#define RX_DESC_CNT 0x20 /* Allocated Rx descriptors */
112#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
113#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
114#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
115#define TX_BUF_ALLOC 0x600
116#define RX_ALLOC_SIZE 0x620
117#define DM910X_RESET 1
118#define CR0_DEFAULT 0x00E00000 /* TX & RX burst mode */
119#define CR6_DEFAULT 0x00080000 /* HD */
120#define CR7_DEFAULT 0x180c1
121#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
122#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
123#define MAX_PACKET_SIZE 1514
124#define DMFE_MAX_MULTICAST 14
125#define RX_COPY_SIZE 100
126#define MAX_CHECK_PACKET 0x8000
127#define DM9801_NOISE_FLOOR 8
128#define DM9802_NOISE_FLOOR 5
129
130#define DMFE_WOL_LINKCHANGE 0x20000000
131#define DMFE_WOL_SAMPLEPACKET 0x10000000
132#define DMFE_WOL_MAGICPACKET 0x08000000
133
134
135#define DMFE_10MHF 0
136#define DMFE_100MHF 1
137#define DMFE_10MFD 4
138#define DMFE_100MFD 5
139#define DMFE_AUTO 8
140#define DMFE_1M_HPNA 0x10
141
142#define DMFE_TXTH_72 0x400000 /* TX TH 72 byte */
143#define DMFE_TXTH_96 0x404000 /* TX TH 96 byte */
144#define DMFE_TXTH_128 0x0000 /* TX TH 128 byte */
145#define DMFE_TXTH_256 0x4000 /* TX TH 256 byte */
146#define DMFE_TXTH_512 0x8000 /* TX TH 512 byte */
147#define DMFE_TXTH_1K 0xC000 /* TX TH 1K byte */
148
149#define DMFE_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
152
153#define DMFE_DBUG(dbug_now, msg, value) \
154 do { \
155 if (dmfe_debug || (dbug_now)) \
156 pr_err("%s %lx\n", \
157 (msg), (long) (value)); \
158 } while (0)
159
160#define SHOW_MEDIA_TYPE(mode) \
161 pr_info("Change Speed to %sMbps %s duplex\n" , \
162 (mode & 1) ? "100":"10", \
163 (mode & 4) ? "full":"half")
164
165
166/* CR9 definition: SROM/MII */
167#define CR9_SROM_READ 0x4800
168#define CR9_SRCS 0x1
169#define CR9_SRCLK 0x2
170#define CR9_CRDOUT 0x8
171#define SROM_DATA_0 0x0
172#define SROM_DATA_1 0x4
173#define PHY_DATA_1 0x20000
174#define PHY_DATA_0 0x00000
175#define MDCLKH 0x10000
176
177#define PHY_POWER_DOWN 0x800
178
179#define SROM_V41_CODE 0x14
180
181#define SROM_CLK_WRITE(data, ioaddr) do { \
182 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
183 udelay(5); \
184 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
185 udelay(5); \
186 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
187 udelay(5); } while (0)
188
189#define __CHK_IO_SIZE(pci_id, dev_rev) \
190 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191 DM9102A_IO_SIZE: DM9102_IO_SIZE)
192
193#define CHK_IO_SIZE(pci_dev) \
194 (__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
195 (pci_dev)->revision))
196
197/* Sten Check */
198#define DEVICE net_device
199
200/* Structure/enum declaration ------------------------------- */
201struct tx_desc {
202 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
203 char *tx_buf_ptr; /* Data for us */
204 struct tx_desc *next_tx_desc;
205} __attribute__(( aligned(32) ));
206
207struct rx_desc {
208 __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
209 struct sk_buff *rx_skb_ptr; /* Data for us */
210 struct rx_desc *next_rx_desc;
211} __attribute__(( aligned(32) ));
212
213struct dmfe_board_info {
214 u32 chip_id; /* Chip vendor/Device ID */
215 u8 chip_revision; /* Chip revision */
216 struct DEVICE *next_dev; /* next device */
217 struct pci_dev *pdev; /* PCI device */
218 spinlock_t lock;
219
220 long ioaddr; /* I/O base address */
221 u32 cr0_data;
222 u32 cr5_data;
223 u32 cr6_data;
224 u32 cr7_data;
225 u32 cr15_data;
226
227 /* pointer for memory physical address */
228 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
229 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
230 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
231 dma_addr_t first_tx_desc_dma;
232 dma_addr_t first_rx_desc_dma;
233
234 /* descriptor pointer */
235 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
236 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
237 unsigned char *desc_pool_ptr; /* descriptor pool memory */
238 struct tx_desc *first_tx_desc;
239 struct tx_desc *tx_insert_ptr;
240 struct tx_desc *tx_remove_ptr;
241 struct rx_desc *first_rx_desc;
242 struct rx_desc *rx_insert_ptr;
243 struct rx_desc *rx_ready_ptr; /* next received packet to process */
244 unsigned long tx_packet_cnt; /* transmitted packet count */
245 unsigned long tx_queue_cnt; /* wait to send packet count */
246 unsigned long rx_avail_cnt; /* available rx descriptor count */
247 unsigned long interval_rx_cnt; /* rx packet count per timer interval */
248
249 u16 HPNA_command; /* For HPNA register 16 */
250 u16 HPNA_timer; /* For HPNA remote device check */
251 u16 dbug_cnt;
252 u16 NIC_capability; /* NIC media capability */
253 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
254
255 u8 HPNA_present; /* 0:none, 1:DM9801, 2:DM9802 */
256 u8 chip_type; /* Keep DM9102A chip type */
257 u8 media_mode; /* user specify media mode */
258 u8 op_mode; /* actual working media mode */
259 u8 phy_addr;
260 u8 wait_reset; /* Hardware failed, need to reset */
261 u8 dm910x_chk_mode; /* Operating mode check */
262 u8 first_in_callback; /* Flag to record state */
263 u8 wol_mode; /* user WOL settings */
264 struct timer_list timer;
265
266 /* Driver defined statistic counter */
267 unsigned long tx_fifo_underrun;
268 unsigned long tx_loss_carrier;
269 unsigned long tx_no_carrier;
270 unsigned long tx_late_collision;
271 unsigned long tx_excessive_collision;
272 unsigned long tx_jabber_timeout;
273 unsigned long reset_count;
274 unsigned long reset_cr8;
275 unsigned long reset_fatal;
276 unsigned long reset_TXtimeout;
277
278 /* NIC SROM data */
279 unsigned char srom[128];
280};
281
282enum dmfe_offsets {
283 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
284 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
285 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
286 DCR15 = 0x78
287};
288
289enum dmfe_CR6_bits {
290 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
291 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
292 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
293};
294
295/* Global variable declaration ----------------------------- */
296static int __devinitdata printed_version;
297static const char version[] __devinitconst =
298 "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
299
300static int dmfe_debug;
301static unsigned char dmfe_media_mode = DMFE_AUTO;
302static u32 dmfe_cr6_user_set;
303
304/* For module input parameter */
305static int debug;
306static u32 cr6set;
307static unsigned char mode = 8;
308static u8 chkmode = 1;
309static u8 HPNA_mode; /* Default: Low Power/High Speed */
310static u8 HPNA_rx_cmd; /* Default: Disable Rx remote command */
311static u8 HPNA_tx_cmd; /* Default: Don't issue remote command */
312static u8 HPNA_NoiseFloor; /* Default: HPNA NoiseFloor */
313static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
314 4: TX pause packet */
315
316
317/* function declaration ------------------------------------- */
318static int dmfe_open(struct DEVICE *);
319static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
320static int dmfe_stop(struct DEVICE *);
321static void dmfe_set_filter_mode(struct DEVICE *);
322static const struct ethtool_ops netdev_ethtool_ops;
323static u16 read_srom_word(long ,int);
324static irqreturn_t dmfe_interrupt(int , void *);
325#ifdef CONFIG_NET_POLL_CONTROLLER
326static void poll_dmfe (struct net_device *dev);
327#endif
328static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
329static void allocate_rx_buffer(struct dmfe_board_info *);
330static void update_cr6(u32, unsigned long);
331static void send_filter_frame(struct DEVICE *);
332static void dm9132_id_table(struct DEVICE *);
333static u16 phy_read(unsigned long, u8, u8, u32);
334static void phy_write(unsigned long, u8, u8, u16, u32);
335static void phy_write_1bit(unsigned long, u32);
336static u16 phy_read_1bit(unsigned long);
337static u8 dmfe_sense_speed(struct dmfe_board_info *);
338static void dmfe_process_mode(struct dmfe_board_info *);
339static void dmfe_timer(unsigned long);
340static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
341static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
342static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
343static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
344static void dmfe_dynamic_reset(struct DEVICE *);
345static void dmfe_free_rxbuffer(struct dmfe_board_info *);
346static void dmfe_init_dm910x(struct DEVICE *);
347static void dmfe_parse_srom(struct dmfe_board_info *);
348static void dmfe_program_DM9801(struct dmfe_board_info *, int);
349static void dmfe_program_DM9802(struct dmfe_board_info *);
350static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * );
351static void dmfe_set_phyxcer(struct dmfe_board_info *);
352
353/* DM910X network board routine ---------------------------- */
354
355static const struct net_device_ops netdev_ops = {
356 .ndo_open = dmfe_open,
357 .ndo_stop = dmfe_stop,
358 .ndo_start_xmit = dmfe_start_xmit,
359 .ndo_set_rx_mode = dmfe_set_filter_mode,
360 .ndo_change_mtu = eth_change_mtu,
361 .ndo_set_mac_address = eth_mac_addr,
362 .ndo_validate_addr = eth_validate_addr,
363#ifdef CONFIG_NET_POLL_CONTROLLER
364 .ndo_poll_controller = poll_dmfe,
365#endif
366};
367
368/*
369 * Search for a DM910X board, allocate space and register it
370 */
371
372static int __devinit dmfe_init_one (struct pci_dev *pdev,
373 const struct pci_device_id *ent)
374{
375 struct dmfe_board_info *db; /* board information structure */
376 struct net_device *dev;
377 u32 pci_pmr;
378 int i, err;
379
380 DMFE_DBUG(0, "dmfe_init_one()", 0);
381
382 if (!printed_version++)
383 pr_info("%s\n", version);
384
385 /*
386 * SPARC on-board DM910x chips should be handled by the main
387 * tulip driver, except for early DM9100s.
388 */
389#ifdef CONFIG_TULIP_DM910X
390 if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
391 ent->driver_data == PCI_DM9102_ID) {
392 struct device_node *dp = pci_device_to_OF_node(pdev);
393
394 if (dp && of_get_property(dp, "local-mac-address", NULL)) {
395 pr_info("skipping on-board DM910x (use tulip)\n");
396 return -ENODEV;
397 }
398 }
399#endif
400
401 /* Init network device */
402 dev = alloc_etherdev(sizeof(*db));
403 if (dev == NULL)
404 return -ENOMEM;
405 SET_NETDEV_DEV(dev, &pdev->dev);
406
407 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
408 pr_warn("32-bit PCI DMA not available\n");
409 err = -ENODEV;
410 goto err_out_free;
411 }
412
413 /* Enable Master/IO access, Disable memory access */
414 err = pci_enable_device(pdev);
415 if (err)
416 goto err_out_free;
417
418 if (!pci_resource_start(pdev, 0)) {
419 pr_err("I/O base is zero\n");
420 err = -ENODEV;
421 goto err_out_disable;
422 }
423
424 if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
425 pr_err("Allocated I/O size too small\n");
426 err = -ENODEV;
427 goto err_out_disable;
428 }
429
430#if 0 /* pci_{enable_device,set_master} sets minimum latency for us now */
431
432 /* Set Latency Timer 80h */
433 /* FIXME: setting values > 32 breaks some SiS 559x stuff.
434 Need a PCI quirk.. */
435
436 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x80);
437#endif
438
439 if (pci_request_regions(pdev, DRV_NAME)) {
440 pr_err("Failed to request PCI regions\n");
441 err = -ENODEV;
442 goto err_out_disable;
443 }
444
445 /* Init system & device */
446 db = netdev_priv(dev);
447
448 /* Allocate Tx/Rx descriptor memory */
449 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
450 DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
451 if (!db->desc_pool_ptr)
452 goto err_out_res;
453
454 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
455 TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
456 if (!db->buf_pool_ptr)
457 goto err_out_free_desc;
458
459 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
460 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
461 db->buf_pool_start = db->buf_pool_ptr;
462 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
463
464 db->chip_id = ent->driver_data;
465 db->ioaddr = pci_resource_start(pdev, 0);
466 db->chip_revision = pdev->revision;
467 db->wol_mode = 0;
468
469 db->pdev = pdev;
470
471 dev->base_addr = db->ioaddr;
472 dev->irq = pdev->irq;
473 pci_set_drvdata(pdev, dev);
474 dev->netdev_ops = &netdev_ops;
475 dev->ethtool_ops = &netdev_ethtool_ops;
476 netif_carrier_off(dev);
477 spin_lock_init(&db->lock);
478
479 pci_read_config_dword(pdev, 0x50, &pci_pmr);
480 pci_pmr &= 0x70000;
481 if ( (pci_pmr == 0x10000) && (db->chip_revision == 0x31) )
482 db->chip_type = 1; /* DM9102A E3 */
483 else
484 db->chip_type = 0;
485
486 /* read 64 word srom data */
487 for (i = 0; i < 64; i++)
488 ((__le16 *) db->srom)[i] =
489 cpu_to_le16(read_srom_word(db->ioaddr, i));
490
491 /* Set Node address */
492 for (i = 0; i < 6; i++)
493 dev->dev_addr[i] = db->srom[20 + i];
494
495 err = register_netdev (dev);
496 if (err)
497 goto err_out_free_buf;
498
499 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
500 ent->driver_data >> 16,
501 pci_name(pdev), dev->dev_addr, dev->irq);
502
503 pci_set_master(pdev);
504
505 return 0;
506
507err_out_free_buf:
508 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
509 db->buf_pool_ptr, db->buf_pool_dma_ptr);
510err_out_free_desc:
511 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
512 db->desc_pool_ptr, db->desc_pool_dma_ptr);
513err_out_res:
514 pci_release_regions(pdev);
515err_out_disable:
516 pci_disable_device(pdev);
517err_out_free:
518 pci_set_drvdata(pdev, NULL);
519 free_netdev(dev);
520
521 return err;
522}
523
524
525static void __devexit dmfe_remove_one (struct pci_dev *pdev)
526{
527 struct net_device *dev = pci_get_drvdata(pdev);
528 struct dmfe_board_info *db = netdev_priv(dev);
529
530 DMFE_DBUG(0, "dmfe_remove_one()", 0);
531
532 if (dev) {
533
534 unregister_netdev(dev);
535
536 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
537 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
538 db->desc_pool_dma_ptr);
539 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
540 db->buf_pool_ptr, db->buf_pool_dma_ptr);
541 pci_release_regions(pdev);
542 free_netdev(dev); /* free board information */
543
544 pci_set_drvdata(pdev, NULL);
545 }
546
547 DMFE_DBUG(0, "dmfe_remove_one() exit", 0);
548}
549
550
551/*
552 * Open the interface.
553 * The interface is opened whenever "ifconfig" activates it.
554 */
555
556static int dmfe_open(struct DEVICE *dev)
557{
558 int ret;
559 struct dmfe_board_info *db = netdev_priv(dev);
560
561 DMFE_DBUG(0, "dmfe_open", 0);
562
563 ret = request_irq(dev->irq, dmfe_interrupt,
564 IRQF_SHARED, dev->name, dev);
565 if (ret)
566 return ret;
567
568 /* system variable init */
569 db->cr6_data = CR6_DEFAULT | dmfe_cr6_user_set;
570 db->tx_packet_cnt = 0;
571 db->tx_queue_cnt = 0;
572 db->rx_avail_cnt = 0;
573 db->wait_reset = 0;
574
575 db->first_in_callback = 0;
576 db->NIC_capability = 0xf; /* All capabilities */
577 db->PHY_reg4 = 0x1e0;
578
579 /* CR6 operation mode decision */
580 if ( !chkmode || (db->chip_id == PCI_DM9132_ID) ||
581 (db->chip_revision >= 0x30) ) {
582 db->cr6_data |= DMFE_TXTH_256;
583 db->cr0_data = CR0_DEFAULT;
584 db->dm910x_chk_mode=4; /* Enter the normal mode */
585 } else {
586 db->cr6_data |= CR6_SFT; /* Store & Forward mode */
587 db->cr0_data = 0;
588 db->dm910x_chk_mode = 1; /* Enter the check mode */
589 }
590
591 /* Initialize DM910X board */
592 dmfe_init_dm910x(dev);
593
594 /* Activate system interface */
595 netif_wake_queue(dev);
596
597 /* set and activate a timer process */
598 init_timer(&db->timer);
599 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
600 db->timer.data = (unsigned long)dev;
601 db->timer.function = dmfe_timer;
602 add_timer(&db->timer);
603
604 return 0;
605}
606
607
608/* Initialize DM910X board
609 * Reset DM910X board
610 * Initialize TX/Rx descriptor chain structure
611 * Send the set-up frame
612 * Enable Tx/Rx machine
613 */
614
615static void dmfe_init_dm910x(struct DEVICE *dev)
616{
617 struct dmfe_board_info *db = netdev_priv(dev);
618 unsigned long ioaddr = db->ioaddr;
619
620 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
621
622 /* Reset DM910x MAC controller */
623 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */
624 udelay(100);
625 outl(db->cr0_data, ioaddr + DCR0);
626 udelay(5);
627
628 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
629 db->phy_addr = 1;
630
631 /* Parse SROM and media mode */
632 dmfe_parse_srom(db);
633 db->media_mode = dmfe_media_mode;
634
635 /* RESET Phyxcer Chip by GPR port bit 7 */
636 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */
637 if (db->chip_id == PCI_DM9009_ID) {
638 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */
639 mdelay(300); /* Delay 300 ms */
640 }
641 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */
642
643 /* Process Phyxcer Media Mode */
644 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
645 dmfe_set_phyxcer(db);
646
647 /* Media Mode Process */
648 if ( !(db->media_mode & DMFE_AUTO) )
649 db->op_mode = db->media_mode; /* Force Mode */
650
651 /* Initialize Transmit/Receive descriptor and CR3/4 */
652 dmfe_descriptor_init(db, ioaddr);
653
654 /* Init CR6 to program DM910x operation */
655 update_cr6(db->cr6_data, ioaddr);
656
657 /* Send setup frame */
658 if (db->chip_id == PCI_DM9132_ID)
659 dm9132_id_table(dev); /* DM9132 */
660 else
661 send_filter_frame(dev); /* DM9102/DM9102A */
662
663 /* Init CR7, interrupt active bit */
664 db->cr7_data = CR7_DEFAULT;
665 outl(db->cr7_data, ioaddr + DCR7);
666
667 /* Init CR15, Tx jabber and Rx watchdog timer */
668 outl(db->cr15_data, ioaddr + DCR15);
669
670 /* Enable DM910X Tx/Rx function */
671 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
672 update_cr6(db->cr6_data, ioaddr);
673}
674
675
676/*
677 * Hardware start transmission.
678 * Send a packet from the upper layer to the media.
679 */
680
681static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
682 struct DEVICE *dev)
683{
684 struct dmfe_board_info *db = netdev_priv(dev);
685 struct tx_desc *txptr;
686 unsigned long flags;
687
688 DMFE_DBUG(0, "dmfe_start_xmit", 0);
689
690 /* Too large packet check */
691 if (skb->len > MAX_PACKET_SIZE) {
692 pr_err("big packet = %d\n", (u16)skb->len);
693 dev_kfree_skb(skb);
694 return NETDEV_TX_OK;
695 }
696
697 /* Resource flag check */
698 netif_stop_queue(dev);
699
700 spin_lock_irqsave(&db->lock, flags);
701
702 /* No Tx resource check; this should never happen normally */
703 if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
704 spin_unlock_irqrestore(&db->lock, flags);
705 pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
706 return NETDEV_TX_BUSY;
707 }
708
709 /* Disable NIC interrupt */
710 outl(0, dev->base_addr + DCR7);
711
712 /* transmit this packet */
713 txptr = db->tx_insert_ptr;
714 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
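	/* 0xe1000000 = IC | LS | FS | TCH: interrupt on completion, the
	   sole segment, chained to the next descriptor (bit meanings
	   assumed from the generic tulip TDES1 layout) */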
715 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
716
717 /* Point to next transmit free descriptor */
718 db->tx_insert_ptr = txptr->next_tx_desc;
719
720 /* Transmit Packet Process */
721 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
722 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
723 db->tx_packet_cnt++; /* Ready to send */
724 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
725 dev->trans_start = jiffies; /* saved time stamp */
726 } else {
727 db->tx_queue_cnt++; /* queue TX packet */
728 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
729 }
730
731 /* Tx resource check */
732 if ( db->tx_queue_cnt < TX_FREE_DESC_CNT )
733 netif_wake_queue(dev);
734
735 /* Restore CR7 to enable interrupt */
736 spin_unlock_irqrestore(&db->lock, flags);
737 outl(db->cr7_data, dev->base_addr + DCR7);
738
739 /* free this SKB */
740 dev_kfree_skb(skb);
741
742 return NETDEV_TX_OK;
743}
744
745
746/*
747 * Stop the interface.
748 * The interface is stopped when it is brought down.
749 */
750
751static int dmfe_stop(struct DEVICE *dev)
752{
753 struct dmfe_board_info *db = netdev_priv(dev);
754 unsigned long ioaddr = dev->base_addr;
755
756 DMFE_DBUG(0, "dmfe_stop", 0);
757
758 /* disable system */
759 netif_stop_queue(dev);
760
761 /* delete timer */
762 del_timer_sync(&db->timer);
763
764 /* Reset & stop DM910X board */
765 outl(DM910X_RESET, ioaddr + DCR0);
766 udelay(5);
767 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
768
769 /* free interrupt */
770 free_irq(dev->irq, dev);
771
772 /* free allocated rx buffer */
773 dmfe_free_rxbuffer(db);
774
775#if 0
776 /* show statistic counter */
777 printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
778 db->tx_fifo_underrun, db->tx_excessive_collision,
779 db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
780 db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
781 db->reset_fatal, db->reset_TXtimeout);
782#endif
783
784 return 0;
785}
786
787
788/*
789 * DM9102 interrupt handler
790 * pass received packets to the upper layer, free transmitted packets
791 */
792
793static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
794{
795 struct DEVICE *dev = dev_id;
796 struct dmfe_board_info *db = netdev_priv(dev);
797 unsigned long ioaddr = dev->base_addr;
798 unsigned long flags;
799
800 DMFE_DBUG(0, "dmfe_interrupt()", 0);
801
802 spin_lock_irqsave(&db->lock, flags);
803
804 /* Got DM910X status */
805 db->cr5_data = inl(ioaddr + DCR5);
806 outl(db->cr5_data, ioaddr + DCR5);
807 if ( !(db->cr5_data & 0xc1) ) {
808 spin_unlock_irqrestore(&db->lock, flags);
809 return IRQ_HANDLED;
810 }
811
812 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
813 outl(0, ioaddr + DCR7);
814
815 /* Check system status */
816 if (db->cr5_data & 0x2000) {
817 /* system bus error happened */
818 DMFE_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
819 db->reset_fatal++;
820 db->wait_reset = 1; /* Need to RESET */
821 spin_unlock_irqrestore(&db->lock, flags);
822 return IRQ_HANDLED;
823 }
824
825 /* Receive incoming packets */
826 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
827 dmfe_rx_packet(dev, db);
828
829 /* reallocate rx descriptor buffer */
830 if (db->rx_avail_cnt<RX_DESC_CNT)
831 allocate_rx_buffer(db);
832
833 /* Free the transmitted descriptor */
834 if ( db->cr5_data & 0x01)
835 dmfe_free_tx_pkt(dev, db);
836
837 /* Mode Check */
838 if (db->dm910x_chk_mode & 0x2) {
839 db->dm910x_chk_mode = 0x4;
840 db->cr6_data |= 0x100;
841 update_cr6(db->cr6_data, db->ioaddr);
842 }
843
844 /* Restore CR7 to enable interrupt mask */
845 outl(db->cr7_data, ioaddr + DCR7);
846
847 spin_unlock_irqrestore(&db->lock, flags);
848 return IRQ_HANDLED;
849}
850
851
852#ifdef CONFIG_NET_POLL_CONTROLLER
853/*
854 * Polling 'interrupt' - used by things like netconsole to send skbs
855 * without having to re-enable interrupts. It's not called while
856 * the interrupt routine is executing.
857 */
858
859static void poll_dmfe (struct net_device *dev)
860{
861 /* disable_irq here is not very nice, but with the lockless
862 interrupt handler we have no other choice. */
863 disable_irq(dev->irq);
864 dmfe_interrupt (dev->irq, dev);
865 enable_irq(dev->irq);
866}
867#endif
868
869/*
870 * Free TX resource after TX complete
871 */
872
873static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
874{
875 struct tx_desc *txptr;
876 unsigned long ioaddr = dev->base_addr;
877 u32 tdes0;
878
879 txptr = db->tx_remove_ptr;
880 while(db->tx_packet_cnt) {
881 tdes0 = le32_to_cpu(txptr->tdes0);
882 if (tdes0 & 0x80000000)
883 break;
884
885 /* A packet transmission completed */
886 db->tx_packet_cnt--;
887 dev->stats.tx_packets++;
888
889 /* Transmit statistic counter */
890 if ( tdes0 != 0x7fffffff ) {
891 dev->stats.collisions += (tdes0 >> 3) & 0xf;
892 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
893 if (tdes0 & TDES0_ERR_MASK) {
894 dev->stats.tx_errors++;
895
896 if (tdes0 & 0x0002) { /* UnderRun */
897 db->tx_fifo_underrun++;
898 if ( !(db->cr6_data & CR6_SFT) ) {
899 db->cr6_data = db->cr6_data | CR6_SFT;
900 update_cr6(db->cr6_data, db->ioaddr);
901 }
902 }
903 if (tdes0 & 0x0100)
904 db->tx_excessive_collision++;
905 if (tdes0 & 0x0200)
906 db->tx_late_collision++;
907 if (tdes0 & 0x0400)
908 db->tx_no_carrier++;
909 if (tdes0 & 0x0800)
910 db->tx_loss_carrier++;
911 if (tdes0 & 0x4000)
912 db->tx_jabber_timeout++;
913 }
914 }
915
916 txptr = txptr->next_tx_desc;
917 }/* End of while */
918
919 /* Update TX remove pointer to next */
920 db->tx_remove_ptr = txptr;
921
922 /* Send the Tx packet in queue */
923 if ( (db->tx_packet_cnt < TX_MAX_SEND_CNT) && db->tx_queue_cnt ) {
924 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
925 db->tx_packet_cnt++; /* Ready to send */
926 db->tx_queue_cnt--;
927 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */
928 dev->trans_start = jiffies; /* saved time stamp */
929 }
930
931 /* Resource available check */
932 if ( db->tx_queue_cnt < TX_WAKE_DESC_CNT )
933 netif_wake_queue(dev); /* Wake the upper layer to send again */
934}
935
936
937/*
938 * Calculate the CRC value of the Rx packet
939 * flag = 1 : return the reverse CRC (for the received packet CRC)
940 * 0 : return the normal CRC (for Hash Table index)
941 */
942
943static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
944{
945 u32 crc = crc32(~0, Data, Len);
946 if (flag) crc = ~crc;
947 return crc;
948}
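/* Usage note: the flag=0 form feeds dm9132_id_table() below, which keeps
   the low 6 bits of the CRC as the index into the 64-bit hash table. */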
949
950
951/*
952 * Receive incoming packets and pass them to the upper layer
953 */
954
955static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
956{
957 struct rx_desc *rxptr;
958 struct sk_buff *skb, *newskb;
959 int rxlen;
960 u32 rdes0;
961
962 rxptr = db->rx_ready_ptr;
963
964 while(db->rx_avail_cnt) {
965 rdes0 = le32_to_cpu(rxptr->rdes0);
966 if (rdes0 & 0x80000000) /* packet owner check */
967 break;
968
969 db->rx_avail_cnt--;
970 db->interval_rx_cnt++;
971
972 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
973 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
974
975 if ( (rdes0 & 0x300) != 0x300) {
976 /* A packet without First/Last flag */
977 /* reuse this SKB */
978 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
979 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
980 } else {
981 /* A packet with First/Last flag */
982 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
983
984 /* error summary bit check */
985 if (rdes0 & 0x8000) {
986 /* This is an error packet */
987 dev->stats.rx_errors++;
988 if (rdes0 & 1)
989 dev->stats.rx_fifo_errors++;
990 if (rdes0 & 2)
991 dev->stats.rx_crc_errors++;
992 if (rdes0 & 0x80)
993 dev->stats.rx_length_errors++;
994 }
995
996 if ( !(rdes0 & 0x8000) ||
997 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
998 skb = rxptr->rx_skb_ptr;
999
1000 /* Check the received packet CRC if required */
1001 if ( (db->dm910x_chk_mode & 1) &&
1002 (cal_CRC(skb->data, rxlen, 1) !=
1003 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
1004 /* Found an erroneous received packet */
1005 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1006 db->dm910x_chk_mode = 3;
1007 } else {
1008 /* Good packet, send to upper layer */
1009 /* Short packets are copied into a new SKB */
1010 if ((rxlen < RX_COPY_SIZE) &&
1011 ((newskb = dev_alloc_skb(rxlen + 2))
1012 != NULL)) {
1013
1014 skb = newskb;
1015 /* size less than COPY_SIZE, allocate a rxlen SKB */
1016 skb_reserve(skb, 2); /* 16byte align */
1017 skb_copy_from_linear_data(rxptr->rx_skb_ptr,
1018 skb_put(skb, rxlen),
1019 rxlen);
1020 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1021 } else
1022 skb_put(skb, rxlen);
1023
1024 skb->protocol = eth_type_trans(skb, dev);
1025 netif_rx(skb);
1026 dev->stats.rx_packets++;
1027 dev->stats.rx_bytes += rxlen;
1028 }
1029 } else {
1030 /* Reuse SKB buffer when the packet is error */
1031 DMFE_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
1032 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
1033 }
1034 }
1035
1036 rxptr = rxptr->next_rx_desc;
1037 }
1038
1039 db->rx_ready_ptr = rxptr;
1040}
1041
1042/*
1043 * Set DM910X multicast address
1044 */
1045
1046static void dmfe_set_filter_mode(struct DEVICE * dev)
1047{
1048 struct dmfe_board_info *db = netdev_priv(dev);
1049 unsigned long flags;
1050 int mc_count = netdev_mc_count(dev);
1051
1052 DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
1053 spin_lock_irqsave(&db->lock, flags);
1054
1055 if (dev->flags & IFF_PROMISC) {
1056 DMFE_DBUG(0, "Enable PROM Mode", 0);
1057 db->cr6_data |= CR6_PM | CR6_PBF;
1058 update_cr6(db->cr6_data, db->ioaddr);
1059 spin_unlock_irqrestore(&db->lock, flags);
1060 return;
1061 }
1062
1063 if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
1064 DMFE_DBUG(0, "Pass all multicast address", mc_count);
1065 db->cr6_data &= ~(CR6_PM | CR6_PBF);
1066 db->cr6_data |= CR6_PAM;
1067 spin_unlock_irqrestore(&db->lock, flags);
1068 return;
1069 }
1070
1071 DMFE_DBUG(0, "Set multicast address", mc_count);
1072 if (db->chip_id == PCI_DM9132_ID)
1073 dm9132_id_table(dev); /* DM9132 */
1074 else
1075 send_filter_frame(dev); /* DM9102/DM9102A */
1076 spin_unlock_irqrestore(&db->lock, flags);
1077}
1078
1079/*
1080 * Ethtool interface
1081 */
1082
1083static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1084 struct ethtool_drvinfo *info)
1085{
1086 struct dmfe_board_info *np = netdev_priv(dev);
1087
1088 strcpy(info->driver, DRV_NAME);
1089 strcpy(info->version, DRV_VERSION);
1090 if (np->pdev)
1091 strcpy(info->bus_info, pci_name(np->pdev));
1092 else
1093 sprintf(info->bus_info, "EISA 0x%lx %d",
1094 dev->base_addr, dev->irq);
1095}
1096
1097static int dmfe_ethtool_set_wol(struct net_device *dev,
1098 struct ethtool_wolinfo *wolinfo)
1099{
1100 struct dmfe_board_info *db = netdev_priv(dev);
1101
1102 if (wolinfo->wolopts & (WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
1103 WAKE_ARP | WAKE_MAGICSECURE))
1104 return -EOPNOTSUPP;
1105
1106 db->wol_mode = wolinfo->wolopts;
1107 return 0;
1108}
1109
1110static void dmfe_ethtool_get_wol(struct net_device *dev,
1111 struct ethtool_wolinfo *wolinfo)
1112{
1113 struct dmfe_board_info *db = netdev_priv(dev);
1114
1115 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1116 wolinfo->wolopts = db->wol_mode;
1117}
1118
1119
1120static const struct ethtool_ops netdev_ethtool_ops = {
1121 .get_drvinfo = dmfe_ethtool_get_drvinfo,
1122 .get_link = ethtool_op_get_link,
1123 .set_wol = dmfe_ethtool_set_wol,
1124 .get_wol = dmfe_ethtool_get_wol,
1125};
1126
1127/*
1128 * A periodic timer routine
1129 * Dynamic media sense, allocate Rx buffer...
1130 */
1131
1132static void dmfe_timer(unsigned long data)
1133{
1134 u32 tmp_cr8;
1135 unsigned char tmp_cr12;
1136 struct DEVICE *dev = (struct DEVICE *) data;
1137 struct dmfe_board_info *db = netdev_priv(dev);
1138 unsigned long flags;
1139
1140 int link_ok, link_ok_phy;
1141
1142 DMFE_DBUG(0, "dmfe_timer()", 0);
1143 spin_lock_irqsave(&db->lock, flags);
1144
1145 /* Media mode processing when the link was OK before entering this routine */
1146 if (db->first_in_callback == 0) {
1147 db->first_in_callback = 1;
1148 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1149 db->cr6_data &= ~0x40000;
1150 update_cr6(db->cr6_data, db->ioaddr);
1151 phy_write(db->ioaddr,
1152 db->phy_addr, 0, 0x1000, db->chip_id);
1153 db->cr6_data |= 0x40000;
1154 update_cr6(db->cr6_data, db->ioaddr);
1155 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1156 add_timer(&db->timer);
1157 spin_unlock_irqrestore(&db->lock, flags);
1158 return;
1159 }
1160 }
1161
1162
1163 /* Operating Mode Check */
1164 if ( (db->dm910x_chk_mode & 0x1) &&
1165 (dev->stats.rx_packets > MAX_CHECK_PACKET) )
1166 db->dm910x_chk_mode = 0x4;
1167
1168 /* Dynamic reset DM910X : system error or transmit time-out */
1169 tmp_cr8 = inl(db->ioaddr + DCR8);
1170 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1171 db->reset_cr8++;
1172 db->wait_reset = 1;
1173 }
1174 db->interval_rx_cnt = 0;
1175
1176 /* TX polling kick monitor */
1177 if ( db->tx_packet_cnt &&
1178 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1179 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1180
1181 /* TX Timeout */
1182 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1183 db->reset_TXtimeout++;
1184 db->wait_reset = 1;
1185 dev_warn(&dev->dev, "Tx timeout - resetting\n");
1186 }
1187 }
1188
1189 if (db->wait_reset) {
1190 DMFE_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1191 db->reset_count++;
1192 dmfe_dynamic_reset(dev);
1193 db->first_in_callback = 0;
1194 db->timer.expires = DMFE_TIMER_WUT;
1195 add_timer(&db->timer);
1196 spin_unlock_irqrestore(&db->lock, flags);
1197 return;
1198 }
1199
1200 /* Link status check, Dynamic media type change */
1201 if (db->chip_id == PCI_DM9132_ID)
1202 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */
1203 else
1204 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */
1205
1206 if ( ((db->chip_id == PCI_DM9102_ID) &&
1207 (db->chip_revision == 0x30)) ||
1208 ((db->chip_id == PCI_DM9132_ID) &&
1209 (db->chip_revision == 0x10)) ) {
1210 /* DM9102A Chip */
1211 if (tmp_cr12 & 2)
1212 link_ok = 0;
1213 else
1214 link_ok = 1;
1215 }
1216 else
1217 /* 0x43 is used instead of 0x3 because bit 6 should represent
1218 the link status of the external PHY */
1219 link_ok = (tmp_cr12 & 0x43) ? 1 : 0;
1220
1221
1222 /* If the chip reports link failure, it could be because the external
1223 PHY link status pin is not connected correctly to the chip.
1224 To be sure, ask the PHY too.
1225 */
1226
1227 /* need a dummy read because of PHY's register latch*/
1228 phy_read (db->ioaddr, db->phy_addr, 1, db->chip_id);
1229 link_ok_phy = (phy_read (db->ioaddr,
1230 db->phy_addr, 1, db->chip_id) & 0x4) ? 1 : 0;
1231
1232 if (link_ok_phy != link_ok) {
1233 DMFE_DBUG (0, "PHY and chip report different link status", 0);
1234 link_ok = link_ok | link_ok_phy;
1235 }
1236
1237 if ( !link_ok && netif_carrier_ok(dev)) {
1238 /* Link Failed */
1239 DMFE_DBUG(0, "Link Failed", tmp_cr12);
1240 netif_carrier_off(dev);
1241
1242 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1243 /* AUTO or force 1M Homerun/Longrun don't need */
1244 if ( !(db->media_mode & 0x38) )
1245 phy_write(db->ioaddr, db->phy_addr,
1246 0, 0x1000, db->chip_id);
1247
1248 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1249 if (db->media_mode & DMFE_AUTO) {
1250 /* 10/100M link failed, use 1M Home-Net */
1251 db->cr6_data|=0x00040000; /* bit18=1, MII */
1252 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1253 update_cr6(db->cr6_data, db->ioaddr);
1254 }
1255 } else if (!netif_carrier_ok(dev)) {
1256
1257 DMFE_DBUG(0, "Link link OK", tmp_cr12);
1258
1259 /* Auto Sense Speed */
1260 if ( !(db->media_mode & DMFE_AUTO) || !dmfe_sense_speed(db)) {
1261 netif_carrier_on(dev);
1262 SHOW_MEDIA_TYPE(db->op_mode);
1263 }
1264
1265 dmfe_process_mode(db);
1266 }
1267
1268 /* HPNA remote command check */
1269 if (db->HPNA_command & 0xf00) {
1270 db->HPNA_timer--;
1271 if (!db->HPNA_timer)
1272 dmfe_HPNA_remote_cmd_chk(db);
1273 }
1274
1275 /* Timer active again */
1276 db->timer.expires = DMFE_TIMER_WUT;
1277 add_timer(&db->timer);
1278 spin_unlock_irqrestore(&db->lock, flags);
1279}
1280
1281
1282/*
1283 * Dynamically reset the DM910X board
1284 * Stop DM910X board
1285 * Free Tx/Rx allocated memory
1286 * Reset DM910X board
1287 * Re-initialize DM910X board
1288 */
1289
1290static void dmfe_dynamic_reset(struct DEVICE *dev)
1291{
1292 struct dmfe_board_info *db = netdev_priv(dev);
1293
1294 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1295
1296 /* Stop MAC controller */
1297 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1298 update_cr6(db->cr6_data, dev->base_addr);
1299 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1300 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1301
1302 /* Disable upper layer interface */
1303 netif_stop_queue(dev);
1304
1305 /* Free Rx Allocate buffer */
1306 dmfe_free_rxbuffer(db);
1307
1308 /* system variable init */
1309 db->tx_packet_cnt = 0;
1310 db->tx_queue_cnt = 0;
1311 db->rx_avail_cnt = 0;
1312 netif_carrier_off(dev);
1313 db->wait_reset = 0;
1314
1315 /* Re-initialize DM910X board */
1316 dmfe_init_dm910x(dev);
1317
1318 /* Restart upper layer interface */
1319 netif_wake_queue(dev);
1320}
1321
1322
1323/*
1324 * free all allocated rx buffers
1325 */
1326
1327static void dmfe_free_rxbuffer(struct dmfe_board_info * db)
1328{
1329 DMFE_DBUG(0, "dmfe_free_rxbuffer()", 0);
1330
1331 /* free allocated rx buffer */
1332 while (db->rx_avail_cnt) {
1333 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1334 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1335 db->rx_avail_cnt--;
1336 }
1337}
1338
1339
1340/*
1341 * Reuse the SK buffer
1342 */
1343
1344static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1345{
1346 struct rx_desc *rxptr = db->rx_insert_ptr;
1347
1348 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1349 rxptr->rx_skb_ptr = skb;
1350 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
1351 skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1352 wmb();
1353 rxptr->rdes0 = cpu_to_le32(0x80000000);
1354 db->rx_avail_cnt++;
1355 db->rx_insert_ptr = rxptr->next_rx_desc;
1356 } else
1357 DMFE_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1358}
1359
1360
1361/*
1362 * Initialize transmit/Receive descriptor
1363 * Using Chain structure, and allocate Tx/Rx buffer
1364 */
1365
1366static void dmfe_descriptor_init(struct dmfe_board_info *db, unsigned long ioaddr)
1367{
1368 struct tx_desc *tmp_tx;
1369 struct rx_desc *tmp_rx;
1370 unsigned char *tmp_buf;
1371 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1372 dma_addr_t tmp_buf_dma;
1373 int i;
1374
1375 DMFE_DBUG(0, "dmfe_descriptor_init()", 0);
1376
1377 /* tx descriptor start pointer */
1378 db->tx_insert_ptr = db->first_tx_desc;
1379 db->tx_remove_ptr = db->first_tx_desc;
1380 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1381
1382 /* rx descriptor start pointer */
1383 db->first_rx_desc = (void *)db->first_tx_desc +
1384 sizeof(struct tx_desc) * TX_DESC_CNT;
1385
1386 db->first_rx_desc_dma = db->first_tx_desc_dma +
1387 sizeof(struct tx_desc) * TX_DESC_CNT;
1388 db->rx_insert_ptr = db->first_rx_desc;
1389 db->rx_ready_ptr = db->first_rx_desc;
1390 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1391
1392 /* Init Transmit chain */
1393 tmp_buf = db->buf_pool_start;
1394 tmp_buf_dma = db->buf_pool_dma_start;
1395 tmp_tx_dma = db->first_tx_desc_dma;
1396 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1397 tmp_tx->tx_buf_ptr = tmp_buf;
1398 tmp_tx->tdes0 = cpu_to_le32(0);
1399 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1400 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1401 tmp_tx_dma += sizeof(struct tx_desc);
1402 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1403 tmp_tx->next_tx_desc = tmp_tx + 1;
1404 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1405 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1406 }
1407 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1408 tmp_tx->next_tx_desc = db->first_tx_desc;
1409
1410 /* Init Receive descriptor chain */
1411 tmp_rx_dma=db->first_rx_desc_dma;
1412 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1413 tmp_rx->rdes0 = cpu_to_le32(0);
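		/* 0x01000600 = RCH | buffer 1 size 0x600: a chained Rx
		   descriptor sized for the Rx data buffer (bit meanings
		   assumed from the generic tulip RDES1 layout) */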
1414 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1415 tmp_rx_dma += sizeof(struct rx_desc);
1416 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1417 tmp_rx->next_rx_desc = tmp_rx + 1;
1418 }
1419 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1420 tmp_rx->next_rx_desc = db->first_rx_desc;
1421
1422 /* pre-allocate Rx buffer */
1423 allocate_rx_buffer(db);
1424}
1425
1426
1427/*
1428 * Update CR6 value
1429 * First stop the DM910X, then write the new value and restart
1430 */
1431
1432static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1433{
1434 u32 cr6_tmp;
1435
1436 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1437 outl(cr6_tmp, ioaddr + DCR6);
1438 udelay(5);
1439 outl(cr6_data, ioaddr + DCR6);
1440 udelay(5);
1441}
1442
1443
1444/*
1445 * Send a setup frame for DM9132
1446 This setup frame initializes the DM910X address filter mode
1447*/
1448
1449static void dm9132_id_table(struct DEVICE *dev)
1450{
1451 struct netdev_hw_addr *ha;
1452 u16 * addrptr;
1453 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1454 u32 hash_val;
1455 u16 i, hash_table[4];
1456
1457 DMFE_DBUG(0, "dm9132_id_table()", 0);
1458
1459 /* Node address */
1460 addrptr = (u16 *) dev->dev_addr;
1461 outw(addrptr[0], ioaddr);
1462 ioaddr += 4;
1463 outw(addrptr[1], ioaddr);
1464 ioaddr += 4;
1465 outw(addrptr[2], ioaddr);
1466 ioaddr += 4;
1467
1468 /* Clear Hash Table */
1469 memset(hash_table, 0, sizeof(hash_table));
1470
1471 /* broadcast address */
1472 hash_table[3] = 0x8000;
1473
1474 /* the multicast address in Hash Table : 64 bits */
1475 netdev_for_each_mc_addr(ha, dev) {
1476 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
1477 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1478 }
1479
1480 /* Write the hash table to MAC MD table */
1481 for (i = 0; i < 4; i++, ioaddr += 4)
1482 outw(hash_table[i], ioaddr);
1483}
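
dm9132_id_table() above reduces a CRC over each 6-byte multicast address to 6 bits, which select one of 64 bits spread across four 16-bit ID-table registers (bit 63 is set separately for broadcast). A userspace sketch of that indexing, assuming — since cal_CRC() is defined elsewhere in the driver — that it computes the standard reflected Ethernet CRC-32:

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32, poly 0xEDB88320 (assumed equivalent to cal_CRC). */
static uint32_t crc32_le(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash_table[4] = { 0, 0, 0, 0x8000 };	/* bit 63 = broadcast */
	uint32_t hash_val = crc32_le(mac, 6) & 0x3f;	/* 6 bits -> 0..63 */

	hash_table[hash_val / 16] |= (uint16_t)1 << (hash_val % 16);
	printf("bit %u -> hash_table[%u] = 0x%04x\n",
	       (unsigned)hash_val, (unsigned)(hash_val / 16),
	       hash_table[hash_val / 16]);
	return 0;
}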
1484
1485
1486/*
1487 * Send a setup frame for DM9102/DM9102A
1488 * This setup frame initializes the DM910X address filter mode
1489 */
1490
1491static void send_filter_frame(struct DEVICE *dev)
1492{
1493 struct dmfe_board_info *db = netdev_priv(dev);
1494 struct netdev_hw_addr *ha;
1495 struct tx_desc *txptr;
1496 u16 * addrptr;
1497 u32 * suptr;
1498 int i;
1499
1500 DMFE_DBUG(0, "send_filter_frame()", 0);
1501
1502 txptr = db->tx_insert_ptr;
1503 suptr = (u32 *) txptr->tx_buf_ptr;
1504
1505 /* Node address */
1506 addrptr = (u16 *) dev->dev_addr;
1507 *suptr++ = addrptr[0];
1508 *suptr++ = addrptr[1];
1509 *suptr++ = addrptr[2];
1510
1511 /* broadcast address */
1512 *suptr++ = 0xffff;
1513 *suptr++ = 0xffff;
1514 *suptr++ = 0xffff;
1515
1516 /* fit the multicast address */
1517 netdev_for_each_mc_addr(ha, dev) {
1518 addrptr = (u16 *) ha->addr;
1519 *suptr++ = addrptr[0];
1520 *suptr++ = addrptr[1];
1521 *suptr++ = addrptr[2];
1522 }
1523
1524 for (i = netdev_mc_count(dev); i < 14; i++) {
1525 *suptr++ = 0xffff;
1526 *suptr++ = 0xffff;
1527 *suptr++ = 0xffff;
1528 }
1529
1530 /* prepare the setup frame */
1531 db->tx_insert_ptr = txptr->next_tx_desc;
1532 txptr->tdes1 = cpu_to_le32(0x890000c0);
1533
1534 /* Resource Check and Send the setup packet */
1535 if (!db->tx_packet_cnt) {
1536 /* Resource Empty */
1537 db->tx_packet_cnt++;
1538 txptr->tdes0 = cpu_to_le32(0x80000000);
1539 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1540 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1541 update_cr6(db->cr6_data, dev->base_addr);
1542 dev->trans_start = jiffies;
1543 } else
1544 db->tx_queue_cnt++; /* Put in TX queue */
1545}
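
send_filter_frame() always emits a 192-byte setup frame: 16 perfect-filter slots of three 32-bit words, each word carrying one 16-bit half of a MAC address in its low bits. Slot 0 is the node address, slot 1 the broadcast address, then up to 14 multicast entries, with unused slots padded with broadcast. A hypothetical standalone sketch of that layout (little-endian host assumed, matching the driver's own u16 casts):

#include <stdint.h>
#include <stdio.h>

#define SLOTS 16

/* Fill a DM910X-style setup frame: 16 slots of three u32 words each,
 * every word holding one 16-bit half of a MAC address in its low bits. */
static void fill_setup_frame(uint32_t frame[SLOTS * 3],
			     const uint8_t node[6],
			     const uint8_t (*mc)[6], int mc_count)
{
	uint32_t *w = frame;
	const uint16_t *h = (const uint16_t *)node;
	int i, j;

	for (j = 0; j < 3; j++)			/* slot 0: node address */
		*w++ = h[j];
	for (j = 0; j < 3; j++)			/* slot 1: broadcast */
		*w++ = 0xffff;
	for (i = 0; i < mc_count && i < 14; i++)	/* multicast entries */
		for (h = (const uint16_t *)mc[i], j = 0; j < 3; j++)
			*w++ = h[j];
	for (; i < 14; i++)			/* pad with broadcast */
		for (j = 0; j < 3; j++)
			*w++ = 0xffff;
}

int main(void)
{
	uint32_t frame[SLOTS * 3];
	const uint8_t node[6] = { 0x00, 0x60, 0x6e, 0x00, 0x00, 0x01 };

	fill_setup_frame(frame, node, NULL, 0);
	printf("frame[3] (broadcast half) = 0x%x\n", (unsigned)frame[3]);
	return 0;
}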
1546
1547
1548/*
1549 * Allocate rx buffers,
1550 * filling the Rx ring with as many buffers as possible
1551 */
1552
1553static void allocate_rx_buffer(struct dmfe_board_info *db)
1554{
1555 struct rx_desc *rxptr;
1556 struct sk_buff *skb;
1557
1558 rxptr = db->rx_insert_ptr;
1559
1560 while(db->rx_avail_cnt < RX_DESC_CNT) {
1561 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1562 break;
1563 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1564 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
1565 RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1566 wmb();
1567 rxptr->rdes0 = cpu_to_le32(0x80000000);
1568 rxptr = rxptr->next_rx_desc;
1569 db->rx_avail_cnt++;
1570 }
1571
1572 db->rx_insert_ptr = rxptr;
1573}
1574
1575
1576/*
1577 * Read one word data from the serial ROM
1578 */
1579
1580static u16 read_srom_word(long ioaddr, int offset)
1581{
1582 int i;
1583 u16 srom_data = 0;
1584 long cr9_ioaddr = ioaddr + DCR9;
1585
1586 outl(CR9_SROM_READ, cr9_ioaddr);
1587 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1588
1589 /* Send the Read Command 110b */
1590 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1591 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1592 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1593
1594 /* Send the offset */
1595 for (i = 5; i >= 0; i--) {
1596 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1597 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1598 }
1599
1600 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1601
1602 for (i = 16; i > 0; i--) {
1603 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1604 udelay(5);
1605 srom_data = (srom_data << 1) |
1606 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1607 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1608 udelay(5);
1609 }
1610
1611 outl(CR9_SROM_READ, cr9_ioaddr);
1612 return srom_data;
1613}
1614
1615
1616/*
1617 * Auto sense the media mode
1618 */
1619
1620static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1621{
1622 u8 ErrFlag = 0;
1623 u16 phy_mode;
1624
1625 /* CR6 bit18=0, select 10/100M */
1626 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr);
1627
1628 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1629 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1630
1631 if ( (phy_mode & 0x24) == 0x24 ) {
1632 if (db->chip_id == PCI_DM9132_ID) /* DM9132 */
1633 phy_mode = phy_read(db->ioaddr,
1634 db->phy_addr, 7, db->chip_id) & 0xf000;
1635 else /* DM9102/DM9102A */
1636 phy_mode = phy_read(db->ioaddr,
1637 db->phy_addr, 17, db->chip_id) & 0xf000;
1638 switch (phy_mode) {
1639 case 0x1000: db->op_mode = DMFE_10MHF; break;
1640 case 0x2000: db->op_mode = DMFE_10MFD; break;
1641 case 0x4000: db->op_mode = DMFE_100MHF; break;
1642 case 0x8000: db->op_mode = DMFE_100MFD; break;
1643 default: db->op_mode = DMFE_10MHF;
1644 ErrFlag = 1;
1645 break;
1646 }
1647 } else {
1648 db->op_mode = DMFE_10MHF;
1649 DMFE_DBUG(0, "Link Failed :", phy_mode);
1650 ErrFlag = 1;
1651 }
1652
1653 return ErrFlag;
1654}
1655
1656
1657/*
1658 * Set 10/100 phyxcer capability
1659 * AUTO mode : phyxcer register 4 advertises the NIC capability
1660 * Force mode: phyxcer register 4 advertises the forced media
1661 */
1662
1663static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1664{
1665 u16 phy_reg;
1666
1667 /* Select 10/100M phyxcer */
1668 db->cr6_data &= ~0x40000;
1669 update_cr6(db->cr6_data, db->ioaddr);
1670
1671 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1672 if (db->chip_id == PCI_DM9009_ID) {
1673 phy_reg = phy_read(db->ioaddr,
1674 db->phy_addr, 18, db->chip_id) & ~0x1000;
1675
1676 phy_write(db->ioaddr,
1677 db->phy_addr, 18, phy_reg, db->chip_id);
1678 }
1679
1680 /* Phyxcer capability setting */
1681 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1682
1683 if (db->media_mode & DMFE_AUTO) {
1684 /* AUTO Mode */
1685 phy_reg |= db->PHY_reg4;
1686 } else {
1687 /* Force Mode */
1688 switch(db->media_mode) {
1689 case DMFE_10MHF: phy_reg |= 0x20; break;
1690 case DMFE_10MFD: phy_reg |= 0x40; break;
1691 case DMFE_100MHF: phy_reg |= 0x80; break;
1692 case DMFE_100MFD: phy_reg |= 0x100; break;
1693 }
1694 if (db->chip_id == PCI_DM9009_ID) phy_reg &= 0x61;
1695 }
1696
1697 /* Write new capability to Phyxcer Reg4 */
1698 if ( !(phy_reg & 0x01e0)) {
1699		phy_reg |= db->PHY_reg4;
1700		db->media_mode |= DMFE_AUTO;
1701 }
1702 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1703
1704 /* Restart Auto-Negotiation */
1705 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1706 phy_write(db->ioaddr, db->phy_addr, 0, 0x1800, db->chip_id);
1707 if ( !db->chip_type )
1708 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1709}
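
The force-mode constants written into PHY register 4 above are the standard IEEE MII auto-negotiation advertisement (ANAR) ability bits. A small sketch of the mapping, using stand-in media names since the real DMFE_* constants are defined elsewhere in the driver:

#include <stdint.h>
#include <stdio.h>

/* Illustrative media modes (stand-ins for the driver's DMFE_* values). */
enum media { M_10_HALF, M_10_FULL, M_100_HALF, M_100_FULL };

/* Map a forced media mode to the matching MII register 4 ability bit. */
static uint16_t anar_bit(enum media m)
{
	switch (m) {
	case M_10_HALF:		return 0x0020;	/* 10BASE-T half duplex */
	case M_10_FULL:		return 0x0040;	/* 10BASE-T full duplex */
	case M_100_HALF:	return 0x0080;	/* 100BASE-TX half duplex */
	case M_100_FULL:	return 0x0100;	/* 100BASE-TX full duplex */
	}
	return 0;
}

int main(void)
{
	printf("100 Mb/s full duplex advertises 0x%04x\n",
	       anar_bit(M_100_FULL));
	return 0;
}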
1710
1711
1712/*
1713 * Process op-mode
1714 * AUTO mode : PHY controller in Auto-negotiation Mode
1715 * Force mode: PHY controller in force mode with HUB
1716 * N-way force capability with SWITCH
1717 */
1718
1719static void dmfe_process_mode(struct dmfe_board_info *db)
1720{
1721 u16 phy_reg;
1722
1723 /* Full Duplex Mode Check */
1724 if (db->op_mode & 0x4)
1725 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1726 else
1727 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1728
1729	/* Transceiver Selection */
1730 if (db->op_mode & 0x10) /* 1M HomePNA */
1731 db->cr6_data |= 0x40000;/* External MII select */
1732 else
1733		db->cr6_data &= ~0x40000;/* Internal 10/100 transceiver */
1734
1735 update_cr6(db->cr6_data, db->ioaddr);
1736
1737	/* 10/100M phyxcer force mode needed */
1738 if ( !(db->media_mode & 0x18)) {
1739		/* Force Mode */
1740 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1741 if ( !(phy_reg & 0x1) ) {
1742			/* partner without N-Way capability */
1743 phy_reg = 0x0;
1744 switch(db->op_mode) {
1745 case DMFE_10MHF: phy_reg = 0x0; break;
1746 case DMFE_10MFD: phy_reg = 0x100; break;
1747 case DMFE_100MHF: phy_reg = 0x2000; break;
1748 case DMFE_100MFD: phy_reg = 0x2100; break;
1749 }
1750 phy_write(db->ioaddr,
1751 db->phy_addr, 0, phy_reg, db->chip_id);
1752 if ( db->chip_type && (db->chip_id == PCI_DM9102_ID) )
1753 mdelay(20);
1754 phy_write(db->ioaddr,
1755 db->phy_addr, 0, phy_reg, db->chip_id);
1756 }
1757 }
1758}
1759
1760
1761/*
1762 * Write a word to Phy register
1763 */
1764
1765static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1766 u16 phy_data, u32 chip_id)
1767{
1768 u16 i;
1769 unsigned long ioaddr;
1770
1771 if (chip_id == PCI_DM9132_ID) {
1772 ioaddr = iobase + 0x80 + offset * 4;
1773 outw(phy_data, ioaddr);
1774 } else {
1775 /* DM9102/DM9102A Chip */
1776 ioaddr = iobase + DCR9;
1777
1778		/* Send 35 synchronization clocks to the PHY controller */
1779 for (i = 0; i < 35; i++)
1780 phy_write_1bit(ioaddr, PHY_DATA_1);
1781
1782 /* Send start command(01) to Phy */
1783 phy_write_1bit(ioaddr, PHY_DATA_0);
1784 phy_write_1bit(ioaddr, PHY_DATA_1);
1785
1786 /* Send write command(01) to Phy */
1787 phy_write_1bit(ioaddr, PHY_DATA_0);
1788 phy_write_1bit(ioaddr, PHY_DATA_1);
1789
1790 /* Send Phy address */
1791 for (i = 0x10; i > 0; i = i >> 1)
1792 phy_write_1bit(ioaddr,
1793 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1794
1795 /* Send register address */
1796 for (i = 0x10; i > 0; i = i >> 1)
1797 phy_write_1bit(ioaddr,
1798 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1799
1800		/* write turnaround (10) */
1801 phy_write_1bit(ioaddr, PHY_DATA_1);
1802 phy_write_1bit(ioaddr, PHY_DATA_0);
1803
1804 /* Write a word data to PHY controller */
1805 for ( i = 0x8000; i > 0; i >>= 1)
1806 phy_write_1bit(ioaddr,
1807 phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1808 }
1809}
1810
1811
1812/*
1813 * Read a word data from phy register
1814 */
1815
1816static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1817{
1818 int i;
1819 u16 phy_data;
1820 unsigned long ioaddr;
1821
1822 if (chip_id == PCI_DM9132_ID) {
1823 /* DM9132 Chip */
1824 ioaddr = iobase + 0x80 + offset * 4;
1825 phy_data = inw(ioaddr);
1826 } else {
1827 /* DM9102/DM9102A Chip */
1828 ioaddr = iobase + DCR9;
1829
1830		/* Send 35 synchronization clocks to the PHY controller */
1831 for (i = 0; i < 35; i++)
1832 phy_write_1bit(ioaddr, PHY_DATA_1);
1833
1834 /* Send start command(01) to Phy */
1835 phy_write_1bit(ioaddr, PHY_DATA_0);
1836 phy_write_1bit(ioaddr, PHY_DATA_1);
1837
1838 /* Send read command(10) to Phy */
1839 phy_write_1bit(ioaddr, PHY_DATA_1);
1840 phy_write_1bit(ioaddr, PHY_DATA_0);
1841
1842 /* Send Phy address */
1843 for (i = 0x10; i > 0; i = i >> 1)
1844 phy_write_1bit(ioaddr,
1845 phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1846
1847 /* Send register address */
1848 for (i = 0x10; i > 0; i = i >> 1)
1849 phy_write_1bit(ioaddr,
1850 offset & i ? PHY_DATA_1 : PHY_DATA_0);
1851
1852		/* Skip the turnaround bit */
1853 phy_read_1bit(ioaddr);
1854
1855 /* read 16bit data */
1856 for (phy_data = 0, i = 0; i < 16; i++) {
1857 phy_data <<= 1;
1858 phy_data |= phy_read_1bit(ioaddr);
1859 }
1860 }
1861
1862 return phy_data;
1863}
1864
1865
1866/*
1867 * Write one bit data to Phy Controller
1868 */
1869
1870static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1871{
1872 outl(phy_data, ioaddr); /* MII Clock Low */
1873 udelay(1);
1874 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1875 udelay(1);
1876 outl(phy_data, ioaddr); /* MII Clock Low */
1877 udelay(1);
1878}
1879
1880
1881/*
1882 * Read one bit phy data from PHY controller
1883 */
1884
1885static u16 phy_read_1bit(unsigned long ioaddr)
1886{
1887 u16 phy_data;
1888
1889 outl(0x50000, ioaddr);
1890 udelay(1);
1891 phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
1892 outl(0x40000, ioaddr);
1893 udelay(1);
1894
1895 return phy_data;
1896}
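
Taken together, the four helpers above bit-bang the IEEE 802.3 clause-22 management frame: a long preamble of ones, start bits 01, a 2-bit opcode (01 write, 10 read), 5-bit PHY and register addresses, a turnaround, then 16 data bits. A standalone sketch (illustrative only) assembling the 32 post-preamble bits of a write transaction:

#include <stdint.h>
#include <stdio.h>

/* Compose the 32 bits a clause-22 MDIO write shifts out after the
 * preamble: ST=01, OP=01, PHYAD[4:0], REGAD[4:0], TA=10, DATA[15:0]. */
static uint32_t mdio_write_frame(unsigned phyad, unsigned regad, uint16_t data)
{
	return (0x1u << 30) |		/* start: 01 */
	       (0x1u << 28) |		/* opcode: 01 = write */
	       ((phyad & 0x1f) << 23) |
	       ((regad & 0x1f) << 18) |
	       (0x2u << 16) |		/* turnaround: 10 */
	       data;
}

int main(void)
{
	/* 0x1200 to register 0 restarts auto-negotiation, as the driver
	 * itself does in dmfe_set_phyxcer(). */
	uint32_t f = mdio_write_frame(1, 0, 0x1200);

	for (int i = 31; i >= 0; i--)
		putchar('0' + ((f >> i) & 1));
	putchar('\n');
	return 0;
}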
1897
1898
1899/*
1900 * Parse SROM and set media mode
1901 */
1902
1903static void dmfe_parse_srom(struct dmfe_board_info * db)
1904{
1905 char * srom = db->srom;
1906 int dmfe_mode, tmp_reg;
1907
1908 DMFE_DBUG(0, "dmfe_parse_srom() ", 0);
1909
1910 /* Init CR15 */
1911 db->cr15_data = CR15_DEFAULT;
1912
1913 /* Check SROM Version */
1914 if ( ( (int) srom[18] & 0xff) == SROM_V41_CODE) {
1915 /* SROM V4.01 */
1916 /* Get NIC support media mode */
1917 db->NIC_capability = le16_to_cpup((__le16 *) (srom + 34));
1918 db->PHY_reg4 = 0;
1919 for (tmp_reg = 1; tmp_reg < 0x10; tmp_reg <<= 1) {
1920 switch( db->NIC_capability & tmp_reg ) {
1921 case 0x1: db->PHY_reg4 |= 0x0020; break;
1922 case 0x2: db->PHY_reg4 |= 0x0040; break;
1923 case 0x4: db->PHY_reg4 |= 0x0080; break;
1924 case 0x8: db->PHY_reg4 |= 0x0100; break;
1925 }
1926 }
1927
1928 /* Media Mode Force or not check */
1929 dmfe_mode = (le32_to_cpup((__le32 *) (srom + 34)) &
1930 le32_to_cpup((__le32 *) (srom + 36)));
1931 switch(dmfe_mode) {
1932 case 0x4: dmfe_media_mode = DMFE_100MHF; break; /* 100MHF */
1933 case 0x2: dmfe_media_mode = DMFE_10MFD; break; /* 10MFD */
1934 case 0x8: dmfe_media_mode = DMFE_100MFD; break; /* 100MFD */
1935 case 0x100:
1936 case 0x200: dmfe_media_mode = DMFE_1M_HPNA; break;/* HomePNA */
1937 }
1938
1939 /* Special Function setting */
1940 /* VLAN function */
1941 if ( (SF_mode & 0x1) || (srom[43] & 0x80) )
1942 db->cr15_data |= 0x40;
1943
1944 /* Flow Control */
1945 if ( (SF_mode & 0x2) || (srom[40] & 0x1) )
1946 db->cr15_data |= 0x400;
1947
1948 /* TX pause packet */
1949 if ( (SF_mode & 0x4) || (srom[40] & 0xe) )
1950 db->cr15_data |= 0x9800;
1951 }
1952
1953 /* Parse HPNA parameter */
1954 db->HPNA_command = 1;
1955
1956 /* Accept remote command or not */
1957 if (HPNA_rx_cmd == 0)
1958 db->HPNA_command |= 0x8000;
1959
1960 /* Issue remote command & operation mode */
1961 if (HPNA_tx_cmd == 1)
1962 switch(HPNA_mode) { /* Issue Remote Command */
1963 case 0: db->HPNA_command |= 0x0904; break;
1964 case 1: db->HPNA_command |= 0x0a00; break;
1965 case 2: db->HPNA_command |= 0x0506; break;
1966 case 3: db->HPNA_command |= 0x0602; break;
1967 }
1968 else
1969 switch(HPNA_mode) { /* Don't Issue */
1970 case 0: db->HPNA_command |= 0x0004; break;
1971 case 1: db->HPNA_command |= 0x0000; break;
1972 case 2: db->HPNA_command |= 0x0006; break;
1973 case 3: db->HPNA_command |= 0x0002; break;
1974 }
1975
1976 /* Check DM9801 or DM9802 present or not */
1977 db->HPNA_present = 0;
1978 update_cr6(db->cr6_data|0x40000, db->ioaddr);
1979 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1980 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1981 /* DM9801 or DM9802 present */
1982 db->HPNA_timer = 8;
1983 if ( phy_read(db->ioaddr, db->phy_addr, 31, db->chip_id) == 0x4404) {
1984 /* DM9801 HomeRun */
1985 db->HPNA_present = 1;
1986 dmfe_program_DM9801(db, tmp_reg);
1987 } else {
1988 /* DM9802 LongRun */
1989 db->HPNA_present = 2;
1990 dmfe_program_DM9802(db);
1991 }
1992 }
1993
1994}
1995
1996
1997/*
1998 * Init HomeRun DM9801
1999 */
2000
2001static void dmfe_program_DM9801(struct dmfe_board_info * db, int HPNA_rev)
2002{
2003 uint reg17, reg25;
2004
2005 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9801_NOISE_FLOOR;
2006 switch(HPNA_rev) {
2007 case 0xb900: /* DM9801 E3 */
2008 db->HPNA_command |= 0x1000;
2009 reg25 = phy_read(db->ioaddr, db->phy_addr, 24, db->chip_id);
2010 reg25 = ( (reg25 + HPNA_NoiseFloor) & 0xff) | 0xf000;
2011 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2012 break;
2013 case 0xb901: /* DM9801 E4 */
2014 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2015 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor;
2016 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2017 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor + 3;
2018 break;
2019 case 0xb902: /* DM9801 E5 */
2020 case 0xb903: /* DM9801 E6 */
2021 default:
2022 db->HPNA_command |= 0x1000;
2023 reg25 = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2024 reg25 = (reg25 & 0xff00) + HPNA_NoiseFloor - 5;
2025 reg17 = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id);
2026 reg17 = (reg17 & 0xfff0) + HPNA_NoiseFloor;
2027 break;
2028 }
2029 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2030 phy_write(db->ioaddr, db->phy_addr, 17, reg17, db->chip_id);
2031 phy_write(db->ioaddr, db->phy_addr, 25, reg25, db->chip_id);
2032}
2033
2034
2035/*
2036 * Init HomeRun DM9802
2037 */
2038
2039static void dmfe_program_DM9802(struct dmfe_board_info * db)
2040{
2041 uint phy_reg;
2042
2043 if ( !HPNA_NoiseFloor ) HPNA_NoiseFloor = DM9802_NOISE_FLOOR;
2044 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command, db->chip_id);
2045 phy_reg = phy_read(db->ioaddr, db->phy_addr, 25, db->chip_id);
2046 phy_reg = ( phy_reg & 0xff00) + HPNA_NoiseFloor;
2047 phy_write(db->ioaddr, db->phy_addr, 25, phy_reg, db->chip_id);
2048}
2049
2050
2051/*
2052 * Check remote HPNA power and speed status. If not correct,
2053 * issue command again.
2054*/
2055
2056static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
2057{
2058 uint phy_reg;
2059
2060	/* Get remote device status */
2061 phy_reg = phy_read(db->ioaddr, db->phy_addr, 17, db->chip_id) & 0x60;
2062 switch(phy_reg) {
2063 case 0x00: phy_reg = 0x0a00;break; /* LP/LS */
2064 case 0x20: phy_reg = 0x0900;break; /* LP/HS */
2065 case 0x40: phy_reg = 0x0600;break; /* HP/LS */
2066 case 0x60: phy_reg = 0x0500;break; /* HP/HS */
2067 }
2068
2069	/* Check whether the remote device status matches our setting */
2070 if ( phy_reg != (db->HPNA_command & 0x0f00) ) {
2071 phy_write(db->ioaddr, db->phy_addr, 16, db->HPNA_command,
2072 db->chip_id);
2073		db->HPNA_timer = 8;
2074 } else
2075		db->HPNA_timer = 600;	/* Matched; check again every 10 minutes */
2076}
2077
2078
2079
2080static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
2081 { 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
2082 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
2083 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
2084 { 0x1282, 0x9009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9009_ID },
2085 { 0, }
2086};
2087MODULE_DEVICE_TABLE(pci, dmfe_pci_tbl);
2088
2089
2090#ifdef CONFIG_PM
2091static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2092{
2093 struct net_device *dev = pci_get_drvdata(pci_dev);
2094 struct dmfe_board_info *db = netdev_priv(dev);
2095 u32 tmp;
2096
2097 /* Disable upper layer interface */
2098 netif_device_detach(dev);
2099
2100 /* Disable Tx/Rx */
2101 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2102 update_cr6(db->cr6_data, dev->base_addr);
2103
2104 /* Disable Interrupt */
2105 outl(0, dev->base_addr + DCR7);
2106 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5);
2107
2108	/* Free RX buffers */
2109 dmfe_free_rxbuffer(db);
2110
2111 /* Enable WOL */
2112 pci_read_config_dword(pci_dev, 0x40, &tmp);
2113 tmp &= ~(DMFE_WOL_LINKCHANGE|DMFE_WOL_MAGICPACKET);
2114
2115 if (db->wol_mode & WAKE_PHY)
2116 tmp |= DMFE_WOL_LINKCHANGE;
2117 if (db->wol_mode & WAKE_MAGIC)
2118 tmp |= DMFE_WOL_MAGICPACKET;
2119
2120 pci_write_config_dword(pci_dev, 0x40, tmp);
2121
2122 pci_enable_wake(pci_dev, PCI_D3hot, 1);
2123 pci_enable_wake(pci_dev, PCI_D3cold, 1);
2124
2125 /* Power down device*/
2126 pci_save_state(pci_dev);
2127 pci_set_power_state(pci_dev, pci_choose_state (pci_dev, state));
2128
2129 return 0;
2130}
2131
2132static int dmfe_resume(struct pci_dev *pci_dev)
2133{
2134 struct net_device *dev = pci_get_drvdata(pci_dev);
2135 u32 tmp;
2136
2137 pci_set_power_state(pci_dev, PCI_D0);
2138 pci_restore_state(pci_dev);
2139
2140 /* Re-initialize DM910X board */
2141 dmfe_init_dm910x(dev);
2142
2143 /* Disable WOL */
2144 pci_read_config_dword(pci_dev, 0x40, &tmp);
2145
2146 tmp &= ~(DMFE_WOL_LINKCHANGE | DMFE_WOL_MAGICPACKET);
2147 pci_write_config_dword(pci_dev, 0x40, tmp);
2148
2149 pci_enable_wake(pci_dev, PCI_D3hot, 0);
2150 pci_enable_wake(pci_dev, PCI_D3cold, 0);
2151
2152 /* Restart upper layer interface */
2153 netif_device_attach(dev);
2154
2155 return 0;
2156}
2157#else
2158#define dmfe_suspend NULL
2159#define dmfe_resume NULL
2160#endif
2161
2162static struct pci_driver dmfe_driver = {
2163 .name = "dmfe",
2164 .id_table = dmfe_pci_tbl,
2165 .probe = dmfe_init_one,
2166 .remove = __devexit_p(dmfe_remove_one),
2167 .suspend = dmfe_suspend,
2168 .resume = dmfe_resume
2169};
2170
2171MODULE_AUTHOR("Sten Wang, sten_wang@davicom.com.tw");
2172MODULE_DESCRIPTION("Davicom DM910X fast ethernet driver");
2173MODULE_LICENSE("GPL");
2174MODULE_VERSION(DRV_VERSION);
2175
2176module_param(debug, int, 0);
2177module_param(mode, byte, 0);
2178module_param(cr6set, int, 0);
2179module_param(chkmode, byte, 0);
2180module_param(HPNA_mode, byte, 0);
2181module_param(HPNA_rx_cmd, byte, 0);
2182module_param(HPNA_tx_cmd, byte, 0);
2183module_param(HPNA_NoiseFloor, byte, 0);
2184module_param(SF_mode, byte, 0);
2185MODULE_PARM_DESC(debug, "Davicom DM9xxx enable debugging (0-1)");
2186MODULE_PARM_DESC(mode, "Davicom DM9xxx: "
2187 "Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
2188
2189MODULE_PARM_DESC(SF_mode, "Davicom DM9xxx special function "
2190 "(bit 0: VLAN, bit 1 Flow Control, bit 2: TX pause packet)");
2191
2192/* Description:
2193 * When the user loads the module with insmod, the system invokes
2194 * init_module() to initialize and register the driver.
2195 */
2196
2197static int __init dmfe_init_module(void)
2198{
2199 int rc;
2200
2201 pr_info("%s\n", version);
2202 printed_version = 1;
2203
2204 DMFE_DBUG(0, "init_module() ", debug);
2205
2206 if (debug)
2207 dmfe_debug = debug; /* set debug flag */
2208 if (cr6set)
2209 dmfe_cr6_user_set = cr6set;
2210
2211 switch(mode) {
2212 case DMFE_10MHF:
2213 case DMFE_100MHF:
2214 case DMFE_10MFD:
2215 case DMFE_100MFD:
2216 case DMFE_1M_HPNA:
2217 dmfe_media_mode = mode;
2218 break;
2219 default:dmfe_media_mode = DMFE_AUTO;
2220 break;
2221 }
2222
2223 if (HPNA_mode > 4)
2224 HPNA_mode = 0; /* Default: LP/HS */
2225 if (HPNA_rx_cmd > 1)
2226 HPNA_rx_cmd = 0; /* Default: Ignored remote cmd */
2227 if (HPNA_tx_cmd > 1)
2228 HPNA_tx_cmd = 0; /* Default: Don't issue remote cmd */
2229 if (HPNA_NoiseFloor > 15)
2230 HPNA_NoiseFloor = 0;
2231
2232 rc = pci_register_driver(&dmfe_driver);
2233 if (rc < 0)
2234 return rc;
2235
2236 return 0;
2237}
2238
2239
2240/*
2241 * Description:
2242 * When the user removes the module with rmmod, the system invokes
2243 * cleanup_module() to unregister all registered services.
2244 */
2245
2246static void __exit dmfe_cleanup_module(void)
2247{
2248 DMFE_DBUG(0, "dmfe_clean_module() ", debug);
2249 pci_unregister_driver(&dmfe_driver);
2250}
2251
2252module_init(dmfe_init_module);
2253module_exit(dmfe_cleanup_module);
diff --git a/drivers/net/ethernet/dec/tulip/eeprom.c b/drivers/net/ethernet/dec/tulip/eeprom.c
new file mode 100644
index 000000000000..14d5b611783d
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/eeprom.c
@@ -0,0 +1,383 @@
1/*
2 drivers/net/tulip/eeprom.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bug reports to http://bugzilla.kernel.org/.
11*/
12
13#include <linux/pci.h>
14#include <linux/slab.h>
15#include "tulip.h"
16#include <linux/init.h>
17#include <asm/unaligned.h>
18
19
20
21/* Serial EEPROM section. */
22/* The main routine to parse the very complicated SROM structure.
23 Search www.digital.com for "21X4 SROM" to get details.
24 This code is very complex, and will require changes to support
25 additional cards, so I'll be verbose about what is going on.
26 */
27
28/* Known cards that have old-style EEPROMs. */
29static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
30 {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
31 0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
32 {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
33 0x0000, 0x009E, /* 10baseT */
34 0x0004, 0x009E, /* 10baseT-FD */
35 0x0903, 0x006D, /* 100baseTx */
36 0x0905, 0x006D, /* 100baseTx-FD */ }},
37 {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
38 0x0107, 0x8021, /* 100baseFx */
39 0x0108, 0x8021, /* 100baseFx-FD */
40 0x0100, 0x009E, /* 10baseT */
41 0x0104, 0x009E, /* 10baseT-FD */
42 0x0103, 0x006D, /* 100baseTx */
43 0x0105, 0x006D, /* 100baseTx-FD */ }},
44 {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
45 0x1001, 0x009E, /* 10base2, CSR12 0x10*/
46 0x0000, 0x009E, /* 10baseT */
47 0x0004, 0x009E, /* 10baseT-FD */
48 0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
49 0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
50 {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
51 0x1B01, 0x0000, /* 10base2, CSR12 0x1B */
52 0x0B00, 0x009E, /* 10baseT, CSR12 0x0B */
53 0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
54 0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
55 0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
56 }},
57 {"NetWinder", 0x00, 0x10, 0x57,
58 /* Default media = MII
59 * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
60 */
61 { 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
62 },
63 {"Cobalt Microserver", 0, 0x10, 0xE0, {0x1e00, /* 0 == controller #, 1e == offset */
64 0x0000, /* 0 == high offset, 0 == gap */
65 0x0800, /* Default Autoselect */
66 0x8001, /* 1 leaf, extended type, bogus len */
67 0x0003, /* Type 3 (MII), PHY #0 */
68 0x0400, /* 0 init instr, 4 reset instr */
69 0x0801, /* Set control mode, GP0 output */
70 0x0000, /* Drive GP0 Low (RST is active low) */
71 0x0800, /* control mode, GP0 input (undriven) */
72 0x0000, /* clear control mode */
73 0x7800, /* 100TX FDX + HDX, 10bT FDX + HDX */
74 0x01e0, /* Advertise all above */
75 0x5000, /* FDX all above */
76 0x1800, /* Set fast TTM in 100bt modes */
77 0x0000, /* PHY cannot be unplugged */
78 }},
79 {NULL}};
80
81
82static const char *block_name[] __devinitdata = {
83 "21140 non-MII",
84 "21140 MII PHY",
85 "21142 Serial PHY",
86 "21142 MII PHY",
87 "21143 SYM PHY",
88 "21143 reset method"
89};
90
91
92/**
93 * tulip_build_fake_mediatable - Build a fake mediatable entry.
94 * @tp: Ptr to the tulip private data.
95 *
96 * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
97 * srom and cannot be handled under the fixup routine. These cards
98 * still need a valid mediatable entry for correct csr12 setup and
99 * mii handling.
100 *
101 * Since this is currently a parisc-linux specific function, the
102 * #ifdef __hppa__ should completely optimize this function away for
103 * non-parisc hardware.
104 */
105static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
106{
107#ifdef CONFIG_GSC
108 if (tp->flags & NEEDS_FAKE_MEDIA_TABLE) {
109 static unsigned char leafdata[] =
110 { 0x01, /* phy number */
111 0x02, /* gpr setup sequence length */
112 0x02, 0x00, /* gpr setup sequence */
113 0x02, /* phy reset sequence length */
114 0x01, 0x00, /* phy reset sequence */
115 0x00, 0x78, /* media capabilities */
116 0x00, 0xe0, /* nway advertisement */
117 0x00, 0x05, /* fdx bit map */
118 0x00, 0x06 /* ttm bit map */
119 };
120
121 tp->mtable = kmalloc(sizeof(struct mediatable) +
122 sizeof(struct medialeaf), GFP_KERNEL);
123
124 if (tp->mtable == NULL)
125 return; /* Horrible, impossible failure. */
126
127 tp->mtable->defaultmedia = 0x800;
128 tp->mtable->leafcount = 1;
129 tp->mtable->csr12dir = 0x3f; /* inputs on bit7 for hsc-pci, bit6 for pci-fx */
130 tp->mtable->has_nonmii = 0;
131 tp->mtable->has_reset = 0;
132 tp->mtable->has_mii = 1;
133 tp->mtable->csr15dir = tp->mtable->csr15val = 0;
134 tp->mtable->mleaf[0].type = 1;
135 tp->mtable->mleaf[0].media = 11;
136 tp->mtable->mleaf[0].leafdata = &leafdata[0];
137 tp->flags |= HAS_PHY_IRQ;
138 tp->csr12_shadow = -1;
139 }
140#endif
141}
142
143void __devinit tulip_parse_eeprom(struct net_device *dev)
144{
145 /*
146 dev is not registered at this point, so logging messages can't
147 use dev_<level> or netdev_<level> but dev->name is good via a
148 hack in the caller
149 */
150
151 /* The last media info list parsed, for multiport boards. */
152 static struct mediatable *last_mediatable;
153 static unsigned char *last_ee_data;
154 static int controller_index;
155 struct tulip_private *tp = netdev_priv(dev);
156 unsigned char *ee_data = tp->eeprom;
157 int i;
158
159 tp->mtable = NULL;
160 /* Detect an old-style (SA only) EEPROM layout:
161 memcmp(eedata, eedata+16, 8). */
162 for (i = 0; i < 8; i ++)
163 if (ee_data[i] != ee_data[16+i])
164 break;
165 if (i >= 8) {
166 if (ee_data[0] == 0xff) {
167 if (last_mediatable) {
168 controller_index++;
169 pr_info("%s: Controller %d of multiport board\n",
170 dev->name, controller_index);
171 tp->mtable = last_mediatable;
172 ee_data = last_ee_data;
173 goto subsequent_board;
174 } else
175 pr_info("%s: Missing EEPROM, this interface may not work correctly!\n",
176 dev->name);
177 return;
178 }
179 /* Do a fix-up based on the vendor half of the station address prefix. */
180 for (i = 0; eeprom_fixups[i].name; i++) {
181 if (dev->dev_addr[0] == eeprom_fixups[i].addr0 &&
182 dev->dev_addr[1] == eeprom_fixups[i].addr1 &&
183 dev->dev_addr[2] == eeprom_fixups[i].addr2) {
184 if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
185 i++; /* An Accton EN1207, not an outlaw Maxtech. */
186 memcpy(ee_data + 26, eeprom_fixups[i].newtable,
187 sizeof(eeprom_fixups[i].newtable));
188 pr_info("%s: Old format EEPROM on '%s' board. Using substitute media control info\n",
189 dev->name, eeprom_fixups[i].name);
190 break;
191 }
192 }
193 if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
194 pr_info("%s: Old style EEPROM with no media selection information\n",
195 dev->name);
196 return;
197 }
198 }
199
200 controller_index = 0;
201 if (ee_data[19] > 1) { /* Multiport board. */
202 last_ee_data = ee_data;
203 }
204subsequent_board:
205
206 if (ee_data[27] == 0) { /* No valid media table. */
207 tulip_build_fake_mediatable(tp);
208 } else {
209 unsigned char *p = (void *)ee_data + ee_data[27];
210 unsigned char csr12dir = 0;
211 int count, new_advertise = 0;
212 struct mediatable *mtable;
213 u16 media = get_u16(p);
214
215 p += 2;
216 if (tp->flags & CSR12_IN_SROM)
217 csr12dir = *p++;
218 count = *p++;
219
220 /* there is no phy information, don't even try to build mtable */
221 if (count == 0) {
222 if (tulip_debug > 0)
223 pr_warn("%s: no phy info, aborting mtable build\n",
224 dev->name);
225 return;
226 }
227
228 mtable = kmalloc(sizeof(struct mediatable) +
229 count * sizeof(struct medialeaf),
230 GFP_KERNEL);
231 if (mtable == NULL)
232 return; /* Horrible, impossible failure. */
233 last_mediatable = tp->mtable = mtable;
234 mtable->defaultmedia = media;
235 mtable->leafcount = count;
236 mtable->csr12dir = csr12dir;
237 mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
238 mtable->csr15dir = mtable->csr15val = 0;
239
240 pr_info("%s: EEPROM default media type %s\n",
241 dev->name,
242 media & 0x0800 ? "Autosense"
243 : medianame[media & MEDIA_MASK]);
244 for (i = 0; i < count; i++) {
245 struct medialeaf *leaf = &mtable->mleaf[i];
246
247 if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
248 leaf->type = 0;
249 leaf->media = p[0] & 0x3f;
250 leaf->leafdata = p;
251 if ((p[2] & 0x61) == 0x01) /* Bogus, but Znyx boards do it. */
252 mtable->has_mii = 1;
253 p += 4;
254 } else {
255 leaf->type = p[1];
256 if (p[1] == 0x05) {
257 mtable->has_reset = i;
258 leaf->media = p[2] & 0x0f;
259 } else if (tp->chip_id == DM910X && p[1] == 0x80) {
260 /* Hack to ignore Davicom delay period block */
261 mtable->leafcount--;
262 count--;
263 i--;
264 leaf->leafdata = p + 2;
265 p += (p[0] & 0x3f) + 1;
266 continue;
267 } else if (p[1] & 1) {
268 int gpr_len, reset_len;
269
270 mtable->has_mii = 1;
271 leaf->media = 11;
272 gpr_len=p[3]*2;
273 reset_len=p[4+gpr_len]*2;
274 new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
275 } else {
276 mtable->has_nonmii = 1;
277 leaf->media = p[2] & MEDIA_MASK;
278 /* Davicom's media number for 100BaseTX is strange */
279 if (tp->chip_id == DM910X && leaf->media == 1)
280 leaf->media = 3;
281 switch (leaf->media) {
282 case 0: new_advertise |= 0x0020; break;
283 case 4: new_advertise |= 0x0040; break;
284 case 3: new_advertise |= 0x0080; break;
285 case 5: new_advertise |= 0x0100; break;
286 case 6: new_advertise |= 0x0200; break;
287 }
288 if (p[1] == 2 && leaf->media == 0) {
289 if (p[2] & 0x40) {
290 u32 base15 = get_unaligned((u16*)&p[7]);
291 mtable->csr15dir =
292 (get_unaligned((u16*)&p[9])<<16) + base15;
293 mtable->csr15val =
294 (get_unaligned((u16*)&p[11])<<16) + base15;
295 } else {
296 mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
297 mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
298 }
299 }
300 }
301 leaf->leafdata = p + 2;
302 p += (p[0] & 0x3f) + 1;
303 }
304 if (tulip_debug > 1 && leaf->media == 11) {
305 unsigned char *bp = leaf->leafdata;
306 pr_info("%s: MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
307 dev->name,
308 bp[0], bp[1], bp[2 + bp[1]*2],
309 bp[5 + bp[2 + bp[1]*2]*2],
310 bp[4 + bp[2 + bp[1]*2]*2]);
311 }
312 pr_info("%s: Index #%d - Media %s (#%d) described by a %s (%d) block\n",
313 dev->name,
314 i, medianame[leaf->media & 15], leaf->media,
315 leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
316 leaf->type);
317 }
318 if (new_advertise)
319 tp->sym_advertise = new_advertise;
320 }
321}
322/* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
323
324/* EEPROM_Ctrl bits. */
325#define EE_SHIFT_CLK 0x02 /* EEPROM shift clock. */
326#define EE_CS 0x01 /* EEPROM chip select. */
327#define EE_DATA_WRITE 0x04 /* Data from the Tulip to EEPROM. */
328#define EE_WRITE_0 0x01
329#define EE_WRITE_1 0x05
330#define EE_DATA_READ 0x08 /* Data from the EEPROM chip. */
331#define EE_ENB (0x4800 | EE_CS)
332
333/* Delay between EEPROM clock transitions.
334 Even at 33MHz current PCI implementations don't overrun the EEPROM clock.
335 We add a bus turn-around to ensure that this remains true. */
336#define eeprom_delay() ioread32(ee_addr)
337
338/* The EEPROM commands include the always-set leading bit. */
339#define EE_READ_CMD (6)
340
341/* Note: this routine returns extra data bits for size detection. */
342int __devinit tulip_read_eeprom(struct net_device *dev, int location, int addr_len)
343{
344 int i;
345 unsigned retval = 0;
346 struct tulip_private *tp = netdev_priv(dev);
347 void __iomem *ee_addr = tp->base_addr + CSR9;
348 int read_cmd = location | (EE_READ_CMD << addr_len);
349
350 /* If location is past the end of what we can address, don't
351 * read some other location (ie truncate). Just return zero.
352 */
353 if (location > (1 << addr_len) - 1)
354 return 0;
355
356 iowrite32(EE_ENB & ~EE_CS, ee_addr);
357 iowrite32(EE_ENB, ee_addr);
358
359 /* Shift the read command bits out. */
360 for (i = 4 + addr_len; i >= 0; i--) {
361 short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
362 iowrite32(EE_ENB | dataval, ee_addr);
363 eeprom_delay();
364 iowrite32(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
365 eeprom_delay();
366 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
367 }
368 iowrite32(EE_ENB, ee_addr);
369 eeprom_delay();
370
371 for (i = 16; i > 0; i--) {
372 iowrite32(EE_ENB | EE_SHIFT_CLK, ee_addr);
373 eeprom_delay();
374 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DATA_READ) ? 1 : 0);
375 iowrite32(EE_ENB, ee_addr);
376 eeprom_delay();
377 }
378
379 /* Terminate the EEPROM access. */
380 iowrite32(EE_ENB & ~EE_CS, ee_addr);
381 return (tp->flags & HAS_SWAPPED_SEEPROM) ? swab16(retval) : retval;
382}
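
The command framing above follows the 93CXX serial-EEPROM convention: a start bit plus the read opcode (EE_READ_CMD = 6, binary 110) is shifted out MSB-first ahead of the address, which is why the loop counts down from 4 + addr_len. A small standalone sketch that prints the bit stream for a hypothetical 6-bit-address part; the two leading zeros are harmless padding clocked out before the start bit:

#include <stdio.h>

#define EE_READ_CMD 6	/* start bit + read opcode: binary 110 */

int main(void)
{
	int addr_len = 6;	/* 93C46-class part */
	int location = 0x12;	/* word address to read */
	int read_cmd = location | (EE_READ_CMD << addr_len);

	/* The driver shifts bits (4 + addr_len) .. 0 out MSB-first. */
	for (int i = 4 + addr_len; i >= 0; i--)
		putchar('0' + ((read_cmd >> i) & 1));
	putchar('\n');		/* prints 00110010010 */
	return 0;
}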
383
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
new file mode 100644
index 000000000000..4fb8c8c0a420
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -0,0 +1,808 @@
1/*
2 drivers/net/tulip/interrupt.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bugs to http://bugzilla.kernel.org/ .
11*/
12
13#include <linux/pci.h>
14#include "tulip.h"
15#include <linux/etherdevice.h>
16
17int tulip_rx_copybreak;
18unsigned int tulip_max_interrupt_work;
19
20#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
21#define MIT_SIZE 15
22#define MIT_TABLE 15 /* We use 0 or max */
23
24static unsigned int mit_table[MIT_SIZE+1] =
25{
26	/* CSR11 21143 hardware Mitigation Control Interrupt
27	   We use only RX mitigation; other techniques are used
28	   for TX interrupt mitigation.
29
30 31 Cycle Size (timer control)
31 30:27 TX timer in 16 * Cycle size
32 26:24 TX No pkts before Int.
33 23:20 RX timer in Cycle size
34 19:17 RX No pkts before Int.
35	   16	Continuous Mode (CM)
36 */
37
38 0x0, /* IM disabled */
39 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
40 0x80150000,
41 0x80270000,
42 0x80370000,
43 0x80490000,
44 0x80590000,
45 0x80690000,
46 0x807B0000,
47 0x808B0000,
48 0x809D0000,
49 0x80AD0000,
50 0x80BD0000,
51 0x80CF0000,
52 0x80DF0000,
53// 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
54 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
55};
56#endif
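
Each mit_table entry packs the CSR11 fields listed in the mit_table comment above. A minimal sketch that rebuilds an entry from its fields; the assert reproduces the table's "RX time = 1, RX pkts = 2, CM = 1" annotation for 0x80150000:

#include <assert.h>
#include <stdint.h>

/* Pack the 21143 CSR11 interrupt-mitigation fields:
 * bit 31 cycle size, 30:27 TX timer, 26:24 TX pkts,
 * 23:20 RX timer, 19:17 RX pkts, 16 continuous mode. */
static uint32_t csr11_pack(unsigned cs, unsigned tx_timer, unsigned tx_pkts,
			   unsigned rx_timer, unsigned rx_pkts, unsigned cm)
{
	return ((cs & 1u) << 31) | ((tx_timer & 0xfu) << 27) |
	       ((tx_pkts & 7u) << 24) | ((rx_timer & 0xfu) << 20) |
	       ((rx_pkts & 7u) << 17) | ((cm & 1u) << 16);
}

int main(void)
{
	/* Second table entry: RX time = 1, RX pkts = 2, CM = 1. */
	assert(csr11_pack(1, 0, 0, 1, 2, 1) == 0x80150000u);
	return 0;
}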
57
58
59int tulip_refill_rx(struct net_device *dev)
60{
61 struct tulip_private *tp = netdev_priv(dev);
62 int entry;
63 int refilled = 0;
64
65 /* Refill the Rx ring buffers. */
66 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
67 entry = tp->dirty_rx % RX_RING_SIZE;
68 if (tp->rx_buffers[entry].skb == NULL) {
69 struct sk_buff *skb;
70 dma_addr_t mapping;
71
72 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
73 if (skb == NULL)
74 break;
75
76 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
77 PCI_DMA_FROMDEVICE);
78 tp->rx_buffers[entry].mapping = mapping;
79
80 skb->dev = dev; /* Mark as being used by this device. */
81 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
82 refilled++;
83 }
84 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
85 }
86 if(tp->chip_id == LC82C168) {
87 if(((ioread32(tp->base_addr + CSR5)>>17)&0x07) == 4) {
88 /* Rx stopped due to out of buffers,
89 * restart it
90 */
91 iowrite32(0x01, tp->base_addr + CSR2);
92 }
93 }
94 return refilled;
95}
96
97#ifdef CONFIG_TULIP_NAPI
98
99void oom_timer(unsigned long data)
100{
101 struct net_device *dev = (struct net_device *)data;
102 struct tulip_private *tp = netdev_priv(dev);
103 napi_schedule(&tp->napi);
104}
105
106int tulip_poll(struct napi_struct *napi, int budget)
107{
108 struct tulip_private *tp = container_of(napi, struct tulip_private, napi);
109 struct net_device *dev = tp->dev;
110 int entry = tp->cur_rx % RX_RING_SIZE;
111 int work_done = 0;
112#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
113 int received = 0;
114#endif
115
116#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
117
118/* that one buffer is needed for mit activation; or might be a
119 bug in the ring buffer code; check later -- JHS*/
120
121	if (budget >= RX_RING_SIZE) budget--;
122#endif
123
124 if (tulip_debug > 4)
125 netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
126 entry, tp->rx_ring[entry].status);
127
128 do {
129 if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
130 netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
131 break;
132 }
133 /* Acknowledge current RX interrupt sources. */
134 iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
135
136
137 /* If we own the next entry, it is a new packet. Send it up. */
138 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
139 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
140 short pkt_len;
141
142 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
143 break;
144
145 if (tulip_debug > 5)
146 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
147 entry, status);
148
149 if (++work_done >= budget)
150 goto not_done;
151
152 /*
153 * Omit the four octet CRC from the length.
154 * (May not be considered valid until we have
155 * checked status for RxLengthOver2047 bits)
156 */
157 pkt_len = ((status >> 16) & 0x7ff) - 4;
158
159 /*
160 * Maximum pkt_len is 1518 (1514 + vlan header)
161 * Anything higher than this is always invalid
162 * regardless of RxLengthOver2047 bits
163 */
164
165 if ((status & (RxLengthOver2047 |
166 RxDescCRCError |
167 RxDescCollisionSeen |
168 RxDescRunt |
169 RxDescDescErr |
170 RxWholePkt)) != RxWholePkt ||
171 pkt_len > 1518) {
172 if ((status & (RxLengthOver2047 |
173 RxWholePkt)) != RxWholePkt) {
174				/* Ignore earlier buffers. */
175 if ((status & 0xffff) != 0x7fff) {
176 if (tulip_debug > 1)
177 dev_warn(&dev->dev,
178 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
179 status);
180 dev->stats.rx_length_errors++;
181 }
182 } else {
183 /* There was a fatal error. */
184 if (tulip_debug > 2)
185 netdev_dbg(dev, "Receive error, Rx status %08x\n",
186 status);
187 dev->stats.rx_errors++; /* end of a packet.*/
188 if (pkt_len > 1518 ||
189 (status & RxDescRunt))
190 dev->stats.rx_length_errors++;
191
192 if (status & 0x0004)
193 dev->stats.rx_frame_errors++;
194 if (status & 0x0002)
195 dev->stats.rx_crc_errors++;
196 if (status & 0x0001)
197 dev->stats.rx_fifo_errors++;
198 }
199 } else {
200 struct sk_buff *skb;
201
202 /* Check if the packet is long enough to accept without copying
203 to a minimally-sized skbuff. */
204 if (pkt_len < tulip_rx_copybreak &&
205 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
206 skb_reserve(skb, 2); /* 16 byte align the IP header */
207 pci_dma_sync_single_for_cpu(tp->pdev,
208 tp->rx_buffers[entry].mapping,
209 pkt_len, PCI_DMA_FROMDEVICE);
210#if ! defined(__alpha__)
211 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
212 pkt_len);
213 skb_put(skb, pkt_len);
214#else
215 memcpy(skb_put(skb, pkt_len),
216 tp->rx_buffers[entry].skb->data,
217 pkt_len);
218#endif
219 pci_dma_sync_single_for_device(tp->pdev,
220 tp->rx_buffers[entry].mapping,
221 pkt_len, PCI_DMA_FROMDEVICE);
222 } else { /* Pass up the skb already on the Rx ring. */
223 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
224 pkt_len);
225
226#ifndef final_version
227 if (tp->rx_buffers[entry].mapping !=
228 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
229 dev_err(&dev->dev,
230 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
231 le32_to_cpu(tp->rx_ring[entry].buffer1),
232 (unsigned long long)tp->rx_buffers[entry].mapping,
233 skb->head, temp);
234 }
235#endif
236
237 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
238 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
239
240 tp->rx_buffers[entry].skb = NULL;
241 tp->rx_buffers[entry].mapping = 0;
242 }
243 skb->protocol = eth_type_trans(skb, dev);
244
245 netif_receive_skb(skb);
246
247 dev->stats.rx_packets++;
248 dev->stats.rx_bytes += pkt_len;
249 }
250#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
251 received++;
252#endif
253
254 entry = (++tp->cur_rx) % RX_RING_SIZE;
255 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
256 tulip_refill_rx(dev);
257
258 }
259
260 /* New ack strategy... irq does not ack Rx any longer
261 hopefully this helps */
262
263 /* Really bad things can happen here... If new packet arrives
264 * and an irq arrives (tx or just due to occasionally unset
265 * mask), it will be acked by irq handler, but new thread
266 * is not scheduled. It is major hole in design.
267 * No idea how to fix this if "playing with fire" will fail
268 * tomorrow (night 011029). If it will not fail, we won
269 * finally: amount of IO did not increase at all. */
270 } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
271
272 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
273
274	/* We use this simplistic scheme for IM. It's proven by
275	   real life installations. We could have IM enabled
276	   continuously, but this would cause unnecessary latency.
277	   Unfortunately we can't use all the NET_RX_* feedback here.
278	   This would turn on IM for devices that are not contributing
279	   to backlog congestion, adding unnecessary latency.
280
281 We monitor the device RX-ring and have:
282
283 HW Interrupt Mitigation either ON or OFF.
284
285	   ON: More than 1 pkt received (per intr.) OR we are dropping
286 OFF: Only 1 pkt received
287
288 Note. We only use min and max (0, 15) settings from mit_table */
289
290
291 if( tp->flags & HAS_INTR_MITIGATION) {
292 if( received > 1 ) {
293 if( ! tp->mit_on ) {
294 tp->mit_on = 1;
295 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
296 }
297 }
298 else {
299 if( tp->mit_on ) {
300 tp->mit_on = 0;
301 iowrite32(0, tp->base_addr + CSR11);
302 }
303 }
304 }
305
306#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
307
308 tulip_refill_rx(dev);
309
310 /* If RX ring is not full we are out of memory. */
311 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
312 goto oom;
313
314 /* Remove us from polling list and enable RX intr. */
315
316 napi_complete(napi);
317 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
318
319 /* The last op happens after poll completion. Which means the following:
320 * 1. it can race with disabling irqs in irq handler
321	 * 2. it can race with disabling/enabling irqs in other poll threads
322	 * 3. if an irq is raised after the loop begins, it will be immediately
323 * triggered here.
324 *
325 * Summarizing: the logic results in some redundant irqs both
326 * due to races in masking and due to too late acking of already
327 * processed irqs. But it must not result in losing events.
328 */
329
330 return work_done;
331
332 not_done:
333 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
334 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
335 tulip_refill_rx(dev);
336
337 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
338 goto oom;
339
340 return work_done;
341
342 oom: /* Executed with RX ints disabled */
343
344 /* Start timer, stop polling, but do not enable rx interrupts. */
345 mod_timer(&tp->oom_timer, jiffies+1);
346
347 /* Think: timer_pending() was an explicit signature of bug.
348 * Timer can be pending now but fired and completed
349 * before we did napi_complete(). See? We would lose it. */
350
351 /* remove ourselves from the polling list */
352 napi_complete(napi);
353
354 return work_done;
355}
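
The copybreak test inside the receive loop above trades one memcpy for buffer recycling: frames shorter than tulip_rx_copybreak are copied into a freshly allocated, right-sized skb (reserving 2 bytes so the IP header lands 16-byte aligned) while the full-size ring buffer stays mapped for reuse. A schematic userspace sketch of the decision, with malloc standing in for skb allocation:

#include <stdlib.h>
#include <string.h>

/* Schematic copybreak: small frames are copied so the big ring buffer
 * can be reused in place; large frames hand the ring buffer itself up
 * the stack, and the ring slot must be refilled afterwards. */
static void *rx_deliver(unsigned char *ring_buf, size_t pkt_len,
			size_t copybreak, int *ring_buf_consumed)
{
	if (pkt_len < copybreak) {
		void *copy = malloc(pkt_len);

		if (copy) {
			memcpy(copy, ring_buf, pkt_len);
			*ring_buf_consumed = 0;	/* ring buffer reused in place */
			return copy;
		}
	}
	*ring_buf_consumed = 1;	/* buffer passed up; refill the slot */
	return ring_buf;
}

int main(void)
{
	unsigned char buf[1536] = { 0 };
	int consumed;
	void *pkt = rx_deliver(buf, 64, 200, &consumed);

	if (pkt != buf)
		free(pkt);	/* the 64-byte copy was taken */
	return 0;
}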
356
357#else /* CONFIG_TULIP_NAPI */
358
359static int tulip_rx(struct net_device *dev)
360{
361 struct tulip_private *tp = netdev_priv(dev);
362 int entry = tp->cur_rx % RX_RING_SIZE;
363 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
364 int received = 0;
365
366 if (tulip_debug > 4)
367 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
368 entry, tp->rx_ring[entry].status);
369 /* If we own the next entry, it is a new packet. Send it up. */
370 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
371 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
372 short pkt_len;
373
374 if (tulip_debug > 5)
375 netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
376 entry, status);
377 if (--rx_work_limit < 0)
378 break;
379
380 /*
381 Omit the four octet CRC from the length.
382 (May not be considered valid until we have
383 checked status for RxLengthOver2047 bits)
384 */
385 pkt_len = ((status >> 16) & 0x7ff) - 4;
386 /*
387 Maximum pkt_len is 1518 (1514 + vlan header)
388 Anything higher than this is always invalid
389 regardless of RxLengthOver2047 bits
390 */
391
392 if ((status & (RxLengthOver2047 |
393 RxDescCRCError |
394 RxDescCollisionSeen |
395 RxDescRunt |
396 RxDescDescErr |
397 RxWholePkt)) != RxWholePkt ||
398 pkt_len > 1518) {
399 if ((status & (RxLengthOver2047 |
400 RxWholePkt)) != RxWholePkt) {
401				/* Ignore earlier buffers. */
402 if ((status & 0xffff) != 0x7fff) {
403 if (tulip_debug > 1)
404 netdev_warn(dev,
405 "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
406 status);
407 dev->stats.rx_length_errors++;
408 }
409 } else {
410 /* There was a fatal error. */
411 if (tulip_debug > 2)
412 netdev_dbg(dev, "Receive error, Rx status %08x\n",
413 status);
414 dev->stats.rx_errors++; /* end of a packet.*/
415 if (pkt_len > 1518 ||
416 (status & RxDescRunt))
417 dev->stats.rx_length_errors++;
418 if (status & 0x0004)
419 dev->stats.rx_frame_errors++;
420 if (status & 0x0002)
421 dev->stats.rx_crc_errors++;
422 if (status & 0x0001)
423 dev->stats.rx_fifo_errors++;
424 }
425 } else {
426 struct sk_buff *skb;
427
428 /* Check if the packet is long enough to accept without copying
429 to a minimally-sized skbuff. */
430 if (pkt_len < tulip_rx_copybreak &&
431 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
432 skb_reserve(skb, 2); /* 16 byte align the IP header */
433 pci_dma_sync_single_for_cpu(tp->pdev,
434 tp->rx_buffers[entry].mapping,
435 pkt_len, PCI_DMA_FROMDEVICE);
436#if ! defined(__alpha__)
437 skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
438 pkt_len);
439 skb_put(skb, pkt_len);
440#else
441 memcpy(skb_put(skb, pkt_len),
442 tp->rx_buffers[entry].skb->data,
443 pkt_len);
444#endif
445 pci_dma_sync_single_for_device(tp->pdev,
446 tp->rx_buffers[entry].mapping,
447 pkt_len, PCI_DMA_FROMDEVICE);
448 } else { /* Pass up the skb already on the Rx ring. */
449 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
450 pkt_len);
451
452#ifndef final_version
453 if (tp->rx_buffers[entry].mapping !=
454 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
455 dev_err(&dev->dev,
456 "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
457 le32_to_cpu(tp->rx_ring[entry].buffer1),
458 (long long)tp->rx_buffers[entry].mapping,
459 skb->head, temp);
460 }
461#endif
462
463 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
464 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
465
466 tp->rx_buffers[entry].skb = NULL;
467 tp->rx_buffers[entry].mapping = 0;
468 }
469 skb->protocol = eth_type_trans(skb, dev);
470
471 netif_rx(skb);
472
473 dev->stats.rx_packets++;
474 dev->stats.rx_bytes += pkt_len;
475 }
476 received++;
477 entry = (++tp->cur_rx) % RX_RING_SIZE;
478 }
479 return received;
480}
481#endif /* CONFIG_TULIP_NAPI */
482
483static inline unsigned int phy_interrupt (struct net_device *dev)
484{
485#ifdef __hppa__
486 struct tulip_private *tp = netdev_priv(dev);
487 int csr12 = ioread32(tp->base_addr + CSR12) & 0xff;
488
489 if (csr12 != tp->csr12_shadow) {
490 /* ack interrupt */
491 iowrite32(csr12 | 0x02, tp->base_addr + CSR12);
492 tp->csr12_shadow = csr12;
493 /* do link change stuff */
494 spin_lock(&tp->lock);
495 tulip_check_duplex(dev);
496 spin_unlock(&tp->lock);
497 /* clear irq ack bit */
498 iowrite32(csr12 & ~0x02, tp->base_addr + CSR12);
499
500 return 1;
501 }
502#endif
503
504 return 0;
505}
506
507/* The interrupt handler does all of the Rx thread work and cleans up
508 after the Tx thread. */
509irqreturn_t tulip_interrupt(int irq, void *dev_instance)
510{
511 struct net_device *dev = (struct net_device *)dev_instance;
512 struct tulip_private *tp = netdev_priv(dev);
513 void __iomem *ioaddr = tp->base_addr;
514 int csr5;
515 int missed;
516 int rx = 0;
517 int tx = 0;
518 int oi = 0;
519 int maxrx = RX_RING_SIZE;
520 int maxtx = TX_RING_SIZE;
521 int maxoi = TX_RING_SIZE;
522#ifdef CONFIG_TULIP_NAPI
523 int rxd = 0;
524#else
525 int entry;
526#endif
527 unsigned int work_count = tulip_max_interrupt_work;
528 unsigned int handled = 0;
529
530 /* Let's see whether the interrupt really is for us */
531 csr5 = ioread32(ioaddr + CSR5);
532
533 if (tp->flags & HAS_PHY_IRQ)
534 handled = phy_interrupt (dev);
535
536 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
537 return IRQ_RETVAL(handled);
538
539 tp->nir++;
540
541 do {
542
543#ifdef CONFIG_TULIP_NAPI
544
545 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
546 rxd++;
547 /* Mask RX intrs and add the device to poll list. */
548 iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
549 napi_schedule(&tp->napi);
550
551 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
552 break;
553 }
554
555 /* Acknowledge the interrupt sources we handle here ASAP
556 the poll function does Rx and RxNoBuf acking */
557
558 iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
559
560#else
561 /* Acknowledge all of the current interrupt sources ASAP. */
562 iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
563
564
565 if (csr5 & (RxIntr | RxNoBuf)) {
566 rx += tulip_rx(dev);
567 tulip_refill_rx(dev);
568 }
569
570#endif /* CONFIG_TULIP_NAPI */
571
572 if (tulip_debug > 4)
573 netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
574 csr5, ioread32(ioaddr + CSR5));
575
576
577 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
578 unsigned int dirty_tx;
579
580 spin_lock(&tp->lock);
581
582 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
583 dirty_tx++) {
584 int entry = dirty_tx % TX_RING_SIZE;
585 int status = le32_to_cpu(tp->tx_ring[entry].status);
586
587 if (status < 0)
588 break; /* It still has not been Txed */
589
590 /* Check for Rx filter setup frames. */
591 if (tp->tx_buffers[entry].skb == NULL) {
592 /* test because dummy frames not mapped */
593 if (tp->tx_buffers[entry].mapping)
594 pci_unmap_single(tp->pdev,
595 tp->tx_buffers[entry].mapping,
596 sizeof(tp->setup_frame),
597 PCI_DMA_TODEVICE);
598 continue;
599 }
600
601 if (status & 0x8000) {
602				/* There was a major error, log it. */
603#ifndef final_version
604 if (tulip_debug > 1)
605 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
606 status);
607#endif
608 dev->stats.tx_errors++;
609 if (status & 0x4104)
610 dev->stats.tx_aborted_errors++;
611 if (status & 0x0C00)
612 dev->stats.tx_carrier_errors++;
613 if (status & 0x0200)
614 dev->stats.tx_window_errors++;
615 if (status & 0x0002)
616 dev->stats.tx_fifo_errors++;
617 if ((status & 0x0080) && tp->full_duplex == 0)
618 dev->stats.tx_heartbeat_errors++;
619 } else {
620 dev->stats.tx_bytes +=
621 tp->tx_buffers[entry].skb->len;
622 dev->stats.collisions += (status >> 3) & 15;
623 dev->stats.tx_packets++;
624 }
625
626 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
627 tp->tx_buffers[entry].skb->len,
628 PCI_DMA_TODEVICE);
629
630 /* Free the original skb. */
631 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
632 tp->tx_buffers[entry].skb = NULL;
633 tp->tx_buffers[entry].mapping = 0;
634 tx++;
635 }
636
637#ifndef final_version
638 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
639 dev_err(&dev->dev,
640 "Out-of-sync dirty pointer, %d vs. %d\n",
641 dirty_tx, tp->cur_tx);
642 dirty_tx += TX_RING_SIZE;
643 }
644#endif
645
646 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
647 netif_wake_queue(dev);
648
649 tp->dirty_tx = dirty_tx;
650 if (csr5 & TxDied) {
651 if (tulip_debug > 2)
652 dev_warn(&dev->dev,
653 "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
654 csr5, ioread32(ioaddr + CSR6),
655 tp->csr6);
656 tulip_restart_rxtx(tp);
657 }
658 spin_unlock(&tp->lock);
659 }
660
661 /* Log errors. */
662 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
663 if (csr5 == 0xffffffff)
664 break;
665 if (csr5 & TxJabber)
666 dev->stats.tx_errors++;
667 if (csr5 & TxFIFOUnderflow) {
668 if ((tp->csr6 & 0xC000) != 0xC000)
669 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
670 else
671 tp->csr6 |= 0x00200000; /* Store-n-forward. */
672 /* Restart the transmit process. */
673 tulip_restart_rxtx(tp);
674 iowrite32(0, ioaddr + CSR1);
675 }
676 if (csr5 & (RxDied | RxNoBuf)) {
677 if (tp->flags & COMET_MAC_ADDR) {
678 iowrite32(tp->mc_filter[0], ioaddr + 0xAC);
679 iowrite32(tp->mc_filter[1], ioaddr + 0xB0);
680 }
681 }
682 if (csr5 & RxDied) { /* Missed a Rx frame. */
683 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
684 dev->stats.rx_errors++;
685 tulip_start_rxtx(tp);
686 }
687 /*
688 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
689 * call is ever done under the spinlock
690 */
691 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
692 if (tp->link_change)
693 (tp->link_change)(dev, csr5);
694 }
695 if (csr5 & SystemError) {
696 int error = (csr5 >> 23) & 7;
697 /* oops, we hit a PCI error. The code produced corresponds
698 * to the reason:
699 * 0 - parity error
700 * 1 - master abort
701 * 2 - target abort
702 * Note that on parity error, we should do a software reset
703 * of the chip to get it back into a sane state (according
704 * to the 21142/3 docs that is).
705 * -- rmk
706 */
707 dev_err(&dev->dev,
708 "(%lu) System Error occurred (%d)\n",
709 tp->nir, error);
710 }
711 /* Clear all error sources, included undocumented ones! */
712 iowrite32(0x0800f7ba, ioaddr + CSR5);
713 oi++;
714 }
715 if (csr5 & TimerInt) {
716
717 if (tulip_debug > 2)
718 dev_err(&dev->dev,
719 "Re-enabling interrupts, %08x\n",
720 csr5);
721 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
722 tp->ttimer = 0;
723 oi++;
724 }
725 if (tx > maxtx || rx > maxrx || oi > maxoi) {
726 if (tulip_debug > 1)
727 dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
728 csr5, tp->nir, tx, rx, oi);
729
730 /* Acknowledge all interrupt sources. */
731 iowrite32(0x8001ffff, ioaddr + CSR5);
732 if (tp->flags & HAS_INTR_MITIGATION) {
733 /* Josip Loncaric at ICASE did extensive experimentation
734 to develop a good interrupt mitigation setting.*/
735 iowrite32(0x8b240000, ioaddr + CSR11);
736 } else if (tp->chip_id == LC82C168) {
737 /* the LC82C168 doesn't have a hw timer.*/
738 iowrite32(0x00, ioaddr + CSR7);
739 mod_timer(&tp->timer, RUN_AT(HZ/50));
740 } else {
741 /* Mask all interrupting sources, set timer to
742 re-enable. */
743 iowrite32(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
744 iowrite32(0x0012, ioaddr + CSR11);
745 }
746 break;
747 }
748
749 work_count--;
750 if (work_count == 0)
751 break;
752
753 csr5 = ioread32(ioaddr + CSR5);
754
755#ifdef CONFIG_TULIP_NAPI
756 if (rxd)
757 csr5 &= ~RxPollInt;
758 } while ((csr5 & (TxNoBuf |
759 TxDied |
760 TxIntr |
761 TimerInt |
762 /* Abnormal intr. */
763 RxDied |
764 TxFIFOUnderflow |
765 TxJabber |
766 TPLnkFail |
767 SystemError )) != 0);
768#else
769 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
770
771 tulip_refill_rx(dev);
772
773 /* check if the card is in suspend mode */
774 entry = tp->dirty_rx % RX_RING_SIZE;
775 if (tp->rx_buffers[entry].skb == NULL) {
776 if (tulip_debug > 1)
777 dev_warn(&dev->dev,
778 "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
779 tp->nir, tp->cur_rx, tp->ttimer, rx);
780 if (tp->chip_id == LC82C168) {
781 iowrite32(0x00, ioaddr + CSR7);
782 mod_timer(&tp->timer, RUN_AT(HZ/50));
783 } else {
784 if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
785 if (tulip_debug > 1)
786 dev_warn(&dev->dev,
787 "in rx suspend mode: (%lu) set timer\n",
788 tp->nir);
789 iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
790 ioaddr + CSR7);
791 iowrite32(TimerInt, ioaddr + CSR5);
792 iowrite32(12, ioaddr + CSR11);
793 tp->ttimer = 1;
794 }
795 }
796 }
797#endif /* CONFIG_TULIP_NAPI */
798
799 if ((missed = ioread32(ioaddr + CSR8) & 0x1ffff)) {
800 dev->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
801 }
802
803 if (tulip_debug > 4)
804 netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
805 ioread32(ioaddr + CSR5));
806
807 return IRQ_HANDLED;
808}
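
A note on the TxFIFOUnderflow branch above: each underflow widens the CSR6 transmit-threshold field (bits 15:14) by one step, and once both bits are set the driver gives up on cut-through and sets bit 21 for store-and-forward. A minimal stand-alone sketch of that policy (plain C for illustration only; csr6 here is an ordinary variable, not the device register):

#include <stdio.h>

/* Mirror of the underflow policy in the interrupt handler: CSR6 bits
 * 15:14 hold the transmit threshold; each underflow bumps them one
 * step, and when both are set (0xC000) bit 21 selects store-and-forward
 * so the FIFO can no longer underrun. */
static unsigned int bump_tx_threshold(unsigned int csr6)
{
	if ((csr6 & 0xC000) != 0xC000)
		csr6 += 0x4000;		/* next threshold step */
	else
		csr6 |= 0x00200000;	/* saturated: store-and-forward */
	return csr6;
}

int main(void)
{
	unsigned int csr6 = 0;
	int i;

	for (i = 0; i < 4; i++) {
		csr6 = bump_tx_threshold(csr6);
		printf("underflow %d -> csr6 %#010x\n", i + 1, csr6);
	}
	return 0;
}

Running it shows the field stepping 0x4000, 0x8000, 0xC000 and then latching store-and-forward, matching the handler's escalation.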
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
new file mode 100644
index 000000000000..beeb17b52ad4
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -0,0 +1,553 @@
1/*
2 drivers/net/tulip/media.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bugs to http://bugzilla.kernel.org/ .
11*/
12
13#include <linux/kernel.h>
14#include <linux/mii.h>
15#include <linux/init.h>
16#include <linux/delay.h>
17#include <linux/pci.h>
18#include "tulip.h"
19
20
21/* The maximum data clock rate is 2.5 MHz. The minimum timing is usually
22 met by back-to-back PCI I/O cycles, but we insert a delay to avoid
23   "overclocking" issues or future 66 MHz PCI. */
24#define mdio_delay() ioread32(mdio_addr)
25
26/* Read and write the MII registers using software-generated serial
27 MDIO protocol. It is just different enough from the EEPROM protocol
28   to not share code. The maximum data clock rate is 2.5 MHz. */
29#define MDIO_SHIFT_CLK 0x10000
30#define MDIO_DATA_WRITE0 0x00000
31#define MDIO_DATA_WRITE1 0x20000
32#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
33#define MDIO_ENB_IN 0x40000
34#define MDIO_DATA_READ 0x80000
35
36static const unsigned char comet_miireg2offset[32] = {
37 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
38 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
39
40
41/* MII transceiver control section.
42 Read and write the MII registers using software-generated serial
43 MDIO protocol.
44 See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management functions")
45 or DP83840A data sheet for more details.
46 */
47
48int tulip_mdio_read(struct net_device *dev, int phy_id, int location)
49{
50 struct tulip_private *tp = netdev_priv(dev);
51 int i;
52 int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
53 int retval = 0;
54 void __iomem *ioaddr = tp->base_addr;
55 void __iomem *mdio_addr = ioaddr + CSR9;
56 unsigned long flags;
57
58 if (location & ~0x1f)
59 return 0xffff;
60
61 if (tp->chip_id == COMET && phy_id == 30) {
62 if (comet_miireg2offset[location])
63 return ioread32(ioaddr + comet_miireg2offset[location]);
64 return 0xffff;
65 }
66
67 spin_lock_irqsave(&tp->mii_lock, flags);
68 if (tp->chip_id == LC82C168) {
69 iowrite32(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
70 ioread32(ioaddr + 0xA0);
71 ioread32(ioaddr + 0xA0);
72 for (i = 1000; i >= 0; --i) {
73 barrier();
74 if ( ! ((retval = ioread32(ioaddr + 0xA0)) & 0x80000000))
75 break;
76 }
77 spin_unlock_irqrestore(&tp->mii_lock, flags);
78 return retval & 0xffff;
79 }
80
81 /* Establish sync by sending at least 32 logic ones. */
82 for (i = 32; i >= 0; i--) {
83 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
84 mdio_delay();
85 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
86 mdio_delay();
87 }
88 /* Shift the read command bits out. */
89 for (i = 15; i >= 0; i--) {
90 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
91
92 iowrite32(MDIO_ENB | dataval, mdio_addr);
93 mdio_delay();
94 iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
95 mdio_delay();
96 }
97 /* Read the two transition, 16 data, and wire-idle bits. */
98 for (i = 19; i > 0; i--) {
99 iowrite32(MDIO_ENB_IN, mdio_addr);
100 mdio_delay();
101 retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
102 iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
103 mdio_delay();
104 }
105
106 spin_unlock_irqrestore(&tp->mii_lock, flags);
107 return (retval>>1) & 0xffff;
108}
109
110void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
111{
112 struct tulip_private *tp = netdev_priv(dev);
113 int i;
114 int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
115 void __iomem *ioaddr = tp->base_addr;
116 void __iomem *mdio_addr = ioaddr + CSR9;
117 unsigned long flags;
118
119 if (location & ~0x1f)
120 return;
121
122 if (tp->chip_id == COMET && phy_id == 30) {
123 if (comet_miireg2offset[location])
124 iowrite32(val, ioaddr + comet_miireg2offset[location]);
125 return;
126 }
127
128 spin_lock_irqsave(&tp->mii_lock, flags);
129 if (tp->chip_id == LC82C168) {
130 iowrite32(cmd, ioaddr + 0xA0);
131 for (i = 1000; i >= 0; --i) {
132 barrier();
133 if ( ! (ioread32(ioaddr + 0xA0) & 0x80000000))
134 break;
135 }
136 spin_unlock_irqrestore(&tp->mii_lock, flags);
137 return;
138 }
139
140 /* Establish sync by sending 32 logic ones. */
141 for (i = 32; i >= 0; i--) {
142 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
143 mdio_delay();
144 iowrite32(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
145 mdio_delay();
146 }
147 /* Shift the command bits out. */
148 for (i = 31; i >= 0; i--) {
149 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
150 iowrite32(MDIO_ENB | dataval, mdio_addr);
151 mdio_delay();
152 iowrite32(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
153 mdio_delay();
154 }
155 /* Clear out extra bits. */
156 for (i = 2; i > 0; i--) {
157 iowrite32(MDIO_ENB_IN, mdio_addr);
158 mdio_delay();
159 iowrite32(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
160 mdio_delay();
161 }
162
163 spin_unlock_irqrestore(&tp->mii_lock, flags);
164}
165
166
167/* Set up the transceiver control registers for the selected media type. */
168void tulip_select_media(struct net_device *dev, int startup)
169{
170 struct tulip_private *tp = netdev_priv(dev);
171 void __iomem *ioaddr = tp->base_addr;
172 struct mediatable *mtable = tp->mtable;
173 u32 new_csr6;
174 int i;
175
176 if (mtable) {
177 struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
178 unsigned char *p = mleaf->leafdata;
179 switch (mleaf->type) {
180 case 0: /* 21140 non-MII xcvr. */
181 if (tulip_debug > 1)
182 netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n",
183 p[1]);
184 dev->if_port = p[0];
185 if (startup)
186 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
187 iowrite32(p[1], ioaddr + CSR12);
188 new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
189 break;
190 case 2: case 4: {
191 u16 setup[5];
192 u32 csr13val, csr14val, csr15dir, csr15val;
193 for (i = 0; i < 5; i++)
194 setup[i] = get_u16(&p[i*2 + 1]);
195
196 dev->if_port = p[0] & MEDIA_MASK;
197 if (tulip_media_cap[dev->if_port] & MediaAlwaysFD)
198 tp->full_duplex = 1;
199
200 if (startup && mtable->has_reset) {
201 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
202 unsigned char *rst = rleaf->leafdata;
203 if (tulip_debug > 1)
204 netdev_dbg(dev, "Resetting the transceiver\n");
205 for (i = 0; i < rst[0]; i++)
206 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
207 }
208 if (tulip_debug > 1)
209 netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n",
210 medianame[dev->if_port],
211 setup[0], setup[1]);
212 if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
213 csr13val = setup[0];
214 csr14val = setup[1];
215 csr15dir = (setup[3]<<16) | setup[2];
216 csr15val = (setup[4]<<16) | setup[2];
217 iowrite32(0, ioaddr + CSR13);
218 iowrite32(csr14val, ioaddr + CSR14);
219 iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
220 iowrite32(csr15val, ioaddr + CSR15); /* Data */
221 iowrite32(csr13val, ioaddr + CSR13);
222 } else {
223 csr13val = 1;
224 csr14val = 0;
225 csr15dir = (setup[0]<<16) | 0x0008;
226 csr15val = (setup[1]<<16) | 0x0008;
227 if (dev->if_port <= 4)
228 csr14val = t21142_csr14[dev->if_port];
229 if (startup) {
230 iowrite32(0, ioaddr + CSR13);
231 iowrite32(csr14val, ioaddr + CSR14);
232 }
233 iowrite32(csr15dir, ioaddr + CSR15); /* Direction */
234 iowrite32(csr15val, ioaddr + CSR15); /* Data */
235 if (startup) iowrite32(csr13val, ioaddr + CSR13);
236 }
237 if (tulip_debug > 1)
238 netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n",
239 csr15dir, csr15val);
240 if (mleaf->type == 4)
241 new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
242 else
243 new_csr6 = 0x82420000;
244 break;
245 }
246 case 1: case 3: {
247 int phy_num = p[0];
248 int init_length = p[1];
249 u16 *misc_info, tmp_info;
250
251 dev->if_port = 11;
252 new_csr6 = 0x020E0000;
253 if (mleaf->type == 3) { /* 21142 */
254 u16 *init_sequence = (u16*)(p+2);
255 u16 *reset_sequence = &((u16*)(p+3))[init_length];
256 int reset_length = p[2 + init_length*2];
257 misc_info = reset_sequence + reset_length;
258 if (startup) {
259 int timeout = 10; /* max 1 ms */
260 for (i = 0; i < reset_length; i++)
261 iowrite32(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
262
263 /* flush posted writes */
264 ioread32(ioaddr + CSR15);
265
266 /* Sect 3.10.3 in DP83840A.pdf (p39) */
267 udelay(500);
268
269 /* Section 4.2 in DP83840A.pdf (p43) */
270 /* and IEEE 802.3 "22.2.4.1.1 Reset" */
271 while (timeout-- &&
272 (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET))
273 udelay(100);
274 }
275 for (i = 0; i < init_length; i++)
276 iowrite32(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
277
278 ioread32(ioaddr + CSR15); /* flush posted writes */
279 } else {
280 u8 *init_sequence = p + 2;
281 u8 *reset_sequence = p + 3 + init_length;
282 int reset_length = p[2 + init_length];
283 misc_info = (u16*)(reset_sequence + reset_length);
284 if (startup) {
285 int timeout = 10; /* max 1 ms */
286 iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
287 for (i = 0; i < reset_length; i++)
288 iowrite32(reset_sequence[i], ioaddr + CSR12);
289
290 /* flush posted writes */
291 ioread32(ioaddr + CSR12);
292
293 /* Sect 3.10.3 in DP83840A.pdf (p39) */
294 udelay(500);
295
296 /* Section 4.2 in DP83840A.pdf (p43) */
297 /* and IEEE 802.3 "22.2.4.1.1 Reset" */
298 while (timeout-- &&
299 (tulip_mdio_read (dev, phy_num, MII_BMCR) & BMCR_RESET))
300 udelay(100);
301 }
302 for (i = 0; i < init_length; i++)
303 iowrite32(init_sequence[i], ioaddr + CSR12);
304
305 ioread32(ioaddr + CSR12); /* flush posted writes */
306 }
307
308 tmp_info = get_u16(&misc_info[1]);
309 if (tmp_info)
310 tp->advertising[phy_num] = tmp_info | 1;
311 if (tmp_info && startup < 2) {
312 if (tp->mii_advertise == 0)
313 tp->mii_advertise = tp->advertising[phy_num];
314 if (tulip_debug > 1)
315 netdev_dbg(dev, " Advertising %04x on MII %d\n",
316 tp->mii_advertise,
317 tp->phys[phy_num]);
318 tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
319 }
320 break;
321 }
322 case 5: case 6: {
323 u16 setup[5];
324
325 new_csr6 = 0; /* FIXME */
326
327 for (i = 0; i < 5; i++)
328 setup[i] = get_u16(&p[i*2 + 1]);
329
330 if (startup && mtable->has_reset) {
331 struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
332 unsigned char *rst = rleaf->leafdata;
333 if (tulip_debug > 1)
334 netdev_dbg(dev, "Resetting the transceiver\n");
335 for (i = 0; i < rst[0]; i++)
336 iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
337 }
338
339 break;
340 }
341 default:
342 netdev_dbg(dev, " Invalid media table selection %d\n",
343 mleaf->type);
344 new_csr6 = 0x020E0000;
345 }
346 if (tulip_debug > 1)
347 netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n",
348 medianame[dev->if_port],
349 ioread32(ioaddr + CSR12) & 0xff);
350 } else if (tp->chip_id == LC82C168) {
351 if (startup && ! tp->medialock)
352 dev->if_port = tp->mii_cnt ? 11 : 0;
353 if (tulip_debug > 1)
354 netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n",
355 ioread32(ioaddr + 0xB8),
356 medianame[dev->if_port]);
357 if (tp->mii_cnt) {
358 new_csr6 = 0x810C0000;
359 iowrite32(0x0001, ioaddr + CSR15);
360 iowrite32(0x0201B07A, ioaddr + 0xB8);
361 } else if (startup) {
362 /* Start with 10mbps to do autonegotiation. */
363 iowrite32(0x32, ioaddr + CSR12);
364 new_csr6 = 0x00420000;
365 iowrite32(0x0001B078, ioaddr + 0xB8);
366 iowrite32(0x0201B078, ioaddr + 0xB8);
367 } else if (dev->if_port == 3 || dev->if_port == 5) {
368 iowrite32(0x33, ioaddr + CSR12);
369 new_csr6 = 0x01860000;
370 /* Trigger autonegotiation. */
371 iowrite32(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
372 } else {
373 iowrite32(0x32, ioaddr + CSR12);
374 new_csr6 = 0x00420000;
375 iowrite32(0x1F078, ioaddr + 0xB8);
376 }
377 } else { /* Unknown chip type with no media table. */
378 if (tp->default_port == 0)
379 dev->if_port = tp->mii_cnt ? 11 : 3;
380 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
381 new_csr6 = 0x020E0000;
382 } else if (tulip_media_cap[dev->if_port] & MediaIsFx) {
383 new_csr6 = 0x02860000;
384 } else
385 new_csr6 = 0x03860000;
386 if (tulip_debug > 1)
387 netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n",
388 medianame[dev->if_port],
389 ioread32(ioaddr + CSR12));
390 }
391
392 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
393
394 mdelay(1);
395}
396
397/*
398 Check the MII negotiated duplex and change the CSR6 setting if
399 required.
400 Return 0 if everything is OK.
401 Return < 0 if the transceiver is missing or has no link beat.
402 */
403int tulip_check_duplex(struct net_device *dev)
404{
405 struct tulip_private *tp = netdev_priv(dev);
406 unsigned int bmsr, lpa, negotiated, new_csr6;
407
408 bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
409 lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
410 if (tulip_debug > 1)
411 dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
412 bmsr, lpa);
413 if (bmsr == 0xffff)
414 return -2;
415 if ((bmsr & BMSR_LSTATUS) == 0) {
416 int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
417 if ((new_bmsr & BMSR_LSTATUS) == 0) {
418 if (tulip_debug > 1)
419 dev_info(&dev->dev,
420 "No link beat on the MII interface, status %04x\n",
421 new_bmsr);
422 return -1;
423 }
424 }
425 negotiated = lpa & tp->advertising[0];
426 tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
427
428 new_csr6 = tp->csr6;
429
430 if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
431 else new_csr6 |= TxThreshold;
432 if (tp->full_duplex) new_csr6 |= FullDuplex;
433 else new_csr6 &= ~FullDuplex;
434
435 if (new_csr6 != tp->csr6) {
436 tp->csr6 = new_csr6;
437 tulip_restart_rxtx(tp);
438
439 if (tulip_debug > 0)
440 dev_info(&dev->dev,
441 "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
442 tp->full_duplex ? "full" : "half",
443 tp->phys[0], lpa);
444 return 1;
445 }
446
447 return 0;
448}
449
450void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
451{
452 struct tulip_private *tp = netdev_priv(dev);
453 int phyn, phy_idx = 0;
454 int mii_reg0;
455 int mii_advert;
456 unsigned int to_advert, new_bmcr, ane_switch;
457
458 /* Find the connected MII xcvrs.
459 Doing this in open() would allow detecting external xcvrs later,
460	   but takes too much time. */
461 for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
462 int phy = phyn & 0x1f;
463 int mii_status = tulip_mdio_read (dev, phy, MII_BMSR);
464 if ((mii_status & 0x8301) == 0x8001 ||
465 ((mii_status & BMSR_100BASE4) == 0 &&
466 (mii_status & 0x7800) != 0)) {
467 /* preserve Becker logic, gain indentation level */
468 } else {
469 continue;
470 }
471
472 mii_reg0 = tulip_mdio_read (dev, phy, MII_BMCR);
473 mii_advert = tulip_mdio_read (dev, phy, MII_ADVERTISE);
474 ane_switch = 0;
475
476 /* if not advertising at all, gen an
477 * advertising value from the capability
478 * bits in BMSR
479 */
480 if ((mii_advert & ADVERTISE_ALL) == 0) {
481 unsigned int tmpadv = tulip_mdio_read (dev, phy, MII_BMSR);
482 mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
483 }
484
485 if (tp->mii_advertise) {
486 tp->advertising[phy_idx] =
487 to_advert = tp->mii_advertise;
488 } else if (tp->advertising[phy_idx]) {
489 to_advert = tp->advertising[phy_idx];
490 } else {
491 tp->advertising[phy_idx] =
492 tp->mii_advertise =
493 to_advert = mii_advert;
494 }
495
496 tp->phys[phy_idx++] = phy;
497
498 pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
499 board_idx, phy, mii_reg0, mii_status, mii_advert);
500
501 /* Fixup for DLink with miswired PHY. */
502 if (mii_advert != to_advert) {
503 pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
504 board_idx, to_advert, phy, mii_advert);
505 tulip_mdio_write (dev, phy, 4, to_advert);
506 }
507
508 /* Enable autonegotiation: some boards default to off. */
509 if (tp->default_port == 0) {
510 new_bmcr = mii_reg0 | BMCR_ANENABLE;
511 if (new_bmcr != mii_reg0) {
512 new_bmcr |= BMCR_ANRESTART;
513 ane_switch = 1;
514 }
515 }
516 /* ...or disable nway, if forcing media */
517 else {
518 new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
519 if (new_bmcr != mii_reg0)
520 ane_switch = 1;
521 }
522
523 /* clear out bits we never want at this point */
524 new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
525 BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
526 BMCR_RESET);
527
528 if (tp->full_duplex)
529 new_bmcr |= BMCR_FULLDPLX;
530 if (tulip_media_cap[tp->default_port] & MediaIs100)
531 new_bmcr |= BMCR_SPEED100;
532
533 if (new_bmcr != mii_reg0) {
534 /* some phys need the ANE switch to
535 * happen before forced media settings
536 * will "take." However, we write the
537 * same value twice in order not to
538 * confuse the sane phys.
539 */
540 if (ane_switch) {
541 tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
542 udelay (10);
543 }
544 tulip_mdio_write (dev, phy, MII_BMCR, new_bmcr);
545 }
546 }
547 tp->mii_cnt = phy_idx;
548 if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
549 pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
550 board_idx);
551 tp->phys[0] = 1;
552 }
553}
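
For reference, the bit-banged read frame built by tulip_mdio_read() above decomposes exactly into the IEEE 802.3 clause 22 management frame: the 0xf6 constant supplies two trailing preamble ones plus the start (01) and read-opcode (10) bits ahead of the 5-bit PHY and register addresses. A small stand-alone sketch (illustration only; the phy_id and location values are arbitrary examples):

#include <stdio.h>

int main(void)
{
	int phy_id = 1;		/* hypothetical PHY address */
	int location = 4;	/* MII_ADVERTISE */
	int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
	int i;

	/* the 16 bits shifted out MSB-first after the 32-one preamble */
	for (i = 15; i >= 0; i--)
		putchar((read_cmd & (1 << i)) ? '1' : '0');
	putchar('\n');

	printf("start+opcode : %#x (0x6 = 01 start, 10 read)\n",
	       (read_cmd >> 10) & 0xf);
	printf("phy address  : %d\n", (read_cmd >> 5) & 0x1f);
	printf("register     : %d\n", read_cmd & 0x1f);
	return 0;
}

Bits 15:14 of the command are simply two more preamble ones, which is why the driver only needs to clock out 16 bits after the sync loop.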
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
new file mode 100644
index 000000000000..9c16e4ad02a6
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -0,0 +1,170 @@
1/*
2 drivers/net/tulip/pnic.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bugs to http://bugzilla.kernel.org/ .
11*/
12
13#include <linux/interrupt.h>
14#include <linux/kernel.h>
15#include <linux/jiffies.h>
16#include "tulip.h"
17
18
19void pnic_do_nway(struct net_device *dev)
20{
21 struct tulip_private *tp = netdev_priv(dev);
22 void __iomem *ioaddr = tp->base_addr;
23 u32 phy_reg = ioread32(ioaddr + 0xB8);
24 u32 new_csr6 = tp->csr6 & ~0x40C40200;
25
26 if (phy_reg & 0x78000000) { /* Ignore baseT4 */
27 if (phy_reg & 0x20000000) dev->if_port = 5;
28 else if (phy_reg & 0x40000000) dev->if_port = 3;
29 else if (phy_reg & 0x10000000) dev->if_port = 4;
30 else if (phy_reg & 0x08000000) dev->if_port = 0;
31 tp->nwayset = 1;
32 new_csr6 = (dev->if_port & 1) ? 0x01860000 : 0x00420000;
33 iowrite32(0x32 | (dev->if_port & 1), ioaddr + CSR12);
34 if (dev->if_port & 1)
35 iowrite32(0x1F868, ioaddr + 0xB8);
36 if (phy_reg & 0x30000000) {
37 tp->full_duplex = 1;
38 new_csr6 |= 0x00000200;
39 }
40 if (tulip_debug > 1)
41 netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n",
42 phy_reg, medianame[dev->if_port]);
43 if (tp->csr6 != new_csr6) {
44 tp->csr6 = new_csr6;
45 /* Restart Tx */
46 tulip_restart_rxtx(tp);
47 dev->trans_start = jiffies;
48 }
49 }
50}
51
52void pnic_lnk_change(struct net_device *dev, int csr5)
53{
54 struct tulip_private *tp = netdev_priv(dev);
55 void __iomem *ioaddr = tp->base_addr;
56 int phy_reg = ioread32(ioaddr + 0xB8);
57
58 if (tulip_debug > 1)
59 netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n",
60 phy_reg, csr5);
61 if (ioread32(ioaddr + CSR5) & TPLnkFail) {
62 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
63 /* If we use an external MII, then we mustn't use the
64 * internal negotiation.
65 */
66 if (tulip_media_cap[dev->if_port] & MediaIsMII)
67 return;
68 if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
69 tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
70 iowrite32(tp->csr6, ioaddr + CSR6);
71 iowrite32(0x30, ioaddr + CSR12);
72 iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
73 dev->trans_start = jiffies;
74 }
75 } else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
76 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
77 spin_lock(&tp->lock);
78 tulip_check_duplex(dev);
79 spin_unlock(&tp->lock);
80 } else {
81 pnic_do_nway(dev);
82 }
83 iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkPass) | TPLnkFail, ioaddr + CSR7);
84 }
85}
86
87void pnic_timer(unsigned long data)
88{
89 struct net_device *dev = (struct net_device *)data;
90 struct tulip_private *tp = netdev_priv(dev);
91 void __iomem *ioaddr = tp->base_addr;
92 int next_tick = 60*HZ;
93
94	if (!ioread32(ioaddr + CSR7)) {
95 /* the timer was called due to a work overflow
96 * in the interrupt handler. Skip the connection
97	 * checks, the nic is definitely speaking with
98	 * its link partner.
99 */
100 goto too_good_connection;
101 }
102
103 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
104 spin_lock_irq(&tp->lock);
105 if (tulip_check_duplex(dev) > 0)
106 next_tick = 3*HZ;
107 spin_unlock_irq(&tp->lock);
108 } else {
109 int csr12 = ioread32(ioaddr + CSR12);
110 int new_csr6 = tp->csr6 & ~0x40C40200;
111 int phy_reg = ioread32(ioaddr + 0xB8);
112 int csr5 = ioread32(ioaddr + CSR5);
113
114 if (tulip_debug > 1)
115 netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n",
116 phy_reg, medianame[dev->if_port], csr5);
117 if (phy_reg & 0x04000000) { /* Remote link fault */
118 iowrite32(0x0201F078, ioaddr + 0xB8);
119 next_tick = 1*HZ;
120 tp->nwayset = 0;
121 } else if (phy_reg & 0x78000000) { /* Ignore baseT4 */
122 pnic_do_nway(dev);
123 next_tick = 60*HZ;
124 } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
125 if (tulip_debug > 1)
126 netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
127 medianame[dev->if_port],
128 csr12,
129 ioread32(ioaddr + CSR5),
130 ioread32(ioaddr + 0xB8));
131 next_tick = 3*HZ;
132 if (tp->medialock) {
133 } else if (tp->nwayset && (dev->if_port & 1)) {
134 next_tick = 1*HZ;
135 } else if (dev->if_port == 0) {
136 dev->if_port = 3;
137 iowrite32(0x33, ioaddr + CSR12);
138 new_csr6 = 0x01860000;
139 iowrite32(0x1F868, ioaddr + 0xB8);
140 } else {
141 dev->if_port = 0;
142 iowrite32(0x32, ioaddr + CSR12);
143 new_csr6 = 0x00420000;
144 iowrite32(0x1F078, ioaddr + 0xB8);
145 }
146 if (tp->csr6 != new_csr6) {
147 tp->csr6 = new_csr6;
148 /* Restart Tx */
149 tulip_restart_rxtx(tp);
150 dev->trans_start = jiffies;
151 if (tulip_debug > 1)
152 dev_info(&dev->dev,
153 "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
154 medianame[dev->if_port],
155 tp->full_duplex ? "full" : "half",
156 new_csr6);
157 }
158 }
159 }
160too_good_connection:
161 mod_timer(&tp->timer, RUN_AT(next_tick));
162	if (!ioread32(ioaddr + CSR7)) {
163 if (tulip_debug > 1)
164 dev_info(&dev->dev, "sw timer wakeup\n");
165 disable_irq(dev->irq);
166 tulip_refill_rx(dev);
167 enable_irq(dev->irq);
168 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
169 }
170}
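
The if_port selection in pnic_do_nway() above follows a fixed priority over the PNIC status bits at offset 0xB8. A stand-alone sketch of that decode (the bit meanings are inferred from the code above, not taken from a PNIC datasheet; the port numbers are the driver's medianame[] indices: 0 = 10baseT, 3 = 100baseTx, 4 = 10baseT-FD, 5 = 100baseTx-FD):

#include <stdio.h>

static int pnic_port_from_phy_reg(unsigned int phy_reg)
{
	if (!(phy_reg & 0x78000000))
		return -1;		/* nothing negotiated yet */
	if (phy_reg & 0x20000000)
		return 5;		/* 100baseTx-FD */
	if (phy_reg & 0x40000000)
		return 3;		/* 100baseTx */
	if (phy_reg & 0x10000000)
		return 4;		/* 10baseT-FD */
	return 0;			/* 10baseT (0x08000000) */
}

int main(void)
{
	unsigned int samples[] = {
		0x20000000, 0x40000000, 0x10000000, 0x08000000
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("phy_reg %#010x -> if_port %d\n", samples[i],
		       pnic_port_from_phy_reg(samples[i]));
	return 0;
}

Note the full-duplex bits (0x30000000) are handled separately in the driver, which also ORs 0x200 into CSR6 when either is set.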
diff --git a/drivers/net/ethernet/dec/tulip/pnic2.c b/drivers/net/ethernet/dec/tulip/pnic2.c
new file mode 100644
index 000000000000..04a7e477eaff
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/pnic2.c
@@ -0,0 +1,403 @@
1/*
2 drivers/net/tulip/pnic2.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6	Modified to help support PNIC_II by Kevin B. Hendricks
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10
11 Please submit bugs to http://bugzilla.kernel.org/ .
12*/
13
14
15/* Understanding the PNIC_II - everything in this file is based
16 * on the PNIC_II_PDF datasheet which is sorely lacking in detail
17 *
18 * As I understand things, here are the registers and bits that
19 * explain the masks and constants used in this file that are
20 * either different from the 21142/3 or important for basic operation.
21 *
22 *
23 * CSR 6 (mask = 0xfe3bd1fd of bits not to change)
24 * -----
25 * Bit 24 - SCR
26 * Bit 23 - PCS
27 *      Bit 22  - TTM (Transmit Threshold Mode)
28 * Bit 18 - Port Select
29 * Bit 13 - Start - 1, Stop - 0 Transmissions
30 * Bit 11:10 - Loop Back Operation Mode
31 *      Bit 9   - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
32 * Bit 1 - Start - 1, Stop - 0 Receive
33 *
34 *
35 * CSR 14 (mask = 0xfff0ee39 of bits not to change)
36 * ------
37 * Bit 19 - PAUSE-Pause
38 * Bit 18 - Advertise T4
39 * Bit 17 - Advertise 100baseTx-FD
40 * Bit 16 - Advertise 100baseTx-HD
41 * Bit 12 - LTE - Link Test Enable
42 * Bit 7 - ANE - Auto Negotiate Enable
43 * Bit 6 - HDE - Advertise 10baseT-HD
44 * Bit 2 - Reset to Power down - kept as 1 for normal operation
45 * Bit 1 - Loop Back enable for 10baseT MCC
46 *
47 *
48 * CSR 12
49 * ------
50 * Bit 25 - Partner can do T4
51 * Bit 24 - Partner can do 100baseTx-FD
52 * Bit 23 - Partner can do 100baseTx-HD
53 * Bit 22 - Partner can do 10baseT-FD
54 * Bit 21 - Partner can do 10baseT-HD
55 *      Bit 15    - LPN is 1 if all above bits are valid, otherwise 0
56 * Bit 14:12 - autonegotiation state (write 001 to start autonegotiate)
57 * Bit 3 - Autopolarity state
58 * Bit 2 - LS10B - link state of 10baseT 0 - good, 1 - failed
59 * Bit 1 - LS100B - link state of 100baseT 0 - good, 1 - failed
60 *
61 *
62 * Data Port Selection Info
63 *-------------------------
64 *
65 * CSR14<7> CSR6<18> CSR6<22> CSR6<23> CSR6<24> MODE/PORT
66 * 1 0 0 (X) 0 (X) 1 NWAY
67 * 0 0 1 0 (X) 0 10baseT
68 * 0 1 0 1 1 (X) 100baseT
69 *
70 *
71 */
72
73
74
75#include "tulip.h"
76#include <linux/delay.h>
77
78
79void pnic2_timer(unsigned long data)
80{
81 struct net_device *dev = (struct net_device *)data;
82 struct tulip_private *tp = netdev_priv(dev);
83 void __iomem *ioaddr = tp->base_addr;
84 int next_tick = 60*HZ;
85
86 if (tulip_debug > 3)
87 dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
88 ioread32(ioaddr + CSR12));
89
90 if (next_tick) {
91 mod_timer(&tp->timer, RUN_AT(next_tick));
92 }
93}
94
95
96void pnic2_start_nway(struct net_device *dev)
97{
98 struct tulip_private *tp = netdev_priv(dev);
99 void __iomem *ioaddr = tp->base_addr;
100 int csr14;
101 int csr12;
102
103 /* set up what to advertise during the negotiation */
104
105 /* load in csr14 and mask off bits not to touch
106 * comment at top of file explains mask value
107 */
108 csr14 = (ioread32(ioaddr + CSR14) & 0xfff0ee39);
109
110	/* bit 17 - advertise 100baseTx-FD */
111 if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
112
113 /* bit 16 - advertise 100baseTx-HD */
114 if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
115
116 /* bit 6 - advertise 10baseT-HD */
117 if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
118
119 /* Now set bit 12 Link Test Enable, Bit 7 Autonegotiation Enable
120	 * and bit 2 Don't PowerDown 10baseT
121 */
122 csr14 |= 0x00001184;
123
124 if (tulip_debug > 1)
125 netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n",
126 csr14);
127
128 /* tell pnic2_lnk_change we are doing an nway negotiation */
129 dev->if_port = 0;
130 tp->nway = tp->mediasense = 1;
131 tp->nwayset = tp->lpar = 0;
132
133 /* now we have to set up csr6 for NWAY state */
134
135 tp->csr6 = ioread32(ioaddr + CSR6);
136 if (tulip_debug > 1)
137 netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6);
138
139 /* mask off any bits not to touch
140 * comment at top of file explains mask value
141 */
142 tp->csr6 = tp->csr6 & 0xfe3bd1fd;
143
144 /* don't forget that bit 9 is also used for advertising */
145 /* advertise 10baseT-FD for the negotiation (bit 9) */
146 if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
147
148 /* set bit 24 for nway negotiation mode ...
149 * see Data Port Selection comment at top of file
150 * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
151 */
152 tp->csr6 |= 0x01000000;
153 iowrite32(csr14, ioaddr + CSR14);
154 iowrite32(tp->csr6, ioaddr + CSR6);
155 udelay(100);
156
157 /* all set up so now force the negotiation to begin */
158
159 /* read in current values and mask off all but the
160 * Autonegotiation bits 14:12. Writing a 001 to those bits
161 * should start the autonegotiation
162 */
163 csr12 = (ioread32(ioaddr + CSR12) & 0xffff8fff);
164 csr12 |= 0x1000;
165 iowrite32(csr12, ioaddr + CSR12);
166}
167
168
169
170void pnic2_lnk_change(struct net_device *dev, int csr5)
171{
172 struct tulip_private *tp = netdev_priv(dev);
173 void __iomem *ioaddr = tp->base_addr;
174 int csr14;
175
176	/* read the status register to find out what is up */
177 int csr12 = ioread32(ioaddr + CSR12);
178
179 if (tulip_debug > 1)
180 dev_info(&dev->dev,
181 "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
182 csr12, csr5, ioread32(ioaddr + CSR14));
183
184	/* If NWay finished and we have a negotiated partner capability,
185	 * check bits 14:12 for bit pattern 101 - all is good
186 */
187 if (tp->nway && !tp->nwayset) {
188
189 /* we did an auto negotiation */
190
191 if ((csr12 & 0x7000) == 0x5000) {
192
193 /* negotiation ended successfully */
194
195			/* get the link partner's reply and mask out all but
196 * bits 24-21 which show the partners capabilities
197 * and match those to what we advertised
198 *
199 * then begin to interpret the results of the negotiation.
200 * Always go in this order : (we are ignoring T4 for now)
201 * 100baseTx-FD, 100baseTx-HD, 10baseT-FD, 10baseT-HD
202 */
203
204 int negotiated = ((csr12 >> 16) & 0x01E0) & tp->sym_advertise;
205 tp->lpar = (csr12 >> 16);
206 tp->nwayset = 1;
207
208 if (negotiated & 0x0100) dev->if_port = 5;
209 else if (negotiated & 0x0080) dev->if_port = 3;
210 else if (negotiated & 0x0040) dev->if_port = 4;
211 else if (negotiated & 0x0020) dev->if_port = 0;
212 else {
213 if (tulip_debug > 1)
214 dev_info(&dev->dev,
215 "funny autonegotiate result csr12 %08x advertising %04x\n",
216 csr12, tp->sym_advertise);
217 tp->nwayset = 0;
218 /* so check if 100baseTx link state is okay */
219 if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
220 dev->if_port = 3;
221 }
222
223 /* now record the duplex that was negotiated */
224 tp->full_duplex = 0;
225 if ((dev->if_port == 4) || (dev->if_port == 5))
226 tp->full_duplex = 1;
227
228 if (tulip_debug > 1) {
229 if (tp->nwayset)
230 dev_info(&dev->dev,
231 "Switching to %s based on link negotiation %04x & %04x = %04x\n",
232 medianame[dev->if_port],
233 tp->sym_advertise, tp->lpar,
234 negotiated);
235 }
236
237 /* remember to turn off bit 7 - autonegotiate
238 * enable so we can properly end nway mode and
239			 * set duplex (i.e. use csr6<9> again)
240 */
241 csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
242			csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
243
244
245 /* now set the data port and operating mode
246 * (see the Data Port Selection comments at
247			 * the top of the file)
248 */
249
250 /* get current csr6 and mask off bits not to touch */
251 /* see comment at top of file */
252
253 tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
254
255 /* so if using if_port 3 or 5 then select the 100baseT
256 * port else select the 10baseT port.
257 * See the Data Port Selection table at the top
258 * of the file which was taken from the PNIC_II.PDF
259 * datasheet
260 */
261 if (dev->if_port & 1) tp->csr6 |= 0x01840000;
262 else tp->csr6 |= 0x00400000;
263
264 /* now set the full duplex bit appropriately */
265 if (tp->full_duplex) tp->csr6 |= 0x00000200;
266
267 iowrite32(1, ioaddr + CSR13);
268
269 if (tulip_debug > 2)
270 netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n",
271 tp->csr6,
272 ioread32(ioaddr + CSR6),
273 ioread32(ioaddr + CSR12));
274
275 /* now the following actually writes out the
276 * new csr6 values
277 */
278 tulip_start_rxtx(tp);
279
280 return;
281
282 } else {
283 dev_info(&dev->dev,
284 "Autonegotiation failed, using %s, link beat status %04x\n",
285 medianame[dev->if_port], csr12);
286
287 /* remember to turn off bit 7 - autonegotiate
288 * enable so we don't forget
289 */
290 csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
291			iowrite32(csr14, ioaddr + CSR14);
292
293 /* what should we do when autonegotiate fails?
294 * should we try again or default to baseline
295 * case. I just don't know.
296 *
297 * for now default to some baseline case
298 */
299
300 dev->if_port = 0;
301 tp->nway = 0;
302 tp->nwayset = 1;
303
304			/* set to 10baseT-HD - see Data Port Selection
305 * comment given at the top of the file
306 */
307 tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
308 tp->csr6 |= 0x00400000;
309
310 tulip_restart_rxtx(tp);
311
312 return;
313
314 }
315 }
316
317 if ((tp->nwayset && (csr5 & 0x08000000) &&
318 (dev->if_port == 3 || dev->if_port == 5) &&
319 (csr12 & 2) == 2) || (tp->nway && (csr5 & (TPLnkFail)))) {
320
321 /* Link blew? Maybe restart NWay. */
322
323 if (tulip_debug > 2)
324 netdev_dbg(dev, "Ugh! Link blew?\n");
325
326 del_timer_sync(&tp->timer);
327 pnic2_start_nway(dev);
328 tp->timer.expires = RUN_AT(3*HZ);
329 add_timer(&tp->timer);
330
331 return;
332 }
333
334
335 if (dev->if_port == 3 || dev->if_port == 5) {
336
337 /* we are at 100mb and a potential link change occurred */
338
339 if (tulip_debug > 1)
340 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
341 medianame[dev->if_port],
342 (csr12 & 2) ? "failed" : "good");
343
344 /* check 100 link beat */
345
346 tp->nway = 0;
347 tp->nwayset = 1;
348
349 /* if failed then try doing an nway to get in sync */
350 if ((csr12 & 2) && ! tp->medialock) {
351 del_timer_sync(&tp->timer);
352 pnic2_start_nway(dev);
353 tp->timer.expires = RUN_AT(3*HZ);
354 add_timer(&tp->timer);
355 }
356
357 return;
358 }
359
360 if (dev->if_port == 0 || dev->if_port == 4) {
361
362 /* we are at 10mb and a potential link change occurred */
363
364 if (tulip_debug > 1)
365 dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
366 medianame[dev->if_port],
367 (csr12 & 4) ? "failed" : "good");
368
369
370 tp->nway = 0;
371 tp->nwayset = 1;
372
373 /* if failed, try doing an nway to get in sync */
374 if ((csr12 & 4) && ! tp->medialock) {
375 del_timer_sync(&tp->timer);
376 pnic2_start_nway(dev);
377 tp->timer.expires = RUN_AT(3*HZ);
378 add_timer(&tp->timer);
379 }
380
381 return;
382 }
383
384
385 if (tulip_debug > 1)
386 dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
387
388 /* if all else fails default to trying 10baseT-HD */
389 dev->if_port = 0;
390
391 /* make sure autonegotiate enable is off */
392 csr14 = (ioread32(ioaddr + CSR14) & 0xffffff7f);
393	iowrite32(csr14, ioaddr + CSR14);
394
395	/* set to 10baseT-HD - see Data Port Selection
396 * comment given at the top of the file
397 */
398 tp->csr6 = (ioread32(ioaddr + CSR6) & 0xfe3bd1fd);
399 tp->csr6 |= 0x00400000;
400
401 tulip_restart_rxtx(tp);
402}
403
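
The NWay completion test in pnic2_lnk_change() above can be read back as a small decode: bits 14:12 of CSR12 reading 101 mean negotiation finished, and the partner-ability bits 24:21, shifted down by 16, line up with the driver's sym_advertise encoding (0x0100 = 100Tx-FD, 0x0080 = 100Tx-HD, 0x0040 = 10T-FD, 0x0020 = 10T-HD). A stand-alone sketch with a hypothetical CSR12 value (illustration only):

#include <stdio.h>

int main(void)
{
	unsigned int csr12 = 0x01005000;	/* hypothetical: done, partner 100Tx-FD */
	unsigned int sym_advertise = 0x01E0;	/* we advertised everything */

	if ((csr12 & 0x7000) == 0x5000) {
		int negotiated = ((csr12 >> 16) & 0x01E0) & sym_advertise;
		int if_port = (negotiated & 0x0100) ? 5 :
			      (negotiated & 0x0080) ? 3 :
			      (negotiated & 0x0040) ? 4 : 0;

		printf("negotiated %#06x -> if_port %d, %s duplex\n",
		       negotiated, if_port,
		       (if_port == 4 || if_port == 5) ? "full" : "half");
	} else {
		printf("autonegotiation not complete\n");
	}
	return 0;
}

The priority order (100Tx-FD first, 10T-HD last) is the same one the driver applies after a successful negotiation.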
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
new file mode 100644
index 000000000000..19078d28ffb9
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -0,0 +1,176 @@
1/*
2 drivers/net/tulip/timer.c
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bugs to http://bugzilla.kernel.org/ .
11*/
12
13
14#include "tulip.h"
15
16
17void tulip_media_task(struct work_struct *work)
18{
19 struct tulip_private *tp =
20 container_of(work, struct tulip_private, media_work);
21 struct net_device *dev = tp->dev;
22 void __iomem *ioaddr = tp->base_addr;
23 u32 csr12 = ioread32(ioaddr + CSR12);
24 int next_tick = 2*HZ;
25 unsigned long flags;
26
27 if (tulip_debug > 2) {
28 netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
29 medianame[dev->if_port],
30 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
31 csr12, ioread32(ioaddr + CSR13),
32 ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
33 }
34 switch (tp->chip_id) {
35 case DC21140:
36 case DC21142:
37 case MX98713:
38 case COMPEX9881:
39 case DM910X:
40 default: {
41 struct medialeaf *mleaf;
42 unsigned char *p;
43 if (tp->mtable == NULL) { /* No EEPROM info, use generic code. */
44 /* Not much that can be done.
45			   Assume this is a generic MII or SYM transceiver. */
46 next_tick = 60*HZ;
47 if (tulip_debug > 2)
48 netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n",
49 ioread32(ioaddr + CSR6),
50 csr12 & 0xff);
51 break;
52 }
53 mleaf = &tp->mtable->mleaf[tp->cur_index];
54 p = mleaf->leafdata;
55 switch (mleaf->type) {
56 case 0: case 4: {
57 /* Type 0 serial or 4 SYM transceiver. Check the link beat bit. */
58 int offset = mleaf->type == 4 ? 5 : 2;
59 s8 bitnum = p[offset];
60 if (p[offset+1] & 0x80) {
61 if (tulip_debug > 1)
62 netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n",
63 csr12);
64 if (mleaf->type == 4) {
65 if (mleaf->media == 3 && (csr12 & 0x02))
66 goto select_next_media;
67 }
68 break;
69 }
70 if (tulip_debug > 2)
71 netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
72 csr12, (bitnum >> 1) & 7,
73 (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
74 (bitnum >= 0));
75 /* Check that the specified bit has the proper value. */
76 if ((bitnum < 0) !=
77 ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
78 if (tulip_debug > 2)
79 netdev_dbg(dev, "Link beat detected for %s\n",
80 medianame[mleaf->media & MEDIA_MASK]);
81 if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
82 goto actually_mii;
83 netif_carrier_on(dev);
84 break;
85 }
86 netif_carrier_off(dev);
87 if (tp->medialock)
88 break;
89 select_next_media:
90 if (--tp->cur_index < 0) {
91 /* We start again, but should instead look for default. */
92 tp->cur_index = tp->mtable->leafcount - 1;
93 }
94 dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
95 if (tulip_media_cap[dev->if_port] & MediaIsFD)
96 goto select_next_media; /* Skip FD entries. */
97 if (tulip_debug > 1)
98 netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n",
99 medianame[mleaf->media & MEDIA_MASK],
100 medianame[tp->mtable->mleaf[tp->cur_index].media]);
101 tulip_select_media(dev, 0);
102 /* Restart the transmit process. */
103 tulip_restart_rxtx(tp);
104 next_tick = (24*HZ)/10;
105 break;
106 }
107 case 1: case 3: /* 21140, 21142 MII */
108 actually_mii:
109 if (tulip_check_duplex(dev) < 0) {
110 netif_carrier_off(dev);
111 next_tick = 3*HZ;
112 } else {
113 netif_carrier_on(dev);
114 next_tick = 60*HZ;
115 }
116 break;
117 case 2: /* 21142 serial block has no link beat. */
118 default:
119 break;
120 }
121 }
122 break;
123 }
124
125
126 spin_lock_irqsave(&tp->lock, flags);
127 if (tp->timeout_recovery) {
128 tulip_tx_timeout_complete(tp, ioaddr);
129 tp->timeout_recovery = 0;
130 }
131 spin_unlock_irqrestore(&tp->lock, flags);
132
133 /* mod_timer synchronizes us with potential add_timer calls
134 * from interrupts.
135 */
136 mod_timer(&tp->timer, RUN_AT(next_tick));
137}
138
139
140void mxic_timer(unsigned long data)
141{
142 struct net_device *dev = (struct net_device *)data;
143 struct tulip_private *tp = netdev_priv(dev);
144 void __iomem *ioaddr = tp->base_addr;
145 int next_tick = 60*HZ;
146
147 if (tulip_debug > 3) {
148 dev_info(&dev->dev, "MXIC negotiation status %08x\n",
149 ioread32(ioaddr + CSR12));
150 }
151 if (next_tick) {
152 mod_timer(&tp->timer, RUN_AT(next_tick));
153 }
154}
155
156
157void comet_timer(unsigned long data)
158{
159 struct net_device *dev = (struct net_device *)data;
160 struct tulip_private *tp = netdev_priv(dev);
161 int next_tick = 60*HZ;
162
163 if (tulip_debug > 1)
164 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
165 tulip_mdio_read(dev, tp->phys[0], 1),
166 tulip_mdio_read(dev, tp->phys[0], 5));
167 /* mod_timer synchronizes us with potential add_timer calls
168 * from interrupts.
169 */
170 if (tulip_check_duplex(dev) < 0)
171 { netif_carrier_off(dev); }
172 else
173 { netif_carrier_on(dev); }
174 mod_timer(&tp->timer, RUN_AT(next_tick));
175}
176
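
The link-beat test in tulip_media_task() above hides an encoding worth spelling out: the signed media-leaf byte packs both the CSR12 pin to sample and the expected polarity into one value. A stand-alone sketch of the same test (the example leaf byte is hypothetical; the encoding is inferred from the code above):

#include <stdio.h>

/* Same test as the driver: the signed leaf byte carries a CSR12 pin
 * number in bits 3:1 and the expected polarity in the sign bit, so
 * link beat is present when the pin's state differs from the sign. */
static int link_beat_ok(signed char bitnum, unsigned int csr12)
{
	int pin = (bitnum >> 1) & 7;
	int pin_set = (csr12 & (1 << pin)) != 0;

	return (bitnum < 0) != pin_set;
}

int main(void)
{
	signed char bitnum = 2;	/* hypothetical leaf: pin 1, link up when set */

	printf("csr12=0x02 -> %s\n", link_beat_ok(bitnum, 0x02) ? "up" : "down");
	printf("csr12=0x00 -> %s\n", link_beat_ok(bitnum, 0x00) ? "up" : "down");
	return 0;
}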
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
new file mode 100644
index 000000000000..fb3887c18dc6
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -0,0 +1,570 @@
1/*
2 drivers/net/tulip/tulip.h
3
4 Copyright 2000,2001 The Linux Kernel Team
5 Written/copyright 1994-2001 by Donald Becker.
6
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 Please submit bugs to http://bugzilla.kernel.org/ .
11*/
12
13#ifndef __NET_TULIP_H__
14#define __NET_TULIP_H__
15
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/spinlock.h>
19#include <linux/netdevice.h>
20#include <linux/ethtool.h>
21#include <linux/timer.h>
22#include <linux/delay.h>
23#include <linux/pci.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/unaligned.h>
27
28
29
30/* undefine, or define to various debugging levels (>4 == obscene levels) */
31#define TULIP_DEBUG 1
32
33#ifdef CONFIG_TULIP_MMIO
34#define TULIP_BAR 1 /* CBMA */
35#else
36#define TULIP_BAR 0 /* CBIO */
37#endif
38
39
40
41struct tulip_chip_table {
42 char *chip_name;
43 int io_size;
44 int valid_intrs; /* CSR7 interrupt enable settings */
45 int flags;
46 void (*media_timer) (unsigned long);
47 work_func_t media_task;
48};
49
50
51enum tbl_flag {
52 HAS_MII = 0x00001,
53 HAS_MEDIA_TABLE = 0x00002,
54 CSR12_IN_SROM = 0x00004,
55 ALWAYS_CHECK_MII = 0x00008,
56 HAS_ACPI = 0x00010,
57 MC_HASH_ONLY = 0x00020, /* Hash-only multicast filter. */
58 HAS_PNICNWAY = 0x00080,
59 HAS_NWAY = 0x00040, /* Uses internal NWay xcvr. */
60 HAS_INTR_MITIGATION = 0x00100,
61 IS_ASIX = 0x00200,
62 HAS_8023X = 0x00400,
63 COMET_MAC_ADDR = 0x00800,
64 HAS_PCI_MWI = 0x01000,
65 HAS_PHY_IRQ = 0x02000,
66 HAS_SWAPPED_SEEPROM = 0x04000,
67 NEEDS_FAKE_MEDIA_TABLE = 0x08000,
68 COMET_PM = 0x10000,
69};
70
71
72/* chip types. careful! order is VERY IMPORTANT here, as these
73 * are used throughout the driver as indices into arrays */
74/* Note 21142 == 21143. */
75enum chips {
76 DC21040 = 0,
77 DC21041 = 1,
78 DC21140 = 2,
79 DC21142 = 3, DC21143 = 3,
80 LC82C168,
81 MX98713,
82 MX98715,
83 MX98725,
84 AX88140,
85 PNIC2,
86 COMET,
87 COMPEX9881,
88 I21145,
89 DM910X,
90 CONEXANT,
91};
92
93
94enum MediaIs {
95 MediaIsFD = 1,
96 MediaAlwaysFD = 2,
97 MediaIsMII = 4,
98 MediaIsFx = 8,
99 MediaIs100 = 16
100};
101
102
103/* Offsets to the Command and Status Registers, "CSRs". All accesses
104 must be longword instructions and quadword aligned. */
105enum tulip_offsets {
106 CSR0 = 0,
107 CSR1 = 0x08,
108 CSR2 = 0x10,
109 CSR3 = 0x18,
110 CSR4 = 0x20,
111 CSR5 = 0x28,
112 CSR6 = 0x30,
113 CSR7 = 0x38,
114 CSR8 = 0x40,
115 CSR9 = 0x48,
116 CSR10 = 0x50,
117 CSR11 = 0x58,
118 CSR12 = 0x60,
119 CSR13 = 0x68,
120 CSR14 = 0x70,
121 CSR15 = 0x78,
122 CSR18 = 0x88,
123 CSR19 = 0x8c,
124 CSR20 = 0x90,
125 CSR27 = 0xAC,
126 CSR28 = 0xB0,
127};
128
129/* register offset and bits for CFDD PCI config reg */
130enum pci_cfg_driver_reg {
131 CFDD = 0x40,
132 CFDD_Sleep = (1 << 31),
133 CFDD_Snooze = (1 << 30),
134};
135
136#define RxPollInt (RxIntr|RxNoBuf|RxDied|RxJabber)
137
138/* The bits in the CSR5 status registers, mostly interrupt sources. */
139enum status_bits {
140 TimerInt = 0x800,
141 SystemError = 0x2000,
142 TPLnkFail = 0x1000,
143 TPLnkPass = 0x10,
144 NormalIntr = 0x10000,
145 AbnormalIntr = 0x8000,
146 RxJabber = 0x200,
147 RxDied = 0x100,
148 RxNoBuf = 0x80,
149 RxIntr = 0x40,
150 TxFIFOUnderflow = 0x20,
151 RxErrIntr = 0x10,
152 TxJabber = 0x08,
153 TxNoBuf = 0x04,
154 TxDied = 0x02,
155 TxIntr = 0x01,
156};
157
158/* bit mask for CSR5 TX/RX process state */
159#define CSR5_TS 0x00700000
160#define CSR5_RS 0x000e0000
161
162enum tulip_mode_bits {
163 TxThreshold = (1 << 22),
164 FullDuplex = (1 << 9),
165 TxOn = 0x2000,
166 AcceptBroadcast = 0x0100,
167 AcceptAllMulticast = 0x0080,
168 AcceptAllPhys = 0x0040,
169 AcceptRunt = 0x0008,
170 RxOn = 0x0002,
171 RxTx = (TxOn | RxOn),
172};
173
174
175enum tulip_busconfig_bits {
176 MWI = (1 << 24),
177 MRL = (1 << 23),
178 MRM = (1 << 21),
179 CALShift = 14,
180 BurstLenShift = 8,
181};
182
183
184/* The Tulip Rx and Tx buffer descriptors. */
185struct tulip_rx_desc {
186 __le32 status;
187 __le32 length;
188 __le32 buffer1;
189 __le32 buffer2;
190};
191
192
193struct tulip_tx_desc {
194 __le32 status;
195 __le32 length;
196 __le32 buffer1;
197 __le32 buffer2; /* We use only buffer 1. */
198};
199
200
201enum desc_status_bits {
202 DescOwned = 0x80000000,
203 DescWholePkt = 0x60000000,
204 DescEndPkt = 0x40000000,
205 DescStartPkt = 0x20000000,
206 DescEndRing = 0x02000000,
207 DescUseLink = 0x01000000,
208
209 /*
210	 * Error summary flag is the logical OR of the 'CRC Error',
211	 * 'Collision Seen', 'Frame Too Long', 'Runt' and 'Descriptor
212	 * Error' flags generated within the tulip chip.
213 */
214 RxDescErrorSummary = 0x8000,
215 RxDescCRCError = 0x0002,
216 RxDescCollisionSeen = 0x0040,
217
218 /*
219 * 'Frame Too Long' flag is set if packet length including CRC exceeds
220 * 1518. However, a full sized VLAN tagged frame is 1522 bytes
221 * including CRC.
222 *
223 * The tulip chip does not block oversized frames, and if this flag is
224 * set on a receive descriptor it does not indicate the frame has been
225 * truncated. The receive descriptor also includes the actual length.
226	 * Therefore we can safely ignore this flag and check the length
227 * ourselves.
228 */
229 RxDescFrameTooLong = 0x0080,
230 RxDescRunt = 0x0800,
231 RxDescDescErr = 0x4000,
232 RxWholePkt = 0x00000300,
233 /*
234	 * Top three bits of the 14-bit frame length (status bits 27-29) should
235	 * never be set, as that would make the frame over 2047 bytes. The Receive
236 * Watchdog flag (bit 4) may indicate the length is over 2048 and the
237 * length field is invalid.
238 */
239 RxLengthOver2047 = 0x38000010
240};
241
242
243enum t21143_csr6_bits {
244 csr6_sc = (1<<31),
245 csr6_ra = (1<<30),
246 csr6_ign_dest_msb = (1<<26),
247 csr6_mbo = (1<<25),
248 csr6_scr = (1<<24), /* scramble mode flag: can't be set */
249 csr6_pcs = (1<<23), /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
250 csr6_ttm = (1<<22), /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
251 csr6_sf = (1<<21), /* Store and forward. If set ignores TR bits */
252 csr6_hbd = (1<<19), /* Heart beat disable. Disables SQE function in 10baseT */
253	csr6_ps = (1<<18), /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
254 csr6_ca = (1<<17), /* Collision Offset Enable. If set uses special algorithm in low collision situations */
255 csr6_trh = (1<<15), /* Transmit Threshold high bit */
256 csr6_trl = (1<<14), /* Transmit Threshold low bit */
257
258 /***************************************************************
259 * This table shows transmit threshold values based on media *
260 * and these two registers (from PNIC1 & 2 docs) Note: this is *
261 * all meaningless if sf is set. *
262 ***************************************************************/
263
264 /***********************************
265 * (trh,trl) * 100BaseTX * 10BaseT *
266 ***********************************
267 * (0,0) * 128 * 72 *
268 * (0,1) * 256 * 96 *
269 * (1,0) * 512 * 128 *
270 * (1,1) * 1024 * 160 *
271 ***********************************/
272
273 csr6_fc = (1<<12), /* Forces a collision in next transmission (for testing in loopback mode) */
274 csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
275 csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
276 /* set both and you get (PHY) loopback */
277	csr6_fd = (1<<9), /* Full duplex mode, disables heartbeat, no loopback */
278 csr6_pm = (1<<7), /* Pass All Multicast */
279 csr6_pr = (1<<6), /* Promiscuous mode */
280 csr6_sb = (1<<5), /* Start(1)/Stop(0) backoff counter */
281 csr6_if = (1<<4), /* Inverse Filtering, rejects only addresses in address table: can't be set */
282 csr6_pb = (1<<3), /* Pass Bad Frames, (1) causes even bad frames to be passed on */
283 csr6_ho = (1<<2), /* Hash-only filtering mode: can't be set */
284 csr6_hp = (1<<0), /* Hash/Perfect Receive Filtering Mode: can't be set */
285
286 csr6_mask_capture = (csr6_sc | csr6_ca),
287 csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
288 csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
289 csr6_mask_hdcaptt = (csr6_mask_hdcap | csr6_trh | csr6_trl),
290 csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
291 csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
292 csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
293 csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
294};
295
296enum tulip_comet_csr13_bits {
297/* The LINKOFFE and LINKONE work in conjunction with LSCE, i.e. they
298 * determine which link status transition wakes up if LSCE is
299 * enabled */
300 comet_csr13_linkoffe = (1 << 17),
301 comet_csr13_linkone = (1 << 16),
302 comet_csr13_wfre = (1 << 10),
303 comet_csr13_mpre = (1 << 9),
304 comet_csr13_lsce = (1 << 8),
305 comet_csr13_wfr = (1 << 2),
306 comet_csr13_mpr = (1 << 1),
307 comet_csr13_lsc = (1 << 0),
308};
309
310enum tulip_comet_csr18_bits {
311 comet_csr18_pmes_sticky = (1 << 24),
312 comet_csr18_pm_mode = (1 << 19),
313 comet_csr18_apm_mode = (1 << 18),
314 comet_csr18_d3a = (1 << 7)
315};
316
317enum tulip_comet_csr20_bits {
318 comet_csr20_pmes = (1 << 15),
319};
320
321/* Keep the ring sizes a power of two for efficiency.
322 Making the Tx ring too large decreases the effectiveness of channel
323 bonding and packet priority.
324 There are no ill effects from too-large receive rings. */
325
326#define TX_RING_SIZE 32
327#define RX_RING_SIZE 128
328#define MEDIA_MASK 31
329
330/* The receiver on the DC21143 rev 65 can fail to close the last
331 * receive descriptor in certain circumstances (see errata) when
332 * using MWI. This can only occur if the receive buffer ends on
333 * a cache line boundary, so the "+ 4" below ensures it doesn't.
334 */
335#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */
336
337#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
338
339#if defined(__sparc__) || defined(__hppa__)
340/* The UltraSparc PCI controllers will disconnect at every 64-byte
341 * crossing anyways so it makes no sense to tell Tulip to burst
342 * any more than that.
343 */
344#define TULIP_MAX_CACHE_LINE 16 /* in units of 32-bit words */
345#else
346#define TULIP_MAX_CACHE_LINE 32 /* in units of 32-bit words */
347#endif
348
349
350/* Ring-wrap flag in length field, use for last ring entry.
351 0x01000000 means chain on buffer2 address,
352 0x02000000 means use the ring start address in CSR2/3.
353 Note: Some work-alike chips do not function correctly in chained mode.
354 The ASIX chip works only in chained mode.
355   Thus we indicate ring mode, but always write the 'next' field for
356 chained mode as well.
357*/
358#define DESC_RING_WRAP 0x02000000
359
360
361#define EEPROM_SIZE 512 /* 2 << EEPROM_ADDRLEN */
362
363
364#define RUN_AT(x) (jiffies + (x))
365
366#define get_u16(ptr) get_unaligned_le16((ptr))
367
368struct medialeaf {
369 u8 type;
370 u8 media;
371 unsigned char *leafdata;
372};
373
374
375struct mediatable {
376 u16 defaultmedia;
377 u8 leafcount;
378 u8 csr12dir; /* General purpose pin directions. */
379 unsigned has_mii:1;
380 unsigned has_nonmii:1;
381 unsigned has_reset:6;
382 u32 csr15dir;
383 u32 csr15val; /* 21143 NWay setting. */
384 struct medialeaf mleaf[0];
385};
386
387
388struct mediainfo {
389 struct mediainfo *next;
390 int info_type;
391 int index;
392 unsigned char *info;
393};
394
395struct ring_info {
396 struct sk_buff *skb;
397 dma_addr_t mapping;
398};
399
400
401struct tulip_private {
402 const char *product_name;
403 struct net_device *next_module;
404 struct tulip_rx_desc *rx_ring;
405 struct tulip_tx_desc *tx_ring;
406 dma_addr_t rx_ring_dma;
407 dma_addr_t tx_ring_dma;
408 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
409 struct ring_info tx_buffers[TX_RING_SIZE];
410 /* The addresses of receive-in-place skbuffs. */
411 struct ring_info rx_buffers[RX_RING_SIZE];
412 u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
413 int chip_id;
414 int revision;
415 int flags;
416 struct napi_struct napi;
417 struct timer_list timer; /* Media selection timer. */
418 struct timer_list oom_timer; /* Out of memory timer. */
419 u32 mc_filter[2];
420 spinlock_t lock;
421 spinlock_t mii_lock;
422 unsigned int cur_rx, cur_tx; /* The next free ring entry */
423 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
424
425#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
426 int mit_on;
427#endif
428 unsigned int full_duplex:1; /* Full-duplex operation requested. */
429 unsigned int full_duplex_lock:1;
430 unsigned int fake_addr:1; /* Multiport board faked address. */
431 unsigned int default_port:4; /* Last dev->if_port value. */
432 unsigned int media2:4; /* Secondary monitored media port. */
433 unsigned int medialock:1; /* Don't sense media type. */
434 unsigned int mediasense:1; /* Media sensing in progress. */
435 unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
436 unsigned int timeout_recovery:1;
437 unsigned int csr0; /* CSR0 setting. */
438 unsigned int csr6; /* Current CSR6 control settings. */
439 unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
440 void (*link_change) (struct net_device * dev, int csr5);
441 struct ethtool_wolinfo wolinfo; /* WOL settings */
442 u16 sym_advertise, mii_advertise; /* NWay capabilities advertised. */
443 u16 lpar; /* 21143 Link partner ability. */
444 u16 advertising[4];
445 signed char phys[4], mii_cnt; /* MII device addresses. */
446 struct mediatable *mtable;
447 int cur_index; /* Current media index. */
448 int saved_if_port;
449 struct pci_dev *pdev;
450 int ttimer;
451 int susp_rx;
452 unsigned long nir;
453 void __iomem *base_addr;
454 int csr12_shadow;
455 int pad0; /* Used for 8-byte alignment */
456 struct work_struct media_work;
457 struct net_device *dev;
458};
459
460
461struct eeprom_fixup {
462 char *name;
463 unsigned char addr0;
464 unsigned char addr1;
465 unsigned char addr2;
466 u16 newtable[32]; /* Max length below. */
467};
468
469
470/* 21142.c */
471extern u16 t21142_csr14[];
472void t21142_media_task(struct work_struct *work);
473void t21142_start_nway(struct net_device *dev);
474void t21142_lnk_change(struct net_device *dev, int csr5);
475
476
477/* PNIC2.c */
478void pnic2_lnk_change(struct net_device *dev, int csr5);
479void pnic2_timer(unsigned long data);
480void pnic2_start_nway(struct net_device *dev);
481void pnic2_lnk_change(struct net_device *dev, int csr5);
482
483/* eeprom.c */
484void tulip_parse_eeprom(struct net_device *dev);
485int tulip_read_eeprom(struct net_device *dev, int location, int addr_len);
486
487/* interrupt.c */
488extern unsigned int tulip_max_interrupt_work;
489extern int tulip_rx_copybreak;
490irqreturn_t tulip_interrupt(int irq, void *dev_instance);
491int tulip_refill_rx(struct net_device *dev);
492#ifdef CONFIG_TULIP_NAPI
493int tulip_poll(struct napi_struct *napi, int budget);
494#endif
495
496
497/* media.c */
498int tulip_mdio_read(struct net_device *dev, int phy_id, int location);
499void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int value);
500void tulip_select_media(struct net_device *dev, int startup);
501int tulip_check_duplex(struct net_device *dev);
502void tulip_find_mii (struct net_device *dev, int board_idx);
503
504/* pnic.c */
505void pnic_do_nway(struct net_device *dev);
506void pnic_lnk_change(struct net_device *dev, int csr5);
507void pnic_timer(unsigned long data);
508
509/* timer.c */
510void tulip_media_task(struct work_struct *work);
511void mxic_timer(unsigned long data);
512void comet_timer(unsigned long data);
513
514/* tulip_core.c */
515extern int tulip_debug;
516extern const char * const medianame[];
517extern const char tulip_media_cap[];
518extern struct tulip_chip_table tulip_tbl[];
519void oom_timer(unsigned long data);
520extern u8 t21040_csr13[];
521
522static inline void tulip_start_rxtx(struct tulip_private *tp)
523{
524 void __iomem *ioaddr = tp->base_addr;
525 iowrite32(tp->csr6 | RxTx, ioaddr + CSR6);
526 barrier();
527 (void) ioread32(ioaddr + CSR6); /* mmio sync */
528}
529
530static inline void tulip_stop_rxtx(struct tulip_private *tp)
531{
532 void __iomem *ioaddr = tp->base_addr;
533 u32 csr6 = ioread32(ioaddr + CSR6);
534
535 if (csr6 & RxTx) {
536 unsigned i=1300/10;
537 iowrite32(csr6 & ~RxTx, ioaddr + CSR6);
538 barrier();
539 /* wait until in-flight frame completes.
540 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
541 * Typically expect this loop to end in < 50 us on 100BT.
542 */
543 while (--i && (ioread32(ioaddr + CSR5) & (CSR5_TS|CSR5_RS)))
544 udelay(10);
545
546 if (!i)
547 netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n",
548 ioread32(ioaddr + CSR5),
549 ioread32(ioaddr + CSR6));
550 }
551}
552
553static inline void tulip_restart_rxtx(struct tulip_private *tp)
554{
555 tulip_stop_rxtx(tp);
556 udelay(5);
557 tulip_start_rxtx(tp);
558}
559
560static inline void tulip_tx_timeout_complete(struct tulip_private *tp, void __iomem *ioaddr)
561{
562 /* Stop and restart the chip's Tx processes. */
563 tulip_restart_rxtx(tp);
564 /* Trigger an immediate transmit demand. */
565 iowrite32(0, ioaddr + CSR1);
566
567 tp->dev->stats.tx_errors++;
568}
569
570#endif /* __NET_TULIP_H__ */
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
new file mode 100644
index 000000000000..9656dd0647d9
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -0,0 +1,2008 @@
1/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 Copyright 2000,2001 The Linux Kernel Team
4 Written/copyright 1994-2001 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 Please submit bugs to http://bugzilla.kernel.org/ .
10*/
11
12#define pr_fmt(fmt) "tulip: " fmt
13
14#define DRV_NAME "tulip"
15#ifdef CONFIG_TULIP_NAPI
16#define DRV_VERSION "1.1.15-NAPI" /* Keep at least for test */
17#else
18#define DRV_VERSION "1.1.15"
19#endif
20#define DRV_RELDATE "Feb 27, 2007"
21
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/slab.h>
26#include "tulip.h"
27#include <linux/init.h>
28#include <linux/interrupt.h>
29#include <linux/etherdevice.h>
30#include <linux/delay.h>
31#include <linux/mii.h>
32#include <linux/crc32.h>
33#include <asm/unaligned.h>
34#include <asm/uaccess.h>
35
36#ifdef CONFIG_SPARC
37#include <asm/prom.h>
38#endif
39
40static char version[] __devinitdata =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42
43/* A few user-configurable values. */
44
45/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
46static unsigned int max_interrupt_work = 25;
47
48#define MAX_UNITS 8
49/* Used to pass the full-duplex flag, etc. */
50static int full_duplex[MAX_UNITS];
51static int options[MAX_UNITS];
52static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
53
54/* The possible media types that can be set in options[] are: */
55const char * const medianame[32] = {
56 "10baseT", "10base2", "AUI", "100baseTx",
57 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 "","","","", "","","","", "","","","Transceiver reset",
62};
63
64/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
65#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68static int rx_copybreak = 1518;
69#else
70static int rx_copybreak = 100;
71#endif
72
73/*
74 Set the bus performance register.
75 Typical: Set 16 longword cache alignment, no burst limit.
76 Cache alignment bits 15:14 Burst length 13:8
77 0000 No alignment 0x00000000 unlimited 0800 8 longwords
78 4000 8 longwords 0100 1 longword 1000 16 longwords
79 8000 16 longwords 0200 2 longwords 2000 32 longwords
80 C000 32 longwords 0400 4 longwords
81 Warning: many older 486 systems are broken and require setting 0x00A04800
82 (8 longword cache alignment, 8 longword burst).
83 ToDo: Non-Intel setting could be better.
84*/
85
86#if defined(__alpha__) || defined(__ia64__)
87static int csr0 = 0x01A00000 | 0xE000;
88#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89static int csr0 = 0x01A00000 | 0x8000;
90#elif defined(CONFIG_SPARC) || defined(__hppa__)
91/* The UltraSparc PCI controllers will disconnect at every 64-byte
92 * crossing anyways so it makes no sense to tell Tulip to burst
93 * any more than that.
94 */
95static int csr0 = 0x01A00000 | 0x9000;
96#elif defined(__arm__) || defined(__sh__)
97static int csr0 = 0x01A00000 | 0x4800;
98#elif defined(__mips__)
99static int csr0 = 0x00200000 | 0x4000;
100#else
101#warning Processor architecture undefined!
102static int csr0 = 0x00A00000 | 0x4800;
103#endif
104
105/* Operational parameters that usually are not changed. */
106/* Time in jiffies before concluding the transmitter is hung. */
107#define TX_TIMEOUT (4*HZ)
108
109
110MODULE_AUTHOR("The Linux Kernel Team");
111MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
112MODULE_LICENSE("GPL");
113MODULE_VERSION(DRV_VERSION);
114module_param(tulip_debug, int, 0);
115module_param(max_interrupt_work, int, 0);
116module_param(rx_copybreak, int, 0);
117module_param(csr0, int, 0);
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#ifdef TULIP_DEBUG
122int tulip_debug = TULIP_DEBUG;
123#else
124int tulip_debug = 1;
125#endif
126
127static void tulip_timer(unsigned long data)
128{
129 struct net_device *dev = (struct net_device *)data;
130 struct tulip_private *tp = netdev_priv(dev);
131
132 if (netif_running(dev))
133 schedule_work(&tp->media_work);
134}
135
136/*
137 * This table is used during operation for capabilities and the media timer.
138 *
139 * It is indexed via the values in 'enum chips'.
140 */
141
142struct tulip_chip_table tulip_tbl[] = {
143 { }, /* placeholder for array, slot unused currently */
144 { }, /* placeholder for array, slot unused currently */
145
146 /* DC21140 */
147 { "Digital DS21140 Tulip", 128, 0x0001ebef,
148 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
149 tulip_media_task },
150
151 /* DC21142, DC21143 */
152 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
153 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
154 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
155
156 /* LC82C168 */
157 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
158 HAS_MII | HAS_PNICNWAY, pnic_timer, },
159
160 /* MX98713 */
161 { "Macronix 98713 PMAC", 128, 0x0001ebef,
162 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
163
164 /* MX98715 */
165 { "Macronix 98715 PMAC", 256, 0x0001ebef,
166 HAS_MEDIA_TABLE, mxic_timer, },
167
168 /* MX98725 */
169 { "Macronix 98725 PMAC", 256, 0x0001ebef,
170 HAS_MEDIA_TABLE, mxic_timer, },
171
172 /* AX88140 */
173 { "ASIX AX88140", 128, 0x0001fbff,
174 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
175 | IS_ASIX, tulip_timer, tulip_media_task },
176
177 /* PNIC2 */
178 { "Lite-On PNIC-II", 256, 0x0801fbff,
179 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
180
181 /* COMET */
182 { "ADMtek Comet", 256, 0x0001abef,
183 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
184
185 /* COMPEX9881 */
186 { "Compex 9881 PMAC", 128, 0x0001ebef,
187 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
188
189 /* I21145 */
190 { "Intel DS21145 Tulip", 128, 0x0801fbff,
191 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
192 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
193
194 /* DM910X */
195#ifdef CONFIG_TULIP_DM910X
196 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
197 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
198 tulip_timer, tulip_media_task },
199#else
200 { NULL },
201#endif
202
203 /* RS7112 */
204 { "Conexant LANfinity", 256, 0x0001ebef,
205 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
206
207};
208
209
210static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
211 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
212 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
213 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
214 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
215 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
216/* { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
217 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
218 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
219 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
229 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
230 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
231#ifdef CONFIG_TULIP_DM910X
232 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
234#endif
235 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
236 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
237 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
241 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
242 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
247 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
248 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
249 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
250 { } /* terminate list */
251};
252MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
253
254
255/* Media capability map, indexed by if_port; bits are the MediaIs* flags
256   from tulip.h (1 FD, 2 always-FD, 4 MII, 8 Fx, 16 100Mb/s). */
256const char tulip_media_cap[32] =
257{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
258
259static void tulip_tx_timeout(struct net_device *dev);
260static void tulip_init_ring(struct net_device *dev);
261static void tulip_free_ring(struct net_device *dev);
262static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
263 struct net_device *dev);
264static int tulip_open(struct net_device *dev);
265static int tulip_close(struct net_device *dev);
266static void tulip_up(struct net_device *dev);
267static void tulip_down(struct net_device *dev);
268static struct net_device_stats *tulip_get_stats(struct net_device *dev);
269static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
270static void set_rx_mode(struct net_device *dev);
271static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
272#ifdef CONFIG_NET_POLL_CONTROLLER
273static void poll_tulip(struct net_device *dev);
274#endif
275
276static void tulip_set_power_state (struct tulip_private *tp,
277 int sleep, int snooze)
278{
279 if (tp->flags & HAS_ACPI) {
280 u32 tmp, newtmp;
281 pci_read_config_dword (tp->pdev, CFDD, &tmp);
282 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
283 if (sleep)
284 newtmp |= CFDD_Sleep;
285 else if (snooze)
286 newtmp |= CFDD_Snooze;
287 if (tmp != newtmp)
288 pci_write_config_dword (tp->pdev, CFDD, newtmp);
289 }
290
291}
292
293
294static void tulip_up(struct net_device *dev)
295{
296 struct tulip_private *tp = netdev_priv(dev);
297 void __iomem *ioaddr = tp->base_addr;
298 int next_tick = 3*HZ;
299 u32 reg;
300 int i;
301
302#ifdef CONFIG_TULIP_NAPI
303 napi_enable(&tp->napi);
304#endif
305
306 /* Wake the chip from sleep/snooze mode. */
307 tulip_set_power_state (tp, 0, 0);
308
309 /* Disable all WOL events */
310 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
311 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
312 tulip_set_wolopts(tp->pdev, 0);
313
314 /* On some chip revs we must set the MII/SYM port before the reset!? */
315 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
316 iowrite32(0x00040000, ioaddr + CSR6);
317
318 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
319 iowrite32(0x00000001, ioaddr + CSR0);
320 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
321 udelay(100);
322
323 /* Deassert reset.
324 Wait the specified 50 PCI cycles after a reset by initializing
325 Tx and Rx queues and the address filter list. */
326 iowrite32(tp->csr0, ioaddr + CSR0);
327 pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg); /* flush write */
328 udelay(100);
329
330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
335 tp->cur_rx = tp->cur_tx = 0;
336 tp->dirty_rx = tp->dirty_tx = 0;
337
338 if (tp->flags & MC_HASH_ONLY) {
339 u32 addr_low = get_unaligned_le32(dev->dev_addr);
340 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
341 if (tp->chip_id == AX88140) {
342 iowrite32(0, ioaddr + CSR13);
343 iowrite32(addr_low, ioaddr + CSR14);
344 iowrite32(1, ioaddr + CSR13);
345 iowrite32(addr_high, ioaddr + CSR14);
346 } else if (tp->flags & COMET_MAC_ADDR) {
347 iowrite32(addr_low, ioaddr + 0xA4);
348 iowrite32(addr_high, ioaddr + 0xA8);
349 iowrite32(0, ioaddr + CSR27);
350 iowrite32(0, ioaddr + CSR28);
351 }
352 } else {
353 /* This is set_rx_mode(), but without starting the transmitter. */
354 u16 *eaddrs = (u16 *)dev->dev_addr;
355 u16 *setup_frm = &tp->setup_frame[15*6];
356 dma_addr_t mapping;
357
358 /* 21140 bug: you must add the broadcast address. */
359 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
360 /* Fill the final entry of the table with our physical address. */
361 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
362 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
363 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
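		/* Each word appears twice: the chip reads only the low shortword
		   of each 32-bit slot, and doubling keeps the frame valid on
		   big-endian CPUs (see the note in set_rx_mode()). */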
364
365 mapping = pci_map_single(tp->pdev, tp->setup_frame,
366 sizeof(tp->setup_frame),
367 PCI_DMA_TODEVICE);
368 tp->tx_buffers[tp->cur_tx].skb = NULL;
369 tp->tx_buffers[tp->cur_tx].mapping = mapping;
370
371 /* Put the setup frame on the Tx list. */
372 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
373 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
374 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
375
376 tp->cur_tx++;
377 }
378
379 tp->saved_if_port = dev->if_port;
380 if (dev->if_port == 0)
381 dev->if_port = tp->default_port;
382
383 /* Allow selecting a default media. */
384 i = 0;
385 if (tp->mtable == NULL)
386 goto media_picked;
387 if (dev->if_port) {
388 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
389 (dev->if_port == 12 ? 0 : dev->if_port);
390 for (i = 0; i < tp->mtable->leafcount; i++)
391 if (tp->mtable->mleaf[i].media == looking_for) {
392 dev_info(&dev->dev,
393 "Using user-specified media %s\n",
394 medianame[dev->if_port]);
395 goto media_picked;
396 }
397 }
398 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
399 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
400 for (i = 0; i < tp->mtable->leafcount; i++)
401 if (tp->mtable->mleaf[i].media == looking_for) {
402 dev_info(&dev->dev,
403 "Using EEPROM-set media %s\n",
404 medianame[looking_for]);
405 goto media_picked;
406 }
407 }
408 /* Start sensing first non-full-duplex media. */
409 for (i = tp->mtable->leafcount - 1;
410 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
411 ;
412media_picked:
413
414 tp->csr6 = 0;
415 tp->cur_index = i;
416 tp->nwayset = 0;
417
418 if (dev->if_port) {
419 if (tp->chip_id == DC21143 &&
420 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
421 /* We must reset the media CSRs when we force-select MII mode. */
422 iowrite32(0x0000, ioaddr + CSR13);
423 iowrite32(0x0000, ioaddr + CSR14);
424 iowrite32(0x0008, ioaddr + CSR15);
425 }
426 tulip_select_media(dev, 1);
427 } else if (tp->chip_id == DC21142) {
428 if (tp->mii_cnt) {
429 tulip_select_media(dev, 1);
430 if (tulip_debug > 1)
431 dev_info(&dev->dev,
432 "Using MII transceiver %d, status %04x\n",
433 tp->phys[0],
434 tulip_mdio_read(dev, tp->phys[0], 1));
435 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
436 tp->csr6 = csr6_mask_hdcap;
437 dev->if_port = 11;
438 iowrite32(0x0000, ioaddr + CSR13);
439 iowrite32(0x0000, ioaddr + CSR14);
440 } else
441 t21142_start_nway(dev);
442 } else if (tp->chip_id == PNIC2) {
443 /* for initial startup advertise 10/100 Full and Half */
444 tp->sym_advertise = 0x01E0;
445 /* enable autonegotiate end interrupt */
446 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
447 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
448 pnic2_start_nway(dev);
449 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
450 if (tp->mii_cnt) {
451 dev->if_port = 11;
452 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
453 iowrite32(0x0001, ioaddr + CSR15);
454 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
455 pnic_do_nway(dev);
456 else {
457 /* Start with 10mbps to do autonegotiation. */
458 iowrite32(0x32, ioaddr + CSR12);
459 tp->csr6 = 0x00420000;
460 iowrite32(0x0001B078, ioaddr + 0xB8);
461 iowrite32(0x0201B078, ioaddr + 0xB8);
462 next_tick = 1*HZ;
463 }
464 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
465 ! tp->medialock) {
466 dev->if_port = 0;
467 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
468 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
469 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
470 /* Provided by BOLO, Macronix - 12/10/1998. */
471 dev->if_port = 0;
472 tp->csr6 = 0x01a80200;
473 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
474 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
475 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
476 /* Enable automatic Tx underrun recovery. */
477 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
478 dev->if_port = tp->mii_cnt ? 11 : 0;
479 tp->csr6 = 0x00040000;
480 } else if (tp->chip_id == AX88140) {
481 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
482 } else
483 tulip_select_media(dev, 1);
484
485 /* Start the chip's Tx to process setup frame. */
486 tulip_stop_rxtx(tp);
487 barrier();
488 udelay(5);
489 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
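	/* Tx is started alone first so the address-filter setup frame queued
	   above is processed before Rx is enabled below. */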
490
491 /* Enable interrupts by setting the interrupt mask. */
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
493 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
494 tulip_start_rxtx(tp);
495 iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
496
497 if (tulip_debug > 2) {
498 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
499 ioread32(ioaddr + CSR0),
500 ioread32(ioaddr + CSR5),
501 ioread32(ioaddr + CSR6));
502 }
503
504	/* Set the timer to check for link beat and perhaps switch
505	   to an alternate media type. */
506 tp->timer.expires = RUN_AT(next_tick);
507 add_timer(&tp->timer);
508#ifdef CONFIG_TULIP_NAPI
509 init_timer(&tp->oom_timer);
510 tp->oom_timer.data = (unsigned long)dev;
511 tp->oom_timer.function = oom_timer;
512#endif
513}
514
515static int
516tulip_open(struct net_device *dev)
517{
518 int retval;
519
520 tulip_init_ring (dev);
521
522 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev);
523 if (retval)
524 goto free_ring;
525
526 tulip_up (dev);
527
528 netif_start_queue (dev);
529
530 return 0;
531
532free_ring:
533 tulip_free_ring (dev);
534 return retval;
535}
536
537
538static void tulip_tx_timeout(struct net_device *dev)
539{
540 struct tulip_private *tp = netdev_priv(dev);
541 void __iomem *ioaddr = tp->base_addr;
542 unsigned long flags;
543
544 spin_lock_irqsave (&tp->lock, flags);
545
546 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
547 /* Do nothing -- the media monitor should handle this. */
548 if (tulip_debug > 1)
549 dev_warn(&dev->dev,
550 "Transmit timeout using MII device\n");
551 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
552 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
553 tp->chip_id == DM910X) {
554 dev_warn(&dev->dev,
555 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
556 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
557 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
558 ioread32(ioaddr + CSR15));
559 tp->timeout_recovery = 1;
560 schedule_work(&tp->media_work);
561 goto out_unlock;
562 } else if (tp->chip_id == PNIC2) {
563 dev_warn(&dev->dev,
564 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
565 (int)ioread32(ioaddr + CSR5),
566 (int)ioread32(ioaddr + CSR6),
567 (int)ioread32(ioaddr + CSR7),
568 (int)ioread32(ioaddr + CSR12));
569 } else {
570 dev_warn(&dev->dev,
571 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
572 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
573 dev->if_port = 0;
574 }
575
576#if defined(way_too_many_messages)
577 if (tulip_debug > 3) {
578 int i;
579 for (i = 0; i < RX_RING_SIZE; i++) {
580 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
581 int j;
582 printk(KERN_DEBUG
583 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
584 i,
585 (unsigned int)tp->rx_ring[i].status,
586 (unsigned int)tp->rx_ring[i].length,
587 (unsigned int)tp->rx_ring[i].buffer1,
588 (unsigned int)tp->rx_ring[i].buffer2,
589 buf[0], buf[1], buf[2]);
590			for (j = 0; j < 1600 && buf[j] != 0xee; j++)
591 if (j < 100)
592 pr_cont(" %02x", buf[j]);
593 pr_cont(" j=%d\n", j);
594 }
595 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
596 for (i = 0; i < RX_RING_SIZE; i++)
597 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
598 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
599 for (i = 0; i < TX_RING_SIZE; i++)
600 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
601 pr_cont("\n");
602 }
603#endif
604
605 tulip_tx_timeout_complete(tp, ioaddr);
606
607out_unlock:
608 spin_unlock_irqrestore (&tp->lock, flags);
609 dev->trans_start = jiffies; /* prevent tx timeout */
610 netif_wake_queue (dev);
611}
612
613
614/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
615static void tulip_init_ring(struct net_device *dev)
616{
617 struct tulip_private *tp = netdev_priv(dev);
618 int i;
619
620 tp->susp_rx = 0;
621 tp->ttimer = 0;
622 tp->nir = 0;
623
624 for (i = 0; i < RX_RING_SIZE; i++) {
625 tp->rx_ring[i].status = 0x00000000;
626 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
627 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
628 tp->rx_buffers[i].skb = NULL;
629 tp->rx_buffers[i].mapping = 0;
630 }
631 /* Mark the last entry as wrapping the ring. */
632 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
633 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
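	/* buffer2 chains each descriptor to the next; the last entry points
	   back to the head as well as setting the wrap bit. */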
634
635 for (i = 0; i < RX_RING_SIZE; i++) {
636 dma_addr_t mapping;
637
638 /* Note the receive buffer must be longword aligned.
639 dev_alloc_skb() provides 16 byte alignment. But do *not*
640 use skb_reserve() to align the IP header! */
641 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
642 tp->rx_buffers[i].skb = skb;
643 if (skb == NULL)
644 break;
645 mapping = pci_map_single(tp->pdev, skb->data,
646 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
647 tp->rx_buffers[i].mapping = mapping;
648 skb->dev = dev; /* Mark as being used by this device. */
649 tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
650 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
651 }
652 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
653
654 /* The Tx buffer descriptor is filled in as needed, but we
655 do need to clear the ownership bit. */
656 for (i = 0; i < TX_RING_SIZE; i++) {
657 tp->tx_buffers[i].skb = NULL;
658 tp->tx_buffers[i].mapping = 0;
659 tp->tx_ring[i].status = 0x00000000;
660 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
661 }
662 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
663}
664
665static netdev_tx_t
666tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
667{
668 struct tulip_private *tp = netdev_priv(dev);
669 int entry;
670 u32 flag;
671 dma_addr_t mapping;
672 unsigned long flags;
673
674 spin_lock_irqsave(&tp->lock, flags);
675
676 /* Calculate the next Tx descriptor entry. */
677 entry = tp->cur_tx % TX_RING_SIZE;
678
679 tp->tx_buffers[entry].skb = skb;
680 mapping = pci_map_single(tp->pdev, skb->data,
681 skb->len, PCI_DMA_TODEVICE);
682 tp->tx_buffers[entry].mapping = mapping;
683 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
684
685 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
686 flag = 0x60000000; /* No interrupt */
687 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
688 flag = 0xe0000000; /* Tx-done intr. */
689 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
690 flag = 0x60000000; /* No Tx-done intr. */
691 } else { /* Leave room for set_rx_mode() to fill entries. */
692 flag = 0xe0000000; /* Tx-done intr. */
693 netif_stop_queue(dev);
694 }
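	/* Net effect: a Tx-done interrupt is requested at the half-full point,
	   when the ring is nearly full, and on the wrap entry below. */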
695 if (entry == TX_RING_SIZE-1)
696 flag = 0xe0000000 | DESC_RING_WRAP;
697
698 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
699 /* if we were using Transmit Automatic Polling, we would need a
700 * wmb() here. */
701 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
702 wmb();
703
704 tp->cur_tx++;
705
706 /* Trigger an immediate transmit demand. */
707 iowrite32(0, tp->base_addr + CSR1);
708
709 spin_unlock_irqrestore(&tp->lock, flags);
710
711 return NETDEV_TX_OK;
712}
713
714static void tulip_clean_tx_ring(struct tulip_private *tp)
715{
716 unsigned int dirty_tx;
717
718 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
719 dirty_tx++) {
720 int entry = dirty_tx % TX_RING_SIZE;
721 int status = le32_to_cpu(tp->tx_ring[entry].status);
722
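		/* status < 0 means DescOwned (bit 31) is still set: the chip
		   never transmitted this descriptor. */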
723 if (status < 0) {
724 tp->dev->stats.tx_errors++; /* It wasn't Txed */
725 tp->tx_ring[entry].status = 0;
726 }
727
728 /* Check for Tx filter setup frames. */
729 if (tp->tx_buffers[entry].skb == NULL) {
730 /* test because dummy frames not mapped */
731 if (tp->tx_buffers[entry].mapping)
732 pci_unmap_single(tp->pdev,
733 tp->tx_buffers[entry].mapping,
734 sizeof(tp->setup_frame),
735 PCI_DMA_TODEVICE);
736 continue;
737 }
738
739 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
740 tp->tx_buffers[entry].skb->len,
741 PCI_DMA_TODEVICE);
742
743 /* Free the original skb. */
744 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
745 tp->tx_buffers[entry].skb = NULL;
746 tp->tx_buffers[entry].mapping = 0;
747 }
748}
749
750static void tulip_down (struct net_device *dev)
751{
752 struct tulip_private *tp = netdev_priv(dev);
753 void __iomem *ioaddr = tp->base_addr;
754 unsigned long flags;
755
756 cancel_work_sync(&tp->media_work);
757
758#ifdef CONFIG_TULIP_NAPI
759 napi_disable(&tp->napi);
760#endif
761
762 del_timer_sync (&tp->timer);
763#ifdef CONFIG_TULIP_NAPI
764 del_timer_sync (&tp->oom_timer);
765#endif
766 spin_lock_irqsave (&tp->lock, flags);
767
768 /* Disable interrupts by clearing the interrupt mask. */
769 iowrite32 (0x00000000, ioaddr + CSR7);
770
771 /* Stop the Tx and Rx processes. */
772 tulip_stop_rxtx(tp);
773
774 /* prepare receive buffers */
775 tulip_refill_rx(dev);
776
777 /* release any unconsumed transmit buffers */
778 tulip_clean_tx_ring(tp);
779
780 if (ioread32(ioaddr + CSR6) != 0xffffffff)
781 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
782
783 spin_unlock_irqrestore (&tp->lock, flags);
784
785 init_timer(&tp->timer);
786 tp->timer.data = (unsigned long)dev;
787 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
788
789 dev->if_port = tp->saved_if_port;
790
791 /* Leave the driver in snooze, not sleep, mode. */
792 tulip_set_power_state (tp, 0, 1);
793}
794
795static void tulip_free_ring (struct net_device *dev)
796{
797 struct tulip_private *tp = netdev_priv(dev);
798 int i;
799
800 /* Free all the skbuffs in the Rx queue. */
801 for (i = 0; i < RX_RING_SIZE; i++) {
802 struct sk_buff *skb = tp->rx_buffers[i].skb;
803 dma_addr_t mapping = tp->rx_buffers[i].mapping;
804
805 tp->rx_buffers[i].skb = NULL;
806 tp->rx_buffers[i].mapping = 0;
807
808 tp->rx_ring[i].status = 0; /* Not owned by Tulip chip. */
809 tp->rx_ring[i].length = 0;
810 /* An invalid address. */
811 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
812 if (skb) {
813 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
814 PCI_DMA_FROMDEVICE);
815 dev_kfree_skb (skb);
816 }
817 }
818
819 for (i = 0; i < TX_RING_SIZE; i++) {
820 struct sk_buff *skb = tp->tx_buffers[i].skb;
821
822 if (skb != NULL) {
823 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
824 skb->len, PCI_DMA_TODEVICE);
825 dev_kfree_skb (skb);
826 }
827 tp->tx_buffers[i].skb = NULL;
828 tp->tx_buffers[i].mapping = 0;
829 }
830}
831
832static int tulip_close (struct net_device *dev)
833{
834 struct tulip_private *tp = netdev_priv(dev);
835 void __iomem *ioaddr = tp->base_addr;
836
837 netif_stop_queue (dev);
838
839 tulip_down (dev);
840
841 if (tulip_debug > 1)
842 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
843 ioread32 (ioaddr + CSR5));
844
845 free_irq (dev->irq, dev);
846
847 tulip_free_ring (dev);
848
849 return 0;
850}
851
852static struct net_device_stats *tulip_get_stats(struct net_device *dev)
853{
854 struct tulip_private *tp = netdev_priv(dev);
855 void __iomem *ioaddr = tp->base_addr;
856
857 if (netif_running(dev)) {
858 unsigned long flags;
859
860 spin_lock_irqsave (&tp->lock, flags);
861
862 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
863
864 spin_unlock_irqrestore(&tp->lock, flags);
865 }
866
867 return &dev->stats;
868}
869
870
871static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
872{
873 struct tulip_private *np = netdev_priv(dev);
874 strcpy(info->driver, DRV_NAME);
875 strcpy(info->version, DRV_VERSION);
876 strcpy(info->bus_info, pci_name(np->pdev));
877}
878
879
880static int tulip_ethtool_set_wol(struct net_device *dev,
881 struct ethtool_wolinfo *wolinfo)
882{
883 struct tulip_private *tp = netdev_priv(dev);
884
885 if (wolinfo->wolopts & (~tp->wolinfo.supported))
886 return -EOPNOTSUPP;
887
888 tp->wolinfo.wolopts = wolinfo->wolopts;
889 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
890 return 0;
891}
892
893static void tulip_ethtool_get_wol(struct net_device *dev,
894 struct ethtool_wolinfo *wolinfo)
895{
896 struct tulip_private *tp = netdev_priv(dev);
897
898 wolinfo->supported = tp->wolinfo.supported;
899 wolinfo->wolopts = tp->wolinfo.wolopts;
900 return;
901}
902
903
904static const struct ethtool_ops ops = {
905 .get_drvinfo = tulip_get_drvinfo,
906 .set_wol = tulip_ethtool_set_wol,
907 .get_wol = tulip_ethtool_get_wol,
908};
909
910/* Provide ioctl() calls to examine the MII xcvr state. */
911static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
912{
913 struct tulip_private *tp = netdev_priv(dev);
914 void __iomem *ioaddr = tp->base_addr;
915 struct mii_ioctl_data *data = if_mii(rq);
916 const unsigned int phy_idx = 0;
917 int phy = tp->phys[phy_idx] & 0x1f;
918 unsigned int regnum = data->reg_num;
919
920 switch (cmd) {
921 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
922 if (tp->mii_cnt)
923 data->phy_id = phy;
924 else if (tp->flags & HAS_NWAY)
925 data->phy_id = 32;
926 else if (tp->chip_id == COMET)
927 data->phy_id = 1;
928 else
929 return -ENODEV;
930
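		/* Note: deliberate fall through; SIOCGMIIPHY also performs the
		   register read below. */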
931 case SIOCGMIIREG: /* Read MII PHY register. */
932 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
933 int csr12 = ioread32 (ioaddr + CSR12);
934 int csr14 = ioread32 (ioaddr + CSR14);
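			/* PHY address 32 is the internal NWay block: synthesize
			   BMCR/BMSR/ANAR/ANLPAR-style values from the SIA CSRs. */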
935 switch (regnum) {
936 case 0:
937 if (((csr14<<5) & 0x1000) ||
938 (dev->if_port == 5 && tp->nwayset))
939 data->val_out = 0x1000;
940 else
941 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
942 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
943 break;
944 case 1:
945 data->val_out =
946 0x1848 +
947 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
948 ((csr12&0x06) == 6 ? 0 : 4);
949 data->val_out |= 0x6048;
950 break;
951 case 4:
952 /* Advertised value, bogus 10baseTx-FD value from CSR6. */
953 data->val_out =
954 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
955 ((csr14 >> 1) & 0x20) + 1;
956 data->val_out |= ((csr14 >> 9) & 0x03C0);
957 break;
958 case 5: data->val_out = tp->lpar; break;
959 default: data->val_out = 0; break;
960 }
961 } else {
962 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
963 }
964 return 0;
965
966 case SIOCSMIIREG: /* Write MII PHY register. */
967 if (regnum & ~0x1f)
968 return -EINVAL;
969 if (data->phy_id == phy) {
970 u16 value = data->val_in;
971 switch (regnum) {
972 case 0: /* Check for autonegotiation on or reset. */
973 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
974 if (tp->full_duplex_lock)
975 tp->full_duplex = (value & 0x0100) ? 1 : 0;
976 break;
977 case 4:
978 tp->advertising[phy_idx] =
979 tp->mii_advertise = data->val_in;
980 break;
981 }
982 }
983 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
984 u16 value = data->val_in;
985 if (regnum == 0) {
986 if ((value & 0x1200) == 0x1200) {
987 if (tp->chip_id == PNIC2) {
988 pnic2_start_nway (dev);
989 } else {
990 t21142_start_nway (dev);
991 }
992 }
993 } else if (regnum == 4)
994 tp->sym_advertise = value;
995 } else {
996 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
997 }
998 return 0;
999 default:
1000 return -EOPNOTSUPP;
1001 }
1002
1003 return -EOPNOTSUPP;
1004}
1005
1006
1007/* Set or clear the multicast filter for this adaptor.
1008 Note that we only use exclusion around actually queueing the
1009 new frame, not around filling tp->setup_frame. This is non-deterministic
1010 when re-entered but still correct. */
1011
1012#undef set_bit_le
1013#define set_bit_le(i,p) do { ((char *)(p))[(i)/8] |= (1<<((i)%8)); } while(0)
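/* e.g. set_bit_le(255, hash_table) sets bit 7 of byte 31, the broadcast entry used below. */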
1014
1015static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1016{
1017 struct tulip_private *tp = netdev_priv(dev);
1018 u16 hash_table[32];
1019 struct netdev_hw_addr *ha;
1020 int i;
1021 u16 *eaddrs;
1022
1023 memset(hash_table, 0, sizeof(hash_table));
1024 set_bit_le(255, hash_table); /* Broadcast entry */
1025 /* This should work on big-endian machines as well. */
1026 netdev_for_each_mc_addr(ha, dev) {
1027 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1028
1029 set_bit_le(index, hash_table);
1030 }
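	/* The low 9 bits of the little-endian CRC select one of the 512 bits
	   in the 64-byte hash table. */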
1031 for (i = 0; i < 32; i++) {
1032 *setup_frm++ = hash_table[i];
1033 *setup_frm++ = hash_table[i];
1034 }
1035 setup_frm = &tp->setup_frame[13*6];
1036
1037 /* Fill the final entry with our physical address. */
1038 eaddrs = (u16 *)dev->dev_addr;
1039 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1040 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1041 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1042}
1043
1044static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1045{
1046 struct tulip_private *tp = netdev_priv(dev);
1047 struct netdev_hw_addr *ha;
1048 u16 *eaddrs;
1049
1050 /* We have <= 14 addresses so we can use the wonderful
1051 16 address perfect filtering of the Tulip. */
1052 netdev_for_each_mc_addr(ha, dev) {
1053 eaddrs = (u16 *) ha->addr;
1054 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1055 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1056 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1057 }
1058 /* Fill the unused entries with the broadcast address. */
1059 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1060 setup_frm = &tp->setup_frame[15*6];
1061
1062 /* Fill the final entry with our physical address. */
1063 eaddrs = (u16 *)dev->dev_addr;
1064 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1065 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1066 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1067}
1068
1069
1070static void set_rx_mode(struct net_device *dev)
1071{
1072 struct tulip_private *tp = netdev_priv(dev);
1073 void __iomem *ioaddr = tp->base_addr;
1074 int csr6;
1075
1076 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1077
1078 tp->csr6 &= ~0x00D5;
1079 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1080 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1081 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1082 } else if ((netdev_mc_count(dev) > 1000) ||
1083 (dev->flags & IFF_ALLMULTI)) {
1084 /* Too many to filter well -- accept all multicasts. */
1085 tp->csr6 |= AcceptAllMulticast;
1086 csr6 |= AcceptAllMulticast;
1087 } else if (tp->flags & MC_HASH_ONLY) {
1088 /* Some work-alikes have only a 64-entry hash filter table. */
1089 /* Should verify correctness on big-endian/__powerpc__ */
1090 struct netdev_hw_addr *ha;
1091 if (netdev_mc_count(dev) > 64) {
1092 /* Arbitrary non-effective limit. */
1093 tp->csr6 |= AcceptAllMulticast;
1094 csr6 |= AcceptAllMulticast;
1095 } else {
1096 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1097 int filterbit;
1098 netdev_for_each_mc_addr(ha, dev) {
1099 if (tp->flags & COMET_MAC_ADDR)
1100 filterbit = ether_crc_le(ETH_ALEN,
1101 ha->addr);
1102 else
1103 filterbit = ether_crc(ETH_ALEN,
1104 ha->addr) >> 26;
1105 filterbit &= 0x3f;
1106 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1107 if (tulip_debug > 2)
1108 dev_info(&dev->dev,
1109 "Added filter for %pM %08x bit %d\n",
1110 ha->addr,
1111 ether_crc(ETH_ALEN, ha->addr),
1112 filterbit);
1113 }
1114 if (mc_filter[0] == tp->mc_filter[0] &&
1115 mc_filter[1] == tp->mc_filter[1])
1116 ; /* No change. */
1117 else if (tp->flags & IS_ASIX) {
1118 iowrite32(2, ioaddr + CSR13);
1119 iowrite32(mc_filter[0], ioaddr + CSR14);
1120 iowrite32(3, ioaddr + CSR13);
1121 iowrite32(mc_filter[1], ioaddr + CSR14);
1122 } else if (tp->flags & COMET_MAC_ADDR) {
1123 iowrite32(mc_filter[0], ioaddr + CSR27);
1124 iowrite32(mc_filter[1], ioaddr + CSR28);
1125 }
1126 tp->mc_filter[0] = mc_filter[0];
1127 tp->mc_filter[1] = mc_filter[1];
1128 }
1129 } else {
1130 unsigned long flags;
1131 u32 tx_flags = 0x08000000 | 192;
1132
1133 /* Note that only the low-address shortword of setup_frame is valid!
1134 The values are doubled for big-endian architectures. */
1135 if (netdev_mc_count(dev) > 14) {
1136 /* Must use a multicast hash table. */
1137 build_setup_frame_hash(tp->setup_frame, dev);
1138 tx_flags = 0x08400000 | 192;
1139 } else {
1140 build_setup_frame_perfect(tp->setup_frame, dev);
1141 }
1142
1143 spin_lock_irqsave(&tp->lock, flags);
1144
1145 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1146 /* Same setup recently queued, we need not add it. */
1147 } else {
1148 unsigned int entry;
1149 int dummy = -1;
1150
1151 /* Now add this frame to the Tx list. */
1152
1153 entry = tp->cur_tx++ % TX_RING_SIZE;
1154
1155 if (entry != 0) {
1156 /* Avoid a chip errata by prefixing a dummy entry. */
1157 tp->tx_buffers[entry].skb = NULL;
1158 tp->tx_buffers[entry].mapping = 0;
1159 tp->tx_ring[entry].length =
1160 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1161 tp->tx_ring[entry].buffer1 = 0;
1162 /* Must set DescOwned later to avoid race with chip */
1163 dummy = entry;
1164 entry = tp->cur_tx++ % TX_RING_SIZE;
1165
1166 }
1167
1168 tp->tx_buffers[entry].skb = NULL;
1169 tp->tx_buffers[entry].mapping =
1170 pci_map_single(tp->pdev, tp->setup_frame,
1171 sizeof(tp->setup_frame),
1172 PCI_DMA_TODEVICE);
1173 /* Put the setup frame on the Tx list. */
1174 if (entry == TX_RING_SIZE-1)
1175 tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
1176 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1177 tp->tx_ring[entry].buffer1 =
1178 cpu_to_le32(tp->tx_buffers[entry].mapping);
1179 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1180 if (dummy >= 0)
1181 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1182 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1183 netif_stop_queue(dev);
1184
1185 /* Trigger an immediate transmit demand. */
1186 iowrite32(0, ioaddr + CSR1);
1187 }
1188
1189 spin_unlock_irqrestore(&tp->lock, flags);
1190 }
1191
1192 iowrite32(csr6, ioaddr + CSR6);
1193}
1194
1195#ifdef CONFIG_TULIP_MWI
1196static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1197 struct net_device *dev)
1198{
1199 struct tulip_private *tp = netdev_priv(dev);
1200 u8 cache;
1201 u16 pci_command;
1202 u32 csr0;
1203
1204 if (tulip_debug > 3)
1205 netdev_dbg(dev, "tulip_mwi_config()\n");
1206
1207 tp->csr0 = csr0 = 0;
1208
1209 /* if we have any cache line size at all, we can do MRM and MWI */
1210 csr0 |= MRM | MWI;
1211
1212 /* Enable MWI in the standard PCI command bit.
1213 * Check for the case where MWI is desired but not available
1214 */
1215 pci_try_set_mwi(pdev);
1216
1217 /* read result from hardware (in case bit refused to enable) */
1218 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1219 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1220 csr0 &= ~MWI;
1221
1222 /* if cache line size hardwired to zero, no MWI */
1223 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1224 if ((csr0 & MWI) && (cache == 0)) {
1225 csr0 &= ~MWI;
1226 pci_clear_mwi(pdev);
1227 }
1228
1229 /* assign per-cacheline-size cache alignment and
1230 * burst length values
1231 */
1232 switch (cache) {
1233 case 8:
1234 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1235 break;
1236 case 16:
1237 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1238 break;
1239 case 32:
1240 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1241 break;
1242 default:
1243 cache = 0;
1244 break;
1245 }
1246
1247 /* if we have a good cache line size, we by now have a good
1248 * csr0, so save it and exit
1249 */
1250 if (cache)
1251 goto out;
1252
1253 /* we don't have a good csr0 or cache line size, disable MWI */
1254 if (csr0 & MWI) {
1255 pci_clear_mwi(pdev);
1256 csr0 &= ~MWI;
1257 }
1258
1259 /* sane defaults for burst length and cache alignment
1260 * originally from de4x5 driver
1261 */
1262 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1263
1264out:
1265 tp->csr0 = csr0;
1266 if (tulip_debug > 2)
1267 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1268 cache, csr0);
1269}
1270#endif
1271
1272/*
1273 * Chips that have the MRM/reserved-bit quirk and the burst quirk: that
1274 * is, the DM910X and the on-chip ULi devices.
1275 */
1276
1277static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1278{
1279 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1280 return 1;
1281 return 0;
1282}
1283
1284static const struct net_device_ops tulip_netdev_ops = {
1285 .ndo_open = tulip_open,
1286 .ndo_start_xmit = tulip_start_xmit,
1287 .ndo_tx_timeout = tulip_tx_timeout,
1288 .ndo_stop = tulip_close,
1289 .ndo_get_stats = tulip_get_stats,
1290 .ndo_do_ioctl = private_ioctl,
1291 .ndo_set_rx_mode = set_rx_mode,
1292 .ndo_change_mtu = eth_change_mtu,
1293 .ndo_set_mac_address = eth_mac_addr,
1294 .ndo_validate_addr = eth_validate_addr,
1295#ifdef CONFIG_NET_POLL_CONTROLLER
1296 .ndo_poll_controller = poll_tulip,
1297#endif
1298};
1299
1300DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1301 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1302 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1303 { },
1304};
1305
1306static int __devinit tulip_init_one (struct pci_dev *pdev,
1307 const struct pci_device_id *ent)
1308{
1309 struct tulip_private *tp;
1310 /* See note below on the multiport cards. */
1311 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1312 static int last_irq;
1313 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1314 int i, irq;
1315 unsigned short sum;
1316 unsigned char *ee_data;
1317 struct net_device *dev;
1318 void __iomem *ioaddr;
1319 static int board_idx = -1;
1320 int chip_idx = ent->driver_data;
1321 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1322 unsigned int eeprom_missing = 0;
1323 unsigned int force_csr0 = 0;
1324
1325#ifndef MODULE
1326 if (tulip_debug > 0)
1327 printk_once(KERN_INFO "%s", version);
1328#endif
1329
1330 board_idx++;
1331
1332	/*
1333	 * LAN Media cards wire a Tulip chip to a WAN interface and need a
1334	 * very different driver (the lmc driver).
1335	 */
1336
1337 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1338 pr_err("skipping LMC card\n");
1339 return -ENODEV;
1340 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1341 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1342 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1343 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1344 pr_err("skipping SBE T3E3 port\n");
1345 return -ENODEV;
1346 }
1347
1348 /*
1349 * DM910x chips should be handled by the dmfe driver, except
1350 * on-board chips on SPARC systems. Also, early DM9100s need
1351 * software CRC which only the dmfe driver supports.
1352 */
1353
1354#ifdef CONFIG_TULIP_DM910X
1355 if (chip_idx == DM910X) {
1356 struct device_node *dp;
1357
1358 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1359 pdev->revision < 0x30) {
1360 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1361 return -ENODEV;
1362 }
1363
1364 dp = pci_device_to_OF_node(pdev);
1365 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1366 pr_info("skipping DM910x expansion card (use dmfe)\n");
1367 return -ENODEV;
1368 }
1369 }
1370#endif
1371
1372	/*
1373	 * Look for early PCI chipsets where people report hangs
1374	 * unless the workarounds are enabled.
1375	 */
1376
1377 /* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
1378 aligned. Aries might need this too. The Saturn errata are not
1379 pretty reading but thankfully it's an old 486 chipset.
1380
1381 2. The dreaded SiS496 486 chipset. Same workaround as Intel
1382 Saturn.
1383 */
1384
1385 if (pci_dev_present(early_486_chipsets)) {
1386 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1387 force_csr0 = 1;
1388 }
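	/* i.e. read-line/read-multiple with an 8-longword burst limit and
	   8-longword cache alignment, per the CSR0 table above. */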
1389
1390 /* bugfix: the ASIX must have a burst limit or horrible things happen. */
1391 if (chip_idx == AX88140) {
1392 if ((csr0 & 0x3f00) == 0)
1393 csr0 |= 0x2000;
1394 }
1395
1396 /* PNIC doesn't have MWI/MRL/MRM... */
1397 if (chip_idx == LC82C168)
1398 csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
1399
1400	/* DM9102A has trouble with MRM; also clear reserved bits 24:22, 20, 16, 7:1 */
1401 if (tulip_uli_dm_quirk(pdev)) {
1402 csr0 &= ~0x01f100ff;
1403#if defined(CONFIG_SPARC)
1404 csr0 = (csr0 & ~0xff00) | 0xe000;
1405#endif
1406 }
1407 /*
1408 * And back to business
1409 */
1410
1411 i = pci_enable_device(pdev);
1412 if (i) {
1413 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1414 return i;
1415 }
1416
1417 /* The chip will fail to enter a low-power state later unless
1418 * first explicitly commanded into D0 */
1419 if (pci_set_power_state(pdev, PCI_D0)) {
1420 pr_notice("Failed to set power state to D0\n");
1421 }
1422
1423 irq = pdev->irq;
1424
1425 /* alloc_etherdev ensures aligned and zeroed private structures */
1426 dev = alloc_etherdev (sizeof (*tp));
1427 if (!dev) {
1428 pr_err("ether device alloc failed, aborting\n");
1429 return -ENOMEM;
1430 }
1431
1432 SET_NETDEV_DEV(dev, &pdev->dev);
1433 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1434 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1435 pci_name(pdev),
1436 (unsigned long long)pci_resource_len (pdev, 0),
1437 (unsigned long long)pci_resource_start (pdev, 0));
1438 goto err_out_free_netdev;
1439 }
1440
1441 /* grab all resources from both PIO and MMIO regions, as we
1442 * don't want anyone else messing around with our hardware */
1443 if (pci_request_regions (pdev, DRV_NAME))
1444 goto err_out_free_netdev;
1445
1446 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1447
1448 if (!ioaddr)
1449 goto err_out_free_res;
1450
1451 /*
1452 * initialize private data structure 'tp'
1453 * it is zeroed and aligned in alloc_etherdev
1454 */
1455 tp = netdev_priv(dev);
1456 tp->dev = dev;
1457
1458 tp->rx_ring = pci_alloc_consistent(pdev,
1459 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1460 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1461 &tp->rx_ring_dma);
1462 if (!tp->rx_ring)
1463 goto err_out_mtable;
1464 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1465 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1466
1467 tp->chip_id = chip_idx;
1468 tp->flags = tulip_tbl[chip_idx].flags;
1469
1470 tp->wolinfo.supported = 0;
1471 tp->wolinfo.wolopts = 0;
1472 /* COMET: Enable power management only for AN983B */
1473 if (chip_idx == COMET ) {
1474 u32 sig;
1475 pci_read_config_dword (pdev, 0x80, &sig);
1476 if (sig == 0x09811317) {
1477 tp->flags |= COMET_PM;
1478 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1479 pr_info("%s: Enabled WOL support for AN983B\n",
1480 __func__);
1481 }
1482 }
1483 tp->pdev = pdev;
1484 tp->base_addr = ioaddr;
1485 tp->revision = pdev->revision;
1486 tp->csr0 = csr0;
1487 spin_lock_init(&tp->lock);
1488 spin_lock_init(&tp->mii_lock);
1489 init_timer(&tp->timer);
1490 tp->timer.data = (unsigned long)dev;
1491 tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
1492
1493 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1494
1495 dev->base_addr = (unsigned long)ioaddr;
1496
1497#ifdef CONFIG_TULIP_MWI
1498 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1499 tulip_mwi_config (pdev, dev);
1500#endif
1501
1502 /* Stop the chip's Tx and Rx processes. */
1503 tulip_stop_rxtx(tp);
1504
1505 pci_set_master(pdev);
1506
1507#ifdef CONFIG_GSC
1508 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1509 switch (pdev->subsystem_device) {
1510 default:
1511 break;
1512 case 0x1061:
1513 case 0x1062:
1514 case 0x1063:
1515 case 0x1098:
1516 case 0x1099:
1517 case 0x10EE:
1518 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1519 chip_name = "GSC DS21140 Tulip";
1520 }
1521 }
1522#endif
1523
1524 /* Clear the missed-packet counter. */
1525 ioread32(ioaddr + CSR8);
1526
1527 /* The station address ROM is read byte serially. The register must
1528 be polled, waiting for the value to be read bit serially from the
1529 EEPROM.
1530 */
1531 ee_data = tp->eeprom;
1532 memset(ee_data, 0, sizeof(tp->eeprom));
1533 sum = 0;
1534 if (chip_idx == LC82C168) {
1535 for (i = 0; i < 3; i++) {
1536 int value, boguscnt = 100000;
1537 iowrite32(0x600 | i, ioaddr + 0x98);
1538 do {
1539 value = ioread32(ioaddr + CSR9);
1540 } while (value < 0 && --boguscnt > 0);
1541 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1542 sum += value & 0xffff;
1543 }
1544 } else if (chip_idx == COMET) {
1545 /* No need to read the EEPROM. */
1546 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1547 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1548 for (i = 0; i < 6; i ++)
1549 sum += dev->dev_addr[i];
1550 } else {
1551 /* A serial EEPROM interface, we read now and sort it out later. */
1552 int sa_offset = 0;
1553 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1554 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
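		/* The probe read at 0xff distinguishes 8-bit (256-word) from
		   6-bit (64-word) EEPROM addressing; the resulting size is
		   clamped to our buffer below. */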
1555
1556 if (ee_max_addr > sizeof(tp->eeprom))
1557 ee_max_addr = sizeof(tp->eeprom);
1558
1559 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1560 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1561 ee_data[i] = data & 0xff;
1562 ee_data[i + 1] = data >> 8;
1563 }
1564
1565 /* DEC now has a specification (see Notes) but early board makers
1566 just put the address in the first EEPROM locations. */
1567 /* This does memcmp(ee_data, ee_data+16, 8) */
1568 for (i = 0; i < 8; i ++)
1569 if (ee_data[i] != ee_data[16+i])
1570 sa_offset = 20;
1571 if (chip_idx == CONEXANT) {
1572 /* Check that the tuple type and length is correct. */
1573 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1574 sa_offset = 0x19A;
1575 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1576 ee_data[2] == 0) {
1577 sa_offset = 2; /* Grrr, damn Matrox boards. */
1578 multiport_cnt = 4;
1579 }
1580#ifdef CONFIG_MIPS_COBALT
1581 if ((pdev->bus->number == 0) &&
1582 ((PCI_SLOT(pdev->devfn) == 7) ||
1583 (PCI_SLOT(pdev->devfn) == 12))) {
1584 /* Cobalt MAC address in first EEPROM locations. */
1585 sa_offset = 0;
1586			/* Ensure our media table fixup gets applied */
1587 memcpy(ee_data + 16, ee_data, 8);
1588 }
1589#endif
1590#ifdef CONFIG_GSC
1591 /* Check to see if we have a broken srom */
1592 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1593 /* pci_vendor_id and subsystem_id are swapped */
1594 ee_data[0] = ee_data[2];
1595 ee_data[1] = ee_data[3];
1596 ee_data[2] = 0x61;
1597 ee_data[3] = 0x10;
1598
1599			/* HSC-PCI boards need to be byte-swapped and shifted
1600 * up 1 word. This shift needs to happen at the end
1601 * of the MAC first because of the 2 byte overlap.
1602 */
1603 for (i = 4; i >= 0; i -= 2) {
1604 ee_data[17 + i + 3] = ee_data[17 + i];
1605 ee_data[16 + i + 5] = ee_data[16 + i];
1606 }
1607 }
1608#endif
1609
1610 for (i = 0; i < 6; i ++) {
1611 dev->dev_addr[i] = ee_data[i + sa_offset];
1612 sum += ee_data[i + sa_offset];
1613 }
1614 }
1615 /* Lite-On boards have the address byte-swapped. */
1616 if ((dev->dev_addr[0] == 0xA0 ||
1617 dev->dev_addr[0] == 0xC0 ||
1618 dev->dev_addr[0] == 0x02) &&
1619 dev->dev_addr[1] == 0x00)
1620 for (i = 0; i < 6; i+=2) {
1621 char tmp = dev->dev_addr[i];
1622 dev->dev_addr[i] = dev->dev_addr[i+1];
1623 dev->dev_addr[i+1] = tmp;
1624 }
1625 /* On the Zynx 315 Etherarray and other multiport boards only the
1626 first Tulip has an EEPROM.
1627 On Sparc systems the mac address is held in the OBP property
1628 "local-mac-address".
1629 The addresses of the subsequent ports are derived from the first.
1630 Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1631 that here as well. */
1632 if (sum == 0 || sum == 6*0xff) {
1633#if defined(CONFIG_SPARC)
1634 struct device_node *dp = pci_device_to_OF_node(pdev);
1635 const unsigned char *addr;
1636 int len;
1637#endif
1638 eeprom_missing = 1;
1639 for (i = 0; i < 5; i++)
1640 dev->dev_addr[i] = last_phys_addr[i];
1641 dev->dev_addr[i] = last_phys_addr[i] + 1;
1642#if defined(CONFIG_SPARC)
1643 addr = of_get_property(dp, "local-mac-address", &len);
1644 if (addr && len == 6)
1645 memcpy(dev->dev_addr, addr, 6);
1646#endif
1647#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
1648 if (last_irq)
1649 irq = last_irq;
1650#endif
1651 }
1652
1653 for (i = 0; i < 6; i++)
1654 last_phys_addr[i] = dev->dev_addr[i];
1655 last_irq = irq;
1656 dev->irq = irq;
1657
1658 /* The lower four bits are the media type. */
1659 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1660 if (options[board_idx] & MEDIA_MASK)
1661 tp->default_port = options[board_idx] & MEDIA_MASK;
1662 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1663 tp->full_duplex = 1;
1664 if (mtu[board_idx] > 0)
1665 dev->mtu = mtu[board_idx];
1666 }
1667 if (dev->mem_start & MEDIA_MASK)
1668 tp->default_port = dev->mem_start & MEDIA_MASK;
1669 if (tp->default_port) {
1670 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1671 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1672 tp->medialock = 1;
1673 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1674 tp->full_duplex = 1;
1675 }
1676 if (tp->full_duplex)
1677 tp->full_duplex_lock = 1;
1678
1679 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1680 static const u16 media2advert[] = {
1681 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1682 };
1683 tp->mii_advertise = media2advert[tp->default_port - 9];
1684 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1685 }
1686
1687 if (tp->flags & HAS_MEDIA_TABLE) {
1688 sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
1689 tulip_parse_eeprom(dev);
1690 strcpy(dev->name, "eth%d"); /* un-hack */
1691 }
1692
1693 if ((tp->flags & ALWAYS_CHECK_MII) ||
1694 (tp->mtable && tp->mtable->has_mii) ||
1695 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1696 if (tp->mtable && tp->mtable->has_mii) {
1697 for (i = 0; i < tp->mtable->leafcount; i++)
1698 if (tp->mtable->mleaf[i].media == 11) {
1699 tp->cur_index = i;
1700 tp->saved_if_port = dev->if_port;
1701 tulip_select_media(dev, 2);
1702 dev->if_port = tp->saved_if_port;
1703 break;
1704 }
1705 }
1706
1707 /* Find the connected MII xcvrs.
1708 Doing this in open() would allow detecting external xcvrs
1709	   later, but takes too much time. */
1710 tulip_find_mii (dev, board_idx);
1711 }
1712
1713 /* The Tulip-specific entries in the device structure. */
1714 dev->netdev_ops = &tulip_netdev_ops;
1715 dev->watchdog_timeo = TX_TIMEOUT;
1716#ifdef CONFIG_TULIP_NAPI
1717 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1718#endif
1719 SET_ETHTOOL_OPS(dev, &ops);
1720
1721 if (register_netdev(dev))
1722 goto err_out_free_ring;
1723
1724 pci_set_drvdata(pdev, dev);
1725
1726 dev_info(&dev->dev,
1727#ifdef CONFIG_TULIP_MMIO
1728 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1729#else
1730 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1731#endif
1732 chip_name, pdev->revision,
1733 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1734 eeprom_missing ? " EEPROM not present," : "",
1735 dev->dev_addr, irq);
1736
1737 if (tp->chip_id == PNIC2)
1738 tp->link_change = pnic2_lnk_change;
1739 else if (tp->flags & HAS_NWAY)
1740 tp->link_change = t21142_lnk_change;
1741 else if (tp->flags & HAS_PNICNWAY)
1742 tp->link_change = pnic_lnk_change;
1743
1744 /* Reset the xcvr interface and turn on heartbeat. */
1745 switch (chip_idx) {
1746 case DC21140:
1747 case DM910X:
1748 default:
1749 if (tp->mtable)
1750 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1751 break;
1752 case DC21142:
1753 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1754 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1755 iowrite32(0x0000, ioaddr + CSR13);
1756 iowrite32(0x0000, ioaddr + CSR14);
1757 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1758 } else
1759 t21142_start_nway(dev);
1760 break;
1761 case PNIC2:
1762		/* just do a reset for sanity's sake */
1763 iowrite32(0x0000, ioaddr + CSR13);
1764 iowrite32(0x0000, ioaddr + CSR14);
1765 break;
1766 case LC82C168:
1767 if ( ! tp->mii_cnt) {
1768 tp->nway = 1;
1769 tp->nwayset = 0;
1770 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1771 iowrite32(0x30, ioaddr + CSR12);
1772 iowrite32(0x0001F078, ioaddr + CSR6);
1773 iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1774 }
1775 break;
1776 case MX98713:
1777 case COMPEX9881:
1778 iowrite32(0x00000000, ioaddr + CSR6);
1779 iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1780 iowrite32(0x00000001, ioaddr + CSR13);
1781 break;
1782 case MX98715:
1783 case MX98725:
1784 iowrite32(0x01a80000, ioaddr + CSR6);
1785 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1786 iowrite32(0x00001000, ioaddr + CSR12);
1787 break;
1788 case COMET:
1789 /* No initialization necessary. */
1790 break;
1791 }
1792
1793 /* put the chip in snooze mode until opened */
1794 tulip_set_power_state (tp, 0, 1);
1795
1796 return 0;
1797
1798err_out_free_ring:
1799 pci_free_consistent (pdev,
1800 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1801 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1802 tp->rx_ring, tp->rx_ring_dma);
1803
1804err_out_mtable:
1805 kfree (tp->mtable);
1806 pci_iounmap(pdev, ioaddr);
1807
1808err_out_free_res:
1809 pci_release_regions (pdev);
1810
1811err_out_free_netdev:
1812 free_netdev (dev);
1813 return -ENODEV;
1814}
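
The Lite-On fixup near the top of this probe routine swaps each byte pair of the station address in place. As a minimal sketch of that same operation factored out (swap_mac_byte_pairs is a hypothetical helper for illustration, not part of the driver):

	/* Hypothetical helper: swap adjacent bytes of a six-byte MAC
	 * address, as the probe code does inline for Lite-On boards. */
	static void swap_mac_byte_pairs(u8 *mac)
	{
		int i;

		for (i = 0; i < 6; i += 2) {
			u8 tmp = mac[i];

			mac[i] = mac[i + 1];
			mac[i + 1] = tmp;
		}
	}
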
1815
1816
1817/* set the registers according to the given wolopts */
1818static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1819{
1820 struct net_device *dev = pci_get_drvdata(pdev);
1821 struct tulip_private *tp = netdev_priv(dev);
1822 void __iomem *ioaddr = tp->base_addr;
1823
1824 if (tp->flags & COMET_PM) {
1825
1826 unsigned int tmp;
1827
1828 tmp = ioread32(ioaddr + CSR18);
1829 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1830 tmp |= comet_csr18_pm_mode;
1831 iowrite32(tmp, ioaddr + CSR18);
1832
1833		/* Set the Wake-up Control/Status Register to the given WOL options */
1834 tmp = ioread32(ioaddr + CSR13);
1835 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1836 if (wolopts & WAKE_MAGIC)
1837 tmp |= comet_csr13_mpre;
1838 if (wolopts & WAKE_PHY)
1839 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1840 /* Clear the event flags */
1841 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1842 iowrite32(tmp, ioaddr + CSR13);
1843 }
1844}
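
tp->wolinfo.wolopts, consumed here and again in tulip_suspend() below, is normally populated through the driver's ethtool WoL handlers. From userspace, the usual way to request magic-packet wake would be something like the following (the interface name is illustrative):

	ethtool -s eth0 wol g
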
1845
1846#ifdef CONFIG_PM
1847
1848
1849static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1850{
1851 pci_power_t pstate;
1852 struct net_device *dev = pci_get_drvdata(pdev);
1853 struct tulip_private *tp = netdev_priv(dev);
1854
1855 if (!dev)
1856 return -EINVAL;
1857
1858 if (!netif_running(dev))
1859 goto save_state;
1860
1861 tulip_down(dev);
1862
1863 netif_device_detach(dev);
1864 free_irq(dev->irq, dev);
1865
1866save_state:
1867 pci_save_state(pdev);
1868 pci_disable_device(pdev);
1869 pstate = pci_choose_state(pdev, state);
1870 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1871 int rc;
1872
1873 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1874 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1875 if (rc)
1876 pr_err("pci_enable_wake failed (%d)\n", rc);
1877 }
1878 pci_set_power_state(pdev, pstate);
1879
1880 return 0;
1881}
1882
1883
1884static int tulip_resume(struct pci_dev *pdev)
1885{
1886 struct net_device *dev = pci_get_drvdata(pdev);
1887 struct tulip_private *tp = netdev_priv(dev);
1888 void __iomem *ioaddr = tp->base_addr;
1889 int retval;
1890 unsigned int tmp;
1891
1892 if (!dev)
1893 return -EINVAL;
1894
1895 pci_set_power_state(pdev, PCI_D0);
1896 pci_restore_state(pdev);
1897
1898 if (!netif_running(dev))
1899 return 0;
1900
1901 if ((retval = pci_enable_device(pdev))) {
1902 pr_err("pci_enable_device failed in resume\n");
1903 return retval;
1904 }
1905
1906 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
1907 pr_err("request_irq failed in resume\n");
1908 return retval;
1909 }
1910
1911 if (tp->flags & COMET_PM) {
1912 pci_enable_wake(pdev, PCI_D3hot, 0);
1913 pci_enable_wake(pdev, PCI_D3cold, 0);
1914
1915 /* Clear the PMES flag */
1916 tmp = ioread32(ioaddr + CSR20);
1917 tmp |= comet_csr20_pmes;
1918 iowrite32(tmp, ioaddr + CSR20);
1919
1920 /* Disable all wake-up events */
1921 tulip_set_wolopts(pdev, 0);
1922 }
1923 netif_device_attach(dev);
1924
1925 if (netif_running(dev))
1926 tulip_up(dev);
1927
1928 return 0;
1929}
1930
1931#endif /* CONFIG_PM */
1932
1933
1934static void __devexit tulip_remove_one (struct pci_dev *pdev)
1935{
1936 struct net_device *dev = pci_get_drvdata (pdev);
1937 struct tulip_private *tp;
1938
1939 if (!dev)
1940 return;
1941
1942 tp = netdev_priv(dev);
1943 unregister_netdev(dev);
1944 pci_free_consistent (pdev,
1945 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1946 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1947 tp->rx_ring, tp->rx_ring_dma);
1948 kfree (tp->mtable);
1949 pci_iounmap(pdev, tp->base_addr);
1950 free_netdev (dev);
1951 pci_release_regions (pdev);
1952 pci_set_drvdata (pdev, NULL);
1953
1954 /* pci_power_off (pdev, -1); */
1955}
1956
1957#ifdef CONFIG_NET_POLL_CONTROLLER
1958/*
1959 * Polling 'interrupt' - used by things like netconsole to send skbs
1960 * without having to re-enable interrupts. It's not called while
1961 * the interrupt routine is executing.
1962 */
1963
1964static void poll_tulip (struct net_device *dev)
1965{
1966 /* disable_irq here is not very nice, but with the lockless
1967 interrupt handler we have no other choice. */
1968 disable_irq(dev->irq);
1969 tulip_interrupt (dev->irq, dev);
1970 enable_irq(dev->irq);
1971}
1972#endif
1973
1974static struct pci_driver tulip_driver = {
1975 .name = DRV_NAME,
1976 .id_table = tulip_pci_tbl,
1977 .probe = tulip_init_one,
1978 .remove = __devexit_p(tulip_remove_one),
1979#ifdef CONFIG_PM
1980 .suspend = tulip_suspend,
1981 .resume = tulip_resume,
1982#endif /* CONFIG_PM */
1983};
1984
1985
1986static int __init tulip_init (void)
1987{
1988#ifdef MODULE
1989 pr_info("%s", version);
1990#endif
1991
1992 /* copy module parms into globals */
1993 tulip_rx_copybreak = rx_copybreak;
1994 tulip_max_interrupt_work = max_interrupt_work;
1995
1996 /* probe for and init boards */
1997 return pci_register_driver(&tulip_driver);
1998}
1999
2000
2001static void __exit tulip_cleanup (void)
2002{
2003 pci_unregister_driver (&tulip_driver);
2004}
2005
2006
2007module_init(tulip_init);
2008module_exit(tulip_cleanup);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
new file mode 100644
index 000000000000..7a44a7a6adc8
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -0,0 +1,1850 @@
1/*
2 This program is free software; you can redistribute it and/or
3 modify it under the terms of the GNU General Public License
4 as published by the Free Software Foundation; either version 2
5 of the License, or (at your option) any later version.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12
13*/
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#define DRV_NAME "uli526x"
18#define DRV_VERSION "0.9.3"
19#define DRV_RELDATE "2005-7-29"
20
21#include <linux/module.h>
22
23#include <linux/kernel.h>
24#include <linux/string.h>
25#include <linux/timer.h>
26#include <linux/errno.h>
27#include <linux/ioport.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/ethtool.h>
34#include <linux/skbuff.h>
35#include <linux/delay.h>
36#include <linux/spinlock.h>
37#include <linux/dma-mapping.h>
38#include <linux/bitops.h>
39
40#include <asm/processor.h>
41#include <asm/io.h>
42#include <asm/dma.h>
43#include <asm/uaccess.h>
44
45
46/* Board/System/Debug information/definition ---------------- */
47#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
48#define PCI_ULI5263_ID 0x526310B9 /* ULi M5263 ID*/
49
50#define ULI526X_IO_SIZE 0x100
51#define TX_DESC_CNT 0x20 /* Allocated Tx descriptors */
52#define RX_DESC_CNT 0x30 /* Allocated Rx descriptors */
53#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2) /* Max TX packet count */
54#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3) /* TX wakeup count */
55#define DESC_ALL_CNT (TX_DESC_CNT + RX_DESC_CNT)
56#define TX_BUF_ALLOC 0x600
57#define RX_ALLOC_SIZE 0x620
58#define ULI526X_RESET 1
59#define CR0_DEFAULT 0
60#define CR6_DEFAULT 0x22200000
61#define CR7_DEFAULT 0x180c1
62#define CR15_DEFAULT 0x06 /* TxJabber RxWatchdog */
63#define TDES0_ERR_MASK 0x4302 /* TXJT, LC, EC, FUE */
64#define MAX_PACKET_SIZE 1514
65#define ULI5261_MAX_MULTICAST 14
66#define RX_COPY_SIZE 100
67#define MAX_CHECK_PACKET 0x8000
68
69#define ULI526X_10MHF 0
70#define ULI526X_100MHF 1
71#define ULI526X_10MFD 4
72#define ULI526X_100MFD 5
73#define ULI526X_AUTO 8
74
75#define ULI526X_TXTH_72 0x400000 /* TX TH 72 byte */
76#define ULI526X_TXTH_96 0x404000 /* TX TH 96 byte */
77#define ULI526X_TXTH_128 0x0000 /* TX TH 128 byte */
78#define ULI526X_TXTH_256 0x4000 /* TX TH 256 byte */
79#define ULI526X_TXTH_512 0x8000 /* TX TH 512 byte */
80#define ULI526X_TXTH_1K 0xC000 /* TX TH 1K byte */
81
82#define ULI526X_TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */
  83#define ULI526X_TX_TIMEOUT ((16*HZ)/2)	/* tx packet time-out time: 8 s */
  84#define ULI526X_TX_KICK 	(4*HZ/2)	/* tx packet kick-out time: 2 s */
85
86#define ULI526X_DBUG(dbug_now, msg, value) \
87do { \
88 if (uli526x_debug || (dbug_now)) \
89 pr_err("%s %lx\n", (msg), (long) (value)); \
90} while (0)
91
92#define SHOW_MEDIA_TYPE(mode) \
93 pr_err("Change Speed to %sMhz %s duplex\n", \
94 mode & 1 ? "100" : "10", \
95 mode & 4 ? "full" : "half");
96
97
98/* CR9 definition: SROM/MII */
99#define CR9_SROM_READ 0x4800
100#define CR9_SRCS 0x1
101#define CR9_SRCLK 0x2
102#define CR9_CRDOUT 0x8
103#define SROM_DATA_0 0x0
104#define SROM_DATA_1 0x4
105#define PHY_DATA_1 0x20000
106#define PHY_DATA_0 0x00000
107#define MDCLKH 0x10000
108
109#define PHY_POWER_DOWN 0x800
110
111#define SROM_V41_CODE 0x14
112
113#define SROM_CLK_WRITE(data, ioaddr) \
114 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
115 udelay(5); \
116 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
117 udelay(5); \
118 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
119 udelay(5);
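
SROM_CLK_WRITE expands to multiple statements, so an unbraced `if (cond) SROM_CLK_WRITE(...);` would only guard the first outl(). A sketch of the conventional do { } while (0) hardening (an assumption about how one might tighten it, not a change the driver makes):

	#define SROM_CLK_WRITE(data, ioaddr)					\
	do {									\
		outl((data) | CR9_SROM_READ | CR9_SRCS, ioaddr);		\
		udelay(5);							\
		outl((data) | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, ioaddr);	\
		udelay(5);							\
		outl((data) | CR9_SROM_READ | CR9_SRCS, ioaddr);		\
		udelay(5);							\
	} while (0)
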
120
121/* Structure/enum declaration ------------------------------- */
122struct tx_desc {
123 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
124 char *tx_buf_ptr; /* Data for us */
125 struct tx_desc *next_tx_desc;
126} __attribute__(( aligned(32) ));
127
128struct rx_desc {
129 __le32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
130 struct sk_buff *rx_skb_ptr; /* Data for us */
131 struct rx_desc *next_rx_desc;
132} __attribute__(( aligned(32) ));
133
134struct uli526x_board_info {
135 u32 chip_id; /* Chip vendor/Device ID */
136 struct net_device *next_dev; /* next device */
137 struct pci_dev *pdev; /* PCI device */
138 spinlock_t lock;
139
140 long ioaddr; /* I/O base address */
141 u32 cr0_data;
142 u32 cr5_data;
143 u32 cr6_data;
144 u32 cr7_data;
145 u32 cr15_data;
146
147 /* pointer for memory physical address */
148 dma_addr_t buf_pool_dma_ptr; /* Tx buffer pool memory */
149 dma_addr_t buf_pool_dma_start; /* Tx buffer pool align dword */
150 dma_addr_t desc_pool_dma_ptr; /* descriptor pool memory */
151 dma_addr_t first_tx_desc_dma;
152 dma_addr_t first_rx_desc_dma;
153
154 /* descriptor pointer */
155 unsigned char *buf_pool_ptr; /* Tx buffer pool memory */
156 unsigned char *buf_pool_start; /* Tx buffer pool align dword */
157 unsigned char *desc_pool_ptr; /* descriptor pool memory */
158 struct tx_desc *first_tx_desc;
159 struct tx_desc *tx_insert_ptr;
160 struct tx_desc *tx_remove_ptr;
161 struct rx_desc *first_rx_desc;
162 struct rx_desc *rx_insert_ptr;
 163	struct rx_desc *rx_ready_ptr;	/* next arrived-packet pointer */
164 unsigned long tx_packet_cnt; /* transmitted packet count */
165 unsigned long rx_avail_cnt; /* available rx descriptor count */
 166	unsigned long interval_rx_cnt;	/* rx packets received per timer interval */
167
168 u16 dbug_cnt;
169 u16 NIC_capability; /* NIC media capability */
170 u16 PHY_reg4; /* Saved Phyxcer register 4 value */
171
 172	u8 media_mode;		/* user-specified media mode */
 173	u8 op_mode;		/* actual working media mode */
174 u8 phy_addr;
 175	u8 link_failed;		/* link-is-down flag */
176 u8 wait_reset; /* Hardware failed, need to reset */
177 struct timer_list timer;
178
179 /* Driver defined statistic counter */
180 unsigned long tx_fifo_underrun;
181 unsigned long tx_loss_carrier;
182 unsigned long tx_no_carrier;
183 unsigned long tx_late_collision;
184 unsigned long tx_excessive_collision;
185 unsigned long tx_jabber_timeout;
186 unsigned long reset_count;
187 unsigned long reset_cr8;
188 unsigned long reset_fatal;
189 unsigned long reset_TXtimeout;
190
191 /* NIC SROM data */
192 unsigned char srom[128];
193 u8 init;
194};
195
196enum uli526x_offsets {
197 DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
198 DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
199 DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
200 DCR15 = 0x78
201};
202
203enum uli526x_CR6_bits {
204 CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
205 CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
206 CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
207};
208
209/* Global variable declaration ----------------------------- */
210static int __devinitdata printed_version;
211static const char version[] __devinitconst =
212 "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
213
214static int uli526x_debug;
215static unsigned char uli526x_media_mode = ULI526X_AUTO;
216static u32 uli526x_cr6_user_set;
217
218/* For module input parameter */
219static int debug;
220static u32 cr6set;
221static int mode = 8;
222
223/* function declaration ------------------------------------- */
224static int uli526x_open(struct net_device *);
225static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
226 struct net_device *);
227static int uli526x_stop(struct net_device *);
228static void uli526x_set_filter_mode(struct net_device *);
229static const struct ethtool_ops netdev_ethtool_ops;
230static u16 read_srom_word(long, int);
231static irqreturn_t uli526x_interrupt(int, void *);
232#ifdef CONFIG_NET_POLL_CONTROLLER
233static void uli526x_poll(struct net_device *dev);
234#endif
235static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
236static void allocate_rx_buffer(struct uli526x_board_info *);
237static void update_cr6(u32, unsigned long);
238static void send_filter_frame(struct net_device *, int);
239static u16 phy_read(unsigned long, u8, u8, u32);
240static u16 phy_readby_cr10(unsigned long, u8, u8);
241static void phy_write(unsigned long, u8, u8, u16, u32);
242static void phy_writeby_cr10(unsigned long, u8, u8, u16);
243static void phy_write_1bit(unsigned long, u32, u32);
244static u16 phy_read_1bit(unsigned long, u32);
245static u8 uli526x_sense_speed(struct uli526x_board_info *);
246static void uli526x_process_mode(struct uli526x_board_info *);
247static void uli526x_timer(unsigned long);
248static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
249static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
250static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
251static void uli526x_dynamic_reset(struct net_device *);
252static void uli526x_free_rxbuffer(struct uli526x_board_info *);
253static void uli526x_init(struct net_device *);
254static void uli526x_set_phyxcer(struct uli526x_board_info *);
255
256/* ULI526X network board routine ---------------------------- */
257
258static const struct net_device_ops netdev_ops = {
259 .ndo_open = uli526x_open,
260 .ndo_stop = uli526x_stop,
261 .ndo_start_xmit = uli526x_start_xmit,
262 .ndo_set_rx_mode = uli526x_set_filter_mode,
263 .ndo_change_mtu = eth_change_mtu,
264 .ndo_set_mac_address = eth_mac_addr,
265 .ndo_validate_addr = eth_validate_addr,
266#ifdef CONFIG_NET_POLL_CONTROLLER
267 .ndo_poll_controller = uli526x_poll,
268#endif
269};
270
271/*
272 * Search ULI526X board, allocate space and register it
273 */
274
275static int __devinit uli526x_init_one (struct pci_dev *pdev,
276 const struct pci_device_id *ent)
277{
278 struct uli526x_board_info *db; /* board information structure */
279 struct net_device *dev;
280 int i, err;
281
282 ULI526X_DBUG(0, "uli526x_init_one()", 0);
283
284 if (!printed_version++)
285 pr_info("%s\n", version);
286
287 /* Init network device */
288 dev = alloc_etherdev(sizeof(*db));
289 if (dev == NULL)
290 return -ENOMEM;
291 SET_NETDEV_DEV(dev, &pdev->dev);
292
293 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
294 pr_warn("32-bit PCI DMA not available\n");
295 err = -ENODEV;
296 goto err_out_free;
297 }
298
299 /* Enable Master/IO access, Disable memory access */
300 err = pci_enable_device(pdev);
301 if (err)
302 goto err_out_free;
303
304 if (!pci_resource_start(pdev, 0)) {
305 pr_err("I/O base is zero\n");
306 err = -ENODEV;
307 goto err_out_disable;
308 }
309
310 if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
311 pr_err("Allocated I/O size too small\n");
312 err = -ENODEV;
313 goto err_out_disable;
314 }
315
316 if (pci_request_regions(pdev, DRV_NAME)) {
317 pr_err("Failed to request PCI regions\n");
318 err = -ENODEV;
319 goto err_out_disable;
320 }
321
322 /* Init system & device */
323 db = netdev_priv(dev);
324
325 /* Allocate Tx/Rx descriptor memory */
326 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
327 if(db->desc_pool_ptr == NULL)
328 {
329 err = -ENOMEM;
330 goto err_out_nomem;
331 }
332 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
333 if(db->buf_pool_ptr == NULL)
334 {
335 err = -ENOMEM;
336 goto err_out_nomem;
337 }
338
339 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
340 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
341 db->buf_pool_start = db->buf_pool_ptr;
342 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
343
344 db->chip_id = ent->driver_data;
345 db->ioaddr = pci_resource_start(pdev, 0);
346
347 db->pdev = pdev;
348 db->init = 1;
349
350 dev->base_addr = db->ioaddr;
351 dev->irq = pdev->irq;
352 pci_set_drvdata(pdev, dev);
353
354 /* Register some necessary functions */
355 dev->netdev_ops = &netdev_ops;
356 dev->ethtool_ops = &netdev_ethtool_ops;
357
358 spin_lock_init(&db->lock);
359
360
361 /* read 64 word srom data */
362 for (i = 0; i < 64; i++)
363 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
364
365 /* Set Node address */
366 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
367 {
368 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode
 369		outl(0x1c0, db->ioaddr + DCR13);	//Reset diagnostic pointer port
370 outl(0, db->ioaddr + DCR14); //Clear reset port
371 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer
372 outl(0, db->ioaddr + DCR14); //Clear reset port
373 outl(0, db->ioaddr + DCR13); //Clear CR13
374 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port
375 //Read MAC address from CR14
376 for (i = 0; i < 6; i++)
377 dev->dev_addr[i] = inl(db->ioaddr + DCR14);
378 //Read end
379 outl(0, db->ioaddr + DCR13); //Clear CR13
380 outl(0, db->ioaddr + DCR0); //Clear CR0
381 udelay(10);
382 }
 383	else		/* SROM exists */
384 {
385 for (i = 0; i < 6; i++)
386 dev->dev_addr[i] = db->srom[20 + i];
387 }
388 err = register_netdev (dev);
389 if (err)
390 goto err_out_res;
391
392 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
393 ent->driver_data >> 16, pci_name(pdev),
394 dev->dev_addr, dev->irq);
395
396 pci_set_master(pdev);
397
398 return 0;
399
400err_out_res:
401 pci_release_regions(pdev);
402err_out_nomem:
403 if(db->desc_pool_ptr)
404 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
405 db->desc_pool_ptr, db->desc_pool_dma_ptr);
406
407 if(db->buf_pool_ptr != NULL)
408 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
409 db->buf_pool_ptr, db->buf_pool_dma_ptr);
410err_out_disable:
411 pci_disable_device(pdev);
412err_out_free:
413 pci_set_drvdata(pdev, NULL);
414 free_netdev(dev);
415
416 return err;
417}
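
For reference, the two DMA pools sized above work out as follows: DESC_ALL_CNT = 0x20 + 0x30 = 0x50 descriptors of 32 bytes each, i.e. 0xA00 bytes, plus 0x20 bytes of slack, and TX_BUF_ALLOC * TX_DESC_CNT = 0x600 * 0x20 = 0xC000 bytes plus 4 bytes of slack. The slack leaves room to round the pool start up to the alignment the struct comments mention; this probe assigns the start pointers directly, relying on pci_alloc_consistent() returning suitably aligned memory, so under that assumption a minimal sketch of the alignment step the slack would permit is:

	/* Illustrative only: round a pool pointer up to a 32-byte
	 * boundary, which the extra 0x20 bytes of slack would allow. */
	static void *align32(void *ptr)
	{
		return (void *)(((unsigned long)ptr + 31) & ~31UL);
	}
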
418
419
420static void __devexit uli526x_remove_one (struct pci_dev *pdev)
421{
422 struct net_device *dev = pci_get_drvdata(pdev);
423 struct uli526x_board_info *db = netdev_priv(dev);
424
425 ULI526X_DBUG(0, "uli526x_remove_one()", 0);
426
427 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
428 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
429 db->desc_pool_dma_ptr);
430 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
431 db->buf_pool_ptr, db->buf_pool_dma_ptr);
432 unregister_netdev(dev);
433 pci_release_regions(pdev);
434 free_netdev(dev); /* free board information */
435 pci_set_drvdata(pdev, NULL);
436 pci_disable_device(pdev);
437 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
438}
439
440
441/*
442 * Open the interface.
443 * The interface is opened whenever "ifconfig" activates it.
444 */
445
446static int uli526x_open(struct net_device *dev)
447{
448 int ret;
449 struct uli526x_board_info *db = netdev_priv(dev);
450
451 ULI526X_DBUG(0, "uli526x_open", 0);
452
453 /* system variable init */
454 db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
455 db->tx_packet_cnt = 0;
456 db->rx_avail_cnt = 0;
457 db->link_failed = 1;
458 netif_carrier_off(dev);
459 db->wait_reset = 0;
460
461 db->NIC_capability = 0xf; /* All capability*/
462 db->PHY_reg4 = 0x1e0;
463
464 /* CR6 operation mode decision */
465 db->cr6_data |= ULI526X_TXTH_256;
466 db->cr0_data = CR0_DEFAULT;
467
468 /* Initialize ULI526X board */
469 uli526x_init(dev);
470
471 ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev);
472 if (ret)
473 return ret;
474
 475	/* Activate the system interface */
476 netif_wake_queue(dev);
477
 478	/* set and activate a timer process */
479 init_timer(&db->timer);
480 db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
481 db->timer.data = (unsigned long)dev;
482 db->timer.function = uli526x_timer;
483 add_timer(&db->timer);
484
485 return 0;
486}
487
488
489/* Initialize ULI526X board
490 * Reset ULI526X board
491 * Initialize TX/Rx descriptor chain structure
492 * Send the set-up frame
493 * Enable Tx/Rx machine
494 */
495
496static void uli526x_init(struct net_device *dev)
497{
498 struct uli526x_board_info *db = netdev_priv(dev);
499 unsigned long ioaddr = db->ioaddr;
500 u8 phy_tmp;
501 u8 timeout;
502 u16 phy_value;
503 u16 phy_reg_reset;
504
505
506 ULI526X_DBUG(0, "uli526x_init()", 0);
507
508 /* Reset M526x MAC controller */
509 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */
510 udelay(100);
511 outl(db->cr0_data, ioaddr + DCR0);
512 udelay(5);
513
 514	/* Phy addr: on some boards the M5261/M5263 phy address != 1 */
515 db->phy_addr = 1;
516 for(phy_tmp=0;phy_tmp<32;phy_tmp++)
517 {
518 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
519 if(phy_value != 0xffff&&phy_value!=0)
520 {
521 db->phy_addr = phy_tmp;
522 break;
523 }
524 }
525 if(phy_tmp == 32)
526 pr_warn("Can not find the phy address!!!\n");
527 /* Parser SROM and media mode */
528 db->media_mode = uli526x_media_mode;
529
530 /* phyxcer capability setting */
531 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
532 phy_reg_reset = (phy_reg_reset | 0x8000);
533 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
534
535 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
536 * functions") or phy data sheet for details on phy reset
537 */
538 udelay(500);
539 timeout = 10;
540 while (timeout-- &&
541 phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000)
542 udelay(100);
543
544 /* Process Phyxcer Media Mode */
545 uli526x_set_phyxcer(db);
546
547 /* Media Mode Process */
548 if ( !(db->media_mode & ULI526X_AUTO) )
549 db->op_mode = db->media_mode; /* Force Mode */
550
 551	/* Initialize Transmit/Receive descriptors and CR3/4 */
552 uli526x_descriptor_init(db, ioaddr);
553
554 /* Init CR6 to program M526X operation */
555 update_cr6(db->cr6_data, ioaddr);
556
557 /* Send setup frame */
558 send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
559
560 /* Init CR7, interrupt active bit */
561 db->cr7_data = CR7_DEFAULT;
562 outl(db->cr7_data, ioaddr + DCR7);
563
564 /* Init CR15, Tx jabber and Rx watchdog timer */
565 outl(db->cr15_data, ioaddr + DCR15);
566
567 /* Enable ULI526X Tx/Rx function */
568 db->cr6_data |= CR6_RXSC | CR6_TXSC;
569 update_cr6(db->cr6_data, ioaddr);
570}
571
572
573/*
574 * Hardware start transmission.
575 * Send a packet to media from the upper layer.
576 */
577
578static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
579 struct net_device *dev)
580{
581 struct uli526x_board_info *db = netdev_priv(dev);
582 struct tx_desc *txptr;
583 unsigned long flags;
584
585 ULI526X_DBUG(0, "uli526x_start_xmit", 0);
586
587 /* Resource flag check */
588 netif_stop_queue(dev);
589
590 /* Too large packet check */
591 if (skb->len > MAX_PACKET_SIZE) {
592 netdev_err(dev, "big packet = %d\n", (u16)skb->len);
593 dev_kfree_skb(skb);
594 return NETDEV_TX_OK;
595 }
596
597 spin_lock_irqsave(&db->lock, flags);
598
 599	/* No Tx resource check; this never happens normally */
600 if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
601 spin_unlock_irqrestore(&db->lock, flags);
602 netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
603 return NETDEV_TX_BUSY;
604 }
605
606 /* Disable NIC interrupt */
607 outl(0, dev->base_addr + DCR7);
608
609 /* transmit this packet */
610 txptr = db->tx_insert_ptr;
611 skb_copy_from_linear_data(skb, txptr->tx_buf_ptr, skb->len);
612 txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
613
614 /* Point to next transmit free descriptor */
615 db->tx_insert_ptr = txptr->next_tx_desc;
616
617 /* Transmit Packet Process */
618 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
619 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
620 db->tx_packet_cnt++; /* Ready to send */
621 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
622 dev->trans_start = jiffies; /* saved time stamp */
623 }
624
625 /* Tx resource check */
626 if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
627 netif_wake_queue(dev);
628
629 /* Restore CR7 to enable interrupt */
630 spin_unlock_irqrestore(&db->lock, flags);
631 outl(db->cr7_data, dev->base_addr + DCR7);
632
633 /* free this SKB */
634 dev_kfree_skb(skb);
635
636 return NETDEV_TX_OK;
637}
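
Note that this transmit path always copies the skb into a pre-allocated bounce buffer rather than DMA-mapping the skb itself. The arithmetic holds together because TX_BUF_ALLOC (0x600 = 1536 bytes) comfortably holds MAX_PACKET_SIZE (1514 bytes), which the oversize check at the top of the function enforces.
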
638
639
640/*
641 * Stop the interface.
 642 * The interface is stopped when it is brought down.
643 */
644
645static int uli526x_stop(struct net_device *dev)
646{
647 struct uli526x_board_info *db = netdev_priv(dev);
648 unsigned long ioaddr = dev->base_addr;
649
650 ULI526X_DBUG(0, "uli526x_stop", 0);
651
652 /* disable system */
653 netif_stop_queue(dev);
654
655 /* deleted timer */
656 del_timer_sync(&db->timer);
657
658 /* Reset & stop ULI526X board */
659 outl(ULI526X_RESET, ioaddr + DCR0);
660 udelay(5);
661 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
662
663 /* free interrupt */
664 free_irq(dev->irq, dev);
665
666 /* free allocated rx buffer */
667 uli526x_free_rxbuffer(db);
668
669 return 0;
670}
671
672
673/*
 674 *	M5261/M5263 interrupt handler
675 * receive the packet to upper layer, free the transmitted packet
676 */
677
678static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
679{
680 struct net_device *dev = dev_id;
681 struct uli526x_board_info *db = netdev_priv(dev);
682 unsigned long ioaddr = dev->base_addr;
683 unsigned long flags;
684
685 spin_lock_irqsave(&db->lock, flags);
686 outl(0, ioaddr + DCR7);
687
688 /* Got ULI526X status */
689 db->cr5_data = inl(ioaddr + DCR5);
690 outl(db->cr5_data, ioaddr + DCR5);
691 if ( !(db->cr5_data & 0x180c1) ) {
692 /* Restore CR7 to enable interrupt mask */
693 outl(db->cr7_data, ioaddr + DCR7);
694 spin_unlock_irqrestore(&db->lock, flags);
695 return IRQ_HANDLED;
696 }
697
698 /* Check system status */
699 if (db->cr5_data & 0x2000) {
 700		/* system bus error happened */
 701		ULI526X_DBUG(1, "System bus error. CR5=", db->cr5_data);
702 db->reset_fatal++;
703 db->wait_reset = 1; /* Need to RESET */
704 spin_unlock_irqrestore(&db->lock, flags);
705 return IRQ_HANDLED;
706 }
707
708 /* Received the coming packet */
709 if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
710 uli526x_rx_packet(dev, db);
711
712 /* reallocate rx descriptor buffer */
713 if (db->rx_avail_cnt<RX_DESC_CNT)
714 allocate_rx_buffer(db);
715
716 /* Free the transmitted descriptor */
717 if ( db->cr5_data & 0x01)
718 uli526x_free_tx_pkt(dev, db);
719
720 /* Restore CR7 to enable interrupt mask */
721 outl(db->cr7_data, ioaddr + DCR7);
722
723 spin_unlock_irqrestore(&db->lock, flags);
724 return IRQ_HANDLED;
725}
726
727#ifdef CONFIG_NET_POLL_CONTROLLER
728static void uli526x_poll(struct net_device *dev)
729{
730 /* ISR grabs the irqsave lock, so this should be safe */
731 uli526x_interrupt(dev->irq, dev);
732}
733#endif
734
735/*
736 * Free TX resource after TX complete
737 */
738
739static void uli526x_free_tx_pkt(struct net_device *dev,
740 struct uli526x_board_info * db)
741{
742 struct tx_desc *txptr;
743 u32 tdes0;
744
745 txptr = db->tx_remove_ptr;
746 while(db->tx_packet_cnt) {
747 tdes0 = le32_to_cpu(txptr->tdes0);
748 if (tdes0 & 0x80000000)
749 break;
750
 751		/* A packet transmission completed */
752 db->tx_packet_cnt--;
753 dev->stats.tx_packets++;
754
755 /* Transmit statistic counter */
756 if ( tdes0 != 0x7fffffff ) {
757 dev->stats.collisions += (tdes0 >> 3) & 0xf;
758 dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
759 if (tdes0 & TDES0_ERR_MASK) {
760 dev->stats.tx_errors++;
761 if (tdes0 & 0x0002) { /* UnderRun */
762 db->tx_fifo_underrun++;
763 if ( !(db->cr6_data & CR6_SFT) ) {
764 db->cr6_data = db->cr6_data | CR6_SFT;
765 update_cr6(db->cr6_data, db->ioaddr);
766 }
767 }
768 if (tdes0 & 0x0100)
769 db->tx_excessive_collision++;
770 if (tdes0 & 0x0200)
771 db->tx_late_collision++;
772 if (tdes0 & 0x0400)
773 db->tx_no_carrier++;
774 if (tdes0 & 0x0800)
775 db->tx_loss_carrier++;
776 if (tdes0 & 0x4000)
777 db->tx_jabber_timeout++;
778 }
779 }
780
781 txptr = txptr->next_tx_desc;
782 }/* End of while */
783
784 /* Update TX remove pointer to next */
785 db->tx_remove_ptr = txptr;
786
787 /* Resource available check */
788 if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
789 netif_wake_queue(dev); /* Active upper layer, send again */
790}
791
792
793/*
 794 *	Receive incoming packets and pass them to the upper layer
795 */
796
797static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
798{
799 struct rx_desc *rxptr;
800 struct sk_buff *skb;
801 int rxlen;
802 u32 rdes0;
803
804 rxptr = db->rx_ready_ptr;
805
806 while(db->rx_avail_cnt) {
807 rdes0 = le32_to_cpu(rxptr->rdes0);
808 if (rdes0 & 0x80000000) /* packet owner check */
809 {
810 break;
811 }
812
813 db->rx_avail_cnt--;
814 db->interval_rx_cnt++;
815
816 pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
817 if ( (rdes0 & 0x300) != 0x300) {
818 /* A packet without First/Last flag */
819 /* reuse this SKB */
820 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
821 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
822 } else {
823 /* A packet with First/Last flag */
824 rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
825
826 /* error summary bit check */
827 if (rdes0 & 0x8000) {
 828			/* This is an error packet */
829 dev->stats.rx_errors++;
830 if (rdes0 & 1)
831 dev->stats.rx_fifo_errors++;
832 if (rdes0 & 2)
833 dev->stats.rx_crc_errors++;
834 if (rdes0 & 0x80)
835 dev->stats.rx_length_errors++;
836 }
837
838 if ( !(rdes0 & 0x8000) ||
839 ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
840 struct sk_buff *new_skb = NULL;
841
842 skb = rxptr->rx_skb_ptr;
843
844 /* Good packet, send to upper layer */
 845				/* Short packets are copied into a new SKB */
846 if ((rxlen < RX_COPY_SIZE) &&
847 (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
848 skb = new_skb;
849 /* size less than COPY_SIZE, allocate a rxlen SKB */
 850					skb_reserve(skb, 2); /* 16-byte align */
851 memcpy(skb_put(skb, rxlen),
852 skb_tail_pointer(rxptr->rx_skb_ptr),
853 rxlen);
854 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
855 } else
856 skb_put(skb, rxlen);
857
858 skb->protocol = eth_type_trans(skb, dev);
859 netif_rx(skb);
860 dev->stats.rx_packets++;
861 dev->stats.rx_bytes += rxlen;
862
863 } else {
864 /* Reuse SKB buffer when the packet is error */
865 ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
866 uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
867 }
868 }
869
870 rxptr = rxptr->next_rx_desc;
871 }
872
873 db->rx_ready_ptr = rxptr;
874}
875
876
877/*
878 * Set ULI526X multicast address
879 */
880
881static void uli526x_set_filter_mode(struct net_device * dev)
882{
883 struct uli526x_board_info *db = netdev_priv(dev);
884 unsigned long flags;
885
886 ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
887 spin_lock_irqsave(&db->lock, flags);
888
889 if (dev->flags & IFF_PROMISC) {
890 ULI526X_DBUG(0, "Enable PROM Mode", 0);
891 db->cr6_data |= CR6_PM | CR6_PBF;
892 update_cr6(db->cr6_data, db->ioaddr);
893 spin_unlock_irqrestore(&db->lock, flags);
894 return;
895 }
896
897 if (dev->flags & IFF_ALLMULTI ||
898 netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
899 ULI526X_DBUG(0, "Pass all multicast address",
900 netdev_mc_count(dev));
901 db->cr6_data &= ~(CR6_PM | CR6_PBF);
902 db->cr6_data |= CR6_PAM;
903 spin_unlock_irqrestore(&db->lock, flags);
904 return;
905 }
906
907 ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
908 send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
909 spin_unlock_irqrestore(&db->lock, flags);
910}
911
912static void
913ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
914{
915 ecmd->supported = (SUPPORTED_10baseT_Half |
916 SUPPORTED_10baseT_Full |
917 SUPPORTED_100baseT_Half |
918 SUPPORTED_100baseT_Full |
919 SUPPORTED_Autoneg |
920 SUPPORTED_MII);
921
922 ecmd->advertising = (ADVERTISED_10baseT_Half |
923 ADVERTISED_10baseT_Full |
924 ADVERTISED_100baseT_Half |
925 ADVERTISED_100baseT_Full |
926 ADVERTISED_Autoneg |
927 ADVERTISED_MII);
928
929
930 ecmd->port = PORT_MII;
931 ecmd->phy_address = db->phy_addr;
932
933 ecmd->transceiver = XCVR_EXTERNAL;
934
935 ethtool_cmd_speed_set(ecmd, SPEED_10);
936 ecmd->duplex = DUPLEX_HALF;
937
938 if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
939 {
940 ethtool_cmd_speed_set(ecmd, SPEED_100);
941 }
942 if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
943 {
944 ecmd->duplex = DUPLEX_FULL;
945 }
946 if(db->link_failed)
947 {
948 ethtool_cmd_speed_set(ecmd, -1);
949 ecmd->duplex = -1;
950 }
951
952 if (db->media_mode & ULI526X_AUTO)
953 {
954 ecmd->autoneg = AUTONEG_ENABLE;
955 }
956}
957
958static void netdev_get_drvinfo(struct net_device *dev,
959 struct ethtool_drvinfo *info)
960{
961 struct uli526x_board_info *np = netdev_priv(dev);
962
963 strcpy(info->driver, DRV_NAME);
964 strcpy(info->version, DRV_VERSION);
965 if (np->pdev)
966 strcpy(info->bus_info, pci_name(np->pdev));
967 else
968 sprintf(info->bus_info, "EISA 0x%lx %d",
969 dev->base_addr, dev->irq);
970}
971
972static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
973 struct uli526x_board_info *np = netdev_priv(dev);
974
975 ULi_ethtool_gset(np, cmd);
976
977 return 0;
978}
979
980static u32 netdev_get_link(struct net_device *dev) {
981 struct uli526x_board_info *np = netdev_priv(dev);
982
983 if(np->link_failed)
984 return 0;
985 else
986 return 1;
987}
988
989static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
990{
991 wol->supported = WAKE_PHY | WAKE_MAGIC;
992 wol->wolopts = 0;
993}
994
995static const struct ethtool_ops netdev_ethtool_ops = {
996 .get_drvinfo = netdev_get_drvinfo,
997 .get_settings = netdev_get_settings,
998 .get_link = netdev_get_link,
999 .get_wol = uli526x_get_wol,
1000};
1001
1002/*
1003 * A periodic timer routine
1004 * Dynamic media sense, allocate Rx buffer...
1005 */
1006
1007static void uli526x_timer(unsigned long data)
1008{
1009 u32 tmp_cr8;
1010 unsigned char tmp_cr12=0;
1011 struct net_device *dev = (struct net_device *) data;
1012 struct uli526x_board_info *db = netdev_priv(dev);
1013 unsigned long flags;
1014
1015 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1016 spin_lock_irqsave(&db->lock, flags);
1017
1018
1019 /* Dynamic reset ULI526X : system error or transmit time-out */
1020 tmp_cr8 = inl(db->ioaddr + DCR8);
1021 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1022 db->reset_cr8++;
1023 db->wait_reset = 1;
1024 }
1025 db->interval_rx_cnt = 0;
1026
1027 /* TX polling kick monitor */
1028 if ( db->tx_packet_cnt &&
1029 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
1030 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1031
1032 // TX Timeout
1033 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
1034 db->reset_TXtimeout++;
1035 db->wait_reset = 1;
1036 netdev_err(dev, " Tx timeout - resetting\n");
1037 }
1038 }
1039
1040 if (db->wait_reset) {
1041 ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
1042 db->reset_count++;
1043 uli526x_dynamic_reset(dev);
1044 db->timer.expires = ULI526X_TIMER_WUT;
1045 add_timer(&db->timer);
1046 spin_unlock_irqrestore(&db->lock, flags);
1047 return;
1048 }
1049
1050 /* Link status check, Dynamic media type change */
1051 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
1052 tmp_cr12 = 3;
1053
1054 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
1055 /* Link Failed */
1056 ULI526X_DBUG(0, "Link Failed", tmp_cr12);
1057 netif_carrier_off(dev);
1058 netdev_info(dev, "NIC Link is Down\n");
1059 db->link_failed = 1;
1060
1061		/* For forced 10/100M half/full modes, re-enable auto-negotiation */
1062		/* (AUTO mode does not need this) */
1063 if ( !(db->media_mode & 0x8) )
1064 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1065
1066		/* AUTO mode: fall back to half duplex while the link is down */
1067 if (db->media_mode & ULI526X_AUTO) {
1068 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1069 update_cr6(db->cr6_data, db->ioaddr);
1070 }
1071 } else
1072 if ((tmp_cr12 & 0x3) && db->link_failed) {
1073			ULI526X_DBUG(0, "Link OK", tmp_cr12);
1074 db->link_failed = 0;
1075
1076 /* Auto Sense Speed */
1077 if ( (db->media_mode & ULI526X_AUTO) &&
1078 uli526x_sense_speed(db) )
1079 db->link_failed = 1;
1080 uli526x_process_mode(db);
1081
1082 if(db->link_failed==0)
1083 {
1084 netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
1085 (db->op_mode == ULI526X_100MHF ||
1086 db->op_mode == ULI526X_100MFD)
1087 ? 100 : 10,
1088 (db->op_mode == ULI526X_10MFD ||
1089 db->op_mode == ULI526X_100MFD)
1090 ? "Full" : "Half");
1091 netif_carrier_on(dev);
1092 }
1093 /* SHOW_MEDIA_TYPE(db->op_mode); */
1094 }
1095 else if(!(tmp_cr12 & 0x3) && db->link_failed)
1096 {
1097 if(db->init==1)
1098 {
1099 netdev_info(dev, "NIC Link is Down\n");
1100 netif_carrier_off(dev);
1101 }
1102 }
1103 db->init=0;
1104
1105 /* Timer active again */
1106 db->timer.expires = ULI526X_TIMER_WUT;
1107 add_timer(&db->timer);
1108 spin_unlock_irqrestore(&db->lock, flags);
1109}
1110
1111
1112/*
1113 * Stop ULI526X board
1114 * Free Tx/Rx allocated memory
1115 * Init system variable
1116 */
1117
1118static void uli526x_reset_prepare(struct net_device *dev)
1119{
1120 struct uli526x_board_info *db = netdev_priv(dev);
1121
1122	/* Stop MAC controller */
1123 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1124 update_cr6(db->cr6_data, dev->base_addr);
1125 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */
1126 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
1127
1128 /* Disable upper layer interface */
1129 netif_stop_queue(dev);
1130
1131 /* Free Rx Allocate buffer */
1132 uli526x_free_rxbuffer(db);
1133
1134 /* system variable init */
1135 db->tx_packet_cnt = 0;
1136 db->rx_avail_cnt = 0;
1137 db->link_failed = 1;
1138 db->init=1;
1139 db->wait_reset = 0;
1140}
1141
1142
1143/*
1144 * Dynamic reset the ULI526X board
1145 * Stop ULI526X board
1146 * Free Tx/Rx allocated memory
1147 * Reset ULI526X board
1148 * Re-initialize ULI526X board
1149 */
1150
1151static void uli526x_dynamic_reset(struct net_device *dev)
1152{
1153 ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
1154
1155 uli526x_reset_prepare(dev);
1156
1157 /* Re-initialize ULI526X board */
1158 uli526x_init(dev);
1159
1160 /* Restart upper layer interface */
1161 netif_wake_queue(dev);
1162}
1163
1164
1165#ifdef CONFIG_PM
1166
1167/*
1168 * Suspend the interface.
1169 */
1170
1171static int uli526x_suspend(struct pci_dev *pdev, pm_message_t state)
1172{
1173 struct net_device *dev = pci_get_drvdata(pdev);
1174 pci_power_t power_state;
1175 int err;
1176
1177 ULI526X_DBUG(0, "uli526x_suspend", 0);
1178
1179 if (!netdev_priv(dev))
1180 return 0;
1181
1182 pci_save_state(pdev);
1183
1184 if (!netif_running(dev))
1185 return 0;
1186
1187 netif_device_detach(dev);
1188 uli526x_reset_prepare(dev);
1189
1190 power_state = pci_choose_state(pdev, state);
1191 pci_enable_wake(pdev, power_state, 0);
1192 err = pci_set_power_state(pdev, power_state);
1193 if (err) {
1194 netif_device_attach(dev);
1195 /* Re-initialize ULI526X board */
1196 uli526x_init(dev);
1197 /* Restart upper layer interface */
1198 netif_wake_queue(dev);
1199 }
1200
1201 return err;
1202}
1203
1204/*
1205 * Resume the interface.
1206 */
1207
1208static int uli526x_resume(struct pci_dev *pdev)
1209{
1210 struct net_device *dev = pci_get_drvdata(pdev);
1211 int err;
1212
1213 ULI526X_DBUG(0, "uli526x_resume", 0);
1214
1215 if (!netdev_priv(dev))
1216 return 0;
1217
1218 pci_restore_state(pdev);
1219
1220 if (!netif_running(dev))
1221 return 0;
1222
1223 err = pci_set_power_state(pdev, PCI_D0);
1224 if (err) {
1225 netdev_warn(dev, "Could not put device into D0\n");
1226 return err;
1227 }
1228
1229 netif_device_attach(dev);
1230 /* Re-initialize ULI526X board */
1231 uli526x_init(dev);
1232 /* Restart upper layer interface */
1233 netif_wake_queue(dev);
1234
1235 return 0;
1236}
1237
1238#else /* !CONFIG_PM */
1239
1240#define uli526x_suspend NULL
1241#define uli526x_resume NULL
1242
1243#endif /* !CONFIG_PM */
1244
1245
1246/*
1247 * free all allocated rx buffer
1248 */
1249
1250static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
1251{
1252 ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
1253
1254 /* free allocated rx buffer */
1255 while (db->rx_avail_cnt) {
1256 dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
1257 db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
1258 db->rx_avail_cnt--;
1259 }
1260}
1261
1262
1263/*
1264 * Reuse the SK buffer
1265 */
1266
1267static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
1268{
1269 struct rx_desc *rxptr = db->rx_insert_ptr;
1270
1271 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1272 rxptr->rx_skb_ptr = skb;
1273 rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
1274 skb_tail_pointer(skb),
1275 RX_ALLOC_SIZE,
1276 PCI_DMA_FROMDEVICE));
1277 wmb();
1278 rxptr->rdes0 = cpu_to_le32(0x80000000);
1279 db->rx_avail_cnt++;
1280 db->rx_insert_ptr = rxptr->next_rx_desc;
1281 } else
1282 ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
1283}
1284
1285
1286/*
1287 *	Initialize Transmit/Receive descriptors
1288 *	using a chain structure, and allocate Tx/Rx buffers
1289 */
1290
1291static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
1292{
1293 struct tx_desc *tmp_tx;
1294 struct rx_desc *tmp_rx;
1295 unsigned char *tmp_buf;
1296 dma_addr_t tmp_tx_dma, tmp_rx_dma;
1297 dma_addr_t tmp_buf_dma;
1298 int i;
1299
1300 ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
1301
1302 /* tx descriptor start pointer */
1303 db->tx_insert_ptr = db->first_tx_desc;
1304 db->tx_remove_ptr = db->first_tx_desc;
1305 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */
1306
1307 /* rx descriptor start pointer */
1308 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1309 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1310 db->rx_insert_ptr = db->first_rx_desc;
1311 db->rx_ready_ptr = db->first_rx_desc;
1312 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */
1313
1314 /* Init Transmit chain */
1315 tmp_buf = db->buf_pool_start;
1316 tmp_buf_dma = db->buf_pool_dma_start;
1317 tmp_tx_dma = db->first_tx_desc_dma;
1318 for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
1319 tmp_tx->tx_buf_ptr = tmp_buf;
1320 tmp_tx->tdes0 = cpu_to_le32(0);
1321 tmp_tx->tdes1 = cpu_to_le32(0x81000000); /* IC, chain */
1322 tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
1323 tmp_tx_dma += sizeof(struct tx_desc);
1324 tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
1325 tmp_tx->next_tx_desc = tmp_tx + 1;
1326 tmp_buf = tmp_buf + TX_BUF_ALLOC;
1327 tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
1328 }
1329 (--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
1330 tmp_tx->next_tx_desc = db->first_tx_desc;
1331
1332 /* Init Receive descriptor chain */
1333 tmp_rx_dma=db->first_rx_desc_dma;
1334 for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
1335 tmp_rx->rdes0 = cpu_to_le32(0);
1336 tmp_rx->rdes1 = cpu_to_le32(0x01000600);
1337 tmp_rx_dma += sizeof(struct rx_desc);
1338 tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
1339 tmp_rx->next_rx_desc = tmp_rx + 1;
1340 }
1341 (--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
1342 tmp_rx->next_rx_desc = db->first_rx_desc;
1343
1344 /* pre-allocate Rx buffer */
1345 allocate_rx_buffer(db);
1346}
1347
1348
1349/*
1350 * Update CR6 value
1351 *	Write the new value to CR6 and wait for it to take effect
1352 */
1353
1354static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1355{
1356
1357 outl(cr6_data, ioaddr + DCR6);
1358 udelay(5);
1359}
1360
1361
1362/*
1363 * Send a setup frame for M5261/M5263
1364 *	This setup frame initializes the ULI526X address filter
1365 */
1366
1367#ifdef __BIG_ENDIAN
1368#define FLT_SHIFT 16
1369#else
1370#define FLT_SHIFT 0
1371#endif
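
FLT_SHIFT exists because the chip reads each 16-bit address chunk from the low half of a little-endian 32-bit setup-frame word; on a big-endian CPU the chunk must be shifted into the upper half so it lands in the same byte lanes. As a worked example, the chunk 0x1234 is stored as 0x00001234 on a little-endian host and as 0x12340000 on a big-endian one.
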
1372
1373static void send_filter_frame(struct net_device *dev, int mc_cnt)
1374{
1375 struct uli526x_board_info *db = netdev_priv(dev);
1376 struct netdev_hw_addr *ha;
1377 struct tx_desc *txptr;
1378 u16 * addrptr;
1379 u32 * suptr;
1380 int i;
1381
1382 ULI526X_DBUG(0, "send_filter_frame()", 0);
1383
1384 txptr = db->tx_insert_ptr;
1385 suptr = (u32 *) txptr->tx_buf_ptr;
1386
1387 /* Node address */
1388 addrptr = (u16 *) dev->dev_addr;
1389 *suptr++ = addrptr[0] << FLT_SHIFT;
1390 *suptr++ = addrptr[1] << FLT_SHIFT;
1391 *suptr++ = addrptr[2] << FLT_SHIFT;
1392
1393 /* broadcast address */
1394 *suptr++ = 0xffff << FLT_SHIFT;
1395 *suptr++ = 0xffff << FLT_SHIFT;
1396 *suptr++ = 0xffff << FLT_SHIFT;
1397
1398 /* fit the multicast address */
1399 netdev_for_each_mc_addr(ha, dev) {
1400 addrptr = (u16 *) ha->addr;
1401 *suptr++ = addrptr[0] << FLT_SHIFT;
1402 *suptr++ = addrptr[1] << FLT_SHIFT;
1403 *suptr++ = addrptr[2] << FLT_SHIFT;
1404 }
1405
1406 for (i = netdev_mc_count(dev); i < 14; i++) {
1407 *suptr++ = 0xffff << FLT_SHIFT;
1408 *suptr++ = 0xffff << FLT_SHIFT;
1409 *suptr++ = 0xffff << FLT_SHIFT;
1410 }
1411
1412 /* prepare the setup frame */
1413 db->tx_insert_ptr = txptr->next_tx_desc;
1414 txptr->tdes1 = cpu_to_le32(0x890000c0);
1415
1416 /* Resource Check and Send the setup packet */
1417 if (db->tx_packet_cnt < TX_DESC_CNT) {
1418		/* Resource available */
1419 db->tx_packet_cnt++;
1420 txptr->tdes0 = cpu_to_le32(0x80000000);
1421 update_cr6(db->cr6_data | 0x2000, dev->base_addr);
1422 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */
1423 update_cr6(db->cr6_data, dev->base_addr);
1424 dev->trans_start = jiffies;
1425 } else
1426 netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
1427}
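
A quick consistency check on the frame just built: the perfect filter holds 16 entries (node address, broadcast, up to 14 multicast slots padded with broadcast) of three 32-bit words each, i.e. 16 * 3 * 4 = 192 = 0xc0 bytes, which matches the low byte of the 0x890000c0 written to tdes1. Reading the remaining bits as setup-packet and chain flags is an interpretation on my part, not something stated in this file.
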
1428
1429
1430/*
1431 *	Allocate rx buffers,
1432 *	as many as possible up to the Rx descriptor count
1433 */
1434
1435static void allocate_rx_buffer(struct uli526x_board_info *db)
1436{
1437 struct rx_desc *rxptr;
1438 struct sk_buff *skb;
1439
1440 rxptr = db->rx_insert_ptr;
1441
1442 while(db->rx_avail_cnt < RX_DESC_CNT) {
1443 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1444 break;
1445 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1446 rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
1447 skb_tail_pointer(skb),
1448 RX_ALLOC_SIZE,
1449 PCI_DMA_FROMDEVICE));
1450 wmb();
1451 rxptr->rdes0 = cpu_to_le32(0x80000000);
1452 rxptr = rxptr->next_rx_desc;
1453 db->rx_avail_cnt++;
1454 }
1455
1456 db->rx_insert_ptr = rxptr;
1457}
1458
1459
1460/*
1461 * Read one word data from the serial ROM
1462 */
1463
1464static u16 read_srom_word(long ioaddr, int offset)
1465{
1466 int i;
1467 u16 srom_data = 0;
1468 long cr9_ioaddr = ioaddr + DCR9;
1469
1470 outl(CR9_SROM_READ, cr9_ioaddr);
1471 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1472
1473 /* Send the Read Command 110b */
1474 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1475 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
1476 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
1477
1478 /* Send the offset */
1479 for (i = 5; i >= 0; i--) {
1480 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1481 SROM_CLK_WRITE(srom_data, cr9_ioaddr);
1482 }
1483
1484 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1485
1486 for (i = 16; i > 0; i--) {
1487 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
1488 udelay(5);
1489 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
1490 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
1491 udelay(5);
1492 }
1493
1494 outl(CR9_SROM_READ, cr9_ioaddr);
1495 return srom_data;
1496}
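
read_srom_word() bit-bangs the familiar three-wire serial EEPROM protocol: a start/read preamble ("110"), a 6-bit word offset sent MSB first, then 16 data bits clocked back in. As a minimal usage sketch matching how the probe routine pulls the station address from SROM bytes 20..25 (get_srom_mac is a hypothetical helper, not part of the driver):

	/* Hypothetical helper: fetch the 6-byte MAC that the probe
	 * code reads from srom[20..25] after caching 64 SROM words. */
	static void get_srom_mac(long ioaddr, u8 *mac)
	{
		int i;

		for (i = 0; i < 3; i++) {
			u16 w = read_srom_word(ioaddr, 10 + i);

			mac[2 * i] = w & 0xff;	/* words are little-endian */
			mac[2 * i + 1] = w >> 8;
		}
	}
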
1497
1498
1499/*
1500 * Auto sense the media mode
1501 */
1502
1503static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1504{
1505 u8 ErrFlag = 0;
1506 u16 phy_mode;
1507
1508 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1509 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1510
1511 if ( (phy_mode & 0x24) == 0x24 ) {
1512
1513 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
1514 if(phy_mode&0x8000)
1515 phy_mode = 0x8000;
1516 else if(phy_mode&0x4000)
1517 phy_mode = 0x4000;
1518 else if(phy_mode&0x2000)
1519 phy_mode = 0x2000;
1520 else
1521 phy_mode = 0x1000;
1522
1523 switch (phy_mode) {
1524 case 0x1000: db->op_mode = ULI526X_10MHF; break;
1525 case 0x2000: db->op_mode = ULI526X_10MFD; break;
1526 case 0x4000: db->op_mode = ULI526X_100MHF; break;
1527 case 0x8000: db->op_mode = ULI526X_100MFD; break;
1528 default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
1529 }
1530 } else {
1531 db->op_mode = ULI526X_10MHF;
1532 ULI526X_DBUG(0, "Link Failed :", phy_mode);
1533 ErrFlag = 1;
1534 }
1535
1536 return ErrFlag;
1537}
1538
1539
1540/*
1541 * Set 10/100 phyxcer capability
1542 * AUTO mode : phyxcer register4 is NIC capability
1543 * Force mode: phyxcer register4 is the force media
1544 */
1545
1546static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1547{
1548 u16 phy_reg;
1549
1550 /* Phyxcer capability setting */
1551 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
1552
1553 if (db->media_mode & ULI526X_AUTO) {
1554 /* AUTO Mode */
1555 phy_reg |= db->PHY_reg4;
1556 } else {
1557 /* Force Mode */
1558 switch(db->media_mode) {
1559 case ULI526X_10MHF: phy_reg |= 0x20; break;
1560 case ULI526X_10MFD: phy_reg |= 0x40; break;
1561 case ULI526X_100MHF: phy_reg |= 0x80; break;
1562 case ULI526X_100MFD: phy_reg |= 0x100; break;
1563 }
1564
1565 }
1566
1567 /* Write new capability to Phyxcer Reg4 */
1568 if ( !(phy_reg & 0x01e0)) {
1569 phy_reg|=db->PHY_reg4;
1570 db->media_mode|=ULI526X_AUTO;
1571 }
1572 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
1573
1574 /* Restart Auto-Negotiation */
1575 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
1576 udelay(50);
1577}
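
The register-4 bits used above follow the standard MII auto-negotiation advertisement (ANAR) layout: 0x0020 advertises 10BASE-T half duplex, 0x0040 10BASE-T full, 0x0080 100BASE-TX half, 0x0100 100BASE-TX full, and 0x01e0 masks the whole technology-ability field, which is why db->PHY_reg4 is initialized to 0x1e0 (advertise everything).
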
1578
1579
1580/*
1581 * Process op-mode
1582 *	AUTO mode : PHY controller in Auto-negotiation Mode
1583 * Force mode: PHY controller in force mode with HUB
1584 * N-way force capability with SWITCH
1585 */
1586
1587static void uli526x_process_mode(struct uli526x_board_info *db)
1588{
1589 u16 phy_reg;
1590
1591 /* Full Duplex Mode Check */
1592 if (db->op_mode & 0x4)
1593 db->cr6_data |= CR6_FDM; /* Set Full Duplex Bit */
1594 else
1595 db->cr6_data &= ~CR6_FDM; /* Clear Full Duplex Bit */
1596
1597 update_cr6(db->cr6_data, db->ioaddr);
1598
1599	/* 10/100M phyxcer force mode handling */
1600 if ( !(db->media_mode & 0x8)) {
1601		/* Force Mode */
1602 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
1603 if ( !(phy_reg & 0x1) ) {
1604			/* partner without N-Way capability */
1605 phy_reg = 0x0;
1606 switch(db->op_mode) {
1607 case ULI526X_10MHF: phy_reg = 0x0; break;
1608 case ULI526X_10MFD: phy_reg = 0x100; break;
1609 case ULI526X_100MHF: phy_reg = 0x2000; break;
1610 case ULI526X_100MFD: phy_reg = 0x2100; break;
1611 }
1612 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
1613 }
1614 }
1615}
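
The BMCR values written in the force-mode branch above decode per the standard MII control register: bit 13 (0x2000) selects 100 Mb/s and bit 8 (0x0100) selects full duplex, so for example ULI526X_100MFD maps to 0x2000 | 0x0100 = 0x2100.
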
1616
1617
1618/*
1619 * Write a word to Phy register
1620 */
1621
1622static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1623{
1624 u16 i;
1625 unsigned long ioaddr;
1626
1627 if(chip_id == PCI_ULI5263_ID)
1628 {
1629 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1630 return;
1631 }
1632 /* M5261/M5263 Chip */
1633 ioaddr = iobase + DCR9;
1634
1635	/* Send 35 synchronization clocks to the Phy controller */
1636 for (i = 0; i < 35; i++)
1637 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1638
1639 /* Send start command(01) to Phy */
1640 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1641 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1642
1643 /* Send write command(01) to Phy */
1644 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1645 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1646
1647 /* Send Phy address */
1648 for (i = 0x10; i > 0; i = i >> 1)
1649 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1650
1651 /* Send register address */
1652 for (i = 0x10; i > 0; i = i >> 1)
1653 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1654
1655 /* Write turnaround bits (10) */
1656 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1657 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1658
1659 /* Write a word data to PHY controller */
1660 for ( i = 0x8000; i > 0; i >>= 1)
1661 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1662
1663}
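/*
 * Editor's note (illustrative summary, not original code): the sequence
 * above bit-bangs a standard clause-22 MII management write frame through
 * DCR9, i.e.
 *
 *	<35 preamble ones> 01 (start) 01 (write) <5-bit phy> <5-bit reg>
 *	10 (turnaround) <16 data bits, MSB first>
 */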
1664
1665
1666/*
1667 * Read a word of data from a PHY register
1668 */
1669
1670static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1671{
1672 int i;
1673 u16 phy_data;
1674 unsigned long ioaddr;
1675
1676 if (chip_id == PCI_ULI5263_ID)
1677 return phy_readby_cr10(iobase, phy_addr, offset);
1678 /* M5261/M5263 Chip */
1679 ioaddr = iobase + DCR9;
1680
1681 /* Send 35 synchronization clock pulses to the PHY controller */
1682 for (i = 0; i < 35; i++)
1683 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1684
1685 /* Send start command (01) to the PHY */
1686 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1687 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1688
1689 /* Send read command (10) to the PHY */
1690 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
1691 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
1692
1693 /* Send Phy address */
1694 for (i = 0x10; i > 0; i = i >> 1)
1695 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1696
1697 /* Send register address */
1698 for (i = 0x10; i > 0; i = i >> 1)
1699 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
1700
1701 /* Skip transition state */
1702 phy_read_1bit(ioaddr, chip_id);
1703
1704 /* Read 16-bit data */
1705 for (phy_data = 0, i = 0; i < 16; i++) {
1706 phy_data <<= 1;
1707 phy_data |= phy_read_1bit(ioaddr, chip_id);
1708 }
1709
1710 return phy_data;
1711}
1712
1713static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
1714{
1715 unsigned long ioaddr, cr10_value;
1716
1717 ioaddr = iobase + DCR10;
1718 cr10_value = phy_addr;
1719 cr10_value = (cr10_value << 5) + offset;
1720 cr10_value = (cr10_value << 16) + 0x08000000;
1721 outl(cr10_value, ioaddr);
1722 udelay(1);
1723 while (1)
1724 {
1725 cr10_value = inl(ioaddr);
1726 if (cr10_value & 0x10000000)
1727 break;
1728 }
1729 return cr10_value & 0x0ffff;
1730}
1731
1732static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
1733{
1734 unsigned long ioaddr, cr10_value;
1735
1736 ioaddr = iobase + DCR10;
1737 cr10_value = phy_addr;
1738 cr10_value = (cr10_value << 5) + offset;
1739 cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1740 outl(cr10_value, ioaddr);
1741 udelay(1);
1742}
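/*
 * Editor's sketch of the CR10 command word built above, inferred from the
 * shifts (the field names are assumptions, not taken from a datasheet):
 *
 *	bit  28     : completion flag, polled by phy_readby_cr10()
 *	bit  27     : read command  (0x08000000)
 *	bit  26     : write command (0x04000000)
 *	bits 25..21 : PHY address   (phy_addr << 21)
 *	bits 20..16 : register      (offset << 16)
 *	bits 15..0  : write data / read result
 */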
1743/*
1744 * Write one bit data to Phy Controller
1745 */
1746
1747static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1748{
1749 outl(phy_data, ioaddr); /* MII Clock Low */
1750 udelay(1);
1751 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */
1752 udelay(1);
1753 outl(phy_data, ioaddr); /* MII Clock Low */
1754 udelay(1);
1755}
1756
1757
1758/*
1759 * Read one bit phy data from PHY controller
1760 */
1761
1762static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
1763{
1764 u16 phy_data;
1765
1766 outl(0x50000, ioaddr);
1767 udelay(1);
1768 phy_data = (inl(ioaddr) >> 19) & 0x1;
1769 outl(0x40000, ioaddr);
1770 udelay(1);
1771
1772 return phy_data;
1773}
1774
1775
1776static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
1777 { 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
1778 { 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
1779 { 0, }
1780};
1781MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
1782
1783
1784static struct pci_driver uli526x_driver = {
1785 .name = "uli526x",
1786 .id_table = uli526x_pci_tbl,
1787 .probe = uli526x_init_one,
1788 .remove = __devexit_p(uli526x_remove_one),
1789 .suspend = uli526x_suspend,
1790 .resume = uli526x_resume,
1791};
1792
1793MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
1794MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
1795MODULE_LICENSE("GPL");
1796
1797module_param(debug, int, 0644);
1798module_param(mode, int, 0);
1799module_param(cr6set, int, 0);
1800MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
1801MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
1802
1803/* Description:
1804 * When the user loads the module with insmod, the kernel invokes
1805 * init_module() to register the driver's services.
1806 */
1807
1808static int __init uli526x_init_module(void)
1809{
1810
1811 pr_info("%s\n", version);
1812 printed_version = 1;
1813
1814 ULI526X_DBUG(0, "init_module() ", debug);
1815
1816 if (debug)
1817 uli526x_debug = debug; /* set debug flag */
1818 if (cr6set)
1819 uli526x_cr6_user_set = cr6set;
1820
1821 switch (mode) {
1822 case ULI526X_10MHF:
1823 case ULI526X_100MHF:
1824 case ULI526X_10MFD:
1825 case ULI526X_100MFD:
1826 uli526x_media_mode = mode;
1827 break;
1828 default:
1829 uli526x_media_mode = ULI526X_AUTO;
1830 break;
1831 }
1832
1833 return pci_register_driver(&uli526x_driver);
1834}
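/*
 * Editor's usage note (illustrative, assuming the encoding given in the
 * MODULE_PARM_DESC above: bit 0 selects 10/100 Mb/s, bit 2 duplex):
 *
 *	modprobe uli526x mode=5 debug=1	   # force 100 Mb/s full duplex
 *
 * Any mode value other than the four fixed media falls back to
 * ULI526X_AUTO, i.e. autonegotiation.
 */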
1835
1836
1837/*
1838 * Description:
1839 * When the user removes the module with rmmod, the kernel invokes
1840 * cleanup_module() to unregister all registered services.
1841 */
1842
1843static void __exit uli526x_cleanup_module(void)
1844{
1845 ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
1846 pci_unregister_driver(&uli526x_driver);
1847}
1848
1849module_init(uli526x_init_module);
1850module_exit(uli526x_cleanup_module);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
new file mode 100644
index 000000000000..4d01219ba22f
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -0,0 +1,1670 @@
1/* winbond-840.c: A Linux PCI network adapter device driver. */
2/*
3 Written 1998-2001 by Donald Becker.
4
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL.
11
12 The author may be reached as becker@scyld.com, or C/O
13 Scyld Computing Corporation
14 410 Severn Ave., Suite 210
15 Annapolis MD 21403
16
17 Support and updates available at
18 http://www.scyld.com/network/drivers.html
19
20 Do not remove the copyright information.
21 Do not change the version information unless an improvement has been made.
22 Merely removing my name, as Compex has done in the past, does not count
23 as an improvement.
24
25 Changelog:
26 * ported to 2.4
27 ???
28 * spin lock update, memory barriers, new style dma mappings
29 limit each tx buffer to < 1024 bytes
30 remove DescIntr from Rx descriptors (that's a Tx flag)
31 remove next pointer from Tx descriptors
32 synchronize tx_q_bytes
33 software reset in tx_timeout
34 Copyright (C) 2000 Manfred Spraul
35 * further cleanups
36 power management.
37 support for big endian descriptors
38 Copyright (C) 2001 Manfred Spraul
39 * ethtool support (jgarzik)
40 * Replace some MII-related magic numbers with constants (jgarzik)
41
42 TODO:
43 * enable pci_power_off
44 * Wake-On-LAN
45*/
46
47#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
48
49#define DRV_NAME "winbond-840"
50#define DRV_VERSION "1.01-e"
51#define DRV_RELDATE "Sep-11-2006"
52
53
54/* Automatically extracted configuration info:
55probe-func: winbond840_probe
56config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
57
58c-help-name: Winbond W89c840 PCI Ethernet support
59c-help-symbol: CONFIG_WINBOND_840
60c-help: This driver is for the Winbond W89c840 chip. It also works with
61c-help: the TX9882 chip on the Compex RL100-ATX board.
62c-help: More specific information and updates are available from
63c-help: http://www.scyld.com/network/drivers.html
64*/
65
66/* The user-configurable values.
67 These may be modified when a driver module is loaded.*/
68
69static int debug = 1; /* 0: quiet, 1: normal messages, up to 7: verbose. */
70static int max_interrupt_work = 20;
71/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
72 The '840 uses a 64 element hash table based on the Ethernet CRC. */
73static int multicast_filter_limit = 32;
74
75/* Set the copy breakpoint for the copy-only-tiny-frames scheme:
76 frames shorter than this are copied into a fresh skb (0 disables copying). */
77static int rx_copybreak;
78
79/* Used to pass the media type, etc.
80 Both 'options[]' and 'full_duplex[]' should exist for driver
81 interoperability.
82 The media type is usually passed in 'options[]'.
83*/
84#define MAX_UNITS 8 /* More are supported, limit only on options */
85static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
86static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
87
88/* Operational parameters that are set at compile time. */
89
90/* Keep the ring sizes a power of two for compile efficiency.
91 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
92 Making the Tx ring too large decreases the effectiveness of channel
93 bonding and packet priority.
94 There are no ill effects from too-large receive rings. */
95#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
96#define TX_QUEUE_LEN_RESTART 5
97
98#define TX_BUFLIMIT (1024-128)
99
100/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
101 To avoid overflowing we don't queue again until we have room for a
102 full-size packet.
103 */
104#define TX_FIFO_SIZE (2048)
105#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
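/* Editor's note: 2048 - 1514 - 16 reserves room in the presumed 2 KB FIFO
   for one maximum-size Ethernet frame (1514 bytes) plus 16 bytes of slack,
   matching the "don't queue again until a full-size packet fits" rule
   described above. */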
106
107
108/* Operational parameters that usually are not changed. */
109/* Time in jiffies before concluding the transmitter is hung. */
110#define TX_TIMEOUT (2*HZ)
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h>
114#include <linux/kernel.h>
115#include <linux/string.h>
116#include <linux/timer.h>
117#include <linux/errno.h>
118#include <linux/ioport.h>
119#include <linux/interrupt.h>
120#include <linux/pci.h>
121#include <linux/dma-mapping.h>
122#include <linux/netdevice.h>
123#include <linux/etherdevice.h>
124#include <linux/skbuff.h>
125#include <linux/init.h>
126#include <linux/delay.h>
127#include <linux/ethtool.h>
128#include <linux/mii.h>
129#include <linux/rtnetlink.h>
130#include <linux/crc32.h>
131#include <linux/bitops.h>
132#include <asm/uaccess.h>
133#include <asm/processor.h> /* Processor type for cache alignment. */
134#include <asm/io.h>
135#include <asm/irq.h>
136
137#include "tulip.h"
138
139#undef PKT_BUF_SZ /* tulip.h also defines this */
140#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
141
142/* These identify the driver base version and may not be removed. */
143static const char version[] __initconst =
144 "v" DRV_VERSION " (2.4 port) "
145 DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
146 " http://www.scyld.com/network/drivers.html\n";
147
148MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
149MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
150MODULE_LICENSE("GPL");
151MODULE_VERSION(DRV_VERSION);
152
153module_param(max_interrupt_work, int, 0);
154module_param(debug, int, 0);
155module_param(rx_copybreak, int, 0);
156module_param(multicast_filter_limit, int, 0);
157module_param_array(options, int, NULL, 0);
158module_param_array(full_duplex, int, NULL, 0);
159MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
160MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
161MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
162MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
163MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
164MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");
165
166/*
167 Theory of Operation
168
169I. Board Compatibility
170
171This driver is for the Winbond w89c840 chip.
172
173II. Board-specific settings
174
175None.
176
177III. Driver operation
178
179This chip is very similar to the Digital 21*4* "Tulip" family. The first
180twelve registers and the descriptor format are nearly identical. Read a
181Tulip manual for operational details.
182
183A significant difference is that the multicast filter and station address are
184stored in registers rather than loaded through a pseudo-transmit packet.
185
186Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
187full-sized packet we must use both data buffers in a descriptor. Thus the
188driver uses ring mode where descriptors are implicitly sequential in memory,
189rather than using the second descriptor address as a chain pointer to
190subsequent descriptors.
191
192IV. Notes
193
194If you are going to almost clone a Tulip, why not go all the way and avoid
195the need for a new driver?
196
197IVb. References
198
199http://www.scyld.com/expert/100mbps.html
200http://www.scyld.com/expert/NWay.html
201http://www.winbond.com.tw/
202
203IVc. Errata
204
205A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
206correctly detect a full FIFO, and queuing more than 2048 bytes may result in
207silent data corruption.
208
209Test with 'ping -s 10000' on a fast computer.
210
211*/
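/*
Editor's illustration of the two-buffer descriptor scheme described in
section III (a sketch; the authoritative code is in start_tx() below):

	if (skb->len < TX_BUFLIMIT) {
		desc->length = DescWholePkt | skb->len;	      // one buffer
	} else {
		int len2 = skb->len - TX_BUFLIMIT;	      // remainder
		desc->buffer2 = desc->buffer1 + TX_BUFLIMIT;  // same mapping
		desc->length = DescWholePkt | (len2 << 11) | TX_BUFLIMIT;
	}
*/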
212
213
214
215/*
216 PCI probe table.
217*/
218enum chip_capability_flags {
219 CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
220};
221
222static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
223 { 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
224 { 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
225 { 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
226 { }
227};
228MODULE_DEVICE_TABLE(pci, w840_pci_tbl);
229
230enum {
231 netdev_res_size = 128, /* size of PCI BAR resource */
232};
233
234struct pci_id_info {
235 const char *name;
236 int drv_flags; /* Driver use, intended as capability flags. */
237};
238
239static const struct pci_id_info pci_id_tbl[] __devinitdata = {
240 { /* Sometime a Level-One switch card. */
241 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
242 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
243 { "Compex RL100-ATX", CanHaveMII | HasBrokenTx},
244 { } /* terminate list. */
245};
246
247/* This driver was written to use PCI memory space, however some x86 systems
248 work only with I/O space accesses. See CONFIG_TULIP_MMIO in .config
249*/
250
251/* Offsets to the Command and Status Registers, "CSRs".
252 While similar to the Tulip, these registers are longword aligned.
253 Note: It's not useful to define symbolic names for every register bit in
254 the device. The name can only partially document the semantics and make
255 the driver longer and more difficult to read.
256*/
257enum w840_offsets {
258 PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
259 RxRingPtr=0x0C, TxRingPtr=0x10,
260 IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
261 RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
262 CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
263 MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
264 CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
265};
266
267/* Bits in the NetworkConfig register. */
268enum rx_mode_bits {
269 AcceptErr=0x80,
270 RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
271 RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
272};
273
274enum mii_reg_bits {
275 MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
276 MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
277};
278
279/* The Tulip Rx and Tx buffer descriptors. */
280struct w840_rx_desc {
281 s32 status;
282 s32 length;
283 u32 buffer1;
284 u32 buffer2;
285};
286
287struct w840_tx_desc {
288 s32 status;
289 s32 length;
290 u32 buffer1, buffer2;
291};
292
293#define MII_CNT 1 /* winbond only supports one MII */
294struct netdev_private {
295 struct w840_rx_desc *rx_ring;
296 dma_addr_t rx_addr[RX_RING_SIZE];
297 struct w840_tx_desc *tx_ring;
298 dma_addr_t tx_addr[TX_RING_SIZE];
299 dma_addr_t ring_dma_addr;
300 /* The addresses of receive-in-place skbuffs. */
301 struct sk_buff* rx_skbuff[RX_RING_SIZE];
302 /* The saved address of a sent-in-place packet/buffer, for later free(). */
303 struct sk_buff* tx_skbuff[TX_RING_SIZE];
304 struct net_device_stats stats;
305 struct timer_list timer; /* Media monitoring timer. */
306 /* Frequently used values: keep some adjacent for cache effect. */
307 spinlock_t lock;
308 int chip_id, drv_flags;
309 struct pci_dev *pci_dev;
310 int csr6;
311 struct w840_rx_desc *rx_head_desc;
312 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
313 unsigned int rx_buf_sz; /* Based on MTU+slack. */
314 unsigned int cur_tx, dirty_tx;
315 unsigned int tx_q_bytes;
316 unsigned int tx_full; /* The Tx queue is full. */
317 /* MII transceiver section. */
318 int mii_cnt; /* MII device addresses. */
319 unsigned char phys[MII_CNT]; /* MII device addresses, but only the first is used */
320 u32 mii;
321 struct mii_if_info mii_if;
322 void __iomem *base_addr;
323};
324
325static int eeprom_read(void __iomem *ioaddr, int location);
326static int mdio_read(struct net_device *dev, int phy_id, int location);
327static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
328static int netdev_open(struct net_device *dev);
329static int update_link(struct net_device *dev);
330static void netdev_timer(unsigned long data);
331static void init_rxtx_rings(struct net_device *dev);
332static void free_rxtx_rings(struct netdev_private *np);
333static void init_registers(struct net_device *dev);
334static void tx_timeout(struct net_device *dev);
335static int alloc_ringdesc(struct net_device *dev);
336static void free_ringdesc(struct netdev_private *np);
337static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
338static irqreturn_t intr_handler(int irq, void *dev_instance);
339static void netdev_error(struct net_device *dev, int intr_status);
340static int netdev_rx(struct net_device *dev);
341static u32 __set_rx_mode(struct net_device *dev);
342static void set_rx_mode(struct net_device *dev);
343static struct net_device_stats *get_stats(struct net_device *dev);
344static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
345static const struct ethtool_ops netdev_ethtool_ops;
346static int netdev_close(struct net_device *dev);
347
348static const struct net_device_ops netdev_ops = {
349 .ndo_open = netdev_open,
350 .ndo_stop = netdev_close,
351 .ndo_start_xmit = start_tx,
352 .ndo_get_stats = get_stats,
353 .ndo_set_rx_mode = set_rx_mode,
354 .ndo_do_ioctl = netdev_ioctl,
355 .ndo_tx_timeout = tx_timeout,
356 .ndo_change_mtu = eth_change_mtu,
357 .ndo_set_mac_address = eth_mac_addr,
358 .ndo_validate_addr = eth_validate_addr,
359};
360
361static int __devinit w840_probe1 (struct pci_dev *pdev,
362 const struct pci_device_id *ent)
363{
364 struct net_device *dev;
365 struct netdev_private *np;
366 static int find_cnt;
367 int chip_idx = ent->driver_data;
368 int irq;
369 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
370 void __iomem *ioaddr;
371
372 i = pci_enable_device(pdev);
373 if (i) return i;
374
375 pci_set_master(pdev);
376
377 irq = pdev->irq;
378
379 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
380 pr_warn("Device %s disabled due to DMA limitations\n",
381 pci_name(pdev));
382 return -EIO;
383 }
384 dev = alloc_etherdev(sizeof(*np));
385 if (!dev)
386 return -ENOMEM;
387 SET_NETDEV_DEV(dev, &pdev->dev);
388
389 if (pci_request_regions(pdev, DRV_NAME))
390 goto err_out_netdev;
391
392 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
393 if (!ioaddr)
394 goto err_out_free_res;
395
396 for (i = 0; i < 3; i++)
397 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
398
399 /* Reset the chip to erase previous misconfiguration.
400 No hold time required! */
401 iowrite32(0x00000001, ioaddr + PCIBusCfg);
402
403 dev->base_addr = (unsigned long)ioaddr;
404 dev->irq = irq;
405
406 np = netdev_priv(dev);
407 np->pci_dev = pdev;
408 np->chip_id = chip_idx;
409 np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
410 spin_lock_init(&np->lock);
411 np->mii_if.dev = dev;
412 np->mii_if.mdio_read = mdio_read;
413 np->mii_if.mdio_write = mdio_write;
414 np->base_addr = ioaddr;
415
416 pci_set_drvdata(pdev, dev);
417
418 if (dev->mem_start)
419 option = dev->mem_start;
420
421 /* The lower four bits are the media type. */
422 if (option > 0) {
423 if (option & 0x200)
424 np->mii_if.full_duplex = 1;
425 if (option & 15)
426 dev_info(&dev->dev,
427 "ignoring user supplied media type %d",
428 option & 15);
429 }
430 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
431 np->mii_if.full_duplex = 1;
432
433 if (np->mii_if.full_duplex)
434 np->mii_if.force_media = 1;
435
436 /* The chip-specific entries in the device structure. */
437 dev->netdev_ops = &netdev_ops;
438 dev->ethtool_ops = &netdev_ethtool_ops;
439 dev->watchdog_timeo = TX_TIMEOUT;
440
441 i = register_netdev(dev);
442 if (i)
443 goto err_out_cleardev;
444
445 dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
446 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
447
448 if (np->drv_flags & CanHaveMII) {
449 int phy, phy_idx = 0;
450 for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
451 int mii_status = mdio_read(dev, phy, MII_BMSR);
452 if (mii_status != 0xffff && mii_status != 0x0000) {
453 np->phys[phy_idx++] = phy;
454 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
455 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
456 mdio_read(dev, phy, MII_PHYSID2);
457 dev_info(&dev->dev,
458 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
459 np->mii, phy, mii_status,
460 np->mii_if.advertising);
461 }
462 }
463 np->mii_cnt = phy_idx;
464 np->mii_if.phy_id = np->phys[0];
465 if (phy_idx == 0) {
466 dev_warn(&dev->dev,
467 "MII PHY not found -- this device may not operate correctly\n");
468 }
469 }
470
471 find_cnt++;
472 return 0;
473
474err_out_cleardev:
475 pci_set_drvdata(pdev, NULL);
476 pci_iounmap(pdev, ioaddr);
477err_out_free_res:
478 pci_release_regions(pdev);
479err_out_netdev:
480 free_netdev (dev);
481 return -ENODEV;
482}
483
484
485/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
486 often serial bit streams generated by the host processor.
487 The example below is for the common 93c46 EEPROM, 64 16-bit words. */
488
489/* Delay between EEPROM clock transitions.
490 No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
491 a delay. Note that pre-2.0.34 kernels had a cache-alignment bug that
492 made udelay() unreliable.
493 The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
494 deprecated.
495*/
496#define eeprom_delay(ee_addr) ioread32(ee_addr)
497
498enum EEPROM_Ctrl_Bits {
499 EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
500 EE_ChipSelect=0x801, EE_DataIn=0x08,
501};
502
503/* The EEPROM commands include the always-set leading bit. */
504enum EEPROM_Cmds {
505 EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
506};
507
508static int eeprom_read(void __iomem *addr, int location)
509{
510 int i;
511 int retval = 0;
512 void __iomem *ee_addr = addr + EECtrl;
513 int read_cmd = location | EE_ReadCmd;
514 iowrite32(EE_ChipSelect, ee_addr);
515
516 /* Shift the read command bits out. */
517 for (i = 10; i >= 0; i--) {
518 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
519 iowrite32(dataval, ee_addr);
520 eeprom_delay(ee_addr);
521 iowrite32(dataval | EE_ShiftClk, ee_addr);
522 eeprom_delay(ee_addr);
523 }
524 iowrite32(EE_ChipSelect, ee_addr);
525 eeprom_delay(ee_addr);
526
527 for (i = 16; i > 0; i--) {
528 iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
529 eeprom_delay(ee_addr);
530 retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
531 iowrite32(EE_ChipSelect, ee_addr);
532 eeprom_delay(ee_addr);
533 }
534
535 /* Terminate the EEPROM access. */
536 iowrite32(0, ee_addr);
537 return retval;
538}
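/* Editor's note: the 11 clocks above shift out two leading zeros, the
   93c46 start bit (1), the read opcode (10) and a 6-bit address, MSB
   first; e.g. for location 2 the frame is 1 10 000010, followed by the
   16 data clocks below. */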
539
540/* MII transceiver control section.
541 Read and write the MII registers using software-generated serial
542 MDIO protocol. See the MII specifications or DP83840A data sheet
543 for details.
544
545 The maximum data clock rate is 2.5 MHz. The minimum timing is usually
546 met by back-to-back 33 MHz PCI cycles. */
547#define mdio_delay(mdio_addr) ioread32(mdio_addr)
548
549/* Set iff an MII transceiver on any interface requires an mdio preamble.
550 This is only set for older transceivers, so the extra
551 code size of a per-interface flag is not worthwhile. */
552static char mii_preamble_required = 1;
553
554#define MDIO_WRITE0 (MDIO_EnbOutput)
555#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
556
557/* Generate the preamble required for initial synchronization and
558 a few older transceivers. */
559static void mdio_sync(void __iomem *mdio_addr)
560{
561 int bits = 32;
562
563 /* Establish sync by sending at least 32 logic ones. */
564 while (--bits >= 0) {
565 iowrite32(MDIO_WRITE1, mdio_addr);
566 mdio_delay(mdio_addr);
567 iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
568 mdio_delay(mdio_addr);
569 }
570}
571
572static int mdio_read(struct net_device *dev, int phy_id, int location)
573{
574 struct netdev_private *np = netdev_priv(dev);
575 void __iomem *mdio_addr = np->base_addr + MIICtrl;
576 int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
577 int i, retval = 0;
578
579 if (mii_preamble_required)
580 mdio_sync(mdio_addr);
581
582 /* Shift the read command bits out. */
583 for (i = 15; i >= 0; i--) {
584 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
585
586 iowrite32(dataval, mdio_addr);
587 mdio_delay(mdio_addr);
588 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
589 mdio_delay(mdio_addr);
590 }
591 /* Read the two transition bits, the 16 data bits, and the idle bits. */
592 for (i = 20; i > 0; i--) {
593 iowrite32(MDIO_EnbIn, mdio_addr);
594 mdio_delay(mdio_addr);
595 retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
596 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
597 mdio_delay(mdio_addr);
598 }
599 return (retval>>1) & 0xffff;
600}
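/* Editor's note: (0xf6 << 10) packs two trailing preamble ones, the start
   pattern 01 and the read opcode 10 ahead of the 5-bit PHY and register
   addresses, so the 16 bits shifted out are 11 01 10 ppppp rrrrr; the
   final right shift drops the trailing idle bit before masking the 16
   data bits. */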
601
602static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
603{
604 struct netdev_private *np = netdev_priv(dev);
605 void __iomem *mdio_addr = np->base_addr + MIICtrl;
606 int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
607 int i;
608
609 if (location == 4 && phy_id == np->phys[0])
610 np->mii_if.advertising = value;
611
612 if (mii_preamble_required)
613 mdio_sync(mdio_addr);
614
615 /* Shift the command bits out. */
616 for (i = 31; i >= 0; i--) {
617 int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
618
619 iowrite32(dataval, mdio_addr);
620 mdio_delay(mdio_addr);
621 iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
622 mdio_delay(mdio_addr);
623 }
624 /* Clear out extra bits. */
625 for (i = 2; i > 0; i--) {
626 iowrite32(MDIO_EnbIn, mdio_addr);
627 mdio_delay(mdio_addr);
628 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
629 mdio_delay(mdio_addr);
630 }
631}
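/* Editor's note: (0x5002 << 16) similarly lays out the full 32-bit MII
   write frame: start 01 (bits 31-30), opcode 01 (29-28), PHY address
   (27-23), register (22-18), turnaround 10 (17-16), then the 16-bit
   value, all clocked out MSB first by the loop above. */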
632
633
634static int netdev_open(struct net_device *dev)
635{
636 struct netdev_private *np = netdev_priv(dev);
637 void __iomem *ioaddr = np->base_addr;
638 int i;
639
640 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
641
642 netif_device_detach(dev);
643 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev);
644 if (i)
645 goto out_err;
646
647 if (debug > 1)
648 netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
649
650 if((i=alloc_ringdesc(dev)))
651 goto out_err;
652
653 spin_lock_irq(&np->lock);
654 netif_device_attach(dev);
655 init_registers(dev);
656 spin_unlock_irq(&np->lock);
657
658 netif_start_queue(dev);
659 if (debug > 2)
660 netdev_dbg(dev, "Done netdev_open()\n");
661
662 /* Set the timer to check for link beat. */
663 init_timer(&np->timer);
664 np->timer.expires = jiffies + 1*HZ;
665 np->timer.data = (unsigned long)dev;
666 np->timer.function = netdev_timer; /* timer handler */
667 add_timer(&np->timer);
668 return 0;
669out_err:
670 netif_device_attach(dev);
671 return i;
672}
673
674#define MII_DAVICOM_DM9101 0x0181b800
675
676static int update_link(struct net_device *dev)
677{
678 struct netdev_private *np = netdev_priv(dev);
679 int duplex, fasteth, result, mii_reg;
680
681 /* BMSR */
682 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
683
684 if (mii_reg == 0xffff)
685 return np->csr6;
686 /* reread: the link status bit is sticky */
687 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
688 if (!(mii_reg & 0x4)) {
689 if (netif_carrier_ok(dev)) {
690 if (debug)
691 dev_info(&dev->dev,
692 "MII #%d reports no link. Disabling watchdog\n",
693 np->phys[0]);
694 netif_carrier_off(dev);
695 }
696 return np->csr6;
697 }
698 if (!netif_carrier_ok(dev)) {
699 if (debug)
700 dev_info(&dev->dev,
701 "MII #%d link is back. Enabling watchdog\n",
702 np->phys[0]);
703 netif_carrier_on(dev);
704 }
705
706 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
707 /* If the link partner doesn't support autonegotiation
708 * the MII detects its abilities using "parallel detection".
709 * Some MIIs update the LPA register to the result of the parallel
710 * detection, some don't.
711 * The Davicom PHY [at least 0181b800] doesn't.
712 * Instead, bits 8 and 13 of the BMCR (duplex and speed) are updated
713 * with the result of the negotiation.
714 */
715 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
716 duplex = mii_reg & BMCR_FULLDPLX;
717 fasteth = mii_reg & BMCR_SPEED100;
718 } else {
719 int negotiated;
720 mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
721 negotiated = mii_reg & np->mii_if.advertising;
722
723 duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
724 fasteth = negotiated & 0x380;
725 }
726 duplex |= np->mii_if.force_media;
727 /* remove fastether and fullduplex */
728 result = np->csr6 & ~0x20000200;
729 if (duplex)
730 result |= 0x200;
731 if (fasteth)
732 result |= 0x20000000;
733 if (result != np->csr6 && debug)
734 dev_info(&dev->dev,
735 "Setting %dMBit-%s-duplex based on MII#%d\n",
736 fasteth ? 100 : 10, duplex ? "full" : "half",
737 np->phys[0]);
738 return result;
739}
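/* Editor's note on the masks in the generic-PHY branch above (standard
   <linux/mii.h> LPA bits): 0x02C0 is LPA_10FULL | LPA_100HALF |
   LPA_100BASE4, so the comparison "== LPA_10FULL" means "10 Mb full
   duplex negotiated and no competing 100 Mb half/base4 mode"; 0x380 is
   LPA_100HALF | LPA_100FULL | LPA_100BASE4, i.e. any 100 Mb/s outcome. */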
740
741#define RXTX_TIMEOUT 2000
742static inline void update_csr6(struct net_device *dev, int new)
743{
744 struct netdev_private *np = netdev_priv(dev);
745 void __iomem *ioaddr = np->base_addr;
746 int limit = RXTX_TIMEOUT;
747
748 if (!netif_device_present(dev))
749 new = 0;
750 if (new==np->csr6)
751 return;
752 /* stop both Tx and Rx processes */
753 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
754 /* wait until they have really stopped */
755 for (;;) {
756 int csr5 = ioread32(ioaddr + IntrStatus);
757 int t;
758
759 t = (csr5 >> 17) & 0x07;
760 if (t==0||t==1) {
761 /* rx stopped */
762 t = (csr5 >> 20) & 0x07;
763 if (t==0||t==1)
764 break;
765 }
766
767 limit--;
768 if(!limit) {
769 dev_info(&dev->dev,
770 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
771 break;
772 }
773 udelay(1);
774 }
775 np->csr6 = new;
776 /* and restart them with the new configuration */
777 iowrite32(np->csr6, ioaddr + NetworkConfig);
778 if (new & 0x200)
779 np->mii_if.full_duplex = 1;
780}
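/* Editor's note (an assumption based on the Tulip-compatible register
   layout): bits 19:17 and 22:20 of IntrStatus are the receive and
   transmit process state fields; the loop above treats values 0 and 1 of
   each as "safely stopped" before rewriting NetworkConfig. */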
781
782static void netdev_timer(unsigned long data)
783{
784 struct net_device *dev = (struct net_device *)data;
785 struct netdev_private *np = netdev_priv(dev);
786 void __iomem *ioaddr = np->base_addr;
787
788 if (debug > 2)
789 netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
790 ioread32(ioaddr + IntrStatus),
791 ioread32(ioaddr + NetworkConfig));
792 spin_lock_irq(&np->lock);
793 update_csr6(dev, update_link(dev));
794 spin_unlock_irq(&np->lock);
795 np->timer.expires = jiffies + 10*HZ;
796 add_timer(&np->timer);
797}
798
799static void init_rxtx_rings(struct net_device *dev)
800{
801 struct netdev_private *np = netdev_priv(dev);
802 int i;
803
804 np->rx_head_desc = &np->rx_ring[0];
805 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE];
806
807 /* Initialize all Rx descriptors. */
808 for (i = 0; i < RX_RING_SIZE; i++) {
809 np->rx_ring[i].length = np->rx_buf_sz;
810 np->rx_ring[i].status = 0;
811 np->rx_skbuff[i] = NULL;
812 }
813 /* Mark the last entry as wrapping the ring. */
814 np->rx_ring[i-1].length |= DescEndRing;
815
816 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
817 for (i = 0; i < RX_RING_SIZE; i++) {
818 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
819 np->rx_skbuff[i] = skb;
820 if (skb == NULL)
821 break;
822 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
823 np->rx_buf_sz,PCI_DMA_FROMDEVICE);
824
825 np->rx_ring[i].buffer1 = np->rx_addr[i];
826 np->rx_ring[i].status = DescOwned;
827 }
828
829 np->cur_rx = 0;
830 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
831
832 /* Initialize the Tx descriptors */
833 for (i = 0; i < TX_RING_SIZE; i++) {
834 np->tx_skbuff[i] = NULL;
835 np->tx_ring[i].status = 0;
836 }
837 np->tx_full = 0;
838 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;
839
840 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
841 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE,
842 np->base_addr + TxRingPtr);
843
844}
845
846static void free_rxtx_rings(struct netdev_private* np)
847{
848 int i;
849 /* Free all the skbuffs in the Rx queue. */
850 for (i = 0; i < RX_RING_SIZE; i++) {
851 np->rx_ring[i].status = 0;
852 if (np->rx_skbuff[i]) {
853 pci_unmap_single(np->pci_dev,
854 np->rx_addr[i],
855 np->rx_skbuff[i]->len,
856 PCI_DMA_FROMDEVICE);
857 dev_kfree_skb(np->rx_skbuff[i]);
858 }
859 np->rx_skbuff[i] = NULL;
860 }
861 for (i = 0; i < TX_RING_SIZE; i++) {
862 if (np->tx_skbuff[i]) {
863 pci_unmap_single(np->pci_dev,
864 np->tx_addr[i],
865 np->tx_skbuff[i]->len,
866 PCI_DMA_TODEVICE);
867 dev_kfree_skb(np->tx_skbuff[i]);
868 }
869 np->tx_skbuff[i] = NULL;
870 }
871}
872
873static void init_registers(struct net_device *dev)
874{
875 struct netdev_private *np = netdev_priv(dev);
876 void __iomem *ioaddr = np->base_addr;
877 int i;
878
879 for (i = 0; i < 6; i++)
880 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
881
882 /* Initialize other registers. */
883#ifdef __BIG_ENDIAN
884 i = (1<<20); /* Big-endian descriptors */
885#else
886 i = 0;
887#endif
888 i |= (0x04<<2); /* skip length 4 u32 */
889 i |= 0x02; /* give Rx priority */
890
891 /* Configure the PCI bus bursts and FIFO thresholds.
892 486: Set 8 longword cache alignment, 8 longword burst.
893 586: Set 16 longword cache alignment, no burst limit.
894 Cache alignment bits 15:14 Burst length 13:8
895 0000 <not allowed> 0000 align to cache 0800 8 longwords
896 4000 8 longwords 0100 1 longword 1000 16 longwords
897 8000 16 longwords 0200 2 longwords 2000 32 longwords
898 C000 32 longwords 0400 4 longwords */
899
900#if defined (__i386__) && !defined(MODULE)
901 /* When not a module we can work around broken '486 PCI boards. */
902 if (boot_cpu_data.x86 <= 4) {
903 i |= 0x4800;
904 dev_info(&dev->dev,
905 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
906 } else {
907 i |= 0xE000;
908 }
909#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
910 i |= 0xE000;
911#elif defined(CONFIG_SPARC) || defined (CONFIG_PARISC)
912 i |= 0x4800;
913#else
914#warning Processor architecture undefined
915 i |= 0x4800;
916#endif
917 iowrite32(i, ioaddr + PCIBusCfg);
918
919 np->csr6 = 0;
920 /* 128 byte Tx threshold;
921 Transmit on; Receive on; */
922 update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
923
924 /* Clear and Enable interrupts by setting the interrupt mask. */
925 iowrite32(0x1A0F5, ioaddr + IntrStatus);
926 iowrite32(0x1A0F5, ioaddr + IntrEnable);
927
928 iowrite32(0, ioaddr + RxStartDemand);
929}
930
931static void tx_timeout(struct net_device *dev)
932{
933 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base_addr;
935
936 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
937 ioread32(ioaddr + IntrStatus));
938
939 {
940 int i;
941 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
942 for (i = 0; i < RX_RING_SIZE; i++)
943 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
944 printk(KERN_CONT "\n");
945 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
946 for (i = 0; i < TX_RING_SIZE; i++)
947 printk(KERN_CONT " %08x", np->tx_ring[i].status);
948 printk(KERN_CONT "\n");
949 }
950 printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
951 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
952 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
953
954 disable_irq(dev->irq);
955 spin_lock_irq(&np->lock);
956 /*
957 * Under high load dirty_tx and the internal tx descriptor pointer
958 * come out of sync, thus perform a software reset and reinitialize
959 * everything.
960 */
961
962 iowrite32(1, np->base_addr+PCIBusCfg);
963 udelay(1);
964
965 free_rxtx_rings(np);
966 init_rxtx_rings(dev);
967 init_registers(dev);
968 spin_unlock_irq(&np->lock);
969 enable_irq(dev->irq);
970
971 netif_wake_queue(dev);
972 dev->trans_start = jiffies; /* prevent tx timeout */
973 np->stats.tx_errors++;
974}
975
976/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
977static int alloc_ringdesc(struct net_device *dev)
978{
979 struct netdev_private *np = netdev_priv(dev);
980
981 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
982
983 np->rx_ring = pci_alloc_consistent(np->pci_dev,
984 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
985 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
986 &np->ring_dma_addr);
987 if(!np->rx_ring)
988 return -ENOMEM;
989 init_rxtx_rings(dev);
990 return 0;
991}
992
993static void free_ringdesc(struct netdev_private *np)
994{
995 pci_free_consistent(np->pci_dev,
996 sizeof(struct w840_rx_desc)*RX_RING_SIZE +
997 sizeof(struct w840_tx_desc)*TX_RING_SIZE,
998 np->rx_ring, np->ring_dma_addr);
999
1000}
1001
1002static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1003{
1004 struct netdev_private *np = netdev_priv(dev);
1005 unsigned entry;
1006
1007 /* Caution: the write order is important here, set the field
1008 with the "ownership" bits last. */
1009
1010 /* Calculate the next Tx descriptor entry. */
1011 entry = np->cur_tx % TX_RING_SIZE;
1012
1013 np->tx_addr[entry] = pci_map_single(np->pci_dev,
1014 skb->data,skb->len, PCI_DMA_TODEVICE);
1015 np->tx_skbuff[entry] = skb;
1016
1017 np->tx_ring[entry].buffer1 = np->tx_addr[entry];
1018 if (skb->len < TX_BUFLIMIT) {
1019 np->tx_ring[entry].length = DescWholePkt | skb->len;
1020 } else {
1021 int len = skb->len - TX_BUFLIMIT;
1022
1023 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT;
1024 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
1025 }
1026 if(entry == TX_RING_SIZE-1)
1027 np->tx_ring[entry].length |= DescEndRing;
1028
1029 /* Now acquire the irq spinlock.
1030 * The difficult race is the ordering between
1031 * increasing np->cur_tx and setting DescOwned:
1032 * - if np->cur_tx is increased first the interrupt
1033 * handler could consider the packet as transmitted
1034 * since DescOwned is cleared.
1035 * - If DescOwned is set first the NIC could report the
1036 * packet as sent, but the interrupt handler would ignore it
1037 * since the np->cur_tx was not yet increased.
1038 */
1039 spin_lock_irq(&np->lock);
1040 np->cur_tx++;
1041
1042 wmb(); /* flush length, buffer1, buffer2 */
1043 np->tx_ring[entry].status = DescOwned;
1044 wmb(); /* flush status and kick the hardware */
1045 iowrite32(0, np->base_addr + TxStartDemand);
1046 np->tx_q_bytes += skb->len;
1047 /* Work around horrible bug in the chip by marking the queue as full
1048 when we do not have FIFO room for a maximum sized packet. */
1049 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
1050 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
1051 netif_stop_queue(dev);
1052 wmb();
1053 np->tx_full = 1;
1054 }
1055 spin_unlock_irq(&np->lock);
1056
1057 if (debug > 4) {
1058 netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
1059 np->cur_tx, entry);
1060 }
1061 return NETDEV_TX_OK;
1062}
1063
1064static void netdev_tx_done(struct net_device *dev)
1065{
1066 struct netdev_private *np = netdev_priv(dev);
1067 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
1068 int entry = np->dirty_tx % TX_RING_SIZE;
1069 int tx_status = np->tx_ring[entry].status;
1070
1071 if (tx_status < 0)
1072 break;
1073 if (tx_status & 0x8000) { /* There was an error, log it. */
1074#ifndef final_version
1075 if (debug > 1)
1076 netdev_dbg(dev, "Transmit error, Tx status %08x\n",
1077 tx_status);
1078#endif
1079 np->stats.tx_errors++;
1080 if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
1081 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
1082 if (tx_status & 0x0200) np->stats.tx_window_errors++;
1083 if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
1084 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
1085 np->stats.tx_heartbeat_errors++;
1086 } else {
1087#ifndef final_version
1088 if (debug > 3)
1089 netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
1090 entry, tx_status);
1091#endif
1092 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
1093 np->stats.collisions += (tx_status >> 3) & 15;
1094 np->stats.tx_packets++;
1095 }
1096 /* Free the original skb. */
1097 pci_unmap_single(np->pci_dev,np->tx_addr[entry],
1098 np->tx_skbuff[entry]->len,
1099 PCI_DMA_TODEVICE);
1100 np->tx_q_bytes -= np->tx_skbuff[entry]->len;
1101 dev_kfree_skb_irq(np->tx_skbuff[entry]);
1102 np->tx_skbuff[entry] = NULL;
1103 }
1104 if (np->tx_full &&
1105 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
1106 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
1107 /* The ring is no longer full, clear tbusy. */
1108 np->tx_full = 0;
1109 wmb();
1110 netif_wake_queue(dev);
1111 }
1112}
1113
1114/* The interrupt handler does all of the Rx thread work and cleans up
1115 after the Tx thread. */
1116static irqreturn_t intr_handler(int irq, void *dev_instance)
1117{
1118 struct net_device *dev = (struct net_device *)dev_instance;
1119 struct netdev_private *np = netdev_priv(dev);
1120 void __iomem *ioaddr = np->base_addr;
1121 int work_limit = max_interrupt_work;
1122 int handled = 0;
1123
1124 if (!netif_device_present(dev))
1125 return IRQ_NONE;
1126 do {
1127 u32 intr_status = ioread32(ioaddr + IntrStatus);
1128
1129 /* Acknowledge all of the current interrupt sources ASAP. */
1130 iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
1131
1132 if (debug > 4)
1133 netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
1134
1135 if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
1136 break;
1137
1138 handled = 1;
1139
1140 if (intr_status & (RxIntr | RxNoBuf))
1141 netdev_rx(dev);
1142 if (intr_status & RxNoBuf)
1143 iowrite32(0, ioaddr + RxStartDemand);
1144
1145 if (intr_status & (TxNoBuf | TxIntr) &&
1146 np->cur_tx != np->dirty_tx) {
1147 spin_lock(&np->lock);
1148 netdev_tx_done(dev);
1149 spin_unlock(&np->lock);
1150 }
1151
1152 /* Abnormal error summary/uncommon events handlers. */
1153 if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
1154 TimerInt | TxDied))
1155 netdev_error(dev, intr_status);
1156
1157 if (--work_limit < 0) {
1158 dev_warn(&dev->dev,
1159 "Too much work at interrupt, status=0x%04x\n",
1160 intr_status);
1161 /* Set the timer to re-enable the other interrupts after
1162 10*82usec ticks. */
1163 spin_lock(&np->lock);
1164 if (netif_device_present(dev)) {
1165 iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
1166 iowrite32(10, ioaddr + GPTimer);
1167 }
1168 spin_unlock(&np->lock);
1169 break;
1170 }
1171 } while (1);
1172
1173 if (debug > 3)
1174 netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
1175 ioread32(ioaddr + IntrStatus));
1176 return IRQ_RETVAL(handled);
1177}
1178
1179/* This routine is logically part of the interrupt handler, but separated
1180 for clarity and better register allocation. */
1181static int netdev_rx(struct net_device *dev)
1182{
1183 struct netdev_private *np = netdev_priv(dev);
1184 int entry = np->cur_rx % RX_RING_SIZE;
1185 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
1186
1187 if (debug > 4) {
1188 netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
1189 entry, np->rx_ring[entry].status);
1190 }
1191
1192 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1193 while (--work_limit >= 0) {
1194 struct w840_rx_desc *desc = np->rx_head_desc;
1195 s32 status = desc->status;
1196
1197 if (debug > 4)
1198 netdev_dbg(dev, " netdev_rx() status was %08x\n",
1199 status);
1200 if (status < 0)
1201 break;
1202 if ((status & 0x38008300) != 0x0300) {
1203 if ((status & 0x38000300) != 0x0300) {
1204 /* Ignore earlier buffers. */
1205 if ((status & 0xffff) != 0x7fff) {
1206 dev_warn(&dev->dev,
1207 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
1208 np->cur_rx, status);
1209 np->stats.rx_length_errors++;
1210 }
1211 } else if (status & 0x8000) {
1212 /* There was a fatal error. */
1213 if (debug > 2)
1214 netdev_dbg(dev, "Receive error, Rx status %08x\n",
1215 status);
1216 np->stats.rx_errors++; /* end of a packet.*/
1217 if (status & 0x0890) np->stats.rx_length_errors++;
1218 if (status & 0x004C) np->stats.rx_frame_errors++;
1219 if (status & 0x0002) np->stats.rx_crc_errors++;
1220 }
1221 } else {
1222 struct sk_buff *skb;
1223 /* Omit the four octet CRC from the length. */
1224 int pkt_len = ((status >> 16) & 0x7ff) - 4;
1225
1226#ifndef final_version
1227 if (debug > 4)
1228 netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
1229 pkt_len, status);
1230#endif
1231 /* Check if the packet is long enough to accept without copying
1232 to a minimally-sized skbuff. */
1233 if (pkt_len < rx_copybreak &&
1234 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1235 skb_reserve(skb, 2); /* 16 byte align the IP header */
1236 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1237 np->rx_skbuff[entry]->len,
1238 PCI_DMA_FROMDEVICE);
1239 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
1240 skb_put(skb, pkt_len);
1241 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1242 np->rx_skbuff[entry]->len,
1243 PCI_DMA_FROMDEVICE);
1244 } else {
1245 pci_unmap_single(np->pci_dev,np->rx_addr[entry],
1246 np->rx_skbuff[entry]->len,
1247 PCI_DMA_FROMDEVICE);
1248 skb_put(skb = np->rx_skbuff[entry], pkt_len);
1249 np->rx_skbuff[entry] = NULL;
1250 }
1251#ifndef final_version /* Remove after testing. */
1252 /* You will want this info for the initial debug. */
1253 if (debug > 5)
1254 netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
1255 &skb->data[0], &skb->data[6],
1256 skb->data[12], skb->data[13],
1257 &skb->data[14]);
1258#endif
1259 skb->protocol = eth_type_trans(skb, dev);
1260 netif_rx(skb);
1261 np->stats.rx_packets++;
1262 np->stats.rx_bytes += pkt_len;
1263 }
1264 entry = (++np->cur_rx) % RX_RING_SIZE;
1265 np->rx_head_desc = &np->rx_ring[entry];
1266 }
1267
1268 /* Refill the Rx ring buffers. */
1269 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1270 struct sk_buff *skb;
1271 entry = np->dirty_rx % RX_RING_SIZE;
1272 if (np->rx_skbuff[entry] == NULL) {
1273 skb = dev_alloc_skb(np->rx_buf_sz);
1274 np->rx_skbuff[entry] = skb;
1275 if (skb == NULL)
1276 break; /* Better luck next round. */
1277 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1278 skb->data,
1279 np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1280 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1281 }
1282 wmb();
1283 np->rx_ring[entry].status = DescOwned;
1284 }
1285
1286 return 0;
1287}
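/* Editor's note on the copybreak path above: frames shorter than
   rx_copybreak are copied into a freshly allocated skb so the original
   full-size receive buffer stays mapped and is immediately reusable,
   trading one memcpy per small packet for fewer DMA map/unmap cycles. */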
1288
1289static void netdev_error(struct net_device *dev, int intr_status)
1290{
1291 struct netdev_private *np = netdev_priv(dev);
1292 void __iomem *ioaddr = np->base_addr;
1293
1294 if (debug > 2)
1295 netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
1296 if (intr_status == 0xffffffff)
1297 return;
1298 spin_lock(&np->lock);
1299 if (intr_status & TxFIFOUnderflow) {
1300 int new;
1301 /* Bump up the Tx threshold */
1302#if 0
1303 /* This causes lots of dropped packets,
1304 * and under high load even tx_timeouts
1305 */
1306 new = np->csr6 + 0x4000;
1307#else
1308 new = (np->csr6 >> 14)&0x7f;
1309 if (new < 64)
1310 new *= 2;
1311 else
1312 new = 127; /* load full packet before starting */
1313 new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
1314#endif
1315 netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
1316 update_csr6(dev, new);
1317 }
1318 if (intr_status & RxDied) { /* Missed a Rx frame. */
1319 np->stats.rx_errors++;
1320 }
1321 if (intr_status & TimerInt) {
1322 /* Re-enable other interrupts. */
1323 if (netif_device_present(dev))
1324 iowrite32(0x1A0F5, ioaddr + IntrEnable);
1325 }
1326 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1327 iowrite32(0, ioaddr + RxStartDemand);
1328 spin_unlock(&np->lock);
1329}
1330
1331static struct net_device_stats *get_stats(struct net_device *dev)
1332{
1333 struct netdev_private *np = netdev_priv(dev);
1334 void __iomem *ioaddr = np->base_addr;
1335
1336 /* The chip only needs to report frames it silently dropped. */
1337 spin_lock_irq(&np->lock);
1338 if (netif_running(dev) && netif_device_present(dev))
1339 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1340 spin_unlock_irq(&np->lock);
1341
1342 return &np->stats;
1343}
1344
1345
1346static u32 __set_rx_mode(struct net_device *dev)
1347{
1348 struct netdev_private *np = netdev_priv(dev);
1349 void __iomem *ioaddr = np->base_addr;
1350 u32 mc_filter[2]; /* Multicast hash filter */
1351 u32 rx_mode;
1352
1353 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1354 memset(mc_filter, 0xff, sizeof(mc_filter));
1355 rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
1356 | AcceptMyPhys;
1357 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
1358 (dev->flags & IFF_ALLMULTI)) {
1359 /* Too many to match, or accept all multicasts. */
1360 memset(mc_filter, 0xff, sizeof(mc_filter));
1361 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1362 } else {
1363 struct netdev_hw_addr *ha;
1364
1365 memset(mc_filter, 0, sizeof(mc_filter));
1366 netdev_for_each_mc_addr(ha, dev) {
1367 int filbit;
1368
1369 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1370 filbit &= 0x3f;
1371 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1372 }
1373 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1374 }
1375 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1376 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1377 return rx_mode;
1378}
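/* Editor's illustration of the hash above: the filter bit is the top six
   bits of the big-endian Ethernet CRC of the address, inverted, e.g.

	u32 crc = ether_crc(ETH_ALEN, ha->addr);
	int bit = ((crc >> 26) ^ 0x3F) & 0x3F;	 // 0..63
	mc_filter[bit >> 5] |= 1 << (bit & 31);	 // two 32-bit words
*/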
1379
1380static void set_rx_mode(struct net_device *dev)
1381{
1382 struct netdev_private *np = netdev_priv(dev);
1383 u32 rx_mode = __set_rx_mode(dev);
1384 spin_lock_irq(&np->lock);
1385 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
1386 spin_unlock_irq(&np->lock);
1387}
1388
1389static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1390{
1391 struct netdev_private *np = netdev_priv(dev);
1392
1393 strcpy (info->driver, DRV_NAME);
1394 strcpy (info->version, DRV_VERSION);
1395 strcpy (info->bus_info, pci_name(np->pci_dev));
1396}
1397
1398static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1399{
1400 struct netdev_private *np = netdev_priv(dev);
1401 int rc;
1402
1403 spin_lock_irq(&np->lock);
1404 rc = mii_ethtool_gset(&np->mii_if, cmd);
1405 spin_unlock_irq(&np->lock);
1406
1407 return rc;
1408}
1409
1410static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1411{
1412 struct netdev_private *np = netdev_priv(dev);
1413 int rc;
1414
1415 spin_lock_irq(&np->lock);
1416 rc = mii_ethtool_sset(&np->mii_if, cmd);
1417 spin_unlock_irq(&np->lock);
1418
1419 return rc;
1420}
1421
1422static int netdev_nway_reset(struct net_device *dev)
1423{
1424 struct netdev_private *np = netdev_priv(dev);
1425 return mii_nway_restart(&np->mii_if);
1426}
1427
1428static u32 netdev_get_link(struct net_device *dev)
1429{
1430 struct netdev_private *np = netdev_priv(dev);
1431 return mii_link_ok(&np->mii_if);
1432}
1433
1434static u32 netdev_get_msglevel(struct net_device *dev)
1435{
1436 return debug;
1437}
1438
1439static void netdev_set_msglevel(struct net_device *dev, u32 value)
1440{
1441 debug = value;
1442}
1443
1444static const struct ethtool_ops netdev_ethtool_ops = {
1445 .get_drvinfo = netdev_get_drvinfo,
1446 .get_settings = netdev_get_settings,
1447 .set_settings = netdev_set_settings,
1448 .nway_reset = netdev_nway_reset,
1449 .get_link = netdev_get_link,
1450 .get_msglevel = netdev_get_msglevel,
1451 .set_msglevel = netdev_set_msglevel,
1452};
1453
1454static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1455{
1456 struct mii_ioctl_data *data = if_mii(rq);
1457 struct netdev_private *np = netdev_priv(dev);
1458
1459 switch(cmd) {
1460 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1461 data->phy_id = ((struct netdev_private *)netdev_priv(dev))->phys[0] & 0x1f;
1462 /* Fall Through */
1463
1464 case SIOCGMIIREG: /* Read MII PHY register. */
1465 spin_lock_irq(&np->lock);
1466 data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
1467 spin_unlock_irq(&np->lock);
1468 return 0;
1469
1470 case SIOCSMIIREG: /* Write MII PHY register. */
1471 spin_lock_irq(&np->lock);
1472 mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
1473 spin_unlock_irq(&np->lock);
1474 return 0;
1475 default:
1476 return -EOPNOTSUPP;
1477 }
1478}
1479
1480static int netdev_close(struct net_device *dev)
1481{
1482 struct netdev_private *np = netdev_priv(dev);
1483 void __iomem *ioaddr = np->base_addr;
1484
1485 netif_stop_queue(dev);
1486
1487 if (debug > 1) {
1488 netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
1489 ioread32(ioaddr + IntrStatus),
1490 ioread32(ioaddr + NetworkConfig));
1491 netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
1492 np->cur_tx, np->dirty_tx,
1493 np->cur_rx, np->dirty_rx);
1494 }
1495
1496 /* Stop the chip's Tx and Rx processes. */
1497 spin_lock_irq(&np->lock);
1498 netif_device_detach(dev);
1499 update_csr6(dev, 0);
1500 iowrite32(0x0000, ioaddr + IntrEnable);
1501 spin_unlock_irq(&np->lock);
1502
1503 free_irq(dev->irq, dev);
1504 wmb();
1505 netif_device_attach(dev);
1506
1507 if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
1508 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1509
1510#ifdef __i386__
1511 if (debug > 2) {
1512 int i;
1513
1514 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring);
1515 for (i = 0; i < TX_RING_SIZE; i++)
1516 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1517 i, np->tx_ring[i].length,
1518 np->tx_ring[i].status, np->tx_ring[i].buffer1);
1519 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
1520 for (i = 0; i < RX_RING_SIZE; i++) {
1521 printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
1522 i, np->rx_ring[i].length,
1523 np->rx_ring[i].status, np->rx_ring[i].buffer1);
1524 }
1525 }
1526#endif /* __i386__ debugging only */
1527
1528 del_timer_sync(&np->timer);
1529
1530 free_rxtx_rings(np);
1531 free_ringdesc(np);
1532
1533 return 0;
1534}
1535
1536static void __devexit w840_remove1 (struct pci_dev *pdev)
1537{
1538 struct net_device *dev = pci_get_drvdata(pdev);
1539
1540 if (dev) {
1541 struct netdev_private *np = netdev_priv(dev);
1542 unregister_netdev(dev);
1543 pci_release_regions(pdev);
1544 pci_iounmap(pdev, np->base_addr);
1545 free_netdev(dev);
1546 }
1547
1548 pci_set_drvdata(pdev, NULL);
1549}
1550
1551#ifdef CONFIG_PM
1552
1553/*
1554 * suspend/resume synchronization:
1555 * - open, close, do_ioctl:
1556 * rtnl_lock, & netif_device_detach after the rtnl_unlock.
1557 * - get_stats:
1558 * spin_lock_irq(np->lock), doesn't touch hw if not present
1559 * - start_xmit:
1560 * synchronize_irq + netif_tx_disable;
1561 * - tx_timeout:
1562 * netif_device_detach + netif_tx_disable;
1563 * - set_multicast_list
1564 * netif_device_detach + netif_tx_disable;
1565 * - interrupt handler
1566 * doesn't touch hw if not present, synchronize_irq waits for
1567 * running instances of the interrupt handler.
1568 *
1569 * Disabling hw requires clearing csr6 & IntrEnable.
1570 * update_csr6 and all functions that write IntrEnable check
1571 * netif_device_present before setting any bits.
1572 *
1573 * Detach must occur with np->lock held (spin_lock_irq()); interrupts
1574 * from a detached device would cause an irq storm.
1575 */
1576static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1577{
1578 struct net_device *dev = pci_get_drvdata (pdev);
1579 struct netdev_private *np = netdev_priv(dev);
1580 void __iomem *ioaddr = np->base_addr;
1581
1582 rtnl_lock();
1583 if (netif_running (dev)) {
1584 del_timer_sync(&np->timer);
1585
1586 spin_lock_irq(&np->lock);
1587 netif_device_detach(dev);
1588 update_csr6(dev, 0);
1589 iowrite32(0, ioaddr + IntrEnable);
1590 spin_unlock_irq(&np->lock);
1591
1592 synchronize_irq(dev->irq);
1593 netif_tx_disable(dev);
1594
1595 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
1596
1597 /* no more hardware accesses behind this line. */
1598
1599 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
1600
1601 /* pci_power_off(pdev, -1); */
1602
1603 free_rxtx_rings(np);
1604 } else {
1605 netif_device_detach(dev);
1606 }
1607 rtnl_unlock();
1608 return 0;
1609}
1610
1611static int w840_resume (struct pci_dev *pdev)
1612{
1613 struct net_device *dev = pci_get_drvdata (pdev);
1614 struct netdev_private *np = netdev_priv(dev);
1615 int retval = 0;
1616
1617 rtnl_lock();
1618 if (netif_device_present(dev))
1619 goto out; /* device not suspended */
1620 if (netif_running(dev)) {
1621 if ((retval = pci_enable_device(pdev))) {
1622 dev_err(&dev->dev,
1623 "pci_enable_device failed in resume\n");
1624 goto out;
1625 }
1626 spin_lock_irq(&np->lock);
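		/* PCIBusCfg (CSR0) bit 0 is software reset; the readback and
		 * udelay below give the reset time to complete (an inference
		 * from the tulip-style programming model, not the datasheet). */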
1627 iowrite32(1, np->base_addr+PCIBusCfg);
1628 ioread32(np->base_addr+PCIBusCfg);
1629 udelay(1);
1630 netif_device_attach(dev);
1631 init_rxtx_rings(dev);
1632 init_registers(dev);
1633 spin_unlock_irq(&np->lock);
1634
1635 netif_wake_queue(dev);
1636
1637 mod_timer(&np->timer, jiffies + 1*HZ);
1638 } else {
1639 netif_device_attach(dev);
1640 }
1641out:
1642 rtnl_unlock();
1643 return retval;
1644}
1645#endif
1646
1647static struct pci_driver w840_driver = {
1648 .name = DRV_NAME,
1649 .id_table = w840_pci_tbl,
1650 .probe = w840_probe1,
1651 .remove = __devexit_p(w840_remove1),
1652#ifdef CONFIG_PM
1653 .suspend = w840_suspend,
1654 .resume = w840_resume,
1655#endif
1656};
1657
1658static int __init w840_init(void)
1659{
1660 printk(version);
1661 return pci_register_driver(&w840_driver);
1662}
1663
1664static void __exit w840_exit(void)
1665{
1666 pci_unregister_driver(&w840_driver);
1667}
1668
1669module_init(w840_init);
1670module_exit(w840_exit);
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
new file mode 100644
index 000000000000..988b8eb24d37
--- /dev/null
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -0,0 +1,1154 @@
1/*
2 * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
3 *
4 * This software is (C) by the respective authors, and licensed under the GPL
5 * License.
6 *
7 * Written by Arjan van de Ven for Red Hat, Inc.
8 * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 *
14 * $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
15 */
16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
19#include <linux/module.h>
20#include <linux/kernel.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/skbuff.h>
30#include <linux/delay.h>
31#include <linux/init.h>
32#include <linux/bitops.h>
33
34#include <asm/uaccess.h>
35#include <asm/io.h>
36#ifdef CONFIG_NET_POLL_CONTROLLER
37#include <asm/irq.h>
38#endif
39
40MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
42MODULE_LICENSE("GPL");
43
44
45
46/* IO registers on the card, offsets */
47#define CSR0 0x00
48#define CSR1 0x08
49#define CSR2 0x10
50#define CSR3 0x18
51#define CSR4 0x20
52#define CSR5 0x28
53#define CSR6 0x30
54#define CSR7 0x38
55#define CSR8 0x40
56#define CSR9 0x48
57#define CSR10 0x50
58#define CSR11 0x58
59#define CSR12 0x60
60#define CSR13 0x68
61#define CSR14 0x70
62#define CSR15 0x78
63#define CSR16 0x80
64
65/* PCI registers */
66#define PCI_POWERMGMT 0x40
67
68/* Offsets of the buffers within the descriptor pages, in bytes */
69
70#define NUMDESCRIPTORS 4
71
72static int bufferoffsets[NUMDESCRIPTORS] = {128,2048,4096,6144};
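/*
 * Layout sketch (inferred from the offsets above, not from a datasheet):
 * each 8 KiB DMA area starts with the four 16-byte descriptors and holds
 * one 1536-byte packet buffer per descriptor at the offsets listed:
 *
 *	byte:	0     128    2048   4096   6144       8192
 *		|desc | buf0 | buf1 | buf2 | buf3 ...  |
 */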
73
74
75struct xircom_private {
76 /* Send and receive buffers, kernel-addressable and dma addressable forms */
77
78 __le32 *rx_buffer;
79 __le32 *tx_buffer;
80
81 dma_addr_t rx_dma_handle;
82 dma_addr_t tx_dma_handle;
83
84 struct sk_buff *tx_skb[4];
85
86 unsigned long io_port;
87 int open;
88
89 /* transmit_used is the rotating counter that indicates which transmit
90 descriptor has to be used next */
91 int transmit_used;
92
93 /* Spinlock to serialize register operations.
94	   It must be held while manipulating the following registers:
95 CSR0, CSR6, CSR7, CSR9, CSR10, CSR15
96 */
97 spinlock_t lock;
98
99 struct pci_dev *pdev;
100 struct net_device *dev;
101};
102
103
104/* Function prototypes */
105static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id);
106static void xircom_remove(struct pci_dev *pdev);
107static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
108static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
109 struct net_device *dev);
110static int xircom_open(struct net_device *dev);
111static int xircom_close(struct net_device *dev);
112static void xircom_up(struct xircom_private *card);
113#ifdef CONFIG_NET_POLL_CONTROLLER
114static void xircom_poll_controller(struct net_device *dev);
115#endif
116
117static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset);
118static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset);
119static void read_mac_address(struct xircom_private *card);
120static void transceiver_voodoo(struct xircom_private *card);
121static void initialize_card(struct xircom_private *card);
122static void trigger_transmit(struct xircom_private *card);
123static void trigger_receive(struct xircom_private *card);
124static void setup_descriptors(struct xircom_private *card);
125static void remove_descriptors(struct xircom_private *card);
126static int link_status_changed(struct xircom_private *card);
127static void activate_receiver(struct xircom_private *card);
128static void deactivate_receiver(struct xircom_private *card);
129static void activate_transmitter(struct xircom_private *card);
130static void deactivate_transmitter(struct xircom_private *card);
131static void enable_transmit_interrupt(struct xircom_private *card);
132static void enable_receive_interrupt(struct xircom_private *card);
133static void enable_link_interrupt(struct xircom_private *card);
134static void disable_all_interrupts(struct xircom_private *card);
135static int link_status(struct xircom_private *card);
136
137
138
139static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
140 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
141 {0,},
142};
143MODULE_DEVICE_TABLE(pci, xircom_pci_table);
144
145static struct pci_driver xircom_ops = {
146 .name = "xircom_cb",
147 .id_table = xircom_pci_table,
148 .probe = xircom_probe,
149 .remove = xircom_remove,
150	.suspend = NULL,
151	.resume = NULL
152};
153
154
155#if defined DEBUG && DEBUG > 1
156static void print_binary(unsigned int number)
157{
158 int i,i2;
159 char buffer[64];
160 memset(buffer,0,64);
161 i2=0;
162 for (i=31;i>=0;i--) {
163 if (number & (1<<i))
164 buffer[i2++]='1';
165 else
166 buffer[i2++]='0';
167 if ((i&3)==0)
168 buffer[i2++]=' ';
169 }
170 pr_debug("%s\n",buffer);
171}
172#endif
173
174static const struct net_device_ops netdev_ops = {
175 .ndo_open = xircom_open,
176 .ndo_stop = xircom_close,
177 .ndo_start_xmit = xircom_start_xmit,
178 .ndo_change_mtu = eth_change_mtu,
179 .ndo_set_mac_address = eth_mac_addr,
180 .ndo_validate_addr = eth_validate_addr,
181#ifdef CONFIG_NET_POLL_CONTROLLER
182 .ndo_poll_controller = xircom_poll_controller,
183#endif
184};
185
186/* xircom_probe is the code that gets called on device insertion.
187 it sets up the hardware and registers the device with the network layer.
188
189 TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
190 first two packets that get sent, and pump hates that.
191
192 */
193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
194{
195 struct net_device *dev = NULL;
196 struct xircom_private *private;
197 unsigned long flags;
198 unsigned short tmp16;
199
200 /* First do the PCI initialisation */
201
202 if (pci_enable_device(pdev))
203 return -ENODEV;
204
205 /* disable all powermanagement */
206 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
207
208 pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
209
210 /* clear PCI status, if any */
211 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
212 pci_write_config_word (pdev, PCI_STATUS,tmp16);
213
214 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
215 pr_err("%s: failed to allocate io-region\n", __func__);
216 return -ENODEV;
217 }
218
219 /*
220 Before changing the hardware, allocate the memory.
221 This way, we can fail gracefully if not enough memory
222 is available.
223 */
224 dev = alloc_etherdev(sizeof(struct xircom_private));
225 if (!dev) {
226 pr_err("%s: failed to allocate etherdev\n", __func__);
227 goto device_fail;
228 }
229 private = netdev_priv(dev);
230
231 /* Allocate the send/receive buffers */
232 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
233 if (private->rx_buffer == NULL) {
234 pr_err("%s: no memory for rx buffer\n", __func__);
235 goto rx_buf_fail;
236 }
237 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
238 if (private->tx_buffer == NULL) {
239 pr_err("%s: no memory for tx buffer\n", __func__);
240 goto tx_buf_fail;
241 }
242
243 SET_NETDEV_DEV(dev, &pdev->dev);
244
245
246 private->dev = dev;
247 private->pdev = pdev;
248 private->io_port = pci_resource_start(pdev, 0);
249 spin_lock_init(&private->lock);
250 dev->irq = pdev->irq;
251 dev->base_addr = private->io_port;
252
253 initialize_card(private);
254 read_mac_address(private);
255 setup_descriptors(private);
256
257 dev->netdev_ops = &netdev_ops;
258 pci_set_drvdata(pdev, dev);
259
260 if (register_netdev(dev)) {
261 pr_err("%s: netdevice registration failed\n", __func__);
262 goto reg_fail;
263 }
264
265 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
266 pdev->revision, pdev->irq);
267 /* start the transmitter to get a heartbeat */
268 /* TODO: send 2 dummy packets here */
269 transceiver_voodoo(private);
270
271 spin_lock_irqsave(&private->lock,flags);
272 activate_transmitter(private);
273 activate_receiver(private);
274 spin_unlock_irqrestore(&private->lock,flags);
275
276 trigger_receive(private);
277
278 return 0;
279
280reg_fail:
281	pci_free_consistent(pdev,8192,private->tx_buffer,private->tx_dma_handle);
282tx_buf_fail:
283	pci_free_consistent(pdev,8192,private->rx_buffer,private->rx_dma_handle);
284rx_buf_fail:
285 free_netdev(dev);
286device_fail:
287 return -ENODEV;
288}
289
290
291/*
292 xircom_remove is called on module-unload or on device-eject.
293 it unregisters the irq, io-region and network device.
294 Interrupts and such are already stopped in the "ifconfig ethX down"
295 code.
296 */
297static void __devexit xircom_remove(struct pci_dev *pdev)
298{
299 struct net_device *dev = pci_get_drvdata(pdev);
300 struct xircom_private *card = netdev_priv(dev);
301
302	unregister_netdev(dev);
303	release_region(dev->base_addr, 128);
304
305	pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
306	pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
307	free_netdev(dev);
308	pci_set_drvdata(pdev, NULL);
309}
310
311static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
312{
313 struct net_device *dev = (struct net_device *) dev_instance;
314 struct xircom_private *card = netdev_priv(dev);
315 unsigned int status;
316 int i;
317
318 spin_lock(&card->lock);
319 status = inl(card->io_port+CSR5);
320
321#if defined DEBUG && DEBUG > 1
322 print_binary(status);
323 pr_debug("tx status 0x%08x 0x%08x\n",
324 card->tx_buffer[0], card->tx_buffer[4]);
325 pr_debug("rx status 0x%08x 0x%08x\n",
326 card->rx_buffer[0], card->rx_buffer[4]);
327#endif
328 /* Handle shared irq and hotplug */
329 if (status == 0 || status == 0xffffffff) {
330 spin_unlock(&card->lock);
331 return IRQ_NONE;
332 }
333
334 if (link_status_changed(card)) {
335 int newlink;
336 netdev_dbg(dev, "Link status has changed\n");
337 newlink = link_status(card);
338 netdev_info(dev, "Link is %d mbit\n", newlink);
339 if (newlink)
340 netif_carrier_on(dev);
341 else
342 netif_carrier_off(dev);
343
344 }
345
346 /* Clear all remaining interrupts */
347 status |= 0xffffffff; /* FIXME: make this clear only the
348 real existing bits */
349 outl(status,card->io_port+CSR5);
350
351
352 for (i=0;i<NUMDESCRIPTORS;i++)
353 investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
354 for (i=0;i<NUMDESCRIPTORS;i++)
355 investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
356
357 spin_unlock(&card->lock);
358 return IRQ_HANDLED;
359}
360
361static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
362 struct net_device *dev)
363{
364 struct xircom_private *card;
365 unsigned long flags;
366 int nextdescriptor;
367 int desc;
368
369 card = netdev_priv(dev);
370 spin_lock_irqsave(&card->lock,flags);
371
372 /* First see if we can free some descriptors */
373 for (desc=0;desc<NUMDESCRIPTORS;desc++)
374 investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
375
376
377 nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
378 desc = card->transmit_used;
379
380 /* only send the packet if the descriptor is free */
381 if (card->tx_buffer[4*desc]==0) {
382 /* Copy the packet data; zero the memory first as the card
383 sometimes sends more than you ask it to. */
384
385 memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
386 skb_copy_from_linear_data(skb,
387 &(card->tx_buffer[bufferoffsets[desc] / 4]),
388 skb->len);
389 /* FIXME: The specification tells us that the length we send HAS to be a multiple of
390 4 bytes. */
391
392 card->tx_buffer[4*desc+1] = cpu_to_le32(skb->len);
393 if (desc == NUMDESCRIPTORS - 1) /* bit 25: last descriptor of the ring */
394 card->tx_buffer[4*desc+1] |= cpu_to_le32(1<<25);
395
396 card->tx_buffer[4*desc+1] |= cpu_to_le32(0xF0000000);
397 /* 0xF0... means want interrupts*/
398 card->tx_skb[desc] = skb;
399
400 wmb();
401 /* This gives the descriptor to the card */
402 card->tx_buffer[4*desc] = cpu_to_le32(0x80000000);
403 trigger_transmit(card);
404		if (card->tx_buffer[nextdescriptor*4] & cpu_to_le32(0x80000000)) {
405 /* next descriptor is occupied... */
406 netif_stop_queue(dev);
407 }
408 card->transmit_used = nextdescriptor;
409 spin_unlock_irqrestore(&card->lock,flags);
410 return NETDEV_TX_OK;
411 }
412
413 /* Uh oh... no free descriptor... drop the packet */
414 netif_stop_queue(dev);
415 spin_unlock_irqrestore(&card->lock,flags);
416 trigger_transmit(card);
417
418 return NETDEV_TX_BUSY;
419}
420
421
422
423
424static int xircom_open(struct net_device *dev)
425{
426 struct xircom_private *xp = netdev_priv(dev);
427 int retval;
428
429 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
430 dev->irq);
431 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
432 if (retval)
433 return retval;
434
435 xircom_up(xp);
436 xp->open = 1;
437
438 return 0;
439}
440
441static int xircom_close(struct net_device *dev)
442{
443 struct xircom_private *card;
444 unsigned long flags;
445
446 card = netdev_priv(dev);
447 netif_stop_queue(dev); /* we don't want new packets */
448
449
450 spin_lock_irqsave(&card->lock,flags);
451
452 disable_all_interrupts(card);
453#if 0
454 /* We can enable this again once we send dummy packets on ifconfig ethX up */
455 deactivate_receiver(card);
456 deactivate_transmitter(card);
457#endif
458 remove_descriptors(card);
459
460 spin_unlock_irqrestore(&card->lock,flags);
461
462 card->open = 0;
463 free_irq(dev->irq,dev);
464
465 return 0;
466
467}
468
469
470#ifdef CONFIG_NET_POLL_CONTROLLER
471static void xircom_poll_controller(struct net_device *dev)
472{
473 disable_irq(dev->irq);
474 xircom_interrupt(dev->irq, dev);
475 enable_irq(dev->irq);
476}
477#endif
478
479
480static void initialize_card(struct xircom_private *card)
481{
482 unsigned int val;
483 unsigned long flags;
484
485 spin_lock_irqsave(&card->lock, flags);
486
487 /* First: reset the card */
488 val = inl(card->io_port + CSR0);
489 val |= 0x01; /* Software reset */
490 outl(val, card->io_port + CSR0);
491
492 udelay(100); /* give the card some time to reset */
493
494 val = inl(card->io_port + CSR0);
495 val &= ~0x01; /* disable Software reset */
496 outl(val, card->io_port + CSR0);
497
498
499 val = 0; /* Value 0x00 is a safe and conservative value
500 for the PCI configuration settings */
501 outl(val, card->io_port + CSR0);
502
503
504 disable_all_interrupts(card);
505 deactivate_receiver(card);
506 deactivate_transmitter(card);
507
508 spin_unlock_irqrestore(&card->lock, flags);
509}
510
511/*
512trigger_transmit causes the card to check for frames to be transmitted.
513This is accomplished by writing to the CSR1 port. The documentation
514claims that the act of writing is sufficient and that the value is
515ignored; I chose zero.
516*/
517static void trigger_transmit(struct xircom_private *card)
518{
519 unsigned int val;
520
521 val = 0;
522 outl(val, card->io_port + CSR1);
523}
524
525/*
526trigger_receive causes the card to check for empty frames in the
527descriptor list in which packets can be received.
528This is accomplished by writing to the CSR2 port. The documentation
529claims that the act of writing is sufficient and that the value is
530ignored; I chose zero.
531*/
532static void trigger_receive(struct xircom_private *card)
533{
534 unsigned int val;
535
536 val = 0;
537 outl(val, card->io_port + CSR2);
538}
539
540/*
541setup_descriptors initializes the send and receive buffers to be valid
542descriptors and programs the addresses into the card.
543*/
544static void setup_descriptors(struct xircom_private *card)
545{
546 u32 address;
547 int i;
548
549 BUG_ON(card->rx_buffer == NULL);
550 BUG_ON(card->tx_buffer == NULL);
551
552 /* Receive descriptors */
553 memset(card->rx_buffer, 0, 128); /* clear the descriptors */
554 for (i=0;i<NUMDESCRIPTORS;i++ ) {
555
556 /* Rx Descr0: It's empty, let the card own it, no errors -> 0x80000000 */
557 card->rx_buffer[i*4 + 0] = cpu_to_le32(0x80000000);
558 /* Rx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
559 card->rx_buffer[i*4 + 1] = cpu_to_le32(1536);
560 if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
561 card->rx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
562
563 /* Rx Descr2: address of the buffer
564 we store the buffer at the 2nd half of the page */
565
566 address = card->rx_dma_handle;
567 card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
568 /* Rx Desc3: address of 2nd buffer -> 0 */
569 card->rx_buffer[i*4 + 3] = 0;
570 }
571
572 wmb();
573 /* Write the receive descriptor ring address to the card */
574 address = card->rx_dma_handle;
575 outl(address, card->io_port + CSR3); /* Receive descr list address */
576
577
578 /* transmit descriptors */
579 memset(card->tx_buffer, 0, 128); /* clear the descriptors */
580
581 for (i=0;i<NUMDESCRIPTORS;i++ ) {
582 /* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
583 card->tx_buffer[i*4 + 0] = 0x00000000;
584 /* Tx Descr1: buffer 1 is 1536 bytes, buffer 2 is 0 bytes */
585 card->tx_buffer[i*4 + 1] = cpu_to_le32(1536);
586 if (i == NUMDESCRIPTORS - 1) /* bit 25 is "last descriptor" */
587 card->tx_buffer[i*4 + 1] |= cpu_to_le32(1 << 25);
588
589 /* Tx Descr2: address of the buffer
590 we store the buffer at the 2nd half of the page */
591 address = card->tx_dma_handle;
592 card->tx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
593 /* Tx Desc3: address of 2nd buffer -> 0 */
594 card->tx_buffer[i*4 + 3] = 0;
595 }
596
597 wmb();
598	/* Write the transmit descriptor ring address to the card */
599 address = card->tx_dma_handle;
600 outl(address, card->io_port + CSR4); /* xmit descr list address */
601}
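/*
 * For reference, the descriptor word layout as this driver uses it, both
 * here and in xircom_start_xmit (informal names, inferred from the code
 * rather than a datasheet):
 *
 *	word 0:	status; bit 31 = OWN (1: the card owns the descriptor);
 *		on Rx, the bits from 16 up hold the received frame length
 *		once the card is done
 *	word 1:	control; the low bits give the buffer 1 size (1536 here),
 *		bit 25 marks the last descriptor of the ring, and the Tx
 *		path sets 0xF0000000 to request an interrupt on completion
 *	word 2:	bus address of buffer 1
 *	word 3:	bus address of buffer 2 (unused here, always 0)
 */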
602
603/*
604remove_descriptors informs the card the descriptors are no longer
605valid by setting the address in the card to 0x00.
606*/
607static void remove_descriptors(struct xircom_private *card)
608{
609 unsigned int val;
610
611 val = 0;
612 outl(val, card->io_port + CSR3); /* Receive descriptor address */
613 outl(val, card->io_port + CSR4); /* Send descriptor address */
614}
615
616/*
617link_status_changed returns 1 if the card has indicated that
618the link status has changed. The new link status has to be read from CSR12.
619
620This function also clears the status-bit.
621*/
622static int link_status_changed(struct xircom_private *card)
623{
624 unsigned int val;
625
626 val = inl(card->io_port + CSR5); /* Status register */
627
628 if ((val & (1 << 27)) == 0) /* no change */
629 return 0;
630
631 /* clear the event by writing a 1 to the bit in the
632 status register. */
633 val = (1 << 27);
634 outl(val, card->io_port + CSR5);
635
636 return 1;
637}
638
639
640/*
641transmit_active returns 1 if the transmitter on the card is
642in a non-stopped state.
643*/
644static int transmit_active(struct xircom_private *card)
645{
646 unsigned int val;
647
648 val = inl(card->io_port + CSR5); /* Status register */
649
650 if ((val & (7 << 20)) == 0) /* transmitter disabled */
651 return 0;
652
653 return 1;
654}
655
656/*
657receive_active returns 1 if the receiver on the card is
658in a non-stopped state.
659*/
660static int receive_active(struct xircom_private *card)
661{
662 unsigned int val;
663
664 val = inl(card->io_port + CSR5); /* Status register */
665
666 if ((val & (7 << 17)) == 0) /* receiver disabled */
667 return 0;
668
669 return 1;
670}
671
672/*
673activate_receiver enables the receiver on the card.
674Before being allowed to activate the receiver, the receiver
675must be completely de-activated. To achieve this,
676this code actually disables the receiver first; then it waits for the
677receiver to become inactive, then it activates the receiver and then
678it waits for the receiver to be active.
679
680must be called with the lock held and interrupts disabled.
681*/
682static void activate_receiver(struct xircom_private *card)
683{
684 unsigned int val;
685 int counter;
686
687 val = inl(card->io_port + CSR6); /* Operation mode */
688
689 /* If the "active" bit is set and the receiver is already
690 active, no need to do the expensive thing */
691 if ((val&2) && (receive_active(card)))
692 return;
693
694
695 val = val & ~2; /* disable the receiver */
696 outl(val, card->io_port + CSR6);
697
698 counter = 10;
699 while (counter > 0) {
700 if (!receive_active(card))
701 break;
702 /* wait a while */
703 udelay(50);
704 counter--;
705 if (counter <= 0)
706 netdev_err(card->dev, "Receiver failed to deactivate\n");
707 }
708
709 /* enable the receiver */
710 val = inl(card->io_port + CSR6); /* Operation mode */
711 val = val | 2; /* enable the receiver */
712 outl(val, card->io_port + CSR6);
713
714 /* now wait for the card to activate again */
715 counter = 10;
716 while (counter > 0) {
717 if (receive_active(card))
718 break;
719 /* wait a while */
720 udelay(50);
721 counter--;
722 if (counter <= 0)
723 netdev_err(card->dev,
724 "Receiver failed to re-activate\n");
725 }
726}
727
728/*
729deactivate_receiver disables the receiver on the card.
730To achieve this, this code disables the receiver first;
731then it waits for the receiver to become inactive.
732
733must be called with the lock held and interrupts disabled.
734*/
735static void deactivate_receiver(struct xircom_private *card)
736{
737 unsigned int val;
738 int counter;
739
740 val = inl(card->io_port + CSR6); /* Operation mode */
741 val = val & ~2; /* disable the receiver */
742 outl(val, card->io_port + CSR6);
743
744 counter = 10;
745 while (counter > 0) {
746 if (!receive_active(card))
747 break;
748 /* wait a while */
749 udelay(50);
750 counter--;
751 if (counter <= 0)
752 netdev_err(card->dev, "Receiver failed to deactivate\n");
753 }
754}
755
756
757/*
758activate_transmitter enables the transmitter on the card.
759Before being allowed to activate the transmitter, the transmitter
760must be completely de-activated. To achieve this,
761this code actually disables the transmitter first; then it waits for the
762transmitter to become inactive, then it activates the transmitter and then
763it waits for the transmitter to be active again.
764
765must be called with the lock held and interrupts disabled.
766*/
767static void activate_transmitter(struct xircom_private *card)
768{
769 unsigned int val;
770 int counter;
771
772 val = inl(card->io_port + CSR6); /* Operation mode */
773
774	/* If the "active" bit is set and the transmitter is already
775	   active, no need to do the expensive thing */
776 if ((val&(1<<13)) && (transmit_active(card)))
777 return;
778
779 val = val & ~(1 << 13); /* disable the transmitter */
780 outl(val, card->io_port + CSR6);
781
782 counter = 10;
783 while (counter > 0) {
784 if (!transmit_active(card))
785 break;
786 /* wait a while */
787 udelay(50);
788 counter--;
789 if (counter <= 0)
790 netdev_err(card->dev,
791 "Transmitter failed to deactivate\n");
792 }
793
794 /* enable the transmitter */
795 val = inl(card->io_port + CSR6); /* Operation mode */
796 val = val | (1 << 13); /* enable the transmitter */
797 outl(val, card->io_port + CSR6);
798
799 /* now wait for the card to activate again */
800 counter = 10;
801 while (counter > 0) {
802 if (transmit_active(card))
803 break;
804 /* wait a while */
805 udelay(50);
806 counter--;
807 if (counter <= 0)
808 netdev_err(card->dev,
809 "Transmitter failed to re-activate\n");
810 }
811}
812
813/*
814deactivate_transmitter disables the transmitter on the card.
815To achieve this, this code disables the transmitter first;
816then it waits for the transmitter to become inactive.
817
818must be called with the lock held and interrupts disabled.
819*/
820static void deactivate_transmitter(struct xircom_private *card)
821{
822 unsigned int val;
823 int counter;
824
825 val = inl(card->io_port + CSR6); /* Operation mode */
826	val = val & ~(1 << 13);		/* disable the transmitter */
827 outl(val, card->io_port + CSR6);
828
829 counter = 20;
830 while (counter > 0) {
831 if (!transmit_active(card))
832 break;
833 /* wait a while */
834 udelay(50);
835 counter--;
836 if (counter <= 0)
837 netdev_err(card->dev,
838 "Transmitter failed to deactivate\n");
839 }
840}
841
842
843/*
844enable_transmit_interrupt enables the transmit interrupt
845
846must be called with the lock held and interrupts disabled.
847*/
848static void enable_transmit_interrupt(struct xircom_private *card)
849{
850 unsigned int val;
851
852 val = inl(card->io_port + CSR7); /* Interrupt enable register */
853 val |= 1; /* enable the transmit interrupt */
854 outl(val, card->io_port + CSR7);
855}
856
857
858/*
859enable_receive_interrupt enables the receive interrupt
860
861must be called with the lock held and interrupts disabled.
862*/
863static void enable_receive_interrupt(struct xircom_private *card)
864{
865 unsigned int val;
866
867 val = inl(card->io_port + CSR7); /* Interrupt enable register */
868 val = val | (1 << 6); /* enable the receive interrupt */
869 outl(val, card->io_port + CSR7);
870}
871
872/*
873enable_link_interrupt enables the link status change interrupt
874
875must be called with the lock held and interrupts disabled.
876*/
877static void enable_link_interrupt(struct xircom_private *card)
878{
879 unsigned int val;
880
881 val = inl(card->io_port + CSR7); /* Interrupt enable register */
882	val = val | (1 << 27);	/* enable the link status change interrupt */
883 outl(val, card->io_port + CSR7);
884}
885
886
887
888/*
889disable_all_interrupts disables all interrupts
890
891must be called with the lock held and interrupts disabled.
892*/
893static void disable_all_interrupts(struct xircom_private *card)
894{
895 unsigned int val;
896
897 val = 0; /* disable all interrupts */
898 outl(val, card->io_port + CSR7);
899}
900
901/*
902enable_common_interrupts enables several weird interrupts
903
904must be called with the lock held and interrupts disabled.
905*/
906static void enable_common_interrupts(struct xircom_private *card)
907{
908 unsigned int val;
909
910 val = inl(card->io_port + CSR7); /* Interrupt enable register */
911 val |= (1<<16); /* Normal Interrupt Summary */
912 val |= (1<<15); /* Abnormal Interrupt Summary */
913 val |= (1<<13); /* Fatal bus error */
914 val |= (1<<8); /* Receive Process Stopped */
915 val |= (1<<7); /* Receive Buffer Unavailable */
916 val |= (1<<5); /* Transmit Underflow */
917 val |= (1<<2); /* Transmit Buffer Unavailable */
918 val |= (1<<1); /* Transmit Process Stopped */
919 outl(val, card->io_port + CSR7);
920}
921
922/*
923enable_promisc starts promisc mode
924
925must be called with the lock held and interrupts disabled.
926*/
927static int enable_promisc(struct xircom_private *card)
928{
929 unsigned int val;
930
931 val = inl(card->io_port + CSR6);
932 val = val | (1 << 6);
933 outl(val, card->io_port + CSR6);
934
935 return 1;
936}
937
938
939
940
941/*
942link_status() checks the link status; it returns 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link.
943
944Must be called in locked state with interrupts disabled
945*/
946static int link_status(struct xircom_private *card)
947{
948 unsigned int val;
949
950 val = inb(card->io_port + CSR12);
951
952	if (!(val&(1<<2)))  /* bit 2 is 0 for a 10mbit link, 1 otherwise */
953		return 10;
954	if (!(val&(1<<1)))  /* bit 1 is 0 for a 100mbit link, 1 otherwise */
955		return 100;
956
957 /* If we get here -> no link at all */
958
959 return 0;
960}
961
962
963
964
965
966/*
967 read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
968
969 This function will take the spinlock itself and cannot, as a result, be called with the lock held.
970 */
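/*
 * The loop below walks a CIS-style tuple chain in the boot ROM window:
 * CSR10 selects a byte offset, CSR9 returns that byte. Each tuple, as
 * interpreted from the code rather than a datasheet, looks like:
 *
 *	[0] tuple code	(0x22, a function-extension tuple)
 *	[1] link	(bytes to the next tuple; 0 ends the chain)
 *	[2] data id	(0x04 = LAN node ID)
 *	[3] data count	(0x06 = six address bytes follow)
 *	[4..9]		the MAC address itself
 */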
971static void read_mac_address(struct xircom_private *card)
972{
973 unsigned char j, tuple, link, data_id, data_count;
974 unsigned long flags;
975 int i;
976
977 spin_lock_irqsave(&card->lock, flags);
978
979 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
980 for (i = 0x100; i < 0x1f7; i += link + 2) {
981 outl(i, card->io_port + CSR10);
982 tuple = inl(card->io_port + CSR9) & 0xff;
983 outl(i + 1, card->io_port + CSR10);
984 link = inl(card->io_port + CSR9) & 0xff;
985 outl(i + 2, card->io_port + CSR10);
986 data_id = inl(card->io_port + CSR9) & 0xff;
987 outl(i + 3, card->io_port + CSR10);
988 data_count = inl(card->io_port + CSR9) & 0xff;
989 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
990 /*
991 * This is it. We have the data we want.
992 */
993 for (j = 0; j < 6; j++) {
994 outl(i + j + 4, card->io_port + CSR10);
995 card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff;
996 }
997 break;
998 } else if (link == 0) {
999 break;
1000 }
1001 }
1002 spin_unlock_irqrestore(&card->lock, flags);
1003 pr_debug(" %pM\n", card->dev->dev_addr);
1004}
1005
1006
1007/*
1008 transceiver_voodoo() enables the external UTP plug thingy.
1009 it's called voodoo as I stole this code and cannot cross-reference
1010 it with the specification.
1011 */
1012static void transceiver_voodoo(struct xircom_private *card)
1013{
1014 unsigned long flags;
1015
1016 /* disable all powermanagement */
1017 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
1018
1019 setup_descriptors(card);
1020
1021 spin_lock_irqsave(&card->lock, flags);
1022
1023 outl(0x0008, card->io_port + CSR15);
1024 udelay(25);
1025 outl(0xa8050000, card->io_port + CSR15);
1026 udelay(25);
1027 outl(0xa00f0000, card->io_port + CSR15);
1028 udelay(25);
1029
1030 spin_unlock_irqrestore(&card->lock, flags);
1031
1032 netif_start_queue(card->dev);
1033}
1034
1035
1036static void xircom_up(struct xircom_private *card)
1037{
1038 unsigned long flags;
1039 int i;
1040
1041 /* disable all powermanagement */
1042 pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
1043
1044 setup_descriptors(card);
1045
1046 spin_lock_irqsave(&card->lock, flags);
1047
1048
1049 enable_link_interrupt(card);
1050 enable_transmit_interrupt(card);
1051 enable_receive_interrupt(card);
1052 enable_common_interrupts(card);
1053 enable_promisc(card);
1054
1055 /* The card can have received packets already, read them away now */
1056 for (i=0;i<NUMDESCRIPTORS;i++)
1057 investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
1058
1059
1060 spin_unlock_irqrestore(&card->lock, flags);
1061 trigger_receive(card);
1062 trigger_transmit(card);
1063 netif_start_queue(card->dev);
1064}
1065
1066/* Bufferoffset is in BYTES */
1067static void
1068investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
1069 int descnr, unsigned int bufferoffset)
1070{
1071 int status;
1072
1073 status = le32_to_cpu(card->rx_buffer[4*descnr]);
1074
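	/* Word 0 of a tulip-style Rx descriptor: bit 31 = OWN, with the
	   frame length in the upper half once the card is done; a positive
	   status therefore means the card handed the descriptor back. */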
1075 if (status > 0) { /* packet received */
1076
1077 /* TODO: discard error packets */
1078
1079 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1080 /* minus 4, we don't want the CRC */
1081 struct sk_buff *skb;
1082
1083 if (pkt_len > 1518) {
1084 netdev_err(dev, "Packet length %i is bogus\n", pkt_len);
1085 pkt_len = 1518;
1086 }
1087
1088 skb = dev_alloc_skb(pkt_len + 2);
1089 if (skb == NULL) {
1090 dev->stats.rx_dropped++;
1091 goto out;
1092 }
1093 skb_reserve(skb, 2);
1094 skb_copy_to_linear_data(skb,
1095 &card->rx_buffer[bufferoffset / 4],
1096 pkt_len);
1097 skb_put(skb, pkt_len);
1098 skb->protocol = eth_type_trans(skb, dev);
1099 netif_rx(skb);
1100 dev->stats.rx_packets++;
1101 dev->stats.rx_bytes += pkt_len;
1102
1103out:
1104 /* give the buffer back to the card */
1105 card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
1106 trigger_receive(card);
1107 }
1108}
1109
1110
1111/* Bufferoffset is in BYTES */
1112static void
1113investigate_write_descriptor(struct net_device *dev,
1114 struct xircom_private *card,
1115 int descnr, unsigned int bufferoffset)
1116{
1117 int status;
1118
1119 status = le32_to_cpu(card->tx_buffer[4*descnr]);
1120#if 0
1121 if (status & 0x8000) { /* Major error */
1122 pr_err("Major transmit error status %x\n", status);
1123 card->tx_buffer[4*descnr] = 0;
1124 netif_wake_queue (dev);
1125 }
1126#endif
1127 if (status > 0) { /* bit 31 is 0 when done */
1128 if (card->tx_skb[descnr]!=NULL) {
1129 dev->stats.tx_bytes += card->tx_skb[descnr]->len;
1130 dev_kfree_skb_irq(card->tx_skb[descnr]);
1131 }
1132 card->tx_skb[descnr] = NULL;
1133 /* Bit 8 in the status field is 1 if there was a collision */
1134 if (status & (1 << 8))
1135 dev->stats.collisions++;
1136 card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
1137 netif_wake_queue (dev);
1138 dev->stats.tx_packets++;
1139 }
1140}
1141
1142static int __init xircom_init(void)
1143{
1144 return pci_register_driver(&xircom_ops);
1145}
1146
1147static void __exit xircom_exit(void)
1148{
1149 pci_unregister_driver(&xircom_ops);
1150}
1151
1152module_init(xircom_init)
1153module_exit(xircom_exit)
1154