author     Ondrej Zary <linux@rainbow-software.org>    2015-11-15 16:36:12 -0500
committer  David S. Miller <davem@davemloft.net>       2015-11-16 17:11:31 -0500
commit     f1a454a37618b819f2528ccd234f77a02b3a6016 (patch)
tree       dabd3df91d989a0563021fc361d7a2514425c35b
parent     c3f45d322cbd379c46466cc2ecab7e2d719b22ed (diff)
ipg: Remove ipg driver
Now that IP1000A chips are supported by dl2k driver, the buggy ipg
driver can be removed.
Signed-off-by: Ondrej Zary <linux@rainbow-software.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
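For users of the affected IP1000A hardware, the practical follow-up to this removal is switching to the dl2k driver named in the commit message. A minimal sketch of the corresponding kernel configuration change, assuming the usual CONFIG_DL2K option and the "dl2k" module name:

    # CONFIG_IP1000 disappears with this patch; the icplus/ipg option no longer exists.
    # Build the replacement D-Link/IC Plus driver instead (loads as the "dl2k" module):
    CONFIG_DL2K=m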
-rw-r--r--  MAINTAINERS                          |    7
-rw-r--r--  drivers/net/ethernet/Kconfig         |    1
-rw-r--r--  drivers/net/ethernet/Makefile        |    1
-rw-r--r--  drivers/net/ethernet/icplus/Kconfig  |   13
-rw-r--r--  drivers/net/ethernet/icplus/Makefile |    5
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c    | 2300
-rw-r--r--  drivers/net/ethernet/icplus/ipg.h    |  748
7 files changed, 0 insertions, 3075 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 35fe7ae0492e..45320675a460 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5709,13 +5709,6 @@ M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S: Maintained
 F: net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
-M: Francois Romieu <romieu@fr.zoreil.com>
-M: Sorbica Shieh <sorbica@icplus.com.tw>
-L: netdev@vger.kernel.org
-S: Maintained
-F: drivers/net/ethernet/icplus/ipg.*
-
 IPATH DRIVER
 M: Mike Marciniszyn <infinipath@intel.com>
 L: linux-rdma@vger.kernel.org
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa7597dab9..955d06b9cdba 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@ source "drivers/net/ethernet/ibm/Kconfig"
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
-source "drivers/net/ethernet/icplus/Kconfig"
 
 config JME
 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808110a1..4a2ee98738f0 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
-obj-$(CONFIG_IP1000) += icplus/
 obj-$(CONFIG_JME) += jme.o
 obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9d2e26..000000000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# IC Plus device configuration
-#
-
-config IP1000
-	tristate "IP1000 Gigabit Ethernet support"
-	depends on PCI
-	select MII
-	---help---
-	  This driver supports IP1000 gigabit Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called ipg.  This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1f36aa..000000000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the IC Plus device drivers
-#
-
-obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af83f070..000000000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
1 | /* | ||
2 | * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter | ||
3 | * | ||
4 | * Copyright (C) 2003, 2007 IC Plus Corp | ||
5 | * | ||
6 | * Original Author: | ||
7 | * | ||
8 | * Craig Rich | ||
9 | * Sundance Technology, Inc. | ||
10 | * www.sundanceti.com | ||
11 | * craig_rich@sundanceti.com | ||
12 | * | ||
13 | * Current Maintainer: | ||
14 | * | ||
15 | * Sorbica Shieh. | ||
16 | * http://www.icplus.com.tw | ||
17 | * sorbica@icplus.com.tw | ||
18 | * | ||
19 | * Jesse Huang | ||
20 | * http://www.icplus.com.tw | ||
21 | * jesse@icplus.com.tw | ||
22 | */ | ||
23 | |||
24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
25 | |||
26 | #include <linux/crc32.h> | ||
27 | #include <linux/ethtool.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/gfp.h> | ||
30 | #include <linux/mii.h> | ||
31 | #include <linux/mutex.h> | ||
32 | |||
33 | #include <asm/div64.h> | ||
34 | |||
35 | #define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) | ||
36 | #define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) | ||
37 | #define IPG_RESET_MASK \ | ||
38 | (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ | ||
39 | IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ | ||
40 | IPG_AC_AUTO_INIT) | ||
41 | |||
42 | #define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) | ||
43 | #define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) | ||
44 | #define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) | ||
45 | |||
46 | #define ipg_r32(reg) ioread32(ioaddr + (reg)) | ||
47 | #define ipg_r16(reg) ioread16(ioaddr + (reg)) | ||
48 | #define ipg_r8(reg) ioread8(ioaddr + (reg)) | ||
49 | |||
50 | enum { | ||
51 | netdev_io_size = 128 | ||
52 | }; | ||
53 | |||
54 | #include "ipg.h" | ||
55 | #define DRV_NAME "ipg" | ||
56 | |||
57 | MODULE_AUTHOR("IC Plus Corp. 2003"); | ||
58 | MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); | ||
59 | MODULE_LICENSE("GPL"); | ||
60 | |||
61 | /* | ||
62 | * Defaults | ||
63 | */ | ||
64 | #define IPG_MAX_RXFRAME_SIZE 0x0600 | ||
65 | #define IPG_RXFRAG_SIZE 0x0600 | ||
66 | #define IPG_RXSUPPORT_SIZE 0x0600 | ||
67 | #define IPG_IS_JUMBO false | ||
68 | |||
69 | /* | ||
70 | * Variable record -- index by leading revision/length | ||
71 | * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN | ||
72 | */ | ||
73 | static const unsigned short DefaultPhyParam[] = { | ||
74 | /* 11/12/03 IP1000A v1-3 rev=0x40 */ | ||
75 | /*-------------------------------------------------------------------------- | ||
76 | (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, | ||
77 | 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, | ||
78 | 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, | ||
79 | --------------------------------------------------------------------------*/ | ||
80 | /* 12/17/03 IP1000A v1-4 rev=0x40 */ | ||
81 | (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
82 | 0x0000, | ||
83 | 30, 0x005e, 9, 0x0700, | ||
84 | /* 01/09/04 IP1000A v1-5 rev=0x41 */ | ||
85 | (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
86 | 0x0000, | ||
87 | 30, 0x005e, 9, 0x0700, | ||
88 | 0x0000 | ||
89 | }; | ||
90 | |||
91 | static const char * const ipg_brand_name[] = { | ||
92 | "IC PLUS IP1000 1000/100/10 based NIC", | ||
93 | "Sundance Technology ST2021 based NIC", | ||
94 | "Tamarack Microelectronics TC9020/9021 based NIC", | ||
95 | "D-Link NIC IP1000A" | ||
96 | }; | ||
97 | |||
98 | static const struct pci_device_id ipg_pci_tbl[] = { | ||
99 | { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, | ||
100 | { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, | ||
101 | { PCI_VDEVICE(DLINK, 0x9021), 2 }, | ||
102 | { PCI_VDEVICE(DLINK, 0x4020), 3 }, | ||
103 | { 0, } | ||
104 | }; | ||
105 | |||
106 | MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); | ||
107 | |||
108 | static inline void __iomem *ipg_ioaddr(struct net_device *dev) | ||
109 | { | ||
110 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
111 | return sp->ioaddr; | ||
112 | } | ||
113 | |||
114 | #ifdef IPG_DEBUG | ||
115 | static void ipg_dump_rfdlist(struct net_device *dev) | ||
116 | { | ||
117 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
118 | void __iomem *ioaddr = sp->ioaddr; | ||
119 | unsigned int i; | ||
120 | u32 offset; | ||
121 | |||
122 | IPG_DEBUG_MSG("_dump_rfdlist\n"); | ||
123 | |||
124 | netdev_info(dev, "rx_current = %02x\n", sp->rx_current); | ||
125 | netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty); | ||
126 | netdev_info(dev, "RFDList start address = %016lx\n", | ||
127 | (unsigned long)sp->rxd_map); | ||
128 | netdev_info(dev, "RFDListPtr register = %08x%08x\n", | ||
129 | ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); | ||
130 | |||
131 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
132 | offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; | ||
133 | netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n", | ||
134 | i, offset, (unsigned long)sp->rxd[i].next_desc); | ||
135 | offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; | ||
136 | netdev_info(dev, "%02x %04x RFS = %016lx\n", | ||
137 | i, offset, (unsigned long)sp->rxd[i].rfs); | ||
138 | offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; | ||
139 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
140 | i, offset, (unsigned long)sp->rxd[i].frag_info); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static void ipg_dump_tfdlist(struct net_device *dev) | ||
145 | { | ||
146 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
147 | void __iomem *ioaddr = sp->ioaddr; | ||
148 | unsigned int i; | ||
149 | u32 offset; | ||
150 | |||
151 | IPG_DEBUG_MSG("_dump_tfdlist\n"); | ||
152 | |||
153 | netdev_info(dev, "tx_current = %02x\n", sp->tx_current); | ||
154 | netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty); | ||
155 | netdev_info(dev, "TFDList start address = %016lx\n", | ||
156 | (unsigned long) sp->txd_map); | ||
157 | netdev_info(dev, "TFDListPtr register = %08x%08x\n", | ||
158 | ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); | ||
159 | |||
160 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
161 | offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; | ||
162 | netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n", | ||
163 | i, offset, (unsigned long)sp->txd[i].next_desc); | ||
164 | |||
165 | offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; | ||
166 | netdev_info(dev, "%02x %04x TFC = %016lx\n", | ||
167 | i, offset, (unsigned long) sp->txd[i].tfc); | ||
168 | offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; | ||
169 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
170 | i, offset, (unsigned long) sp->txd[i].frag_info); | ||
171 | } | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) | ||
176 | { | ||
177 | ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); | ||
178 | ndelay(IPG_PC_PHYCTRLWAIT_NS); | ||
179 | } | ||
180 | |||
181 | static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) | ||
182 | { | ||
183 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); | ||
184 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); | ||
185 | } | ||
186 | |||
187 | static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
188 | { | ||
189 | phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; | ||
190 | |||
191 | ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); | ||
192 | } | ||
193 | |||
194 | static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
195 | { | ||
196 | ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | | ||
197 | phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); | ||
198 | } | ||
199 | |||
200 | static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
201 | { | ||
202 | u16 bit_data; | ||
203 | |||
204 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); | ||
205 | |||
206 | bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; | ||
207 | |||
208 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); | ||
209 | |||
210 | return bit_data; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Read a register from the Physical Layer device located | ||
215 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
216 | */ | ||
217 | static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) | ||
218 | { | ||
219 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
220 | /* | ||
221 | * The GMII mangement frame structure for a read is as follows: | ||
222 | * | ||
223 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
224 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
225 | * | ||
226 | * <32 1s> = 32 consecutive logic 1 values | ||
227 | * A = bit of Physical Layer device address (MSB first) | ||
228 | * R = bit of register address (MSB first) | ||
229 | * z = High impedance state | ||
230 | * D = bit of read data (MSB first) | ||
231 | * | ||
232 | * Transmission order is 'Preamble' field first, bits transmitted | ||
233 | * left to right (first to last). | ||
234 | */ | ||
235 | struct { | ||
236 | u32 field; | ||
237 | unsigned int len; | ||
238 | } p[] = { | ||
239 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
240 | { GMII_ST, 2 }, /* ST */ | ||
241 | { GMII_READ, 2 }, /* OP */ | ||
242 | { phy_id, 5 }, /* PHYAD */ | ||
243 | { phy_reg, 5 }, /* REGAD */ | ||
244 | { 0x0000, 2 }, /* TA */ | ||
245 | { 0x0000, 16 }, /* DATA */ | ||
246 | { 0x0000, 1 } /* IDLE */ | ||
247 | }; | ||
248 | unsigned int i, j; | ||
249 | u8 polarity, data; | ||
250 | |||
251 | polarity = ipg_r8(PHY_CTRL); | ||
252 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
253 | |||
254 | /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ | ||
255 | for (j = 0; j < 5; j++) { | ||
256 | for (i = 0; i < p[j].len; i++) { | ||
257 | /* For each variable length field, the MSB must be | ||
258 | * transmitted first. Rotate through the field bits, | ||
259 | * starting with the MSB, and move each bit into the | ||
260 | * the 1st (2^1) bit position (this is the bit position | ||
261 | * corresponding to the MgmtData bit of the PhyCtrl | ||
262 | * register for the IPG). | ||
263 | * | ||
264 | * Example: ST = 01; | ||
265 | * | ||
266 | * First write a '0' to bit 1 of the PhyCtrl | ||
267 | * register, then write a '1' to bit 1 of the | ||
268 | * PhyCtrl register. | ||
269 | * | ||
270 | * To do this, right shift the MSB of ST by the value: | ||
271 | * [field length - 1 - #ST bits already written] | ||
272 | * then left shift this result by 1. | ||
273 | */ | ||
274 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
275 | data &= IPG_PC_MGMTDATA; | ||
276 | data |= polarity | IPG_PC_MGMTDIR; | ||
277 | |||
278 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | send_three_state(ioaddr, polarity); | ||
283 | |||
284 | read_phy_bit(ioaddr, polarity); | ||
285 | |||
286 | /* | ||
287 | * For a read cycle, the bits for the next two fields (TA and | ||
288 | * DATA) are driven by the PHY (the IPG reads these bits). | ||
289 | */ | ||
290 | for (i = 0; i < p[6].len; i++) { | ||
291 | p[6].field |= | ||
292 | (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); | ||
293 | } | ||
294 | |||
295 | send_three_state(ioaddr, polarity); | ||
296 | send_three_state(ioaddr, polarity); | ||
297 | send_three_state(ioaddr, polarity); | ||
298 | send_end(ioaddr, polarity); | ||
299 | |||
300 | /* Return the value of the DATA field. */ | ||
301 | return p[6].field; | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * Write to a register from the Physical Layer device located | ||
306 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
307 | */ | ||
308 | static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) | ||
309 | { | ||
310 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
311 | /* | ||
312 | * The GMII mangement frame structure for a read is as follows: | ||
313 | * | ||
314 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
315 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
316 | * | ||
317 | * <32 1s> = 32 consecutive logic 1 values | ||
318 | * A = bit of Physical Layer device address (MSB first) | ||
319 | * R = bit of register address (MSB first) | ||
320 | * z = High impedance state | ||
321 | * D = bit of write data (MSB first) | ||
322 | * | ||
323 | * Transmission order is 'Preamble' field first, bits transmitted | ||
324 | * left to right (first to last). | ||
325 | */ | ||
326 | struct { | ||
327 | u32 field; | ||
328 | unsigned int len; | ||
329 | } p[] = { | ||
330 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
331 | { GMII_ST, 2 }, /* ST */ | ||
332 | { GMII_WRITE, 2 }, /* OP */ | ||
333 | { phy_id, 5 }, /* PHYAD */ | ||
334 | { phy_reg, 5 }, /* REGAD */ | ||
335 | { 0x0002, 2 }, /* TA */ | ||
336 | { val & 0xffff, 16 }, /* DATA */ | ||
337 | { 0x0000, 1 } /* IDLE */ | ||
338 | }; | ||
339 | unsigned int i, j; | ||
340 | u8 polarity, data; | ||
341 | |||
342 | polarity = ipg_r8(PHY_CTRL); | ||
343 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
344 | |||
345 | /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ | ||
346 | for (j = 0; j < 7; j++) { | ||
347 | for (i = 0; i < p[j].len; i++) { | ||
348 | /* For each variable length field, the MSB must be | ||
349 | * transmitted first. Rotate through the field bits, | ||
350 | * starting with the MSB, and move each bit into the | ||
351 | * the 1st (2^1) bit position (this is the bit position | ||
352 | * corresponding to the MgmtData bit of the PhyCtrl | ||
353 | * register for the IPG). | ||
354 | * | ||
355 | * Example: ST = 01; | ||
356 | * | ||
357 | * First write a '0' to bit 1 of the PhyCtrl | ||
358 | * register, then write a '1' to bit 1 of the | ||
359 | * PhyCtrl register. | ||
360 | * | ||
361 | * To do this, right shift the MSB of ST by the value: | ||
362 | * [field length - 1 - #ST bits already written] | ||
363 | * then left shift this result by 1. | ||
364 | */ | ||
365 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
366 | data &= IPG_PC_MGMTDATA; | ||
367 | data |= polarity | IPG_PC_MGMTDIR; | ||
368 | |||
369 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | /* The last cycle is a tri-state, so read from the PHY. */ | ||
374 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); | ||
375 | ipg_r8(PHY_CTRL); | ||
376 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); | ||
377 | } | ||
378 | |||
379 | static void ipg_set_led_mode(struct net_device *dev) | ||
380 | { | ||
381 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
382 | void __iomem *ioaddr = sp->ioaddr; | ||
383 | u32 mode; | ||
384 | |||
385 | mode = ipg_r32(ASIC_CTRL); | ||
386 | mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); | ||
387 | |||
388 | if ((sp->led_mode & 0x03) > 1) | ||
389 | mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ | ||
390 | |||
391 | if ((sp->led_mode & 0x01) == 1) | ||
392 | mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ | ||
393 | |||
394 | if ((sp->led_mode & 0x08) == 8) | ||
395 | mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ | ||
396 | |||
397 | ipg_w32(mode, ASIC_CTRL); | ||
398 | } | ||
399 | |||
400 | static void ipg_set_phy_set(struct net_device *dev) | ||
401 | { | ||
402 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
403 | void __iomem *ioaddr = sp->ioaddr; | ||
404 | int physet; | ||
405 | |||
406 | physet = ipg_r8(PHY_SET); | ||
407 | physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); | ||
408 | physet |= ((sp->led_mode & 0x70) >> 4); | ||
409 | ipg_w8(physet, PHY_SET); | ||
410 | } | ||
411 | |||
412 | static int ipg_reset(struct net_device *dev, u32 resetflags) | ||
413 | { | ||
414 | /* Assert functional resets via the IPG AsicCtrl | ||
415 | * register as specified by the 'resetflags' input | ||
416 | * parameter. | ||
417 | */ | ||
418 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
419 | unsigned int timeout_count = 0; | ||
420 | |||
421 | IPG_DEBUG_MSG("_reset\n"); | ||
422 | |||
423 | ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); | ||
424 | |||
425 | /* Delay added to account for problem with 10Mbps reset. */ | ||
426 | mdelay(IPG_AC_RESETWAIT); | ||
427 | |||
428 | while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { | ||
429 | mdelay(IPG_AC_RESETWAIT); | ||
430 | if (++timeout_count > IPG_AC_RESET_TIMEOUT) | ||
431 | return -ETIME; | ||
432 | } | ||
433 | /* Set LED Mode in Asic Control */ | ||
434 | ipg_set_led_mode(dev); | ||
435 | |||
436 | /* Set PHYSet Register Value */ | ||
437 | ipg_set_phy_set(dev); | ||
438 | return 0; | ||
439 | } | ||
440 | |||
441 | /* Find the GMII PHY address. */ | ||
442 | static int ipg_find_phyaddr(struct net_device *dev) | ||
443 | { | ||
444 | unsigned int phyaddr, i; | ||
445 | |||
446 | for (i = 0; i < 32; i++) { | ||
447 | u32 status; | ||
448 | |||
449 | /* Search for the correct PHY address among 32 possible. */ | ||
450 | phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; | ||
451 | |||
452 | /* 10/22/03 Grace change verify from GMII_PHY_STATUS to | ||
453 | GMII_PHY_ID1 | ||
454 | */ | ||
455 | |||
456 | status = mdio_read(dev, phyaddr, MII_BMSR); | ||
457 | |||
458 | if ((status != 0xFFFF) && (status != 0)) | ||
459 | return phyaddr; | ||
460 | } | ||
461 | |||
462 | return 0x1f; | ||
463 | } | ||
464 | |||
465 | /* | ||
466 | * Configure IPG based on result of IEEE 802.3 PHY | ||
467 | * auto-negotiation. | ||
468 | */ | ||
469 | static int ipg_config_autoneg(struct net_device *dev) | ||
470 | { | ||
471 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
472 | void __iomem *ioaddr = sp->ioaddr; | ||
473 | unsigned int txflowcontrol; | ||
474 | unsigned int rxflowcontrol; | ||
475 | unsigned int fullduplex; | ||
476 | u32 mac_ctrl_val; | ||
477 | u32 asicctrl; | ||
478 | u8 phyctrl; | ||
479 | const char *speed; | ||
480 | const char *duplex; | ||
481 | const char *tx_desc; | ||
482 | const char *rx_desc; | ||
483 | |||
484 | IPG_DEBUG_MSG("_config_autoneg\n"); | ||
485 | |||
486 | asicctrl = ipg_r32(ASIC_CTRL); | ||
487 | phyctrl = ipg_r8(PHY_CTRL); | ||
488 | mac_ctrl_val = ipg_r32(MAC_CTRL); | ||
489 | |||
490 | /* Set flags for use in resolving auto-negotiation, assuming | ||
491 | * non-1000Mbps, half duplex, no flow control. | ||
492 | */ | ||
493 | fullduplex = 0; | ||
494 | txflowcontrol = 0; | ||
495 | rxflowcontrol = 0; | ||
496 | |||
497 | /* To accommodate a problem in 10Mbps operation, | ||
498 | * set a global flag if PHY running in 10Mbps mode. | ||
499 | */ | ||
500 | sp->tenmbpsmode = 0; | ||
501 | |||
502 | /* Determine actual speed of operation. */ | ||
503 | switch (phyctrl & IPG_PC_LINK_SPEED) { | ||
504 | case IPG_PC_LINK_SPEED_10MBPS: | ||
505 | speed = "10Mbps"; | ||
506 | sp->tenmbpsmode = 1; | ||
507 | break; | ||
508 | case IPG_PC_LINK_SPEED_100MBPS: | ||
509 | speed = "100Mbps"; | ||
510 | break; | ||
511 | case IPG_PC_LINK_SPEED_1000MBPS: | ||
512 | speed = "1000Mbps"; | ||
513 | break; | ||
514 | default: | ||
515 | speed = "undefined!"; | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | netdev_info(dev, "Link speed = %s\n", speed); | ||
520 | if (sp->tenmbpsmode == 1) | ||
521 | netdev_info(dev, "10Mbps operational mode enabled\n"); | ||
522 | |||
523 | if (phyctrl & IPG_PC_DUPLEX_STATUS) { | ||
524 | fullduplex = 1; | ||
525 | txflowcontrol = 1; | ||
526 | rxflowcontrol = 1; | ||
527 | } | ||
528 | |||
529 | /* Configure full duplex, and flow control. */ | ||
530 | if (fullduplex == 1) { | ||
531 | |||
532 | /* Configure IPG for full duplex operation. */ | ||
533 | |||
534 | duplex = "full"; | ||
535 | |||
536 | mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; | ||
537 | |||
538 | if (txflowcontrol == 1) { | ||
539 | tx_desc = ""; | ||
540 | mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
541 | } else { | ||
542 | tx_desc = "no "; | ||
543 | mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
544 | } | ||
545 | |||
546 | if (rxflowcontrol == 1) { | ||
547 | rx_desc = ""; | ||
548 | mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
549 | } else { | ||
550 | rx_desc = "no "; | ||
551 | mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
552 | } | ||
553 | } else { | ||
554 | duplex = "half"; | ||
555 | tx_desc = "no "; | ||
556 | rx_desc = "no "; | ||
557 | mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD & | ||
558 | ~IPG_MC_TX_FLOW_CONTROL_ENABLE & | ||
559 | ~IPG_MC_RX_FLOW_CONTROL_ENABLE); | ||
560 | } | ||
561 | |||
562 | netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n", | ||
563 | duplex, tx_desc, rx_desc); | ||
564 | ipg_w32(mac_ctrl_val, MAC_CTRL); | ||
565 | |||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | /* Determine and configure multicast operation and set | ||
570 | * receive mode for IPG. | ||
571 | */ | ||
572 | static void ipg_nic_set_multicast_list(struct net_device *dev) | ||
573 | { | ||
574 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
575 | struct netdev_hw_addr *ha; | ||
576 | unsigned int hashindex; | ||
577 | u32 hashtable[2]; | ||
578 | u8 receivemode; | ||
579 | |||
580 | IPG_DEBUG_MSG("_nic_set_multicast_list\n"); | ||
581 | |||
582 | receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; | ||
583 | |||
584 | if (dev->flags & IFF_PROMISC) { | ||
585 | /* NIC to be configured in promiscuous mode. */ | ||
586 | receivemode = IPG_RM_RECEIVEALLFRAMES; | ||
587 | } else if ((dev->flags & IFF_ALLMULTI) || | ||
588 | ((dev->flags & IFF_MULTICAST) && | ||
589 | (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { | ||
590 | /* NIC to be configured to receive all multicast | ||
591 | * frames. */ | ||
592 | receivemode |= IPG_RM_RECEIVEMULTICAST; | ||
593 | } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { | ||
594 | /* NIC to be configured to receive selected | ||
595 | * multicast addresses. */ | ||
596 | receivemode |= IPG_RM_RECEIVEMULTICASTHASH; | ||
597 | } | ||
598 | |||
599 | /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. | ||
600 | * The IPG applies a cyclic-redundancy-check (the same CRC | ||
601 | * used to calculate the frame data FCS) to the destination | ||
602 | * address all incoming multicast frames whose destination | ||
603 | * address has the multicast bit set. The least significant | ||
604 | * 6 bits of the CRC result are used as an addressing index | ||
605 | * into the hash table. If the value of the bit addressed by | ||
606 | * this index is a 1, the frame is passed to the host system. | ||
607 | */ | ||
608 | |||
609 | /* Clear hashtable. */ | ||
610 | hashtable[0] = 0x00000000; | ||
611 | hashtable[1] = 0x00000000; | ||
612 | |||
613 | /* Cycle through all multicast addresses to filter. */ | ||
614 | netdev_for_each_mc_addr(ha, dev) { | ||
615 | /* Calculate CRC result for each multicast address. */ | ||
616 | hashindex = crc32_le(0xffffffff, ha->addr, | ||
617 | ETH_ALEN); | ||
618 | |||
619 | /* Use only the least significant 6 bits. */ | ||
620 | hashindex = hashindex & 0x3F; | ||
621 | |||
622 | /* Within "hashtable", set bit number "hashindex" | ||
623 | * to a logic 1. | ||
624 | */ | ||
625 | set_bit(hashindex, (void *)hashtable); | ||
626 | } | ||
627 | |||
628 | /* Write the value of the hashtable, to the 4, 16 bit | ||
629 | * HASHTABLE IPG registers. | ||
630 | */ | ||
631 | ipg_w32(hashtable[0], HASHTABLE_0); | ||
632 | ipg_w32(hashtable[1], HASHTABLE_1); | ||
633 | |||
634 | ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); | ||
635 | |||
636 | IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); | ||
637 | } | ||
638 | |||
639 | static int ipg_io_config(struct net_device *dev) | ||
640 | { | ||
641 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
642 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
643 | u32 origmacctrl; | ||
644 | u32 restoremacctrl; | ||
645 | |||
646 | IPG_DEBUG_MSG("_io_config\n"); | ||
647 | |||
648 | origmacctrl = ipg_r32(MAC_CTRL); | ||
649 | |||
650 | restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; | ||
651 | |||
652 | /* Based on compilation option, determine if FCS is to be | ||
653 | * stripped on receive frames by IPG. | ||
654 | */ | ||
655 | if (!IPG_STRIP_FCS_ON_RX) | ||
656 | restoremacctrl |= IPG_MC_RCV_FCS; | ||
657 | |||
658 | /* Determine if transmitter and/or receiver are | ||
659 | * enabled so we may restore MACCTRL correctly. | ||
660 | */ | ||
661 | if (origmacctrl & IPG_MC_TX_ENABLED) | ||
662 | restoremacctrl |= IPG_MC_TX_ENABLE; | ||
663 | |||
664 | if (origmacctrl & IPG_MC_RX_ENABLED) | ||
665 | restoremacctrl |= IPG_MC_RX_ENABLE; | ||
666 | |||
667 | /* Transmitter and receiver must be disabled before setting | ||
668 | * IFSSelect. | ||
669 | */ | ||
670 | ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & | ||
671 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
672 | |||
673 | /* Now that transmitter and receiver are disabled, write | ||
674 | * to IFSSelect. | ||
675 | */ | ||
676 | ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); | ||
677 | |||
678 | /* Set RECEIVEMODE register. */ | ||
679 | ipg_nic_set_multicast_list(dev); | ||
680 | |||
681 | ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); | ||
682 | |||
683 | ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); | ||
684 | ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); | ||
685 | ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); | ||
686 | ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); | ||
687 | ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); | ||
688 | ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); | ||
689 | ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | | ||
690 | IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | | ||
691 | IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | | ||
692 | IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); | ||
693 | ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); | ||
694 | ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); | ||
695 | |||
696 | /* IPG multi-frag frame bug workaround. | ||
697 | * Per silicon revision B3 eratta. | ||
698 | */ | ||
699 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); | ||
700 | |||
701 | /* IPG TX poll now bug workaround. | ||
702 | * Per silicon revision B3 eratta. | ||
703 | */ | ||
704 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); | ||
705 | |||
706 | /* IPG RX poll now bug workaround. | ||
707 | * Per silicon revision B3 eratta. | ||
708 | */ | ||
709 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); | ||
710 | |||
711 | /* Now restore MACCTRL to original setting. */ | ||
712 | ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); | ||
713 | |||
714 | /* Disable unused RMON statistics. */ | ||
715 | ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); | ||
716 | |||
717 | /* Disable unused MIB statistics. */ | ||
718 | ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | | ||
719 | IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | | ||
720 | IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | | ||
721 | IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | | ||
722 | IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | | ||
723 | IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); | ||
724 | |||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | /* | ||
729 | * Create a receive buffer within system memory and update | ||
730 | * NIC private structure appropriately. | ||
731 | */ | ||
732 | static int ipg_get_rxbuff(struct net_device *dev, int entry) | ||
733 | { | ||
734 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
735 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
736 | struct sk_buff *skb; | ||
737 | u64 rxfragsize; | ||
738 | |||
739 | IPG_DEBUG_MSG("_get_rxbuff\n"); | ||
740 | |||
741 | skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); | ||
742 | if (!skb) { | ||
743 | sp->rx_buff[entry] = NULL; | ||
744 | return -ENOMEM; | ||
745 | } | ||
746 | |||
747 | /* Save the address of the sk_buff structure. */ | ||
748 | sp->rx_buff[entry] = skb; | ||
749 | |||
750 | rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, | ||
751 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); | ||
752 | |||
753 | /* Set the RFD fragment length. */ | ||
754 | rxfragsize = sp->rxfrag_size; | ||
755 | rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | static int init_rfdlist(struct net_device *dev) | ||
761 | { | ||
762 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
763 | void __iomem *ioaddr = sp->ioaddr; | ||
764 | unsigned int i; | ||
765 | |||
766 | IPG_DEBUG_MSG("_init_rfdlist\n"); | ||
767 | |||
768 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
769 | struct ipg_rx *rxfd = sp->rxd + i; | ||
770 | |||
771 | if (sp->rx_buff[i]) { | ||
772 | pci_unmap_single(sp->pdev, | ||
773 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
774 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
775 | dev_kfree_skb_irq(sp->rx_buff[i]); | ||
776 | sp->rx_buff[i] = NULL; | ||
777 | } | ||
778 | |||
779 | /* Clear out the RFS field. */ | ||
780 | rxfd->rfs = 0x0000000000000000; | ||
781 | |||
782 | if (ipg_get_rxbuff(dev, i) < 0) { | ||
783 | /* | ||
784 | * A receive buffer was not ready, break the | ||
785 | * RFD list here. | ||
786 | */ | ||
787 | IPG_DEBUG_MSG("Cannot allocate Rx buffer\n"); | ||
788 | |||
789 | /* Just in case we cannot allocate a single RFD. | ||
790 | * Should not occur. | ||
791 | */ | ||
792 | if (i == 0) { | ||
793 | netdev_err(dev, "No memory available for RFD list\n"); | ||
794 | return -ENOMEM; | ||
795 | } | ||
796 | } | ||
797 | |||
798 | rxfd->next_desc = cpu_to_le64(sp->rxd_map + | ||
799 | sizeof(struct ipg_rx)*(i + 1)); | ||
800 | } | ||
801 | sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); | ||
802 | |||
803 | sp->rx_current = 0; | ||
804 | sp->rx_dirty = 0; | ||
805 | |||
806 | /* Write the location of the RFDList to the IPG. */ | ||
807 | ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); | ||
808 | ipg_w32(0x00000000, RFD_LIST_PTR_1); | ||
809 | |||
810 | return 0; | ||
811 | } | ||
812 | |||
813 | static void init_tfdlist(struct net_device *dev) | ||
814 | { | ||
815 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
816 | void __iomem *ioaddr = sp->ioaddr; | ||
817 | unsigned int i; | ||
818 | |||
819 | IPG_DEBUG_MSG("_init_tfdlist\n"); | ||
820 | |||
821 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
822 | struct ipg_tx *txfd = sp->txd + i; | ||
823 | |||
824 | txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); | ||
825 | |||
826 | if (sp->tx_buff[i]) { | ||
827 | dev_kfree_skb_irq(sp->tx_buff[i]); | ||
828 | sp->tx_buff[i] = NULL; | ||
829 | } | ||
830 | |||
831 | txfd->next_desc = cpu_to_le64(sp->txd_map + | ||
832 | sizeof(struct ipg_tx)*(i + 1)); | ||
833 | } | ||
834 | sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); | ||
835 | |||
836 | sp->tx_current = 0; | ||
837 | sp->tx_dirty = 0; | ||
838 | |||
839 | /* Write the location of the TFDList to the IPG. */ | ||
840 | IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n", | ||
841 | (u32) sp->txd_map); | ||
842 | ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); | ||
843 | ipg_w32(0x00000000, TFD_LIST_PTR_1); | ||
844 | |||
845 | sp->reset_current_tfd = 1; | ||
846 | } | ||
847 | |||
848 | /* | ||
849 | * Free all transmit buffers which have already been transferred | ||
850 | * via DMA to the IPG. | ||
851 | */ | ||
852 | static void ipg_nic_txfree(struct net_device *dev) | ||
853 | { | ||
854 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
855 | unsigned int released, pending, dirty; | ||
856 | |||
857 | IPG_DEBUG_MSG("_nic_txfree\n"); | ||
858 | |||
859 | pending = sp->tx_current - sp->tx_dirty; | ||
860 | dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; | ||
861 | |||
862 | for (released = 0; released < pending; released++) { | ||
863 | struct sk_buff *skb = sp->tx_buff[dirty]; | ||
864 | struct ipg_tx *txfd = sp->txd + dirty; | ||
865 | |||
866 | IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc); | ||
867 | |||
868 | /* Look at each TFD's TFC field beginning | ||
869 | * at the last freed TFD up to the current TFD. | ||
870 | * If the TFDDone bit is set, free the associated | ||
871 | * buffer. | ||
872 | */ | ||
873 | if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) | ||
874 | break; | ||
875 | |||
876 | /* Free the transmit buffer. */ | ||
877 | if (skb) { | ||
878 | pci_unmap_single(sp->pdev, | ||
879 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
880 | skb->len, PCI_DMA_TODEVICE); | ||
881 | |||
882 | dev_kfree_skb_irq(skb); | ||
883 | |||
884 | sp->tx_buff[dirty] = NULL; | ||
885 | } | ||
886 | dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; | ||
887 | } | ||
888 | |||
889 | sp->tx_dirty += released; | ||
890 | |||
891 | if (netif_queue_stopped(dev) && | ||
892 | (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { | ||
893 | netif_wake_queue(dev); | ||
894 | } | ||
895 | } | ||
896 | |||
897 | static void ipg_tx_timeout(struct net_device *dev) | ||
898 | { | ||
899 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
900 | void __iomem *ioaddr = sp->ioaddr; | ||
901 | |||
902 | ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | | ||
903 | IPG_AC_FIFO); | ||
904 | |||
905 | spin_lock_irq(&sp->lock); | ||
906 | |||
907 | /* Re-configure after DMA reset. */ | ||
908 | if (ipg_io_config(dev) < 0) | ||
909 | netdev_info(dev, "Error during re-configuration\n"); | ||
910 | |||
911 | init_tfdlist(dev); | ||
912 | |||
913 | spin_unlock_irq(&sp->lock); | ||
914 | |||
915 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, | ||
916 | MAC_CTRL); | ||
917 | } | ||
918 | |||
919 | /* | ||
920 | * For TxComplete interrupts, free all transmit | ||
921 | * buffers which have already been transferred via DMA | ||
922 | * to the IPG. | ||
923 | */ | ||
924 | static void ipg_nic_txcleanup(struct net_device *dev) | ||
925 | { | ||
926 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
927 | void __iomem *ioaddr = sp->ioaddr; | ||
928 | unsigned int i; | ||
929 | |||
930 | IPG_DEBUG_MSG("_nic_txcleanup\n"); | ||
931 | |||
932 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
933 | /* Reading the TXSTATUS register clears the | ||
934 | * TX_COMPLETE interrupt. | ||
935 | */ | ||
936 | u32 txstatusdword = ipg_r32(TX_STATUS); | ||
937 | |||
938 | IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword); | ||
939 | |||
940 | /* Check for Transmit errors. Error bits only valid if | ||
941 | * TX_COMPLETE bit in the TXSTATUS register is a 1. | ||
942 | */ | ||
943 | if (!(txstatusdword & IPG_TS_TX_COMPLETE)) | ||
944 | break; | ||
945 | |||
946 | /* If in 10Mbps mode, indicate transmit is ready. */ | ||
947 | if (sp->tenmbpsmode) { | ||
948 | netif_wake_queue(dev); | ||
949 | } | ||
950 | |||
951 | /* Transmit error, increment stat counters. */ | ||
952 | if (txstatusdword & IPG_TS_TX_ERROR) { | ||
953 | IPG_DEBUG_MSG("Transmit error\n"); | ||
954 | sp->stats.tx_errors++; | ||
955 | } | ||
956 | |||
957 | /* Late collision, re-enable transmitter. */ | ||
958 | if (txstatusdword & IPG_TS_LATE_COLLISION) { | ||
959 | IPG_DEBUG_MSG("Late collision on transmit\n"); | ||
960 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
961 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
962 | } | ||
963 | |||
964 | /* Maximum collisions, re-enable transmitter. */ | ||
965 | if (txstatusdword & IPG_TS_TX_MAX_COLL) { | ||
966 | IPG_DEBUG_MSG("Maximum collisions on transmit\n"); | ||
967 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
968 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
969 | } | ||
970 | |||
971 | /* Transmit underrun, reset and re-enable | ||
972 | * transmitter. | ||
973 | */ | ||
974 | if (txstatusdword & IPG_TS_TX_UNDERRUN) { | ||
975 | IPG_DEBUG_MSG("Transmitter underrun\n"); | ||
976 | sp->stats.tx_fifo_errors++; | ||
977 | ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | | ||
978 | IPG_AC_NETWORK | IPG_AC_FIFO); | ||
979 | |||
980 | /* Re-configure after DMA reset. */ | ||
981 | if (ipg_io_config(dev) < 0) { | ||
982 | netdev_info(dev, "Error during re-configuration\n"); | ||
983 | } | ||
984 | init_tfdlist(dev); | ||
985 | |||
986 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
987 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
988 | } | ||
989 | } | ||
990 | |||
991 | ipg_nic_txfree(dev); | ||
992 | } | ||
993 | |||
994 | /* Provides statistical information about the IPG NIC. */ | ||
995 | static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) | ||
996 | { | ||
997 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
998 | void __iomem *ioaddr = sp->ioaddr; | ||
999 | u16 temp1; | ||
1000 | u16 temp2; | ||
1001 | |||
1002 | IPG_DEBUG_MSG("_nic_get_stats\n"); | ||
1003 | |||
1004 | /* Check to see if the NIC has been initialized via nic_open, | ||
1005 | * before trying to read statistic registers. | ||
1006 | */ | ||
1007 | if (!netif_running(dev)) | ||
1008 | return &sp->stats; | ||
1009 | |||
1010 | sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); | ||
1011 | sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); | ||
1012 | sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); | ||
1013 | sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); | ||
1014 | temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); | ||
1015 | sp->stats.rx_errors += temp1; | ||
1016 | sp->stats.rx_missed_errors += temp1; | ||
1017 | temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + | ||
1018 | ipg_r32(IPG_LATECOLLISIONS); | ||
1019 | temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); | ||
1020 | sp->stats.collisions += temp1; | ||
1021 | sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); | ||
1022 | sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + | ||
1023 | ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; | ||
1024 | sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); | ||
1025 | |||
1026 | /* detailed tx_errors */ | ||
1027 | sp->stats.tx_carrier_errors += temp2; | ||
1028 | |||
1029 | /* detailed rx_errors */ | ||
1030 | sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + | ||
1031 | ipg_r16(IPG_FRAMETOOLONGERRORS); | ||
1032 | sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); | ||
1033 | |||
1034 | /* Unutilized IPG statistic registers. */ | ||
1035 | ipg_r32(IPG_MCSTFRAMESRCVDOK); | ||
1036 | |||
1037 | return &sp->stats; | ||
1038 | } | ||
1039 | |||
1040 | /* Restore used receive buffers. */ | ||
1041 | static int ipg_nic_rxrestore(struct net_device *dev) | ||
1042 | { | ||
1043 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1044 | const unsigned int curr = sp->rx_current; | ||
1045 | unsigned int dirty = sp->rx_dirty; | ||
1046 | |||
1047 | IPG_DEBUG_MSG("_nic_rxrestore\n"); | ||
1048 | |||
1049 | for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { | ||
1050 | unsigned int entry = dirty % IPG_RFDLIST_LENGTH; | ||
1051 | |||
1052 | /* rx_copybreak may poke hole here and there. */ | ||
1053 | if (sp->rx_buff[entry]) | ||
1054 | continue; | ||
1055 | |||
1056 | /* Generate a new receive buffer to replace the | ||
1057 | * current buffer (which will be released by the | ||
1058 | * Linux system). | ||
1059 | */ | ||
1060 | if (ipg_get_rxbuff(dev, entry) < 0) { | ||
1061 | IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n"); | ||
1062 | |||
1063 | break; | ||
1064 | } | ||
1065 | |||
1066 | /* Reset the RFS field. */ | ||
1067 | sp->rxd[entry].rfs = 0x0000000000000000; | ||
1068 | } | ||
1069 | sp->rx_dirty = dirty; | ||
1070 | |||
1071 | return 0; | ||
1072 | } | ||
1073 | |||
1074 | /* use jumboindex and jumbosize to control jumbo frame status | ||
1075 | * initial status is jumboindex=-1 and jumbosize=0 | ||
1076 | * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. | ||
1077 | * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving | ||
1078 | * 3. jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump | ||
1079 | * previous receiving and need to continue dumping the current one | ||
1080 | */ | ||
1081 | enum { | ||
1082 | NORMAL_PACKET, | ||
1083 | ERROR_PACKET | ||
1084 | }; | ||
1085 | |||
1086 | enum { | ||
1087 | FRAME_NO_START_NO_END = 0, | ||
1088 | FRAME_WITH_START = 1, | ||
1089 | FRAME_WITH_END = 10, | ||
1090 | FRAME_WITH_START_WITH_END = 11 | ||
1091 | }; | ||
1092 | |||
1093 | static void ipg_nic_rx_free_skb(struct net_device *dev) | ||
1094 | { | ||
1095 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1096 | unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; | ||
1097 | |||
1098 | if (sp->rx_buff[entry]) { | ||
1099 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1100 | |||
1101 | pci_unmap_single(sp->pdev, | ||
1102 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1103 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1104 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1105 | sp->rx_buff[entry] = NULL; | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | static int ipg_nic_rx_check_frame_type(struct net_device *dev) | ||
1110 | { | ||
1111 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1112 | struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); | ||
1113 | int type = FRAME_NO_START_NO_END; | ||
1114 | |||
1115 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) | ||
1116 | type += FRAME_WITH_START; | ||
1117 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) | ||
1118 | type += FRAME_WITH_END; | ||
1119 | return type; | ||
1120 | } | ||
1121 | |||
1122 | static int ipg_nic_rx_check_error(struct net_device *dev) | ||
1123 | { | ||
1124 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1125 | unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; | ||
1126 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1127 | |||
1128 | if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & | ||
1129 | (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | | ||
1130 | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | | ||
1131 | IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { | ||
1132 | IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", | ||
1133 | (unsigned long) rxfd->rfs); | ||
1134 | |||
1135 | /* Increment general receive error statistic. */ | ||
1136 | sp->stats.rx_errors++; | ||
1137 | |||
1138 | /* Increment detailed receive error statistics. */ | ||
1139 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { | ||
1140 | IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); | ||
1141 | |||
1142 | sp->stats.rx_fifo_errors++; | ||
1143 | } | ||
1144 | |||
1145 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { | ||
1146 | IPG_DEBUG_MSG("RX runt occurred\n"); | ||
1147 | sp->stats.rx_length_errors++; | ||
1148 | } | ||
1149 | |||
1150 | /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, | ||
1151 | * error count handled by a IPG statistic register. | ||
1152 | */ | ||
1153 | |||
1154 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { | ||
1155 | IPG_DEBUG_MSG("RX alignment error occurred\n"); | ||
1156 | sp->stats.rx_frame_errors++; | ||
1157 | } | ||
1158 | |||
1159 | /* Do nothing for IPG_RFS_RXFCSERROR, error count | ||
1160 | * handled by a IPG statistic register. | ||
1161 | */ | ||
1162 | |||
1163 | /* Free the memory associated with the RX | ||
1164 | * buffer since it is erroneous and we will | ||
1165 | * not pass it to higher layer processes. | ||
1166 | */ | ||
1167 | if (sp->rx_buff[entry]) { | ||
1168 | pci_unmap_single(sp->pdev, | ||
1169 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1170 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1171 | |||
1172 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1173 | sp->rx_buff[entry] = NULL; | ||
1174 | } | ||
1175 | return ERROR_PACKET; | ||
1176 | } | ||
1177 | return NORMAL_PACKET; | ||
1178 | } | ||
1179 | |||
1180 | static void ipg_nic_rx_with_start_and_end(struct net_device *dev, | ||
1181 | struct ipg_nic_private *sp, | ||
1182 | struct ipg_rx *rxfd, unsigned entry) | ||
1183 | { | ||
1184 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1185 | struct sk_buff *skb; | ||
1186 | int framelen; | ||
1187 | |||
1188 | if (jumbo->found_start) { | ||
1189 | dev_kfree_skb_irq(jumbo->skb); | ||
1190 | jumbo->found_start = 0; | ||
1191 | jumbo->current_size = 0; | ||
1192 | jumbo->skb = NULL; | ||
1193 | } | ||
1194 | |||
1195 | /* 1: found error, 0 no error */ | ||
1196 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
1197 | return; | ||
1198 | |||
1199 | skb = sp->rx_buff[entry]; | ||
1200 | if (!skb) | ||
1201 | return; | ||
1202 | |||
1203 | /* accept this frame and send to upper layer */ | ||
1204 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1205 | if (framelen > sp->rxfrag_size) | ||
1206 | framelen = sp->rxfrag_size; | ||
1207 | |||
1208 | skb_put(skb, framelen); | ||
1209 | skb->protocol = eth_type_trans(skb, dev); | ||
1210 | skb_checksum_none_assert(skb); | ||
1211 | netif_rx(skb); | ||
1212 | sp->rx_buff[entry] = NULL; | ||
1213 | } | ||
1214 | |||
1215 | static void ipg_nic_rx_with_start(struct net_device *dev, | ||
1216 | struct ipg_nic_private *sp, | ||
1217 | struct ipg_rx *rxfd, unsigned entry) | ||
1218 | { | ||
1219 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1220 | struct pci_dev *pdev = sp->pdev; | ||
1221 | struct sk_buff *skb; | ||
1222 | |||
1223 | /* 1: found error, 0 no error */ | ||
1224 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
1225 | return; | ||
1226 | |||
1227 | /* accept this frame and send to upper layer */ | ||
1228 | skb = sp->rx_buff[entry]; | ||
1229 | if (!skb) | ||
1230 | return; | ||
1231 | |||
1232 | if (jumbo->found_start) | ||
1233 | dev_kfree_skb_irq(jumbo->skb); | ||
1234 | |||
1235 | pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1236 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1237 | |||
1238 | skb_put(skb, sp->rxfrag_size); | ||
1239 | |||
1240 | jumbo->found_start = 1; | ||
1241 | jumbo->current_size = sp->rxfrag_size; | ||
1242 | jumbo->skb = skb; | ||
1243 | |||
1244 | sp->rx_buff[entry] = NULL; | ||
1245 | } | ||
1246 | |||
1247 | static void ipg_nic_rx_with_end(struct net_device *dev, | ||
1248 | struct ipg_nic_private *sp, | ||
1249 | struct ipg_rx *rxfd, unsigned entry) | ||
1250 | { | ||
1251 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1252 | |||
1253 | /* 1: found error, 0 no error */ | ||
1254 | if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { | ||
1255 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1256 | |||
1257 | if (!skb) | ||
1258 | return; | ||
1259 | |||
1260 | if (jumbo->found_start) { | ||
1261 | int framelen, endframelen; | ||
1262 | |||
1263 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1264 | |||
1265 | endframelen = framelen - jumbo->current_size; | ||
1266 | if (framelen > sp->rxsupport_size) | ||
1267 | dev_kfree_skb_irq(jumbo->skb); | ||
1268 | else { | ||
1269 | memcpy(skb_put(jumbo->skb, endframelen), | ||
1270 | skb->data, endframelen); | ||
1271 | |||
1272 | jumbo->skb->protocol = | ||
1273 | eth_type_trans(jumbo->skb, dev); | ||
1274 | |||
1275 | skb_checksum_none_assert(jumbo->skb); | ||
1276 | netif_rx(jumbo->skb); | ||
1277 | } | ||
1278 | } | ||
1279 | |||
1280 | jumbo->found_start = 0; | ||
1281 | jumbo->current_size = 0; | ||
1282 | jumbo->skb = NULL; | ||
1283 | |||
1284 | ipg_nic_rx_free_skb(dev); | ||
1285 | } else { | ||
1286 | dev_kfree_skb_irq(jumbo->skb); | ||
1287 | jumbo->found_start = 0; | ||
1288 | jumbo->current_size = 0; | ||
1289 | jumbo->skb = NULL; | ||
1290 | } | ||
1291 | } | ||
1292 | |||
1293 | static void ipg_nic_rx_no_start_no_end(struct net_device *dev, | ||
1294 | struct ipg_nic_private *sp, | ||
1295 | struct ipg_rx *rxfd, unsigned entry) | ||
1296 | { | ||
1297 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1298 | |||
1299 | /* 1: found error, 0 no error */ | ||
1300 | if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { | ||
1301 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1302 | |||
1303 | if (skb) { | ||
1304 | if (jumbo->found_start) { | ||
1305 | jumbo->current_size += sp->rxfrag_size; | ||
1306 | if (jumbo->current_size <= sp->rxsupport_size) { | ||
1307 | memcpy(skb_put(jumbo->skb, | ||
1308 | sp->rxfrag_size), | ||
1309 | skb->data, sp->rxfrag_size); | ||
1310 | } | ||
1311 | } | ||
1312 | ipg_nic_rx_free_skb(dev); | ||
1313 | } | ||
1314 | } else { | ||
1315 | dev_kfree_skb_irq(jumbo->skb); | ||
1316 | jumbo->found_start = 0; | ||
1317 | jumbo->current_size = 0; | ||
1318 | jumbo->skb = NULL; | ||
1319 | } | ||
1320 | } | ||
1321 | |||
1322 | static int ipg_nic_rx_jumbo(struct net_device *dev) | ||
1323 | { | ||
1324 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1325 | unsigned int curr = sp->rx_current; | ||
1326 | void __iomem *ioaddr = sp->ioaddr; | ||
1327 | unsigned int i; | ||
1328 | |||
1329 | IPG_DEBUG_MSG("_nic_rx\n"); | ||
1330 | |||
1331 | for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { | ||
1332 | unsigned int entry = curr % IPG_RFDLIST_LENGTH; | ||
1333 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1334 | |||
1335 | if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) | ||
1336 | break; | ||
1337 | |||
1338 | switch (ipg_nic_rx_check_frame_type(dev)) { | ||
1339 | case FRAME_WITH_START_WITH_END: | ||
1340 | ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); | ||
1341 | break; | ||
1342 | case FRAME_WITH_START: | ||
1343 | ipg_nic_rx_with_start(dev, sp, rxfd, entry); | ||
1344 | break; | ||
1345 | case FRAME_WITH_END: | ||
1346 | ipg_nic_rx_with_end(dev, sp, rxfd, entry); | ||
1347 | break; | ||
1348 | case FRAME_NO_START_NO_END: | ||
1349 | ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); | ||
1350 | break; | ||
1351 | } | ||
1352 | } | ||
1353 | |||
1354 | sp->rx_current = curr; | ||
1355 | |||
1356 | if (i == IPG_MAXRFDPROCESS_COUNT) { | ||
1357 | /* There are more RFDs to process, however the | ||
1358 | * allocated amount of RFD processing time has | ||
1359 | * expired. Assert Interrupt Requested to make | ||
1360 | * sure we come back to process the remaining RFDs. | ||
1361 | */ | ||
1362 | ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); | ||
1363 | } | ||
1364 | |||
1365 | ipg_nic_rxrestore(dev); | ||
1366 | |||
1367 | return 0; | ||
1368 | } | ||
1369 | |||
1370 | static int ipg_nic_rx(struct net_device *dev) | ||
1371 | { | ||
1372 | /* Transfer received Ethernet frames to higher network layers. */ | ||
1373 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1374 | unsigned int curr = sp->rx_current; | ||
1375 | void __iomem *ioaddr = sp->ioaddr; | ||
1376 | struct ipg_rx *rxfd; | ||
1377 | unsigned int i; | ||
1378 | |||
1379 | IPG_DEBUG_MSG("_nic_rx\n"); | ||
1380 | |||
1381 | #define __RFS_MASK \ | ||
1382 | cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) | ||
1383 | |||
1384 | for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { | ||
1385 | unsigned int entry = curr % IPG_RFDLIST_LENGTH; | ||
1386 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1387 | unsigned int framelen; | ||
1388 | |||
1389 | rxfd = sp->rxd + entry; | ||
1390 | |||
1391 | if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) | ||
1392 | break; | ||
1393 | |||
1394 | /* Get received frame length. */ | ||
1395 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1396 | |||
1397 | /* Check for jumbo frame arrival with too small | ||
1398 | * RXFRAG_SIZE. | ||
1399 | */ | ||
1400 | if (framelen > sp->rxfrag_size) { | ||
1401 | IPG_DEBUG_MSG | ||
1402 | ("RFS FrameLen > allocated fragment size\n"); | ||
1403 | |||
1404 | framelen = sp->rxfrag_size; | ||
1405 | } | ||
1406 | |||
1407 | if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & | ||
1408 | (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | | ||
1409 | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | | ||
1410 | IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { | ||
1411 | |||
1412 | IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", | ||
1413 | (unsigned long int) rxfd->rfs); | ||
1414 | |||
1415 | /* Increment general receive error statistic. */ | ||
1416 | sp->stats.rx_errors++; | ||
1417 | |||
1418 | /* Increment detailed receive error statistics. */ | ||
1419 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { | ||
1420 | IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); | ||
1421 | sp->stats.rx_fifo_errors++; | ||
1422 | } | ||
1423 | |||
1424 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { | ||
1425 | IPG_DEBUG_MSG("RX runt occurred\n"); | ||
1426 | sp->stats.rx_length_errors++; | ||
1427 | } | ||
1428 | |||
1429 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; | ||
1430 | /* Do nothing, error count handled by an IPG | ||
1431 | * statistic register. | ||
1432 | */ | ||
1433 | |||
1434 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { | ||
1435 | IPG_DEBUG_MSG("RX alignment error occurred\n"); | ||
1436 | sp->stats.rx_frame_errors++; | ||
1437 | } | ||
1438 | |||
1439 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; | ||
1440 | /* Do nothing, error count handled by an IPG | ||
1441 | * statistic register. | ||
1442 | */ | ||
1443 | |||
1444 | /* Free the memory associated with the RX | ||
1445 | * buffer since it is erroneous and we will | ||
1446 | * not pass it to higher layer processes. | ||
1447 | */ | ||
1448 | if (skb) { | ||
1449 | __le64 info = rxfd->frag_info; | ||
1450 | |||
1451 | pci_unmap_single(sp->pdev, | ||
1452 | le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, | ||
1453 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1454 | |||
1455 | dev_kfree_skb_irq(skb); | ||
1456 | } | ||
1457 | } else { | ||
1458 | |||
1459 | /* Adjust the new buffer length to accommodate the size | ||
1460 | * of the received frame. | ||
1461 | */ | ||
1462 | skb_put(skb, framelen); | ||
1463 | |||
1464 | /* Set the buffer's protocol field to Ethernet. */ | ||
1465 | skb->protocol = eth_type_trans(skb, dev); | ||
1466 | |||
1467 | /* The IPG encountered an error with (or | ||
1468 | * there were no) IP/TCP/UDP checksums. | ||
1469 | * This may or may not indicate an invalid | ||
1470 | * IP/TCP/UDP frame was received. Let the | ||
1471 | * upper layer decide. | ||
1472 | */ | ||
1473 | skb_checksum_none_assert(skb); | ||
1474 | |||
1475 | /* Hand off frame for higher layer processing. | ||
1476 | * The function netif_rx() releases the sk_buff | ||
1477 | * when processing completes. | ||
1478 | */ | ||
1479 | netif_rx(skb); | ||
1480 | } | ||
1481 | |||
1482 | /* Assure RX buffer is not reused by IPG. */ | ||
1483 | sp->rx_buff[entry] = NULL; | ||
1484 | } | ||
1485 | |||
1486 | /* | ||
1487 | * If there are more RFDs to process and the allocated amount of RFD | ||
1488 | * processing time has expired, assert Interrupt Requested to make | ||
1489 | * sure we come back to process the remaining RFDs. | ||
1490 | */ | ||
1491 | if (i == IPG_MAXRFDPROCESS_COUNT) | ||
1492 | ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); | ||
1493 | |||
1494 | #ifdef IPG_DEBUG | ||
1495 | /* Check if the RFD list contained no receive frame data. */ | ||
1496 | if (!i) | ||
1497 | sp->EmptyRFDListCount++; | ||
1498 | #endif | ||
1499 | while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && | ||
1500 | !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && | ||
1501 | (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { | ||
1502 | unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; | ||
1503 | |||
1504 | rxfd = sp->rxd + entry; | ||
1505 | |||
1506 | IPG_DEBUG_MSG("Frame requires multiple RFDs\n"); | ||
1507 | |||
1508 | /* An unexpected event; additional code is needed to handle | ||
1509 | * it properly. For the time being, just disregard the | ||
1510 | * frame. | ||
1511 | */ | ||
1512 | |||
1513 | /* Free the memory associated with the RX | ||
1514 | * buffer since it is erroneous and we will | ||
1515 | * not pass it to higher layer processes. | ||
1516 | */ | ||
1517 | if (sp->rx_buff[entry]) { | ||
1518 | pci_unmap_single(sp->pdev, | ||
1519 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1520 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1521 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1522 | } | ||
1523 | |||
1524 | /* Assure RX buffer is not reused by IPG. */ | ||
1525 | sp->rx_buff[entry] = NULL; | ||
1526 | } | ||
1527 | |||
1528 | sp->rx_current = curr; | ||
1529 | |||
1530 | /* Check to see if there are a minimum number of used | ||
1531 | * RFDs before restoring any (should improve performance.) | ||
1532 | */ | ||
1533 | if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) | ||
1534 | ipg_nic_rxrestore(dev); | ||
1535 | |||
1536 | return 0; | ||
1537 | } | ||
1538 | |||
1539 | static void ipg_reset_after_host_error(struct work_struct *work) | ||
1540 | { | ||
1541 | struct ipg_nic_private *sp = | ||
1542 | container_of(work, struct ipg_nic_private, task.work); | ||
1543 | struct net_device *dev = sp->dev; | ||
1544 | |||
1545 | /* | ||
1546 | * Acknowledge HostError interrupt by resetting | ||
1547 | * IPG DMA and HOST. | ||
1548 | */ | ||
1549 | ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); | ||
1550 | |||
1551 | init_rfdlist(dev); | ||
1552 | init_tfdlist(dev); | ||
1553 | |||
1554 | if (ipg_io_config(dev) < 0) { | ||
1555 | netdev_info(dev, "Cannot recover from PCI error\n"); | ||
1556 | schedule_delayed_work(&sp->task, HZ); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) | ||
1561 | { | ||
1562 | struct net_device *dev = dev_inst; | ||
1563 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1564 | void __iomem *ioaddr = sp->ioaddr; | ||
1565 | unsigned int handled = 0; | ||
1566 | u16 status; | ||
1567 | |||
1568 | IPG_DEBUG_MSG("_interrupt_handler\n"); | ||
1569 | |||
1570 | if (sp->is_jumbo) | ||
1571 | ipg_nic_rxrestore(dev); | ||
1572 | |||
1573 | spin_lock(&sp->lock); | ||
1574 | |||
1575 | /* Get interrupt source information, and acknowledge | ||
1576 | * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, | ||
1577 | * IntRequested, MacControlFrame, LinkEvent) interrupts | ||
1578 | * if issued. Also, all IPG interrupts are disabled by | ||
1579 | * reading IntStatusAck. | ||
1580 | */ | ||
1581 | status = ipg_r16(INT_STATUS_ACK); | ||
1582 | |||
1583 | IPG_DEBUG_MSG("IntStatusAck = %04x\n", status); | ||
1584 | |||
1585 | /* Not our interrupt: shared IRQ or device-remove event. */ | ||
1586 | if (!(status & IPG_IS_RSVD_MASK)) | ||
1587 | goto out_enable; | ||
1588 | |||
1589 | handled = 1; | ||
1590 | |||
1591 | if (unlikely(!netif_running(dev))) | ||
1592 | goto out_unlock; | ||
1593 | |||
1594 | /* If RFDListEnd interrupt, restore all used RFDs. */ | ||
1595 | if (status & IPG_IS_RFD_LIST_END) { | ||
1596 | IPG_DEBUG_MSG("RFDListEnd Interrupt\n"); | ||
1597 | |||
1598 | /* The RFD list end indicates an RFD was encountered | ||
1599 | * with a 0 NextPtr, or with an RFDDone bit set to 1 | ||
1600 | * (indicating the RFD is not ready for use by the | ||
1601 | * IPG.) Try to restore all RFDs. | ||
1602 | */ | ||
1603 | ipg_nic_rxrestore(dev); | ||
1604 | |||
1605 | #ifdef IPG_DEBUG | ||
1606 | /* Increment the RFDlistendCount counter. */ | ||
1607 | sp->RFDlistendCount++; | ||
1608 | #endif | ||
1609 | } | ||
1610 | |||
1611 | /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or | ||
1612 | * IntRequested interrupt, process received frames. */ | ||
1613 | if ((status & IPG_IS_RX_DMA_PRIORITY) || | ||
1614 | (status & IPG_IS_RFD_LIST_END) || | ||
1615 | (status & IPG_IS_RX_DMA_COMPLETE) || | ||
1616 | (status & IPG_IS_INT_REQUESTED)) { | ||
1617 | #ifdef IPG_DEBUG | ||
1618 | /* Increment the RFD list checked counter if interrupted | ||
1619 | * only to check the RFD list. */ | ||
1620 | if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | | ||
1621 | IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & | ||
1622 | (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | | ||
1623 | IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | | ||
1624 | IPG_IS_UPDATE_STATS))) | ||
1625 | sp->RFDListCheckedCount++; | ||
1626 | #endif | ||
1627 | |||
1628 | if (sp->is_jumbo) | ||
1629 | ipg_nic_rx_jumbo(dev); | ||
1630 | else | ||
1631 | ipg_nic_rx(dev); | ||
1632 | } | ||
1633 | |||
1634 | /* If TxDMAComplete interrupt, free used TFDs. */ | ||
1635 | if (status & IPG_IS_TX_DMA_COMPLETE) | ||
1636 | ipg_nic_txfree(dev); | ||
1637 | |||
1638 | /* TxComplete interrupts indicate one of numerous actions. | ||
1639 | * Determine what action to take based on TXSTATUS register. | ||
1640 | */ | ||
1641 | if (status & IPG_IS_TX_COMPLETE) | ||
1642 | ipg_nic_txcleanup(dev); | ||
1643 | |||
1644 | /* If UpdateStats interrupt, update Linux Ethernet statistics */ | ||
1645 | if (status & IPG_IS_UPDATE_STATS) | ||
1646 | ipg_nic_get_stats(dev); | ||
1647 | |||
1648 | /* If HostError interrupt, reset IPG. */ | ||
1649 | if (status & IPG_IS_HOST_ERROR) { | ||
1650 | IPG_DDEBUG_MSG("HostError Interrupt\n"); | ||
1651 | |||
1652 | schedule_delayed_work(&sp->task, 0); | ||
1653 | } | ||
1654 | |||
1655 | /* If LinkEvent interrupt, resolve autonegotiation. */ | ||
1656 | if (status & IPG_IS_LINK_EVENT) { | ||
1657 | if (ipg_config_autoneg(dev) < 0) | ||
1658 | netdev_info(dev, "Auto-negotiation error\n"); | ||
1659 | } | ||
1660 | |||
1661 | /* If MACCtrlFrame interrupt, do nothing. */ | ||
1662 | if (status & IPG_IS_MAC_CTRL_FRAME) | ||
1663 | IPG_DEBUG_MSG("MACCtrlFrame interrupt\n"); | ||
1664 | |||
1665 | /* If RxComplete interrupt, do nothing. */ | ||
1666 | if (status & IPG_IS_RX_COMPLETE) | ||
1667 | IPG_DEBUG_MSG("RxComplete interrupt\n"); | ||
1668 | |||
1669 | /* If RxEarly interrupt, do nothing. */ | ||
1670 | if (status & IPG_IS_RX_EARLY) | ||
1671 | IPG_DEBUG_MSG("RxEarly interrupt\n"); | ||
1672 | |||
1673 | out_enable: | ||
1674 | /* Re-enable IPG interrupts. */ | ||
1675 | ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | | ||
1676 | IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | | ||
1677 | IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); | ||
1678 | out_unlock: | ||
1679 | spin_unlock(&sp->lock); | ||
1680 | |||
1681 | return IRQ_RETVAL(handled); | ||
1682 | } | ||
1683 | |||
1684 | static void ipg_rx_clear(struct ipg_nic_private *sp) | ||
1685 | { | ||
1686 | unsigned int i; | ||
1687 | |||
1688 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
1689 | if (sp->rx_buff[i]) { | ||
1690 | struct ipg_rx *rxfd = sp->rxd + i; | ||
1691 | |||
1692 | dev_kfree_skb_irq(sp->rx_buff[i]); | ||
1693 | sp->rx_buff[i] = NULL; | ||
1694 | pci_unmap_single(sp->pdev, | ||
1695 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1696 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1697 | } | ||
1698 | } | ||
1699 | } | ||
1700 | |||
1701 | static void ipg_tx_clear(struct ipg_nic_private *sp) | ||
1702 | { | ||
1703 | unsigned int i; | ||
1704 | |||
1705 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
1706 | if (sp->tx_buff[i]) { | ||
1707 | struct ipg_tx *txfd = sp->txd + i; | ||
1708 | |||
1709 | pci_unmap_single(sp->pdev, | ||
1710 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
1711 | sp->tx_buff[i]->len, PCI_DMA_TODEVICE); | ||
1712 | |||
1713 | dev_kfree_skb_irq(sp->tx_buff[i]); | ||
1714 | |||
1715 | sp->tx_buff[i] = NULL; | ||
1716 | } | ||
1717 | } | ||
1718 | } | ||
1719 | |||
1720 | static int ipg_nic_open(struct net_device *dev) | ||
1721 | { | ||
1722 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1723 | void __iomem *ioaddr = sp->ioaddr; | ||
1724 | struct pci_dev *pdev = sp->pdev; | ||
1725 | int rc; | ||
1726 | |||
1727 | IPG_DEBUG_MSG("_nic_open\n"); | ||
1728 | |||
1729 | sp->rx_buf_sz = sp->rxsupport_size; | ||
1730 | |||
1731 | /* Check for interrupt line conflicts, and request interrupt | ||
1732 | * line for IPG. | ||
1733 | * | ||
1734 | * IMPORTANT: Disable IPG interrupts prior to registering | ||
1735 | * IRQ. | ||
1736 | */ | ||
1737 | ipg_w16(0x0000, INT_ENABLE); | ||
1738 | |||
1739 | /* Register the interrupt line to be used by the IPG within | ||
1740 | * the Linux system. | ||
1741 | */ | ||
1742 | rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, | ||
1743 | dev->name, dev); | ||
1744 | if (rc < 0) { | ||
1745 | netdev_info(dev, "Error when requesting interrupt\n"); | ||
1746 | goto out; | ||
1747 | } | ||
1748 | |||
1749 | dev->irq = pdev->irq; | ||
1750 | |||
1751 | rc = -ENOMEM; | ||
1752 | |||
1753 | sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, | ||
1754 | &sp->rxd_map, GFP_KERNEL); | ||
1755 | if (!sp->rxd) | ||
1756 | goto err_free_irq_0; | ||
1757 | |||
1758 | sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, | ||
1759 | &sp->txd_map, GFP_KERNEL); | ||
1760 | if (!sp->txd) | ||
1761 | goto err_free_rx_1; | ||
1762 | |||
1763 | rc = init_rfdlist(dev); | ||
1764 | if (rc < 0) { | ||
1765 | netdev_info(dev, "Error during configuration\n"); | ||
1766 | goto err_free_tx_2; | ||
1767 | } | ||
1768 | |||
1769 | init_tfdlist(dev); | ||
1770 | |||
1771 | rc = ipg_io_config(dev); | ||
1772 | if (rc < 0) { | ||
1773 | netdev_info(dev, "Error during configuration\n"); | ||
1774 | goto err_release_tfdlist_3; | ||
1775 | } | ||
1776 | |||
1777 | /* Resolve autonegotiation. */ | ||
1778 | if (ipg_config_autoneg(dev) < 0) | ||
1779 | netdev_info(dev, "Auto-negotiation error\n"); | ||
1780 | |||
1781 | /* Initialize the jumbo frame control variables. */ | ||
1782 | sp->jumbo.found_start = 0; | ||
1783 | sp->jumbo.current_size = 0; | ||
1784 | sp->jumbo.skb = NULL; | ||
1785 | |||
1786 | /* Enable transmit and receive operation of the IPG. */ | ||
1787 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & | ||
1788 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
1789 | |||
1790 | netif_start_queue(dev); | ||
1791 | out: | ||
1792 | return rc; | ||
1793 | |||
1794 | err_release_tfdlist_3: | ||
1795 | ipg_tx_clear(sp); | ||
1796 | ipg_rx_clear(sp); | ||
1797 | err_free_tx_2: | ||
1798 | dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); | ||
1799 | err_free_rx_1: | ||
1800 | dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); | ||
1801 | err_free_irq_0: | ||
1802 | free_irq(pdev->irq, dev); | ||
1803 | goto out; | ||
1804 | } | ||
1805 | |||
1806 | static int ipg_nic_stop(struct net_device *dev) | ||
1807 | { | ||
1808 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1809 | void __iomem *ioaddr = sp->ioaddr; | ||
1810 | struct pci_dev *pdev = sp->pdev; | ||
1811 | |||
1812 | IPG_DEBUG_MSG("_nic_stop\n"); | ||
1813 | |||
1814 | netif_stop_queue(dev); | ||
1815 | |||
1816 | IPG_DUMPTFDLIST(dev); | ||
1817 | |||
1818 | do { | ||
1819 | (void) ipg_r16(INT_STATUS_ACK); | ||
1820 | |||
1821 | ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); | ||
1822 | |||
1823 | synchronize_irq(pdev->irq); | ||
1824 | } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); | ||
1825 | |||
1826 | ipg_rx_clear(sp); | ||
1827 | |||
1828 | ipg_tx_clear(sp); | ||
1829 | |||
1830 | pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); | ||
1831 | pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); | ||
1832 | |||
1833 | free_irq(pdev->irq, dev); | ||
1834 | |||
1835 | return 0; | ||
1836 | } | ||
1837 | |||
1838 | static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, | ||
1839 | struct net_device *dev) | ||
1840 | { | ||
1841 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1842 | void __iomem *ioaddr = sp->ioaddr; | ||
1843 | unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; | ||
1844 | unsigned long flags; | ||
1845 | struct ipg_tx *txfd; | ||
1846 | |||
1847 | IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); | ||
1848 | |||
1849 | /* If in 10Mbps mode, stop the transmit queue so | ||
1850 | * no more transmit frames are accepted. | ||
1851 | */ | ||
1852 | if (sp->tenmbpsmode) | ||
1853 | netif_stop_queue(dev); | ||
1854 | |||
1855 | if (sp->reset_current_tfd) { | ||
1856 | sp->reset_current_tfd = 0; | ||
1857 | entry = 0; | ||
1858 | } | ||
1859 | |||
1860 | txfd = sp->txd + entry; | ||
1861 | |||
1862 | sp->tx_buff[entry] = skb; | ||
1863 | |||
1864 | /* Clear all TFC fields, except TFDDONE. */ | ||
1865 | txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); | ||
1866 | |||
1867 | /* Specify the TFC field within the TFD. */ | ||
1868 | txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | | ||
1869 | (IPG_TFC_FRAMEID & sp->tx_current) | | ||
1870 | (IPG_TFC_FRAGCOUNT & (1 << 24))); | ||
1871 | /* | ||
1872 | * 16--17 (WordAlign) <- 3 (disable), | ||
1873 | * 0--15 (FrameId) <- sp->tx_current, | ||
1874 | * 24--27 (FragCount) <- 1 | ||
1875 | */ | ||
1876 | |||
1877 | /* Request TxComplete interrupts at an interval defined | ||
1878 | * by the constant IPG_FRAMESBETWEENTXCOMPLETES. | ||
1879 | * Request TxComplete interrupt for every frame | ||
1880 | * if in 10Mbps mode to accommodate problem with 10Mbps | ||
1881 | * processing. | ||
1882 | */ | ||
1883 | if (sp->tenmbpsmode) | ||
1884 | txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); | ||
1885 | txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); | ||
1886 | /* Based on compilation option, determine if FCS is to be | ||
1887 | * appended to transmit frame by IPG. | ||
1888 | */ | ||
1889 | if (!(IPG_APPEND_FCS_ON_TX)) | ||
1890 | txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); | ||
1891 | |||
1892 | /* Based on compilation option, determine if IP, TCP and/or | ||
1893 | * UDP checksums are to be added to transmit frame by IPG. | ||
1894 | */ | ||
1895 | if (IPG_ADD_IPCHECKSUM_ON_TX) | ||
1896 | txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); | ||
1897 | |||
1898 | if (IPG_ADD_TCPCHECKSUM_ON_TX) | ||
1899 | txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); | ||
1900 | |||
1901 | if (IPG_ADD_UDPCHECKSUM_ON_TX) | ||
1902 | txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); | ||
1903 | |||
1904 | /* Based on compilation option, determine if VLAN tag info is to be | ||
1905 | * inserted into transmit frame by IPG. | ||
1906 | */ | ||
1907 | if (IPG_INSERT_MANUAL_VLAN_TAG) { | ||
1908 | txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | | ||
1909 | ((u64) IPG_MANUAL_VLAN_VID << 32) | | ||
1910 | ((u64) IPG_MANUAL_VLAN_CFI << 44) | | ||
1911 | ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); | ||
1912 | } | ||
1913 | |||
1914 | /* The fragment start location within system memory is defined | ||
1915 | * by the sk_buff structure's data field. The physical address | ||
1916 | * of this location within the system's virtual memory space | ||
1917 | * is determined using the IPG_HOST2BUS_MAP function. | ||
1918 | */ | ||
1919 | txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, | ||
1920 | skb->len, PCI_DMA_TODEVICE)); | ||
1921 | |||
1922 | /* The length of the fragment within system memory is defined by | ||
1923 | * the sk_buff structure's len field. | ||
1924 | */ | ||
1925 | txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & | ||
1926 | ((u64) (skb->len & 0xffff) << 48)); | ||
1927 | |||
1928 | /* Clear the TFDDone bit last to indicate the TFD is ready | ||
1929 | * for transfer to the IPG. | ||
1930 | */ | ||
1931 | txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); | ||
1932 | |||
1933 | spin_lock_irqsave(&sp->lock, flags); | ||
1934 | |||
1935 | sp->tx_current++; | ||
1936 | |||
1937 | mmiowb(); | ||
1938 | |||
1939 | ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); | ||
1940 | |||
1941 | if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) | ||
1942 | netif_stop_queue(dev); | ||
1943 | |||
1944 | spin_unlock_irqrestore(&sp->lock, flags); | ||
1945 | |||
1946 | return NETDEV_TX_OK; | ||
1947 | } | ||
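/* A stand-alone sketch (not part of the driver) of the TFC/FragInfo bit
 * packing described in the comments above: FrameId in bits 0-15, WordAlign
 * in bits 16-17 (3 = disabled), FragCount in bits 24-27, and the fragment
 * length in FragInfo bits 48-63.  The helper names are invented for
 * illustration; the real code ORs the IPG_TFC_ and IPG_TFI_ masks directly
 * and wraps the results in cpu_to_le64(), which is omitted here.
 */
#include <stdint.h>

static uint64_t example_pack_tfc(uint16_t frame_id)
{
	uint64_t tfc = 0;

	tfc |= frame_id;			/* bits 0-15:  FrameId */
	tfc |= (uint64_t)3 << 16;		/* bits 16-17: WordAlign disabled */
	tfc |= (uint64_t)1 << 24;		/* bits 24-27: FragCount = 1 */
	return tfc;
}

static uint64_t example_pack_frag_info(uint64_t dma_addr, uint16_t len)
{
	return (dma_addr & 0x000000FFFFFFFFFFULL) |	/* bits 0-39:  FragAddr */
	       ((uint64_t)len << 48);			/* bits 48-63: FragLen */
}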
1948 | |||
1949 | static void ipg_set_phy_default_param(unsigned char rev, | ||
1950 | struct net_device *dev, int phy_address) | ||
1951 | { | ||
1952 | unsigned short length; | ||
1953 | unsigned char revision; | ||
1954 | const unsigned short *phy_param; | ||
1955 | unsigned short address, value; | ||
1956 | |||
1957 | phy_param = &DefaultPhyParam[0]; | ||
1958 | length = *phy_param & 0x00FF; | ||
1959 | revision = (unsigned char)((*phy_param) >> 8); | ||
1960 | phy_param++; | ||
1961 | while (length != 0) { | ||
1962 | if (rev == revision) { | ||
1963 | while (length > 1) { | ||
1964 | address = *phy_param; | ||
1965 | value = *(phy_param + 1); | ||
1966 | phy_param += 2; | ||
1967 | mdio_write(dev, phy_address, address, value); | ||
1968 | length -= 4; | ||
1969 | } | ||
1970 | break; | ||
1971 | } else { | ||
1972 | phy_param += length / 2; | ||
1973 | length = *phy_param & 0x00FF; | ||
1974 | revision = (unsigned char)((*phy_param) >> 8); | ||
1975 | phy_param++; | ||
1976 | } | ||
1977 | } | ||
1978 | } | ||
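/* An illustrative table (values invented) of the layout the walker above
 * assumes for DefaultPhyParam[]: each block starts with a header word that
 * holds the silicon revision in its high byte and the block's data length
 * in bytes in its low byte, followed by that many bytes of PHY register
 * address/value pairs; a header with a zero length ends the table.
 */
static const unsigned short example_phy_param[] = {
	(0x40 << 8) | 8,	/* revision 0x40, 8 bytes (two pairs) of data */
	0x001f, 0x0001,		/* write 0x0001 to PHY register 0x1f */
	0x0019, 0xfe00,		/* write 0xfe00 to PHY register 0x19 */
	0x0000,			/* length 0: end of table */
};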
1979 | |||
1980 | static int read_eeprom(struct net_device *dev, int eep_addr) | ||
1981 | { | ||
1982 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
1983 | unsigned int i; | ||
1984 | int ret = 0; | ||
1985 | u16 value; | ||
1986 | |||
1987 | value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); | ||
1988 | ipg_w16(value, EEPROM_CTRL); | ||
1989 | |||
1990 | for (i = 0; i < 1000; i++) { | ||
1991 | u16 data; | ||
1992 | |||
1993 | mdelay(10); | ||
1994 | data = ipg_r16(EEPROM_CTRL); | ||
1995 | if (!(data & IPG_EC_EEPROM_BUSY)) { | ||
1996 | ret = ipg_r16(EEPROM_DATA); | ||
1997 | break; | ||
1998 | } | ||
1999 | } | ||
2000 | return ret; | ||
2001 | } | ||
2002 | |||
2003 | static void ipg_init_mii(struct net_device *dev) | ||
2004 | { | ||
2005 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2006 | struct mii_if_info *mii_if = &sp->mii_if; | ||
2007 | int phyaddr; | ||
2008 | |||
2009 | mii_if->dev = dev; | ||
2010 | mii_if->mdio_read = mdio_read; | ||
2011 | mii_if->mdio_write = mdio_write; | ||
2012 | mii_if->phy_id_mask = 0x1f; | ||
2013 | mii_if->reg_num_mask = 0x1f; | ||
2014 | |||
2015 | mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); | ||
2016 | |||
2017 | if (phyaddr != 0x1f) { | ||
2018 | u16 mii_phyctrl, mii_1000cr; | ||
2019 | |||
2020 | mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); | ||
2021 | mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | | ||
2022 | GMII_PHY_1000BASETCONTROL_PreferMaster; | ||
2023 | mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); | ||
2024 | |||
2025 | mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); | ||
2026 | |||
2027 | /* Set default phyparam */ | ||
2028 | ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); | ||
2029 | |||
2030 | /* Reset PHY */ | ||
2031 | mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; | ||
2032 | mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); | ||
2033 | |||
2034 | } | ||
2035 | } | ||
2036 | |||
2037 | static int ipg_hw_init(struct net_device *dev) | ||
2038 | { | ||
2039 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2040 | void __iomem *ioaddr = sp->ioaddr; | ||
2041 | unsigned int i; | ||
2042 | int rc; | ||
2043 | |||
2044 | /* Read/Write and Reset EEPROM Value */ | ||
2045 | /* Read LED Mode Configuration from EEPROM */ | ||
2046 | sp->led_mode = read_eeprom(dev, 6); | ||
2047 | |||
2048 | /* Reset all functions within the IPG. Do not assert | ||
2049 | * RST_OUT, as it is not compatible with some PHYs. | ||
2050 | */ | ||
2051 | rc = ipg_reset(dev, IPG_RESET_MASK); | ||
2052 | if (rc < 0) | ||
2053 | goto out; | ||
2054 | |||
2055 | ipg_init_mii(dev); | ||
2056 | |||
2057 | /* Read MAC Address from EEPROM */ | ||
2058 | for (i = 0; i < 3; i++) | ||
2059 | sp->station_addr[i] = read_eeprom(dev, 16 + i); | ||
2060 | |||
2061 | for (i = 0; i < 3; i++) | ||
2062 | ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); | ||
2063 | |||
2064 | /* Set the station address in the net_device structure. */ | ||
2065 | dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; | ||
2066 | dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; | ||
2067 | dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; | ||
2068 | dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; | ||
2069 | dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; | ||
2070 | dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; | ||
2071 | out: | ||
2072 | return rc; | ||
2073 | } | ||
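/* A stand-alone illustration (not driver code) of the byte order used above
 * when splitting the three 16-bit station-address words into dev_addr[]:
 * each word contributes its low byte first.  For example, words
 * {0x3412, 0x7856, 0xbc9a} yield the MAC address 12:34:56:78:9a:bc.
 */
#include <stdint.h>

static void example_station_words_to_mac(const uint16_t w[3], uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[2 * i]     = w[i] & 0x00ff;		/* low byte first */
		mac[2 * i + 1] = (w[i] & 0xff00) >> 8;	/* then high byte */
	}
}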
2074 | |||
2075 | static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2076 | { | ||
2077 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2078 | int rc; | ||
2079 | |||
2080 | mutex_lock(&sp->mii_mutex); | ||
2081 | rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); | ||
2082 | mutex_unlock(&sp->mii_mutex); | ||
2083 | |||
2084 | return rc; | ||
2085 | } | ||
2086 | |||
2087 | static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) | ||
2088 | { | ||
2089 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2090 | int err; | ||
2091 | |||
2092 | /* Function to accommodate changes to the Maximum Transmission Unit | ||
2093 | * (MTU) of the IPG NIC. Cannot use the default function since | ||
2094 | * the default will not allow for MTU > 1500 bytes. | ||
2095 | */ | ||
2096 | |||
2097 | IPG_DEBUG_MSG("_nic_change_mtu\n"); | ||
2098 | |||
2099 | /* | ||
2100 | * Check that the new MTU value is between 68 (the minimum MTU | ||
2101 | * required by IPv4) and 10 KB, which is the largest supported MTU. | ||
2102 | */ | ||
2103 | if (new_mtu < 68 || new_mtu > 10240) | ||
2104 | return -EINVAL; | ||
2105 | |||
2106 | err = ipg_nic_stop(dev); | ||
2107 | if (err) | ||
2108 | return err; | ||
2109 | |||
2110 | dev->mtu = new_mtu; | ||
2111 | |||
2112 | sp->max_rxframe_size = new_mtu; | ||
2113 | |||
2114 | sp->rxfrag_size = new_mtu; | ||
2115 | if (sp->rxfrag_size > 4088) | ||
2116 | sp->rxfrag_size = 4088; | ||
2117 | |||
2118 | sp->rxsupport_size = sp->max_rxframe_size; | ||
2119 | |||
2120 | if (new_mtu > 0x0600) | ||
2121 | sp->is_jumbo = true; | ||
2122 | else | ||
2123 | sp->is_jumbo = false; | ||
2124 | |||
2125 | return ipg_nic_open(dev); | ||
2126 | } | ||
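/* A stand-alone sketch (the helper and struct names are invented) of the
 * size derivation performed above: rxfrag_size follows the MTU but is
 * capped at 4088 bytes, rxsupport_size follows the MTU, and anything above
 * 0x600 (1536) bytes selects jumbo mode.  E.g. MTU 1500 gives rxfrag_size
 * 1500 and no jumbo mode; MTU 9000 gives rxfrag_size 4088 and jumbo mode.
 */
#include <stdbool.h>

struct example_rx_sizes {
	int max_rxframe_size;
	int rxfrag_size;
	int rxsupport_size;
	bool is_jumbo;
};

static struct example_rx_sizes example_derive_rx_sizes(int mtu)
{
	struct example_rx_sizes s;

	s.max_rxframe_size = mtu;
	s.rxfrag_size = (mtu > 4088) ? 4088 : mtu;
	s.rxsupport_size = s.max_rxframe_size;
	s.is_jumbo = (mtu > 0x0600);
	return s;
}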
2127 | |||
2128 | static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2129 | { | ||
2130 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2131 | int rc; | ||
2132 | |||
2133 | mutex_lock(&sp->mii_mutex); | ||
2134 | rc = mii_ethtool_gset(&sp->mii_if, cmd); | ||
2135 | mutex_unlock(&sp->mii_mutex); | ||
2136 | |||
2137 | return rc; | ||
2138 | } | ||
2139 | |||
2140 | static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2141 | { | ||
2142 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2143 | int rc; | ||
2144 | |||
2145 | mutex_lock(&sp->mii_mutex); | ||
2146 | rc = mii_ethtool_sset(&sp->mii_if, cmd); | ||
2147 | mutex_unlock(&sp->mii_mutex); | ||
2148 | |||
2149 | return rc; | ||
2150 | } | ||
2151 | |||
2152 | static int ipg_nway_reset(struct net_device *dev) | ||
2153 | { | ||
2154 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2155 | int rc; | ||
2156 | |||
2157 | mutex_lock(&sp->mii_mutex); | ||
2158 | rc = mii_nway_restart(&sp->mii_if); | ||
2159 | mutex_unlock(&sp->mii_mutex); | ||
2160 | |||
2161 | return rc; | ||
2162 | } | ||
2163 | |||
2164 | static const struct ethtool_ops ipg_ethtool_ops = { | ||
2165 | .get_settings = ipg_get_settings, | ||
2166 | .set_settings = ipg_set_settings, | ||
2167 | .nway_reset = ipg_nway_reset, | ||
2168 | }; | ||
2169 | |||
2170 | static void ipg_remove(struct pci_dev *pdev) | ||
2171 | { | ||
2172 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2173 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2174 | |||
2175 | IPG_DEBUG_MSG("_remove\n"); | ||
2176 | |||
2177 | /* Un-register Ethernet device. */ | ||
2178 | unregister_netdev(dev); | ||
2179 | |||
2180 | pci_iounmap(pdev, sp->ioaddr); | ||
2181 | |||
2182 | pci_release_regions(pdev); | ||
2183 | |||
2184 | free_netdev(dev); | ||
2185 | pci_disable_device(pdev); | ||
2186 | } | ||
2187 | |||
2188 | static const struct net_device_ops ipg_netdev_ops = { | ||
2189 | .ndo_open = ipg_nic_open, | ||
2190 | .ndo_stop = ipg_nic_stop, | ||
2191 | .ndo_start_xmit = ipg_nic_hard_start_xmit, | ||
2192 | .ndo_get_stats = ipg_nic_get_stats, | ||
2193 | .ndo_set_rx_mode = ipg_nic_set_multicast_list, | ||
2194 | .ndo_do_ioctl = ipg_ioctl, | ||
2195 | .ndo_tx_timeout = ipg_tx_timeout, | ||
2196 | .ndo_change_mtu = ipg_nic_change_mtu, | ||
2197 | .ndo_set_mac_address = eth_mac_addr, | ||
2198 | .ndo_validate_addr = eth_validate_addr, | ||
2199 | }; | ||
2200 | |||
2201 | static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
2202 | { | ||
2203 | unsigned int i = id->driver_data; | ||
2204 | struct ipg_nic_private *sp; | ||
2205 | struct net_device *dev; | ||
2206 | void __iomem *ioaddr; | ||
2207 | int rc; | ||
2208 | |||
2209 | rc = pci_enable_device(pdev); | ||
2210 | if (rc < 0) | ||
2211 | goto out; | ||
2212 | |||
2213 | pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]); | ||
2214 | |||
2215 | pci_set_master(pdev); | ||
2216 | |||
2217 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); | ||
2218 | if (rc < 0) { | ||
2219 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2220 | if (rc < 0) { | ||
2221 | pr_err("%s: DMA config failed\n", pci_name(pdev)); | ||
2222 | goto err_disable_0; | ||
2223 | } | ||
2224 | } | ||
2225 | |||
2226 | /* | ||
2227 | * Initialize net device. | ||
2228 | */ | ||
2229 | dev = alloc_etherdev(sizeof(struct ipg_nic_private)); | ||
2230 | if (!dev) { | ||
2231 | rc = -ENOMEM; | ||
2232 | goto err_disable_0; | ||
2233 | } | ||
2234 | |||
2235 | sp = netdev_priv(dev); | ||
2236 | spin_lock_init(&sp->lock); | ||
2237 | mutex_init(&sp->mii_mutex); | ||
2238 | |||
2239 | sp->is_jumbo = IPG_IS_JUMBO; | ||
2240 | sp->rxfrag_size = IPG_RXFRAG_SIZE; | ||
2241 | sp->rxsupport_size = IPG_RXSUPPORT_SIZE; | ||
2242 | sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; | ||
2243 | |||
2244 | /* Declare IPG NIC functions for Ethernet device methods. | ||
2245 | */ | ||
2246 | dev->netdev_ops = &ipg_netdev_ops; | ||
2247 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2248 | dev->ethtool_ops = &ipg_ethtool_ops; | ||
2249 | |||
2250 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2251 | if (rc) | ||
2252 | goto err_free_dev_1; | ||
2253 | |||
2254 | ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); | ||
2255 | if (!ioaddr) { | ||
2256 | pr_err("%s: cannot map MMIO\n", pci_name(pdev)); | ||
2257 | rc = -EIO; | ||
2258 | goto err_release_regions_2; | ||
2259 | } | ||
2260 | |||
2261 | /* Save the pointer to the PCI device information. */ | ||
2262 | sp->ioaddr = ioaddr; | ||
2263 | sp->pdev = pdev; | ||
2264 | sp->dev = dev; | ||
2265 | |||
2266 | INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); | ||
2267 | |||
2268 | pci_set_drvdata(pdev, dev); | ||
2269 | |||
2270 | rc = ipg_hw_init(dev); | ||
2271 | if (rc < 0) | ||
2272 | goto err_unmap_3; | ||
2273 | |||
2274 | rc = register_netdev(dev); | ||
2275 | if (rc < 0) | ||
2276 | goto err_unmap_3; | ||
2277 | |||
2278 | netdev_info(dev, "Ethernet device registered\n"); | ||
2279 | out: | ||
2280 | return rc; | ||
2281 | |||
2282 | err_unmap_3: | ||
2283 | pci_iounmap(pdev, ioaddr); | ||
2284 | err_release_regions_2: | ||
2285 | pci_release_regions(pdev); | ||
2286 | err_free_dev_1: | ||
2287 | free_netdev(dev); | ||
2288 | err_disable_0: | ||
2289 | pci_disable_device(pdev); | ||
2290 | goto out; | ||
2291 | } | ||
2292 | |||
2293 | static struct pci_driver ipg_pci_driver = { | ||
2294 | .name = IPG_DRIVER_NAME, | ||
2295 | .id_table = ipg_pci_tbl, | ||
2296 | .probe = ipg_probe, | ||
2297 | .remove = ipg_remove, | ||
2298 | }; | ||
2299 | |||
2300 | module_pci_driver(ipg_pci_driver); | ||
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h deleted file mode 100644 index de606281f97b..000000000000 --- a/drivers/net/ethernet/icplus/ipg.h +++ /dev/null | |||
@@ -1,748 +0,0 @@ | |||
1 | /* | ||
2 | * Include file for Gigabit Ethernet device driver for Network | ||
3 | * Interface Cards (NICs) utilizing the Tamarack Microelectronics | ||
4 | * Inc. IPG Gigabit or Triple Speed Ethernet Media Access | ||
5 | * Controller. | ||
6 | */ | ||
7 | #ifndef __LINUX_IPG_H | ||
8 | #define __LINUX_IPG_H | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/netdevice.h> | ||
20 | #include <linux/etherdevice.h> | ||
21 | #include <linux/skbuff.h> | ||
22 | #include <asm/bitops.h> | ||
23 | |||
24 | /* | ||
25 | * Constants | ||
26 | */ | ||
27 | |||
28 | /* GMII based PHY IDs */ | ||
29 | #define NS 0x2000 | ||
30 | #define MARVELL 0x0141 | ||
31 | #define ICPLUS_PHY 0x243 | ||
32 | |||
33 | /* NIC Physical Layer Device MII register fields. */ | ||
34 | #define MII_PHY_SELECTOR_IEEE8023 0x0001 | ||
35 | #define MII_PHY_TECHABILITYFIELD 0x1FE0 | ||
36 | |||
37 | /* GMII_PHY_1000BASETCONTROL needs to be set to prefer master. */ | ||
38 | #define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 | ||
39 | |||
40 | /* NIC Physical Layer Device GMII constants. */ | ||
41 | #define GMII_PREAMBLE 0xFFFFFFFF | ||
42 | #define GMII_ST 0x1 | ||
43 | #define GMII_READ 0x2 | ||
44 | #define GMII_WRITE 0x1 | ||
45 | #define GMII_TA_READ_MASK 0x1 | ||
46 | #define GMII_TA_WRITE 0x2 | ||
47 | |||
48 | /* I/O register offsets. */ | ||
49 | enum ipg_regs { | ||
50 | DMA_CTRL = 0x00, | ||
51 | RX_DMA_STATUS = 0x08, /* Unused + reserved */ | ||
52 | TFD_LIST_PTR_0 = 0x10, | ||
53 | TFD_LIST_PTR_1 = 0x14, | ||
54 | TX_DMA_BURST_THRESH = 0x18, | ||
55 | TX_DMA_URGENT_THRESH = 0x19, | ||
56 | TX_DMA_POLL_PERIOD = 0x1a, | ||
57 | RFD_LIST_PTR_0 = 0x1c, | ||
58 | RFD_LIST_PTR_1 = 0x20, | ||
59 | RX_DMA_BURST_THRESH = 0x24, | ||
60 | RX_DMA_URGENT_THRESH = 0x25, | ||
61 | RX_DMA_POLL_PERIOD = 0x26, | ||
62 | DEBUG_CTRL = 0x2c, | ||
63 | ASIC_CTRL = 0x30, | ||
64 | FIFO_CTRL = 0x38, /* Unused */ | ||
65 | FLOW_OFF_THRESH = 0x3c, | ||
66 | FLOW_ON_THRESH = 0x3e, | ||
67 | EEPROM_DATA = 0x48, | ||
68 | EEPROM_CTRL = 0x4a, | ||
69 | EXPROM_ADDR = 0x4c, /* Unused */ | ||
70 | EXPROM_DATA = 0x50, /* Unused */ | ||
71 | WAKE_EVENT = 0x51, /* Unused */ | ||
72 | COUNTDOWN = 0x54, /* Unused */ | ||
73 | INT_STATUS_ACK = 0x5a, | ||
74 | INT_ENABLE = 0x5c, | ||
75 | INT_STATUS = 0x5e, /* Unused */ | ||
76 | TX_STATUS = 0x60, | ||
77 | MAC_CTRL = 0x6c, | ||
78 | VLAN_TAG = 0x70, /* Unused */ | ||
79 | PHY_SET = 0x75, | ||
80 | PHY_CTRL = 0x76, | ||
81 | STATION_ADDRESS_0 = 0x78, | ||
82 | STATION_ADDRESS_1 = 0x7a, | ||
83 | STATION_ADDRESS_2 = 0x7c, | ||
84 | MAX_FRAME_SIZE = 0x86, | ||
85 | RECEIVE_MODE = 0x88, | ||
86 | HASHTABLE_0 = 0x8c, | ||
87 | HASHTABLE_1 = 0x90, | ||
88 | RMON_STATISTICS_MASK = 0x98, | ||
89 | STATISTICS_MASK = 0x9c, | ||
90 | RX_JUMBO_FRAMES = 0xbc, /* Unused */ | ||
91 | TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */ | ||
92 | IP_CHECKSUM_ERRORS = 0xc2, /* Unused */ | ||
93 | UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */ | ||
94 | TX_JUMBO_FRAMES = 0xf4 /* Unused */ | ||
95 | }; | ||
96 | |||
97 | /* Ethernet MIB statistic register offsets. */ | ||
98 | #define IPG_OCTETRCVOK 0xA8 | ||
99 | #define IPG_MCSTOCTETRCVDOK 0xAC | ||
100 | #define IPG_BCSTOCTETRCVOK 0xB0 | ||
101 | #define IPG_FRAMESRCVDOK 0xB4 | ||
102 | #define IPG_MCSTFRAMESRCVDOK 0xB8 | ||
103 | #define IPG_BCSTFRAMESRCVDOK 0xBE | ||
104 | #define IPG_MACCONTROLFRAMESRCVD 0xC6 | ||
105 | #define IPG_FRAMETOOLONGERRORS 0xC8 | ||
106 | #define IPG_INRANGELENGTHERRORS 0xCA | ||
107 | #define IPG_FRAMECHECKSEQERRORS 0xCC | ||
108 | #define IPG_FRAMESLOSTRXERRORS 0xCE | ||
109 | #define IPG_OCTETXMTOK 0xD0 | ||
110 | #define IPG_MCSTOCTETXMTOK 0xD4 | ||
111 | #define IPG_BCSTOCTETXMTOK 0xD8 | ||
112 | #define IPG_FRAMESXMTDOK 0xDC | ||
113 | #define IPG_MCSTFRAMESXMTDOK 0xE0 | ||
114 | #define IPG_FRAMESWDEFERREDXMT 0xE4 | ||
115 | #define IPG_LATECOLLISIONS 0xE8 | ||
116 | #define IPG_MULTICOLFRAMES 0xEC | ||
117 | #define IPG_SINGLECOLFRAMES 0xF0 | ||
118 | #define IPG_BCSTFRAMESXMTDOK 0xF6 | ||
119 | #define IPG_CARRIERSENSEERRORS 0xF8 | ||
120 | #define IPG_MACCONTROLFRAMESXMTDOK 0xFA | ||
121 | #define IPG_FRAMESABORTXSCOLLS 0xFC | ||
122 | #define IPG_FRAMESWEXDEFERRAL 0xFE | ||
123 | |||
124 | /* RMON statistic register offsets. */ | ||
125 | #define IPG_ETHERSTATSCOLLISIONS 0x100 | ||
126 | #define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 | ||
127 | #define IPG_ETHERSTATSPKTSTRANSMIT 0x108 | ||
128 | #define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C | ||
129 | #define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 | ||
130 | #define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 | ||
131 | #define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 | ||
132 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C | ||
133 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 | ||
134 | #define IPG_ETHERSTATSCRCALIGNERRORS 0x124 | ||
135 | #define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 | ||
136 | #define IPG_ETHERSTATSFRAGMENTS 0x12C | ||
137 | #define IPG_ETHERSTATSJABBERS 0x130 | ||
138 | #define IPG_ETHERSTATSOCTETS 0x134 | ||
139 | #define IPG_ETHERSTATSPKTS 0x138 | ||
140 | #define IPG_ETHERSTATSPKTS64OCTESTS 0x13C | ||
141 | #define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 | ||
142 | #define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 | ||
143 | #define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 | ||
144 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C | ||
145 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 | ||
146 | |||
147 | /* RMON statistic register equivalents. */ | ||
148 | #define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 | ||
149 | #define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 | ||
150 | #define IPG_ETHERSTATSMULTICASTPKTS 0xB8 | ||
151 | #define IPG_ETHERSTATSBROADCASTPKTS 0xBE | ||
152 | #define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 | ||
153 | #define IPG_ETHERSTATSDROPEVENTS 0xCE | ||
154 | |||
155 | /* Serial EEPROM offsets */ | ||
156 | #define IPG_EEPROM_CONFIGPARAM 0x00 | ||
157 | #define IPG_EEPROM_ASICCTRL 0x01 | ||
158 | #define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 | ||
159 | #define IPG_EEPROM_SUBSYSTEMID 0x03 | ||
160 | #define IPG_EEPROM_STATIONADDRESS0 0x10 | ||
161 | #define IPG_EEPROM_STATIONADDRESS1 0x11 | ||
162 | #define IPG_EEPROM_STATIONADDRESS2 0x12 | ||
163 | |||
164 | /* Register & data structure bit masks */ | ||
165 | |||
166 | /* PCI register masks. */ | ||
167 | |||
168 | /* IOBaseAddress */ | ||
169 | #define IPG_PIB_RSVD_MASK 0xFFFFFE01 | ||
170 | #define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 | ||
171 | #define IPG_PIB_IOBASEADDRIND 0x00000001 | ||
172 | |||
173 | /* MemBaseAddress */ | ||
174 | #define IPG_PMB_RSVD_MASK 0xFFFFFE07 | ||
175 | #define IPG_PMB_MEMBASEADDRIND 0x00000001 | ||
176 | #define IPG_PMB_MEMMAPTYPE 0x00000006 | ||
177 | #define IPG_PMB_MEMMAPTYPE0 0x00000002 | ||
178 | #define IPG_PMB_MEMMAPTYPE1 0x00000004 | ||
179 | #define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 | ||
180 | |||
181 | /* ConfigStatus */ | ||
182 | #define IPG_CS_RSVD_MASK 0xFFB0 | ||
183 | #define IPG_CS_CAPABILITIES 0x0010 | ||
184 | #define IPG_CS_66MHZCAPABLE 0x0020 | ||
185 | #define IPG_CS_FASTBACK2BACK 0x0080 | ||
186 | #define IPG_CS_DATAPARITYREPORTED 0x0100 | ||
187 | #define IPG_CS_DEVSELTIMING 0x0600 | ||
188 | #define IPG_CS_SIGNALEDTARGETABORT 0x0800 | ||
189 | #define IPG_CS_RECEIVEDTARGETABORT 0x1000 | ||
190 | #define IPG_CS_RECEIVEDMASTERABORT 0x2000 | ||
191 | #define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 | ||
192 | #define IPG_CS_DETECTEDPARITYERROR 0x8000 | ||
193 | |||
194 | /* TFD data structure masks. */ | ||
195 | |||
196 | /* TFDList, TFC */ | ||
197 | #define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL | ||
198 | #define IPG_TFC_FRAMEID 0x000000000000FFFFULL | ||
199 | #define IPG_TFC_WORDALIGN 0x0000000000030000ULL | ||
200 | #define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL | ||
201 | #define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL | ||
202 | #define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL | ||
203 | #define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL | ||
204 | #define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL | ||
205 | #define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL | ||
206 | #define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL | ||
207 | #define IPG_TFC_TXINDICATE 0x0000000000400000ULL | ||
208 | #define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL | ||
209 | #define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL | ||
210 | #define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL | ||
211 | #define IPG_TFC_TFDDONE 0x0000000080000000ULL | ||
212 | #define IPG_TFC_VID 0x00000FFF00000000ULL | ||
213 | #define IPG_TFC_CFI 0x0000100000000000ULL | ||
214 | #define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL | ||
215 | |||
216 | /* TFDList, FragInfo */ | ||
217 | #define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL | ||
218 | #define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL | ||
219 | #define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL | ||
220 | |||
221 | /* RFD data structure masks. */ | ||
222 | |||
223 | /* RFDList, RFS */ | ||
224 | #define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL | ||
225 | #define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL | ||
226 | #define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL | ||
227 | #define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL | ||
228 | #define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL | ||
229 | #define IPG_RFS_RXFCSERROR 0x0000000000080000ULL | ||
230 | #define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL | ||
231 | #define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL | ||
232 | #define IPG_RFS_VLANDETECTED 0x0000000000400000ULL | ||
233 | #define IPG_RFS_TCPDETECTED 0x0000000000800000ULL | ||
234 | #define IPG_RFS_TCPERROR 0x0000000001000000ULL | ||
235 | #define IPG_RFS_UDPDETECTED 0x0000000002000000ULL | ||
236 | #define IPG_RFS_UDPERROR 0x0000000004000000ULL | ||
237 | #define IPG_RFS_IPDETECTED 0x0000000008000000ULL | ||
238 | #define IPG_RFS_IPERROR 0x0000000010000000ULL | ||
239 | #define IPG_RFS_FRAMESTART 0x0000000020000000ULL | ||
240 | #define IPG_RFS_FRAMEEND 0x0000000040000000ULL | ||
241 | #define IPG_RFS_RFDDONE 0x0000000080000000ULL | ||
242 | #define IPG_RFS_TCI 0x0000FFFF00000000ULL | ||
243 | |||
244 | /* RFDList, FragInfo */ | ||
245 | #define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL | ||
246 | #define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL | ||
247 | #define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL | ||
248 | |||
249 | /* I/O Register masks. */ | ||
250 | |||
251 | /* RMON Statistics Mask */ | ||
252 | #define IPG_RZ_ALL 0x0FFFFFFF | ||
253 | |||
254 | /* Statistics Mask */ | ||
255 | #define IPG_SM_ALL 0x0FFFFFFF | ||
256 | #define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 | ||
257 | #define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 | ||
258 | #define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 | ||
259 | #define IPG_SM_RXJUMBOFRAMES 0x00000008 | ||
260 | #define IPG_SM_TCPCHECKSUMERRORS 0x00000010 | ||
261 | #define IPG_SM_IPCHECKSUMERRORS 0x00000020 | ||
262 | #define IPG_SM_UDPCHECKSUMERRORS 0x00000040 | ||
263 | #define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 | ||
264 | #define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 | ||
265 | #define IPG_SM_INRANGELENGTHERRORS 0x00000200 | ||
266 | #define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 | ||
267 | #define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 | ||
268 | #define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 | ||
269 | #define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 | ||
270 | #define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 | ||
271 | #define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 | ||
272 | #define IPG_SM_LATECOLLISIONS 0x00010000 | ||
273 | #define IPG_SM_MULTICOLFRAMES 0x00020000 | ||
274 | #define IPG_SM_SINGLECOLFRAMES 0x00040000 | ||
275 | #define IPG_SM_TXJUMBOFRAMES 0x00080000 | ||
276 | #define IPG_SM_CARRIERSENSEERRORS 0x00100000 | ||
277 | #define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 | ||
278 | #define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 | ||
279 | #define IPG_SM_FRAMESWEXDEFERAL 0x00800000 | ||
280 | |||
281 | /* Countdown */ | ||
282 | #define IPG_CD_RSVD_MASK 0x0700FFFF | ||
283 | #define IPG_CD_COUNT 0x0000FFFF | ||
284 | #define IPG_CD_COUNTDOWNSPEED 0x01000000 | ||
285 | #define IPG_CD_COUNTDOWNMODE 0x02000000 | ||
286 | #define IPG_CD_COUNTINTENABLED 0x04000000 | ||
287 | |||
288 | /* TxDMABurstThresh */ | ||
289 | #define IPG_TB_RSVD_MASK 0xFF | ||
290 | |||
291 | /* TxDMAUrgentThresh */ | ||
292 | #define IPG_TU_RSVD_MASK 0xFF | ||
293 | |||
294 | /* TxDMAPollPeriod */ | ||
295 | #define IPG_TP_RSVD_MASK 0xFF | ||
296 | |||
297 | /* RxDMAUrgentThresh */ | ||
298 | #define IPG_RU_RSVD_MASK 0xFF | ||
299 | |||
300 | /* RxDMAPollPeriod */ | ||
301 | #define IPG_RP_RSVD_MASK 0xFF | ||
302 | |||
303 | /* ReceiveMode */ | ||
304 | #define IPG_RM_RSVD_MASK 0x3F | ||
305 | #define IPG_RM_RECEIVEUNICAST 0x01 | ||
306 | #define IPG_RM_RECEIVEMULTICAST 0x02 | ||
307 | #define IPG_RM_RECEIVEBROADCAST 0x04 | ||
308 | #define IPG_RM_RECEIVEALLFRAMES 0x08 | ||
309 | #define IPG_RM_RECEIVEMULTICASTHASH 0x10 | ||
310 | #define IPG_RM_RECEIVEIPMULTICAST 0x20 | ||
311 | |||
312 | /* PhySet */ | ||
313 | #define IPG_PS_MEM_LENB9B 0x01 | ||
314 | #define IPG_PS_MEM_LEN9 0x02 | ||
315 | #define IPG_PS_NON_COMPDET 0x04 | ||
316 | |||
317 | /* PhyCtrl */ | ||
318 | #define IPG_PC_RSVD_MASK 0xFF | ||
319 | #define IPG_PC_MGMTCLK_LO 0x00 | ||
320 | #define IPG_PC_MGMTCLK_HI 0x01 | ||
321 | #define IPG_PC_MGMTCLK 0x01 | ||
322 | #define IPG_PC_MGMTDATA 0x02 | ||
323 | #define IPG_PC_MGMTDIR 0x04 | ||
324 | #define IPG_PC_DUPLEX_POLARITY 0x08 | ||
325 | #define IPG_PC_DUPLEX_STATUS 0x10 | ||
326 | #define IPG_PC_LINK_POLARITY 0x20 | ||
327 | #define IPG_PC_LINK_SPEED 0xC0 | ||
328 | #define IPG_PC_LINK_SPEED_10MBPS 0x40 | ||
329 | #define IPG_PC_LINK_SPEED_100MBPS 0x80 | ||
330 | #define IPG_PC_LINK_SPEED_1000MBPS 0xC0 | ||
331 | |||
332 | /* DMACtrl */ | ||
333 | #define IPG_DC_RSVD_MASK 0xC07D9818 | ||
334 | #define IPG_DC_RX_DMA_COMPLETE 0x00000008 | ||
335 | #define IPG_DC_RX_DMA_POLL_NOW 0x00000010 | ||
336 | #define IPG_DC_TX_DMA_COMPLETE 0x00000800 | ||
337 | #define IPG_DC_TX_DMA_POLL_NOW 0x00001000 | ||
338 | #define IPG_DC_TX_DMA_IN_PROG 0x00008000 | ||
339 | #define IPG_DC_RX_EARLY_DISABLE 0x00010000 | ||
340 | #define IPG_DC_MWI_DISABLE 0x00040000 | ||
341 | #define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 | ||
342 | #define IPG_DC_TX_BURST_LIMIT 0x00700000 | ||
343 | #define IPG_DC_TARGET_ABORT 0x40000000 | ||
344 | #define IPG_DC_MASTER_ABORT 0x80000000 | ||
345 | |||
346 | /* ASICCtrl */ | ||
347 | #define IPG_AC_RSVD_MASK 0x07FFEFF2 | ||
348 | #define IPG_AC_EXP_ROM_SIZE 0x00000002 | ||
349 | #define IPG_AC_PHY_SPEED10 0x00000010 | ||
350 | #define IPG_AC_PHY_SPEED100 0x00000020 | ||
351 | #define IPG_AC_PHY_SPEED1000 0x00000040 | ||
352 | #define IPG_AC_PHY_MEDIA 0x00000080 | ||
353 | #define IPG_AC_FORCED_CFG 0x00000700 | ||
354 | #define IPG_AC_D3RESETDISABLE 0x00000800 | ||
355 | #define IPG_AC_SPEED_UP_MODE 0x00002000 | ||
356 | #define IPG_AC_LED_MODE 0x00004000 | ||
357 | #define IPG_AC_RST_OUT_POLARITY 0x00008000 | ||
358 | #define IPG_AC_GLOBAL_RESET 0x00010000 | ||
359 | #define IPG_AC_RX_RESET 0x00020000 | ||
360 | #define IPG_AC_TX_RESET 0x00040000 | ||
361 | #define IPG_AC_DMA 0x00080000 | ||
362 | #define IPG_AC_FIFO 0x00100000 | ||
363 | #define IPG_AC_NETWORK 0x00200000 | ||
364 | #define IPG_AC_HOST 0x00400000 | ||
365 | #define IPG_AC_AUTO_INIT 0x00800000 | ||
366 | #define IPG_AC_RST_OUT 0x01000000 | ||
367 | #define IPG_AC_INT_REQUEST 0x02000000 | ||
368 | #define IPG_AC_RESET_BUSY 0x04000000 | ||
369 | #define IPG_AC_LED_SPEED 0x08000000 | ||
370 | #define IPG_AC_LED_MODE_BIT_1 0x20000000 | ||
371 | |||
372 | /* EepromCtrl */ | ||
373 | #define IPG_EC_RSVD_MASK 0x83FF | ||
374 | #define IPG_EC_EEPROM_ADDR 0x00FF | ||
375 | #define IPG_EC_EEPROM_OPCODE 0x0300 | ||
376 | #define IPG_EC_EEPROM_SUBCOMMAD 0x0000 | ||
377 | #define IPG_EC_EEPROM_WRITEOPCODE 0x0100 | ||
378 | #define IPG_EC_EEPROM_READOPCODE 0x0200 | ||
379 | #define IPG_EC_EEPROM_ERASEOPCODE 0x0300 | ||
380 | #define IPG_EC_EEPROM_BUSY 0x8000 | ||
381 | |||
382 | /* FIFOCtrl */ | ||
383 | #define IPG_FC_RSVD_MASK 0xC001 | ||
384 | #define IPG_FC_RAM_TEST_MODE 0x0001 | ||
385 | #define IPG_FC_TRANSMITTING 0x4000 | ||
386 | #define IPG_FC_RECEIVING 0x8000 | ||
387 | |||
388 | /* TxStatus */ | ||
389 | #define IPG_TS_RSVD_MASK 0xFFFF00DD | ||
390 | #define IPG_TS_TX_ERROR 0x00000001 | ||
391 | #define IPG_TS_LATE_COLLISION 0x00000004 | ||
392 | #define IPG_TS_TX_MAX_COLL 0x00000008 | ||
393 | #define IPG_TS_TX_UNDERRUN 0x00000010 | ||
394 | #define IPG_TS_TX_IND_REQD 0x00000040 | ||
395 | #define IPG_TS_TX_COMPLETE 0x00000080 | ||
396 | #define IPG_TS_TX_FRAMEID 0xFFFF0000 | ||
397 | |||
398 | /* WakeEvent */ | ||
399 | #define IPG_WE_WAKE_PKT_ENABLE 0x01 | ||
400 | #define IPG_WE_MAGIC_PKT_ENABLE 0x02 | ||
401 | #define IPG_WE_LINK_EVT_ENABLE 0x04 | ||
402 | #define IPG_WE_WAKE_POLARITY 0x08 | ||
403 | #define IPG_WE_WAKE_PKT_EVT 0x10 | ||
404 | #define IPG_WE_MAGIC_PKT_EVT 0x20 | ||
405 | #define IPG_WE_LINK_EVT 0x40 | ||
406 | #define IPG_WE_WOL_ENABLE 0x80 | ||
407 | |||
408 | /* IntEnable */ | ||
409 | #define IPG_IE_RSVD_MASK 0x1FFE | ||
410 | #define IPG_IE_HOST_ERROR 0x0002 | ||
411 | #define IPG_IE_TX_COMPLETE 0x0004 | ||
412 | #define IPG_IE_MAC_CTRL_FRAME 0x0008 | ||
413 | #define IPG_IE_RX_COMPLETE 0x0010 | ||
414 | #define IPG_IE_RX_EARLY 0x0020 | ||
415 | #define IPG_IE_INT_REQUESTED 0x0040 | ||
416 | #define IPG_IE_UPDATE_STATS 0x0080 | ||
417 | #define IPG_IE_LINK_EVENT 0x0100 | ||
418 | #define IPG_IE_TX_DMA_COMPLETE 0x0200 | ||
419 | #define IPG_IE_RX_DMA_COMPLETE 0x0400 | ||
420 | #define IPG_IE_RFD_LIST_END 0x0800 | ||
421 | #define IPG_IE_RX_DMA_PRIORITY 0x1000 | ||
422 | |||
423 | /* IntStatus */ | ||
424 | #define IPG_IS_RSVD_MASK 0x1FFF | ||
425 | #define IPG_IS_INTERRUPT_STATUS 0x0001 | ||
426 | #define IPG_IS_HOST_ERROR 0x0002 | ||
427 | #define IPG_IS_TX_COMPLETE 0x0004 | ||
428 | #define IPG_IS_MAC_CTRL_FRAME 0x0008 | ||
429 | #define IPG_IS_RX_COMPLETE 0x0010 | ||
430 | #define IPG_IS_RX_EARLY 0x0020 | ||
431 | #define IPG_IS_INT_REQUESTED 0x0040 | ||
432 | #define IPG_IS_UPDATE_STATS 0x0080 | ||
433 | #define IPG_IS_LINK_EVENT 0x0100 | ||
434 | #define IPG_IS_TX_DMA_COMPLETE 0x0200 | ||
435 | #define IPG_IS_RX_DMA_COMPLETE 0x0400 | ||
436 | #define IPG_IS_RFD_LIST_END 0x0800 | ||
437 | #define IPG_IS_RX_DMA_PRIORITY 0x1000 | ||
438 | |||
439 | /* MACCtrl */ | ||
440 | #define IPG_MC_RSVD_MASK 0x7FE33FA3 | ||
441 | #define IPG_MC_IFS_SELECT 0x00000003 | ||
442 | #define IPG_MC_IFS_4352BIT 0x00000003 | ||
443 | #define IPG_MC_IFS_1792BIT 0x00000002 | ||
444 | #define IPG_MC_IFS_1024BIT 0x00000001 | ||
445 | #define IPG_MC_IFS_96BIT 0x00000000 | ||
446 | #define IPG_MC_DUPLEX_SELECT 0x00000020 | ||
447 | #define IPG_MC_DUPLEX_SELECT_FD 0x00000020 | ||
448 | #define IPG_MC_DUPLEX_SELECT_HD 0x00000000 | ||
449 | #define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 | ||
450 | #define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 | ||
451 | #define IPG_MC_RCV_FCS 0x00000200 | ||
452 | #define IPG_MC_FIFO_LOOPBACK 0x00000400 | ||
453 | #define IPG_MC_MAC_LOOPBACK 0x00000800 | ||
454 | #define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 | ||
455 | #define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 | ||
456 | #define IPG_MC_COLLISION_DETECT 0x00010000 | ||
457 | #define IPG_MC_CARRIER_SENSE 0x00020000 | ||
458 | #define IPG_MC_STATISTICS_ENABLE 0x00200000 | ||
459 | #define IPG_MC_STATISTICS_DISABLE 0x00400000 | ||
460 | #define IPG_MC_STATISTICS_ENABLED 0x00800000 | ||
461 | #define IPG_MC_TX_ENABLE 0x01000000 | ||
462 | #define IPG_MC_TX_DISABLE 0x02000000 | ||
463 | #define IPG_MC_TX_ENABLED 0x04000000 | ||
464 | #define IPG_MC_RX_ENABLE 0x08000000 | ||
465 | #define IPG_MC_RX_DISABLE 0x10000000 | ||
466 | #define IPG_MC_RX_ENABLED 0x20000000 | ||
467 | #define IPG_MC_PAUSED 0x40000000 | ||
468 | |||
469 | /* | ||
470 | * Tune | ||
471 | */ | ||
472 | |||
473 | /* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ | ||
474 | #define IPG_APPEND_FCS_ON_TX 1 | ||
475 | |||
476 | /* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */ | ||
477 | #define IPG_STRIP_FCS_ON_RX 1 | ||
478 | |||
479 | /* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with | ||
480 | * Ethernet errors. | ||
481 | */ | ||
482 | #define IPG_DROP_ON_RX_ETH_ERRORS 1 | ||
483 | |||
484 | /* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually | ||
485 | * (via TFC). | ||
486 | */ | ||
487 | #define IPG_INSERT_MANUAL_VLAN_TAG 0 | ||
488 | |||
489 | /* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ | ||
490 | #define IPG_ADD_IPCHECKSUM_ON_TX 0 | ||
491 | |||
492 | /* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. | ||
493 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
494 | */ | ||
495 | #define IPG_ADD_TCPCHECKSUM_ON_TX 0 | ||
496 | |||
497 | /* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. | ||
498 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
499 | */ | ||
500 | #define IPG_ADD_UDPCHECKSUM_ON_TX 0 | ||
501 | |||
502 | /* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx | ||
503 | * constants as desired. | ||
504 | */ | ||
505 | #define IPG_MANUAL_VLAN_VID 0xABC | ||
506 | #define IPG_MANUAL_VLAN_CFI 0x1 | ||
507 | #define IPG_MANUAL_VLAN_USERPRIORITY 0x5 | ||
508 | |||
509 | #define IPG_IO_REG_RANGE 0xFF | ||
510 | #define IPG_MEM_REG_RANGE 0x154 | ||
511 | #define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" | ||
512 | #define IPG_NIC_PHY_ADDRESS 0x01 | ||
513 | #define IPG_DMALIST_ALIGN_PAD 0x07 | ||
514 | #define IPG_MULTICAST_HASHTABLE_SIZE 0x40 | ||
515 | |||
516 | /* Number of milliseconds to wait after issuing a software reset. | ||
517 | * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation. | ||
518 | */ | ||
519 | #define IPG_AC_RESETWAIT 0x05 | ||
520 | |||
521 | /* Number of IPG_AC_RESETWAIT timeperiods before declaring timeout. */ | ||
522 | #define IPG_AC_RESET_TIMEOUT 0x0A | ||
523 | |||
524 | /* Minimum number of nanoseconds used to toggle MDC clock during | ||
525 | * MII/GMII register access. | ||
526 | */ | ||
527 | #define IPG_PC_PHYCTRLWAIT_NS 200 | ||
528 | |||
529 | #define IPG_TFDLIST_LENGTH 0x100 | ||
530 | |||
531 | /* Number of frames between TxDMAComplete interrupt. | ||
532 | * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH | ||
533 | */ | ||
534 | #define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 | ||
535 | |||
536 | #define IPG_RFDLIST_LENGTH 0x100 | ||
537 | |||
538 | /* Maximum number of RFDs to process per interrupt. | ||
539 | * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH | ||
540 | */ | ||
541 | #define IPG_MAXRFDPROCESS_COUNT 0x80 | ||
542 | |||
543 | /* Minimum margin between last freed RFD, and current RFD. | ||
544 | * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH | ||
545 | */ | ||
546 | #define IPG_MINUSEDRFDSTOFREE 0x80 | ||
547 | |||
548 | /* Specifies the jumbo frame maximum size, in units of 0x600 bytes | ||
549 | * (the rx_buffer size that one RFD can carry). | ||
550 | */ | ||
551 | #define MAX_JUMBOSIZE 0x8 /* max is 12K */ | ||
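/* Worked example: with MAX_JUMBOSIZE = 0x8 and 0x600 (1536) bytes per RFD
 * buffer, the ceiling works out to 0x8 * 0x600 = 0x3000 = 12288 bytes,
 * i.e. the 12K noted above.
 */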
552 | |||
553 | /* Key register values loaded at driver start up. */ | ||
554 | |||
555 | /* TXDMAPollPeriod is specified in 320ns increments. | ||
556 | * | ||
557 | * Value Time | ||
558 | * --------------------- | ||
559 | * 0x00-0x01 320ns | ||
560 | * 0x03 ~1us | ||
561 | * 0x1F ~10us | ||
562 | * 0xFF ~82us | ||
563 | */ | ||
564 | #define IPG_TXDMAPOLLPERIOD_VALUE 0x26 | ||
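/* Worked example: per the 320 ns granularity above, the chosen value
 * 0x26 = 38 increments corresponds to a TX DMA poll period of roughly
 * 38 * 320 ns = 12.2 us.
 */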
565 | |||
566 | /* TxDMAUrgentThresh specifies the minimum amount of | ||
567 | * data in the transmit FIFO before asserting an | ||
568 | * urgent transmit DMA request. | ||
569 | * | ||
570 | * Value Min TxFIFO occupied space before urgent TX request | ||
571 | * --------------------------------------------------------------- | ||
572 | * 0x00-0x04 128 bytes (1024 bits) | ||
573 | * 0x27 1248 bytes (~10000 bits) | ||
574 | * 0x30 1536 bytes (12288 bits) | ||
575 | * 0xFF 8192 bytes (65536 bits) | ||
576 | */ | ||
577 | #define IPG_TXDMAURGENTTHRESH_VALUE 0x04 | ||
578 | |||
579 | /* TxDMABurstThresh specifies the minimum amount of | ||
580 | * free space in the transmit FIFO before asserting an | ||
581 | * transmit DMA request. | ||
582 | * | ||
583 | * Value Min TxFIFO free space before TX request | ||
584 | * ---------------------------------------------------- | ||
585 | * 0x00-0x08 256 bytes | ||
586 | * 0x30 1536 bytes | ||
587 | * 0xFF 8192 bytes | ||
588 | */ | ||
589 | #define IPG_TXDMABURSTTHRESH_VALUE 0x30 | ||
590 | |||
591 | /* RXDMAPollPeriod is specified in 320ns increments. | ||
592 | * | ||
593 | * Value Time | ||
594 | * --------------------- | ||
595 | * 0x00-0x01 320ns | ||
596 | * 0x03 ~1us | ||
597 | * 0x1F ~10us | ||
598 | * 0xFF ~82us | ||
599 | */ | ||
600 | #define IPG_RXDMAPOLLPERIOD_VALUE 0x01 | ||
601 | |||
602 | /* RxDMAUrgentThresh specifies the minimum amount of | ||
603 | * free space within the receive FIFO before asserting | ||
604 | * an urgent receive DMA request. | ||
605 | * | ||
606 | * Value Min RxFIFO free space before urgent RX request | ||
607 | * --------------------------------------------------------------- | ||
608 | * 0x00-0x04 128 bytes (1024 bits) | ||
609 | * 0x27 1248 bytes (~10000 bits) | ||
610 | * 0x30 1536 bytes (12288 bits) | ||
611 | * 0xFF 8192 bytes (65536 bits) | ||
612 | */ | ||
613 | #define IPG_RXDMAURGENTTHRESH_VALUE 0x30 | ||
614 | |||
615 | /* RxDMABurstThresh specifies the minimum amount of | ||
616 | * occupied space within the receive FIFO before asserting | ||
617 | * a receive DMA request. | ||
618 | * | ||
619 | * Value Min RxFIFO occupied space before RX request | ||
620 | * ---------------------------------------------------- | ||
621 | * 0x00-0x08 256 bytes | ||
622 | * 0x30 1536 bytes | ||
623 | * 0xFF 8192 bytes | ||
624 | */ | ||
625 | #define IPG_RXDMABURSTTHRESH_VALUE 0x30 | ||
626 | |||
627 | /* FlowOnThresh specifies the maximum amount of occupied | ||
628 | * space in the receive FIFO before a PAUSE frame with | ||
629 | * maximum pause time is transmitted. | ||
630 | * | ||
631 | * Value Max RxFIFO occupied space before PAUSE | ||
632 | * --------------------------------------------------- | ||
633 | * 0x0000 0 bytes | ||
634 | * 0x0740 29,696 bytes | ||
635 | * 0x07FF 32,752 bytes | ||
636 | */ | ||
637 | #define IPG_FLOWONTHRESH_VALUE 0x0740 | ||
638 | |||
639 | /* FlowOffThresh specifies the minimum amount of occupied | ||
640 | * space in the receive FIFO before a PAUSE frame with | ||
641 | * zero pause time is transmitted. | ||
642 | * | ||
643 | * Value Min RxFIFO occupied space before PAUSE | ||
644 | * --------------------------------------------------- | ||
645 | * 0x0000 0 bytes | ||
646 | * 0x00BF 3056 bytes | ||
647 | * 0x07FF 32,752 bytes | ||
648 | */ | ||
649 | #define IPG_FLOWOFFTHRESH_VALUE 0x00BF | ||
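Both flow-control thresholds appear to be expressed in 16-byte units, which is how the byte counts in the tables above are obtained (0x0740 * 16 = 29,696; 0x00BF * 16 = 3,056; 0x07FF * 16 = 32,752). A small helper, shown only to make the inferred unit explicit:

/* Sketch: flow-control threshold registers count 16-byte units. */
static inline unsigned int ipg_flow_thresh_bytes(unsigned int value)
{
        return value * 16;
}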
650 | |||
651 | /* | ||
652 | * Miscellaneous macros. | ||
653 | */ | ||
654 | |||
655 | /* Macros for printing debug statements.  IPG_DEBUG_MSG only compile-checks its arguments (note the "if (0)"); IPG_DDEBUG_MSG prints when IPG_DEBUG is defined. */ | ||
656 | #ifdef IPG_DEBUG | ||
657 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
658 | do { \ | ||
659 | if (0) \ | ||
660 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
661 | } while (0) | ||
662 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
663 | printk(KERN_DEBUG "IPG: " fmt, ##args) | ||
664 | # define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) | ||
665 | # define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) | ||
666 | #else | ||
667 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
668 | do { \ | ||
669 | if (0) \ | ||
670 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
671 | } while (0) | ||
672 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
673 | do { \ | ||
674 | if (0) \ | ||
675 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
676 | } while (0) | ||
677 | # define IPG_DUMPRFDLIST(args) | ||
678 | # define IPG_DUMPTFDLIST(args) | ||
679 | #endif | ||
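Usage is the same in both configurations; the call site below is a hypothetical example, not taken from ipg.c. With IPG_DEBUG defined, only IPG_DDEBUG_MSG produces output, while IPG_DEBUG_MSG merely type-checks its arguments.

/* Hypothetical call site illustrating the macros above. */
static inline void ipg_debug_example(const char *name, unsigned int txc)
{
        IPG_DEBUG_MSG("open(%s)\n", name);        /* compile-checked, never prints */
        IPG_DDEBUG_MSG("tx_current = %u\n", txc); /* prints only with IPG_DEBUG */
}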
680 | |||
681 | /* | ||
682 | * End miscellaneous macros. | ||
683 | */ | ||
684 | |||
685 | /* Transmit Frame Descriptor. The IPG supports 15 fragments, | ||
686 | * but this driver uses only a single fragment. Note, each | ||
687 | * TFD field is 64 bits wide. | ||
688 | */ | ||
689 | struct ipg_tx { | ||
690 | __le64 next_desc; | ||
691 | __le64 tfc; | ||
692 | __le64 frag_info; | ||
693 | }; | ||
694 | |||
695 | /* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. | ||
696 | */ | ||
697 | struct ipg_rx { | ||
698 | __le64 next_desc; | ||
699 | __le64 rfs; | ||
700 | __le64 frag_info; | ||
701 | }; | ||
702 | |||
703 | struct ipg_jumbo { | ||
704 | int found_start; | ||
705 | int current_size; | ||
706 | struct sk_buff *skb; | ||
707 | }; | ||
708 | |||
709 | /* Structure of IPG NIC specific data. */ | ||
710 | struct ipg_nic_private { | ||
711 | void __iomem *ioaddr; | ||
712 | struct ipg_tx *txd; | ||
713 | struct ipg_rx *rxd; | ||
714 | dma_addr_t txd_map; | ||
715 | dma_addr_t rxd_map; | ||
716 | struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH]; | ||
717 | struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH]; | ||
718 | unsigned int tx_current; | ||
719 | unsigned int tx_dirty; | ||
720 | unsigned int rx_current; | ||
721 | unsigned int rx_dirty; | ||
722 | bool is_jumbo; | ||
723 | struct ipg_jumbo jumbo; | ||
724 | unsigned long rxfrag_size; | ||
725 | unsigned long rxsupport_size; | ||
726 | unsigned long max_rxframe_size; | ||
727 | unsigned int rx_buf_sz; | ||
728 | struct pci_dev *pdev; | ||
729 | struct net_device *dev; | ||
730 | struct net_device_stats stats; | ||
731 | spinlock_t lock; | ||
732 | int tenmbpsmode; | ||
733 | |||
734 | u16 led_mode; | ||
735 | u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ | ||
736 | |||
737 | struct mutex mii_mutex; | ||
738 | struct mii_if_info mii_if; | ||
739 | int reset_current_tfd; | ||
740 | #ifdef IPG_DEBUG | ||
741 | int RFDlistendCount; | ||
742 | int RFDListCheckedCount; | ||
743 | int EmptyRFDListCount; | ||
744 | #endif | ||
745 | struct delayed_work task; | ||
746 | }; | ||
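The descriptor structures and ring-length constants above combine in the usual way: each ring is one coherent DMA allocation whose entries are chained through next_desc. The sketch below shows that pattern for the receive ring; the function name is hypothetical and the real allocation code is in ipg.c.

/* Sketch only: allocate and link the RFD ring (error/teardown paths
 * omitted).  Assumes <linux/dma-mapping.h> is available. */
static int ipg_alloc_rx_ring_sketch(struct ipg_nic_private *sp)
{
        unsigned int i;

        sp->rxd = dma_alloc_coherent(&sp->pdev->dev,
                                     IPG_RFDLIST_LENGTH * sizeof(struct ipg_rx),
                                     &sp->rxd_map, GFP_KERNEL);
        if (!sp->rxd)
                return -ENOMEM;

        for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
                unsigned int next = (i + 1) % IPG_RFDLIST_LENGTH;

                sp->rxd[i].next_desc = cpu_to_le64(sp->rxd_map +
                                                   next * sizeof(struct ipg_rx));
                sp->rxd[i].rfs = 0;
                sp->rxd[i].frag_info = 0;
        }
        return 0;
}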
747 | |||
748 | #endif /* __LINUX_IPG_H */ | ||