Diffstat (limited to 'drivers/net/ethernet/icplus')
-rw-r--r--  drivers/net/ethernet/icplus/Kconfig   |   13
-rw-r--r--  drivers/net/ethernet/icplus/Makefile  |    5
-rw-r--r--  drivers/net/ethernet/icplus/ipg.c     | 2324
-rw-r--r--  drivers/net/ethernet/icplus/ipg.h     |  749
4 files changed, 3091 insertions, 0 deletions
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
new file mode 100644
index 000000000000..e88822276269
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Kconfig
@@ -0,0 +1,13 @@
1 | # | ||
2 | # IC Plus device configuration | ||
3 | # | ||
4 | |||
5 | config IP1000 | ||
6 | tristate "IP1000 Gigabit Ethernet support" | ||
7 | depends on PCI && EXPERIMENTAL | ||
8 | select MII | ||
9 | ---help--- | ||
10 | This driver supports IP1000 gigabit Ethernet cards. | ||
11 | |||
12 | To compile this driver as a module, choose M here: the module | ||
13 | will be called ipg. This is recommended. | ||
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
new file mode 100644
index 000000000000..5bc87c1f36aa
--- /dev/null
+++ b/drivers/net/ethernet/icplus/Makefile
@@ -0,0 +1,5 @@
1 | # | ||
2 | # Makefile for the IC Plus device drivers | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_IP1000) += ipg.o | ||
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
new file mode 100644
index 000000000000..b470281158e9
--- /dev/null
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -0,0 +1,2324 @@
1 | /* | ||
2 | * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter | ||
3 | * | ||
4 | * Copyright (C) 2003, 2007 IC Plus Corp | ||
5 | * | ||
6 | * Original Author: | ||
7 | * | ||
8 | * Craig Rich | ||
9 | * Sundance Technology, Inc. | ||
10 | * www.sundanceti.com | ||
11 | * craig_rich@sundanceti.com | ||
12 | * | ||
13 | * Current Maintainer: | ||
14 | * | ||
15 | * Sorbica Shieh. | ||
16 | * http://www.icplus.com.tw | ||
17 | * sorbica@icplus.com.tw | ||
18 | * | ||
19 | * Jesse Huang | ||
20 | * http://www.icplus.com.tw | ||
21 | * jesse@icplus.com.tw | ||
22 | */ | ||
23 | |||
24 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
25 | |||
26 | #include <linux/crc32.h> | ||
27 | #include <linux/ethtool.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/gfp.h> | ||
30 | #include <linux/mii.h> | ||
31 | #include <linux/mutex.h> | ||
32 | |||
33 | #include <asm/div64.h> | ||
34 | |||
35 | #define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) | ||
36 | #define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) | ||
37 | #define IPG_RESET_MASK \ | ||
38 | (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ | ||
39 | IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ | ||
40 | IPG_AC_AUTO_INIT) | ||
41 | |||
42 | #define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) | ||
43 | #define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) | ||
44 | #define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) | ||
45 | |||
46 | #define ipg_r32(reg) ioread32(ioaddr + (reg)) | ||
47 | #define ipg_r16(reg) ioread16(ioaddr + (reg)) | ||
48 | #define ipg_r8(reg) ioread8(ioaddr + (reg)) | ||
49 | |||
50 | enum { | ||
51 | netdev_io_size = 128 | ||
52 | }; | ||
53 | |||
54 | #include "ipg.h" | ||
55 | #define DRV_NAME "ipg" | ||
56 | |||
57 | MODULE_AUTHOR("IC Plus Corp. 2003"); | ||
58 | MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); | ||
59 | MODULE_LICENSE("GPL"); | ||
60 | |||
61 | /* | ||
62 | * Defaults | ||
63 | */ | ||
64 | #define IPG_MAX_RXFRAME_SIZE 0x0600 | ||
65 | #define IPG_RXFRAG_SIZE 0x0600 | ||
66 | #define IPG_RXSUPPORT_SIZE 0x0600 | ||
67 | #define IPG_IS_JUMBO false | ||
68 | |||
69 | /* | ||
70 | * Variable record -- indexed by leading revision/length | ||
71 | * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN | ||
72 | */ | ||
73 | static const unsigned short DefaultPhyParam[] = { | ||
74 | /* 11/12/03 IP1000A v1-3 rev=0x40 */ | ||
75 | /*-------------------------------------------------------------------------- | ||
76 | (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, | ||
77 | 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, | ||
78 | 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, | ||
79 | --------------------------------------------------------------------------*/ | ||
80 | /* 12/17/03 IP1000A v1-4 rev=0x40 */ | ||
81 | (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
82 | 0x0000, | ||
83 | 30, 0x005e, 9, 0x0700, | ||
84 | /* 01/09/04 IP1000A v1-5 rev=0x41 */ | ||
85 | (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, | ||
86 | 0x0000, | ||
87 | 30, 0x005e, 9, 0x0700, | ||
88 | 0x0000 | ||
89 | }; | ||
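For reference, the record format described in the comment above decodes as sketched below. This is illustrative only, not driver code; the function name is hypothetical and it relies on the surrounding file's declarations:

	/* Hypothetical helper: walk one DefaultPhyParam record.
	 * Header word = (PHY revision << 8) | (N * 4), followed by
	 * N (register, value) pairs, e.g. (0x4100 | (07 * 4)) means
	 * revision 0x41 with 7 register writes.
	 */
	static void decode_phy_param_record(const unsigned short *rec)
	{
		unsigned short hdr = rec[0];
		unsigned int revision = hdr >> 8;	/* 0x40, 0x41, ... */
		unsigned int npairs = (hdr & 0xff) / 4;	/* 7 in the records above */
		unsigned int i;

		for (i = 0; i < npairs; i++) {
			unsigned short reg = rec[1 + 2 * i];
			unsigned short val = rec[2 + 2 * i];
			/* A loader matched on the PHY revision would issue
			 * mdio_write(dev, phyaddr, reg, val) for each pair.
			 */
			(void)revision; (void)reg; (void)val;
		}
	}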
90 | |||
91 | static const char * const ipg_brand_name[] = { | ||
92 | "IC PLUS IP1000 1000/100/10 based NIC", | ||
93 | "Sundance Technology ST2021 based NIC", | ||
94 | "Tamarack Microelectronics TC9020/9021 based NIC", | ||
95 | "D-Link NIC IP1000A" | ||
96 | }; | ||
97 | |||
98 | static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { | ||
99 | { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, | ||
100 | { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, | ||
101 | { PCI_VDEVICE(DLINK, 0x9021), 2 }, | ||
102 | { PCI_VDEVICE(DLINK, 0x4020), 3 }, | ||
103 | { 0, } | ||
104 | }; | ||
105 | |||
106 | MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); | ||
107 | |||
108 | static inline void __iomem *ipg_ioaddr(struct net_device *dev) | ||
109 | { | ||
110 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
111 | return sp->ioaddr; | ||
112 | } | ||
113 | |||
114 | #ifdef IPG_DEBUG | ||
115 | static void ipg_dump_rfdlist(struct net_device *dev) | ||
116 | { | ||
117 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
118 | void __iomem *ioaddr = sp->ioaddr; | ||
119 | unsigned int i; | ||
120 | u32 offset; | ||
121 | |||
122 | IPG_DEBUG_MSG("_dump_rfdlist\n"); | ||
123 | |||
124 | netdev_info(dev, "rx_current = %02x\n", sp->rx_current); | ||
125 | netdev_info(dev, "rx_dirty = %02x\n", sp->rx_dirty); | ||
126 | netdev_info(dev, "RFDList start address = %016lx\n", | ||
127 | (unsigned long)sp->rxd_map); | ||
128 | netdev_info(dev, "RFDListPtr register = %08x%08x\n", | ||
129 | ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); | ||
130 | |||
131 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
132 | offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; | ||
133 | netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n", | ||
134 | i, offset, (unsigned long)sp->rxd[i].next_desc); | ||
135 | offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; | ||
136 | netdev_info(dev, "%02x %04x RFS = %016lx\n", | ||
137 | i, offset, (unsigned long)sp->rxd[i].rfs); | ||
138 | offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; | ||
139 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
140 | i, offset, (unsigned long)sp->rxd[i].frag_info); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static void ipg_dump_tfdlist(struct net_device *dev) | ||
145 | { | ||
146 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
147 | void __iomem *ioaddr = sp->ioaddr; | ||
148 | unsigned int i; | ||
149 | u32 offset; | ||
150 | |||
151 | IPG_DEBUG_MSG("_dump_tfdlist\n"); | ||
152 | |||
153 | netdev_info(dev, "tx_current = %02x\n", sp->tx_current); | ||
154 | netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty); | ||
155 | netdev_info(dev, "TFDList start address = %016lx\n", | ||
156 | (unsigned long) sp->txd_map); | ||
157 | netdev_info(dev, "TFDListPtr register = %08x%08x\n", | ||
158 | ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); | ||
159 | |||
160 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
161 | offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; | ||
162 | netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n", | ||
163 | i, offset, (unsigned long)sp->txd[i].next_desc); | ||
164 | |||
165 | offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; | ||
166 | netdev_info(dev, "%02x %04x TFC = %016lx\n", | ||
167 | i, offset, (unsigned long) sp->txd[i].tfc); | ||
168 | offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; | ||
169 | netdev_info(dev, "%02x %04x frag_info = %016lx\n", | ||
170 | i, offset, (unsigned long) sp->txd[i].frag_info); | ||
171 | } | ||
172 | } | ||
173 | #endif | ||
174 | |||
175 | static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) | ||
176 | { | ||
177 | ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); | ||
178 | ndelay(IPG_PC_PHYCTRLWAIT_NS); | ||
179 | } | ||
180 | |||
181 | static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) | ||
182 | { | ||
183 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); | ||
184 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); | ||
185 | } | ||
186 | |||
187 | static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
188 | { | ||
189 | phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; | ||
190 | |||
191 | ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); | ||
192 | } | ||
193 | |||
194 | static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
195 | { | ||
196 | ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | | ||
197 | phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); | ||
198 | } | ||
199 | |||
200 | static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) | ||
201 | { | ||
202 | u16 bit_data; | ||
203 | |||
204 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); | ||
205 | |||
206 | bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; | ||
207 | |||
208 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); | ||
209 | |||
210 | return bit_data; | ||
211 | } | ||
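The three helpers above bit-bang the MDIO management interface through the PhyCtrl register. The timing they implement, shown schematically (comment only, not driver code):

	/*
	 * One bit-banged MDIO bit, as driven by the helpers above:
	 *
	 *   MgmtClk : ____/~~~~   ipg_drive_phy_ctl_low_high() holds MgmtData
	 *                         steady while toggling the clock low then high,
	 *                         so the PHY samples the bit on the rising edge.
	 *   Read path: read_phy_bit() samples MgmtData from PHY_CTRL while the
	 *              clock is low, then raises the clock for the next cycle.
	 */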
212 | |||
213 | /* | ||
214 | * Read a register from the Physical Layer device located | ||
215 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
216 | */ | ||
217 | static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) | ||
218 | { | ||
219 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
220 | /* | ||
221 | * The GMII management frame structure for a read is as follows: | ||
222 | * | ||
223 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
224 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
225 | * | ||
226 | * <32 1s> = 32 consecutive logic 1 values | ||
227 | * A = bit of Physical Layer device address (MSB first) | ||
228 | * R = bit of register address (MSB first) | ||
229 | * z = High impedance state | ||
230 | * D = bit of read data (MSB first) | ||
231 | * | ||
232 | * Transmission order is 'Preamble' field first, bits transmitted | ||
233 | * left to right (first to last). | ||
234 | */ | ||
235 | struct { | ||
236 | u32 field; | ||
237 | unsigned int len; | ||
238 | } p[] = { | ||
239 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
240 | { GMII_ST, 2 }, /* ST */ | ||
241 | { GMII_READ, 2 }, /* OP */ | ||
242 | { phy_id, 5 }, /* PHYAD */ | ||
243 | { phy_reg, 5 }, /* REGAD */ | ||
244 | { 0x0000, 2 }, /* TA */ | ||
245 | { 0x0000, 16 }, /* DATA */ | ||
246 | { 0x0000, 1 } /* IDLE */ | ||
247 | }; | ||
248 | unsigned int i, j; | ||
249 | u8 polarity, data; | ||
250 | |||
251 | polarity = ipg_r8(PHY_CTRL); | ||
252 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
253 | |||
254 | /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ | ||
255 | for (j = 0; j < 5; j++) { | ||
256 | for (i = 0; i < p[j].len; i++) { | ||
257 | /* For each variable length field, the MSB must be | ||
258 | * transmitted first. Rotate through the field bits, | ||
259 | * starting with the MSB, and move each bit into the | ||
260 | * 1st (2^1) bit position (this is the bit position | ||
261 | * corresponding to the MgmtData bit of the PhyCtrl | ||
262 | * register for the IPG). | ||
263 | * | ||
264 | * Example: ST = 01; | ||
265 | * | ||
266 | * First write a '0' to bit 1 of the PhyCtrl | ||
267 | * register, then write a '1' to bit 1 of the | ||
268 | * PhyCtrl register. | ||
269 | * | ||
270 | * To do this, right shift the MSB of ST by the value: | ||
271 | * [field length - 1 - #ST bits already written] | ||
272 | * then left shift this result by 1. | ||
273 | */ | ||
274 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
275 | data &= IPG_PC_MGMTDATA; | ||
276 | data |= polarity | IPG_PC_MGMTDIR; | ||
277 | |||
278 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
279 | } | ||
280 | } | ||
281 | |||
282 | send_three_state(ioaddr, polarity); | ||
283 | |||
284 | read_phy_bit(ioaddr, polarity); | ||
285 | |||
286 | /* | ||
287 | * For a read cycle, the bits for the next two fields (TA and | ||
288 | * DATA) are driven by the PHY (the IPG reads these bits). | ||
289 | */ | ||
290 | for (i = 0; i < p[6].len; i++) { | ||
291 | p[6].field |= | ||
292 | (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); | ||
293 | } | ||
294 | |||
295 | send_three_state(ioaddr, polarity); | ||
296 | send_three_state(ioaddr, polarity); | ||
297 | send_three_state(ioaddr, polarity); | ||
298 | send_end(ioaddr, polarity); | ||
299 | |||
300 | /* Return the value of the DATA field. */ | ||
301 | return p[6].field; | ||
302 | } | ||
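As a usage sketch, the bit-banged read above can be exercised with the standard register offsets from <linux/mii.h>; this helper is illustrative only (it assumes phyaddr was obtained via ipg_find_phyaddr(), defined later in this file):

	/* Illustrative only: dump the PHY identifier and link status. */
	static void ipg_dump_phy_id(struct net_device *dev, int phyaddr)
	{
		int id1  = mdio_read(dev, phyaddr, MII_PHYSID1);
		int id2  = mdio_read(dev, phyaddr, MII_PHYSID2);
		int bmsr = mdio_read(dev, phyaddr, MII_BMSR);

		netdev_info(dev, "PHY %d: id %04x:%04x, link %s\n", phyaddr,
			    id1, id2, (bmsr & BMSR_LSTATUS) ? "up" : "down");
	}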
303 | |||
304 | /* | ||
305 | * Write to a register of the Physical Layer device located | ||
306 | * on the IPG NIC, using the IPG PHYCTRL register. | ||
307 | */ | ||
308 | static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) | ||
309 | { | ||
310 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
311 | /* | ||
312 | * The GMII management frame structure for a write is as follows: | ||
313 | * | ||
314 | * |Preamble|st|op|phyad|regad|ta| data |idle| | ||
315 | * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | | ||
316 | * | ||
317 | * <32 1s> = 32 consecutive logic 1 values | ||
318 | * A = bit of Physical Layer device address (MSB first) | ||
319 | * R = bit of register address (MSB first) | ||
320 | * z = High impedance state | ||
321 | * D = bit of write data (MSB first) | ||
322 | * | ||
323 | * Transmission order is 'Preamble' field first, bits transmitted | ||
324 | * left to right (first to last). | ||
325 | */ | ||
326 | struct { | ||
327 | u32 field; | ||
328 | unsigned int len; | ||
329 | } p[] = { | ||
330 | { GMII_PREAMBLE, 32 }, /* Preamble */ | ||
331 | { GMII_ST, 2 }, /* ST */ | ||
332 | { GMII_WRITE, 2 }, /* OP */ | ||
333 | { phy_id, 5 }, /* PHYAD */ | ||
334 | { phy_reg, 5 }, /* REGAD */ | ||
335 | { 0x0002, 2 }, /* TA */ | ||
336 | { val & 0xffff, 16 }, /* DATA */ | ||
337 | { 0x0000, 1 } /* IDLE */ | ||
338 | }; | ||
339 | unsigned int i, j; | ||
340 | u8 polarity, data; | ||
341 | |||
342 | polarity = ipg_r8(PHY_CTRL); | ||
343 | polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); | ||
344 | |||
345 | /* Create the Preamble, ST, OP, PHYAD, REGAD, TA, and DATA fields. */ | ||
346 | for (j = 0; j < 7; j++) { | ||
347 | for (i = 0; i < p[j].len; i++) { | ||
348 | /* For each variable length field, the MSB must be | ||
349 | * transmitted first. Rotate through the field bits, | ||
350 | * starting with the MSB, and move each bit into the | ||
351 | * 1st (2^1) bit position (this is the bit position | ||
352 | * corresponding to the MgmtData bit of the PhyCtrl | ||
353 | * register for the IPG). | ||
354 | * | ||
355 | * Example: ST = 01; | ||
356 | * | ||
357 | * First write a '0' to bit 1 of the PhyCtrl | ||
358 | * register, then write a '1' to bit 1 of the | ||
359 | * PhyCtrl register. | ||
360 | * | ||
361 | * To do this, right shift the MSB of ST by the value: | ||
362 | * [field length - 1 - #ST bits already written] | ||
363 | * then left shift this result by 1. | ||
364 | */ | ||
365 | data = (p[j].field >> (p[j].len - 1 - i)) << 1; | ||
366 | data &= IPG_PC_MGMTDATA; | ||
367 | data |= polarity | IPG_PC_MGMTDIR; | ||
368 | |||
369 | ipg_drive_phy_ctl_low_high(ioaddr, data); | ||
370 | } | ||
371 | } | ||
372 | |||
373 | /* The last cycle is a tri-state, so read from the PHY. */ | ||
374 | for (j = 7; j < 8; j++) { | ||
375 | for (i = 0; i < p[j].len; i++) { | ||
376 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); | ||
377 | |||
378 | p[j].field |= ((ipg_r8(PHY_CTRL) & | ||
379 | IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); | ||
380 | |||
381 | ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); | ||
382 | } | ||
383 | } | ||
384 | } | ||
385 | |||
386 | static void ipg_set_led_mode(struct net_device *dev) | ||
387 | { | ||
388 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
389 | void __iomem *ioaddr = sp->ioaddr; | ||
390 | u32 mode; | ||
391 | |||
392 | mode = ipg_r32(ASIC_CTRL); | ||
393 | mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); | ||
394 | |||
395 | if ((sp->led_mode & 0x03) > 1) | ||
396 | mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ | ||
397 | |||
398 | if ((sp->led_mode & 0x01) == 1) | ||
399 | mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ | ||
400 | |||
401 | if ((sp->led_mode & 0x08) == 8) | ||
402 | mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ | ||
403 | |||
404 | ipg_w32(mode, ASIC_CTRL); | ||
405 | } | ||
406 | |||
407 | static void ipg_set_phy_set(struct net_device *dev) | ||
408 | { | ||
409 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
410 | void __iomem *ioaddr = sp->ioaddr; | ||
411 | int physet; | ||
412 | |||
413 | physet = ipg_r8(PHY_SET); | ||
414 | physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); | ||
415 | physet |= ((sp->led_mode & 0x70) >> 4); | ||
416 | ipg_w8(physet, PHY_SET); | ||
417 | } | ||
418 | |||
419 | static int ipg_reset(struct net_device *dev, u32 resetflags) | ||
420 | { | ||
421 | /* Assert functional resets via the IPG AsicCtrl | ||
422 | * register as specified by the 'resetflags' input | ||
423 | * parameter. | ||
424 | */ | ||
425 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
426 | unsigned int timeout_count = 0; | ||
427 | |||
428 | IPG_DEBUG_MSG("_reset\n"); | ||
429 | |||
430 | ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); | ||
431 | |||
432 | /* Delay added to account for problem with 10Mbps reset. */ | ||
433 | mdelay(IPG_AC_RESETWAIT); | ||
434 | |||
435 | while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { | ||
436 | mdelay(IPG_AC_RESETWAIT); | ||
437 | if (++timeout_count > IPG_AC_RESET_TIMEOUT) | ||
438 | return -ETIME; | ||
439 | } | ||
440 | /* Set LED Mode in Asic Control */ | ||
441 | ipg_set_led_mode(dev); | ||
442 | |||
443 | /* Set PHYSet Register Value */ | ||
444 | ipg_set_phy_set(dev); | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | /* Find the GMII PHY address. */ | ||
449 | static int ipg_find_phyaddr(struct net_device *dev) | ||
450 | { | ||
451 | unsigned int phyaddr, i; | ||
452 | |||
453 | for (i = 0; i < 32; i++) { | ||
454 | u32 status; | ||
455 | |||
456 | /* Search for the correct PHY address among 32 possible. */ | ||
457 | phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; | ||
458 | |||
459 | /* 10/22/03 Grace change verify from GMII_PHY_STATUS to | ||
460 | GMII_PHY_ID1 | ||
461 | */ | ||
462 | |||
463 | status = mdio_read(dev, phyaddr, MII_BMSR); | ||
464 | |||
465 | if ((status != 0xFFFF) && (status != 0)) | ||
466 | return phyaddr; | ||
467 | } | ||
468 | |||
469 | return 0x1f; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * Configure IPG based on result of IEEE 802.3 PHY | ||
474 | * auto-negotiation. | ||
475 | */ | ||
476 | static int ipg_config_autoneg(struct net_device *dev) | ||
477 | { | ||
478 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
479 | void __iomem *ioaddr = sp->ioaddr; | ||
480 | unsigned int txflowcontrol; | ||
481 | unsigned int rxflowcontrol; | ||
482 | unsigned int fullduplex; | ||
483 | u32 mac_ctrl_val; | ||
484 | u32 asicctrl; | ||
485 | u8 phyctrl; | ||
486 | const char *speed; | ||
487 | const char *duplex; | ||
488 | const char *tx_desc; | ||
489 | const char *rx_desc; | ||
490 | |||
491 | IPG_DEBUG_MSG("_config_autoneg\n"); | ||
492 | |||
493 | asicctrl = ipg_r32(ASIC_CTRL); | ||
494 | phyctrl = ipg_r8(PHY_CTRL); | ||
495 | mac_ctrl_val = ipg_r32(MAC_CTRL); | ||
496 | |||
497 | /* Set flags for use in resolving auto-negotiation, assuming | ||
498 | * non-1000Mbps, half duplex, no flow control. | ||
499 | */ | ||
500 | fullduplex = 0; | ||
501 | txflowcontrol = 0; | ||
502 | rxflowcontrol = 0; | ||
503 | |||
504 | /* To accommodate a problem in 10Mbps operation, | ||
505 | * set a global flag if the PHY is running in 10Mbps mode. | ||
506 | */ | ||
507 | sp->tenmbpsmode = 0; | ||
508 | |||
509 | /* Determine actual speed of operation. */ | ||
510 | switch (phyctrl & IPG_PC_LINK_SPEED) { | ||
511 | case IPG_PC_LINK_SPEED_10MBPS: | ||
512 | speed = "10Mbps"; | ||
513 | sp->tenmbpsmode = 1; | ||
514 | break; | ||
515 | case IPG_PC_LINK_SPEED_100MBPS: | ||
516 | speed = "100Mbps"; | ||
517 | break; | ||
518 | case IPG_PC_LINK_SPEED_1000MBPS: | ||
519 | speed = "1000Mbps"; | ||
520 | break; | ||
521 | default: | ||
522 | speed = "undefined!"; | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | netdev_info(dev, "Link speed = %s\n", speed); | ||
527 | if (sp->tenmbpsmode == 1) | ||
528 | netdev_info(dev, "10Mbps operational mode enabled\n"); | ||
529 | |||
530 | if (phyctrl & IPG_PC_DUPLEX_STATUS) { | ||
531 | fullduplex = 1; | ||
532 | txflowcontrol = 1; | ||
533 | rxflowcontrol = 1; | ||
534 | } | ||
535 | |||
536 | /* Configure full duplex, and flow control. */ | ||
537 | if (fullduplex == 1) { | ||
538 | |||
539 | /* Configure IPG for full duplex operation. */ | ||
540 | |||
541 | duplex = "full"; | ||
542 | |||
543 | mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; | ||
544 | |||
545 | if (txflowcontrol == 1) { | ||
546 | tx_desc = ""; | ||
547 | mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
548 | } else { | ||
549 | tx_desc = "no "; | ||
550 | mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; | ||
551 | } | ||
552 | |||
553 | if (rxflowcontrol == 1) { | ||
554 | rx_desc = ""; | ||
555 | mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
556 | } else { | ||
557 | rx_desc = "no "; | ||
558 | mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; | ||
559 | } | ||
560 | } else { | ||
561 | duplex = "half"; | ||
562 | tx_desc = "no "; | ||
563 | rx_desc = "no "; | ||
564 | mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD & | ||
565 | ~IPG_MC_TX_FLOW_CONTROL_ENABLE & | ||
566 | ~IPG_MC_RX_FLOW_CONTROL_ENABLE); | ||
567 | } | ||
568 | |||
569 | netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n", | ||
570 | duplex, tx_desc, rx_desc); | ||
571 | ipg_w32(mac_ctrl_val, MAC_CTRL); | ||
572 | |||
573 | return 0; | ||
574 | } | ||
575 | |||
576 | /* Determine and configure multicast operation and set | ||
577 | * receive mode for IPG. | ||
578 | */ | ||
579 | static void ipg_nic_set_multicast_list(struct net_device *dev) | ||
580 | { | ||
581 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
582 | struct netdev_hw_addr *ha; | ||
583 | unsigned int hashindex; | ||
584 | u32 hashtable[2]; | ||
585 | u8 receivemode; | ||
586 | |||
587 | IPG_DEBUG_MSG("_nic_set_multicast_list\n"); | ||
588 | |||
589 | receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; | ||
590 | |||
591 | if (dev->flags & IFF_PROMISC) { | ||
592 | /* NIC to be configured in promiscuous mode. */ | ||
593 | receivemode = IPG_RM_RECEIVEALLFRAMES; | ||
594 | } else if ((dev->flags & IFF_ALLMULTI) || | ||
595 | ((dev->flags & IFF_MULTICAST) && | ||
596 | (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { | ||
597 | /* NIC to be configured to receive all multicast | ||
598 | * frames. */ | ||
599 | receivemode |= IPG_RM_RECEIVEMULTICAST; | ||
600 | } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { | ||
601 | /* NIC to be configured to receive selected | ||
602 | * multicast addresses. */ | ||
603 | receivemode |= IPG_RM_RECEIVEMULTICASTHASH; | ||
604 | } | ||
605 | |||
606 | /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. | ||
607 | * The IPG applies a cyclic-redundancy-check (the same CRC | ||
608 | * used to calculate the frame data FCS) to the destination | ||
609 | * address all incoming multicast frames whose destination | ||
610 | * address has the multicast bit set. The least significant | ||
611 | * 6 bits of the CRC result are used as an addressing index | ||
612 | * into the hash table. If the value of the bit addressed by | ||
613 | * this index is a 1, the frame is passed to the host system. | ||
614 | */ | ||
615 | |||
616 | /* Clear hashtable. */ | ||
617 | hashtable[0] = 0x00000000; | ||
618 | hashtable[1] = 0x00000000; | ||
619 | |||
620 | /* Cycle through all multicast addresses to filter. */ | ||
621 | netdev_for_each_mc_addr(ha, dev) { | ||
622 | /* Calculate CRC result for each multicast address. */ | ||
623 | hashindex = crc32_le(0xffffffff, ha->addr, | ||
624 | ETH_ALEN); | ||
625 | |||
626 | /* Use only the least significant 6 bits. */ | ||
627 | hashindex = hashindex & 0x3F; | ||
628 | |||
629 | /* Within "hashtable", set bit number "hashindex" | ||
630 | * to a logic 1. | ||
631 | */ | ||
632 | set_bit(hashindex, (void *)hashtable); | ||
633 | } | ||
634 | |||
635 | /* Write the value of the hash table to the IPG's 64-bit | ||
636 | * HASHTABLE register, as two 32-bit register writes. | ||
637 | */ | ||
638 | ipg_w32(hashtable[0], HASHTABLE_0); | ||
639 | ipg_w32(hashtable[1], HASHTABLE_1); | ||
640 | |||
641 | ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); | ||
642 | |||
643 | IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); | ||
644 | } | ||
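A standalone sketch of the hash computation described in the comment inside ipg_nic_set_multicast_list(); the function name is hypothetical and it simply restates what the filter loop above already does (little-endian bit numbering assumed, as with set_bit() on the u32 pair):

	/* Illustrative only: map a multicast MAC address to its bit in the
	 * 64-bit hash table.
	 */
	static void ipg_hash_mc_addr(const u8 *mc_addr, u32 hashtable[2])
	{
		/* Same CRC as the frame FCS, keep only the low 6 bits. */
		unsigned int hashindex = crc32_le(0xffffffff, mc_addr, ETH_ALEN) & 0x3F;

		/* Bits 0-31 land in HASHTABLE_0, bits 32-63 in HASHTABLE_1. */
		hashtable[hashindex >> 5] |= 1U << (hashindex & 31);
	}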
645 | |||
646 | static int ipg_io_config(struct net_device *dev) | ||
647 | { | ||
648 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
649 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
650 | u32 origmacctrl; | ||
651 | u32 restoremacctrl; | ||
652 | |||
653 | IPG_DEBUG_MSG("_io_config\n"); | ||
654 | |||
655 | origmacctrl = ipg_r32(MAC_CTRL); | ||
656 | |||
657 | restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; | ||
658 | |||
659 | /* Based on compilation option, determine if FCS is to be | ||
660 | * stripped on receive frames by IPG. | ||
661 | */ | ||
662 | if (!IPG_STRIP_FCS_ON_RX) | ||
663 | restoremacctrl |= IPG_MC_RCV_FCS; | ||
664 | |||
665 | /* Determine if transmitter and/or receiver are | ||
666 | * enabled so we may restore MACCTRL correctly. | ||
667 | */ | ||
668 | if (origmacctrl & IPG_MC_TX_ENABLED) | ||
669 | restoremacctrl |= IPG_MC_TX_ENABLE; | ||
670 | |||
671 | if (origmacctrl & IPG_MC_RX_ENABLED) | ||
672 | restoremacctrl |= IPG_MC_RX_ENABLE; | ||
673 | |||
674 | /* Transmitter and receiver must be disabled before setting | ||
675 | * IFSSelect. | ||
676 | */ | ||
677 | ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & | ||
678 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
679 | |||
680 | /* Now that transmitter and receiver are disabled, write | ||
681 | * to IFSSelect. | ||
682 | */ | ||
683 | ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); | ||
684 | |||
685 | /* Set RECEIVEMODE register. */ | ||
686 | ipg_nic_set_multicast_list(dev); | ||
687 | |||
688 | ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); | ||
689 | |||
690 | ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); | ||
691 | ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); | ||
692 | ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); | ||
693 | ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); | ||
694 | ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); | ||
695 | ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); | ||
696 | ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | | ||
697 | IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | | ||
698 | IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | | ||
699 | IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); | ||
700 | ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); | ||
701 | ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); | ||
702 | |||
703 | /* IPG multi-frag frame bug workaround. | ||
704 | * Per silicon revision B3 errata. | ||
705 | */ | ||
706 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); | ||
707 | |||
708 | /* IPG TX poll now bug workaround. | ||
709 | * Per silicon revision B3 errata. | ||
710 | */ | ||
711 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); | ||
712 | |||
713 | /* IPG RX poll now bug workaround. | ||
714 | * Per silicon revision B3 errata. | ||
715 | */ | ||
716 | ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); | ||
717 | |||
718 | /* Now restore MACCTRL to original setting. */ | ||
719 | ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); | ||
720 | |||
721 | /* Disable unused RMON statistics. */ | ||
722 | ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); | ||
723 | |||
724 | /* Disable unused MIB statistics. */ | ||
725 | ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | | ||
726 | IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | | ||
727 | IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | | ||
728 | IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | | ||
729 | IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | | ||
730 | IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); | ||
731 | |||
732 | return 0; | ||
733 | } | ||
734 | |||
735 | /* | ||
736 | * Create a receive buffer within system memory and update | ||
737 | * NIC private structure appropriately. | ||
738 | */ | ||
739 | static int ipg_get_rxbuff(struct net_device *dev, int entry) | ||
740 | { | ||
741 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
742 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
743 | struct sk_buff *skb; | ||
744 | u64 rxfragsize; | ||
745 | |||
746 | IPG_DEBUG_MSG("_get_rxbuff\n"); | ||
747 | |||
748 | skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); | ||
749 | if (!skb) { | ||
750 | sp->rx_buff[entry] = NULL; | ||
751 | return -ENOMEM; | ||
752 | } | ||
753 | |||
754 | /* Associate the receive buffer with the IPG NIC. */ | ||
755 | skb->dev = dev; | ||
756 | |||
757 | /* Save the address of the sk_buff structure. */ | ||
758 | sp->rx_buff[entry] = skb; | ||
759 | |||
760 | rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, | ||
761 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); | ||
762 | |||
763 | /* Set the RFD fragment length. */ | ||
764 | rxfragsize = sp->rxfrag_size; | ||
765 | rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); | ||
766 | |||
767 | return 0; | ||
768 | } | ||
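A brief sketch of how the 64-bit frag_info word set up above packs the DMA bus address together with the fragment length (bits 48 and up, per the IPG_RFI_FRAGLEN mask). The helper names are hypothetical; they only restate what ipg_get_rxbuff() and the unmap paths do:

	/* Illustrative only: pack/unpack an RFD fragment descriptor. */
	static inline __le64 ipg_pack_frag(dma_addr_t addr, u64 len)
	{
		return cpu_to_le64(addr) |
		       cpu_to_le64((len << 48) & IPG_RFI_FRAGLEN);
	}

	static inline dma_addr_t ipg_frag_addr(__le64 frag_info)
	{
		return le64_to_cpu(frag_info) & ~IPG_RFI_FRAGLEN;
	}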
769 | |||
770 | static int init_rfdlist(struct net_device *dev) | ||
771 | { | ||
772 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
773 | void __iomem *ioaddr = sp->ioaddr; | ||
774 | unsigned int i; | ||
775 | |||
776 | IPG_DEBUG_MSG("_init_rfdlist\n"); | ||
777 | |||
778 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
779 | struct ipg_rx *rxfd = sp->rxd + i; | ||
780 | |||
781 | if (sp->rx_buff[i]) { | ||
782 | pci_unmap_single(sp->pdev, | ||
783 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
784 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
785 | dev_kfree_skb_irq(sp->rx_buff[i]); | ||
786 | sp->rx_buff[i] = NULL; | ||
787 | } | ||
788 | |||
789 | /* Clear out the RFS field. */ | ||
790 | rxfd->rfs = 0x0000000000000000; | ||
791 | |||
792 | if (ipg_get_rxbuff(dev, i) < 0) { | ||
793 | /* | ||
794 | * A receive buffer was not ready, break the | ||
795 | * RFD list here. | ||
796 | */ | ||
797 | IPG_DEBUG_MSG("Cannot allocate Rx buffer\n"); | ||
798 | |||
799 | /* Just in case we cannot allocate a single RFD. | ||
800 | * Should not occur. | ||
801 | */ | ||
802 | if (i == 0) { | ||
803 | netdev_err(dev, "No memory available for RFD list\n"); | ||
804 | return -ENOMEM; | ||
805 | } | ||
806 | } | ||
807 | |||
808 | rxfd->next_desc = cpu_to_le64(sp->rxd_map + | ||
809 | sizeof(struct ipg_rx)*(i + 1)); | ||
810 | } | ||
811 | sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); | ||
812 | |||
813 | sp->rx_current = 0; | ||
814 | sp->rx_dirty = 0; | ||
815 | |||
816 | /* Write the location of the RFDList to the IPG. */ | ||
817 | ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); | ||
818 | ipg_w32(0x00000000, RFD_LIST_PTR_1); | ||
819 | |||
820 | return 0; | ||
821 | } | ||
822 | |||
823 | static void init_tfdlist(struct net_device *dev) | ||
824 | { | ||
825 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
826 | void __iomem *ioaddr = sp->ioaddr; | ||
827 | unsigned int i; | ||
828 | |||
829 | IPG_DEBUG_MSG("_init_tfdlist\n"); | ||
830 | |||
831 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
832 | struct ipg_tx *txfd = sp->txd + i; | ||
833 | |||
834 | txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); | ||
835 | |||
836 | if (sp->tx_buff[i]) { | ||
837 | dev_kfree_skb_irq(sp->tx_buff[i]); | ||
838 | sp->tx_buff[i] = NULL; | ||
839 | } | ||
840 | |||
841 | txfd->next_desc = cpu_to_le64(sp->txd_map + | ||
842 | sizeof(struct ipg_tx)*(i + 1)); | ||
843 | } | ||
844 | sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); | ||
845 | |||
846 | sp->tx_current = 0; | ||
847 | sp->tx_dirty = 0; | ||
848 | |||
849 | /* Write the location of the TFDList to the IPG. */ | ||
850 | IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n", | ||
851 | (u32) sp->txd_map); | ||
852 | ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); | ||
853 | ipg_w32(0x00000000, TFD_LIST_PTR_1); | ||
854 | |||
855 | sp->reset_current_tfd = 1; | ||
856 | } | ||
857 | |||
858 | /* | ||
859 | * Free all transmit buffers which have already been transferred | ||
860 | * via DMA to the IPG. | ||
861 | */ | ||
862 | static void ipg_nic_txfree(struct net_device *dev) | ||
863 | { | ||
864 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
865 | unsigned int released, pending, dirty; | ||
866 | |||
867 | IPG_DEBUG_MSG("_nic_txfree\n"); | ||
868 | |||
869 | pending = sp->tx_current - sp->tx_dirty; | ||
870 | dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; | ||
871 | |||
872 | for (released = 0; released < pending; released++) { | ||
873 | struct sk_buff *skb = sp->tx_buff[dirty]; | ||
874 | struct ipg_tx *txfd = sp->txd + dirty; | ||
875 | |||
876 | IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc); | ||
877 | |||
878 | /* Look at each TFD's TFC field beginning | ||
879 | * at the last freed TFD up to the current TFD. | ||
880 | * If the TFDDone bit is set, free the associated | ||
881 | * buffer. | ||
882 | */ | ||
883 | if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) | ||
884 | break; | ||
885 | |||
886 | /* Free the transmit buffer. */ | ||
887 | if (skb) { | ||
888 | pci_unmap_single(sp->pdev, | ||
889 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
890 | skb->len, PCI_DMA_TODEVICE); | ||
891 | |||
892 | dev_kfree_skb_irq(skb); | ||
893 | |||
894 | sp->tx_buff[dirty] = NULL; | ||
895 | } | ||
896 | dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; | ||
897 | } | ||
898 | |||
899 | sp->tx_dirty += released; | ||
900 | |||
901 | if (netif_queue_stopped(dev) && | ||
902 | (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { | ||
903 | netif_wake_queue(dev); | ||
904 | } | ||
905 | } | ||
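tx_current and tx_dirty are free-running counters: only their difference and their value modulo IPG_TFDLIST_LENGTH matter. A worked example with a hypothetical ring length of 16 (comment only; the real IPG_TFDLIST_LENGTH comes from ipg.h):

	/*
	 * With a ring of 16 TFDs, tx_current = 21 and tx_dirty = 18:
	 *   pending = 21 - 18 = 3 descriptors to examine,
	 *   starting at ring slot 18 % 16 = 2, then 3, then 4.
	 * The queue is woken again once tx_current != tx_dirty + ring length,
	 * i.e. as soon as at least one TFD slot is free.
	 */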
906 | |||
907 | static void ipg_tx_timeout(struct net_device *dev) | ||
908 | { | ||
909 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
910 | void __iomem *ioaddr = sp->ioaddr; | ||
911 | |||
912 | ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | | ||
913 | IPG_AC_FIFO); | ||
914 | |||
915 | spin_lock_irq(&sp->lock); | ||
916 | |||
917 | /* Re-configure after DMA reset. */ | ||
918 | if (ipg_io_config(dev) < 0) | ||
919 | netdev_info(dev, "Error during re-configuration\n"); | ||
920 | |||
921 | init_tfdlist(dev); | ||
922 | |||
923 | spin_unlock_irq(&sp->lock); | ||
924 | |||
925 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, | ||
926 | MAC_CTRL); | ||
927 | } | ||
928 | |||
929 | /* | ||
930 | * For TxComplete interrupts, free all transmit | ||
931 | * buffers which have already been transferred via DMA | ||
932 | * to the IPG. | ||
933 | */ | ||
934 | static void ipg_nic_txcleanup(struct net_device *dev) | ||
935 | { | ||
936 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
937 | void __iomem *ioaddr = sp->ioaddr; | ||
938 | unsigned int i; | ||
939 | |||
940 | IPG_DEBUG_MSG("_nic_txcleanup\n"); | ||
941 | |||
942 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
943 | /* Reading the TXSTATUS register clears the | ||
944 | * TX_COMPLETE interrupt. | ||
945 | */ | ||
946 | u32 txstatusdword = ipg_r32(TX_STATUS); | ||
947 | |||
948 | IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword); | ||
949 | |||
950 | /* Check for Transmit errors. Error bits only valid if | ||
951 | * TX_COMPLETE bit in the TXSTATUS register is a 1. | ||
952 | */ | ||
953 | if (!(txstatusdword & IPG_TS_TX_COMPLETE)) | ||
954 | break; | ||
955 | |||
956 | /* If in 10Mbps mode, indicate transmit is ready. */ | ||
957 | if (sp->tenmbpsmode) { | ||
958 | netif_wake_queue(dev); | ||
959 | } | ||
960 | |||
961 | /* Transmit error, increment stat counters. */ | ||
962 | if (txstatusdword & IPG_TS_TX_ERROR) { | ||
963 | IPG_DEBUG_MSG("Transmit error\n"); | ||
964 | sp->stats.tx_errors++; | ||
965 | } | ||
966 | |||
967 | /* Late collision, re-enable transmitter. */ | ||
968 | if (txstatusdword & IPG_TS_LATE_COLLISION) { | ||
969 | IPG_DEBUG_MSG("Late collision on transmit\n"); | ||
970 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
971 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
972 | } | ||
973 | |||
974 | /* Maximum collisions, re-enable transmitter. */ | ||
975 | if (txstatusdword & IPG_TS_TX_MAX_COLL) { | ||
976 | IPG_DEBUG_MSG("Maximum collisions on transmit\n"); | ||
977 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
978 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
979 | } | ||
980 | |||
981 | /* Transmit underrun, reset and re-enable | ||
982 | * transmitter. | ||
983 | */ | ||
984 | if (txstatusdword & IPG_TS_TX_UNDERRUN) { | ||
985 | IPG_DEBUG_MSG("Transmitter underrun\n"); | ||
986 | sp->stats.tx_fifo_errors++; | ||
987 | ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | | ||
988 | IPG_AC_NETWORK | IPG_AC_FIFO); | ||
989 | |||
990 | /* Re-configure after DMA reset. */ | ||
991 | if (ipg_io_config(dev) < 0) { | ||
992 | netdev_info(dev, "Error during re-configuration\n"); | ||
993 | } | ||
994 | init_tfdlist(dev); | ||
995 | |||
996 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & | ||
997 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | ipg_nic_txfree(dev); | ||
1002 | } | ||
1003 | |||
1004 | /* Provides statistical information about the IPG NIC. */ | ||
1005 | static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) | ||
1006 | { | ||
1007 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1008 | void __iomem *ioaddr = sp->ioaddr; | ||
1009 | u16 temp1; | ||
1010 | u16 temp2; | ||
1011 | |||
1012 | IPG_DEBUG_MSG("_nic_get_stats\n"); | ||
1013 | |||
1014 | /* Check to see if the NIC has been initialized via nic_open, | ||
1015 | * before trying to read statistic registers. | ||
1016 | */ | ||
1017 | if (!test_bit(__LINK_STATE_START, &dev->state)) | ||
1018 | return &sp->stats; | ||
1019 | |||
1020 | sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); | ||
1021 | sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); | ||
1022 | sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); | ||
1023 | sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); | ||
1024 | temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); | ||
1025 | sp->stats.rx_errors += temp1; | ||
1026 | sp->stats.rx_missed_errors += temp1; | ||
1027 | temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + | ||
1028 | ipg_r32(IPG_LATECOLLISIONS); | ||
1029 | temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); | ||
1030 | sp->stats.collisions += temp1; | ||
1031 | sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); | ||
1032 | sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + | ||
1033 | ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; | ||
1034 | sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); | ||
1035 | |||
1036 | /* detailed tx_errors */ | ||
1037 | sp->stats.tx_carrier_errors += temp2; | ||
1038 | |||
1039 | /* detailed rx_errors */ | ||
1040 | sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + | ||
1041 | ipg_r16(IPG_FRAMETOOLONGERRRORS); | ||
1042 | sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); | ||
1043 | |||
1044 | /* Unutilized IPG statistic registers. */ | ||
1045 | ipg_r32(IPG_MCSTFRAMESRCVDOK); | ||
1046 | |||
1047 | return &sp->stats; | ||
1048 | } | ||
1049 | |||
1050 | /* Restore used receive buffers. */ | ||
1051 | static int ipg_nic_rxrestore(struct net_device *dev) | ||
1052 | { | ||
1053 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1054 | const unsigned int curr = sp->rx_current; | ||
1055 | unsigned int dirty = sp->rx_dirty; | ||
1056 | |||
1057 | IPG_DEBUG_MSG("_nic_rxrestore\n"); | ||
1058 | |||
1059 | for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { | ||
1060 | unsigned int entry = dirty % IPG_RFDLIST_LENGTH; | ||
1061 | |||
1062 | /* rx_copybreak may poke holes here and there. */ | ||
1063 | if (sp->rx_buff[entry]) | ||
1064 | continue; | ||
1065 | |||
1066 | /* Generate a new receive buffer to replace the | ||
1067 | * current buffer (which will be released by the | ||
1068 | * Linux system). | ||
1069 | */ | ||
1070 | if (ipg_get_rxbuff(dev, entry) < 0) { | ||
1071 | IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n"); | ||
1072 | |||
1073 | break; | ||
1074 | } | ||
1075 | |||
1076 | /* Reset the RFS field. */ | ||
1077 | sp->rxd[entry].rfs = 0x0000000000000000; | ||
1078 | } | ||
1079 | sp->rx_dirty = dirty; | ||
1080 | |||
1081 | return 0; | ||
1082 | } | ||
1083 | |||
1084 | /* jumboindex and jumbosize track jumbo frame reassembly status. | ||
1085 | * The initial state is jumboindex = -1 and jumbosize = 0. | ||
1086 | * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame is complete. | ||
1087 | * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is being received and is not oversized. | ||
1088 | * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is oversized; the part received so far | ||
1089 | * has been dropped and the remainder must still be discarded. | ||
1090 | */ | ||
1091 | enum { | ||
1092 | NORMAL_PACKET, | ||
1093 | ERROR_PACKET | ||
1094 | }; | ||
1095 | |||
1096 | enum { | ||
1097 | FRAME_NO_START_NO_END = 0, | ||
1098 | FRAME_WITH_START = 1, | ||
1099 | FRAME_WITH_END = 10, | ||
1100 | FRAME_WITH_START_WITH_END = 11 | ||
1101 | }; | ||
1102 | |||
1103 | static void ipg_nic_rx_free_skb(struct net_device *dev) | ||
1104 | { | ||
1105 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1106 | unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; | ||
1107 | |||
1108 | if (sp->rx_buff[entry]) { | ||
1109 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1110 | |||
1111 | pci_unmap_single(sp->pdev, | ||
1112 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1113 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1114 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1115 | sp->rx_buff[entry] = NULL; | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | static int ipg_nic_rx_check_frame_type(struct net_device *dev) | ||
1120 | { | ||
1121 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1122 | struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); | ||
1123 | int type = FRAME_NO_START_NO_END; | ||
1124 | |||
1125 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) | ||
1126 | type += FRAME_WITH_START; | ||
1127 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) | ||
1128 | type += FRAME_WITH_END; | ||
1129 | return type; | ||
1130 | } | ||
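The additive encoding used above maps the two RFS flags onto the FRAME_* values as follows (comment only; mirrors ipg_nic_rx_check_frame_type()):

	/*
	 *   FRAMESTART  FRAMEEND  type  meaning
	 *        0          0       0   FRAME_NO_START_NO_END (middle fragment)
	 *        1          0       1   FRAME_WITH_START      (first fragment)
	 *        0          1      10   FRAME_WITH_END        (last fragment)
	 *        1          1      11   FRAME_WITH_START_WITH_END (complete frame)
	 */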
1131 | |||
1132 | static int ipg_nic_rx_check_error(struct net_device *dev) | ||
1133 | { | ||
1134 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1135 | unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; | ||
1136 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1137 | |||
1138 | if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & | ||
1139 | (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | | ||
1140 | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | | ||
1141 | IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { | ||
1142 | IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", | ||
1143 | (unsigned long) rxfd->rfs); | ||
1144 | |||
1145 | /* Increment general receive error statistic. */ | ||
1146 | sp->stats.rx_errors++; | ||
1147 | |||
1148 | /* Increment detailed receive error statistics. */ | ||
1149 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { | ||
1150 | IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); | ||
1151 | |||
1152 | sp->stats.rx_fifo_errors++; | ||
1153 | } | ||
1154 | |||
1155 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { | ||
1156 | IPG_DEBUG_MSG("RX runt occurred\n"); | ||
1157 | sp->stats.rx_length_errors++; | ||
1158 | } | ||
1159 | |||
1160 | /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, | ||
1161 | * error count handled by an IPG statistic register. | ||
1162 | */ | ||
1163 | |||
1164 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { | ||
1165 | IPG_DEBUG_MSG("RX alignment error occurred\n"); | ||
1166 | sp->stats.rx_frame_errors++; | ||
1167 | } | ||
1168 | |||
1169 | /* Do nothing for IPG_RFS_RXFCSERROR, error count | ||
1170 | * handled by an IPG statistic register. | ||
1171 | */ | ||
1172 | |||
1173 | /* Free the memory associated with the RX | ||
1174 | * buffer since it is erroneous and we will | ||
1175 | * not pass it to higher layer processes. | ||
1176 | */ | ||
1177 | if (sp->rx_buff[entry]) { | ||
1178 | pci_unmap_single(sp->pdev, | ||
1179 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1180 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1181 | |||
1182 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1183 | sp->rx_buff[entry] = NULL; | ||
1184 | } | ||
1185 | return ERROR_PACKET; | ||
1186 | } | ||
1187 | return NORMAL_PACKET; | ||
1188 | } | ||
1189 | |||
1190 | static void ipg_nic_rx_with_start_and_end(struct net_device *dev, | ||
1191 | struct ipg_nic_private *sp, | ||
1192 | struct ipg_rx *rxfd, unsigned entry) | ||
1193 | { | ||
1194 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1195 | struct sk_buff *skb; | ||
1196 | int framelen; | ||
1197 | |||
1198 | if (jumbo->found_start) { | ||
1199 | dev_kfree_skb_irq(jumbo->skb); | ||
1200 | jumbo->found_start = 0; | ||
1201 | jumbo->current_size = 0; | ||
1202 | jumbo->skb = NULL; | ||
1203 | } | ||
1204 | |||
1205 | /* Bail out if an Rx error was detected. */ | ||
1206 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
1207 | return; | ||
1208 | |||
1209 | skb = sp->rx_buff[entry]; | ||
1210 | if (!skb) | ||
1211 | return; | ||
1212 | |||
1213 | /* accept this frame and send to upper layer */ | ||
1214 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1215 | if (framelen > sp->rxfrag_size) | ||
1216 | framelen = sp->rxfrag_size; | ||
1217 | |||
1218 | skb_put(skb, framelen); | ||
1219 | skb->protocol = eth_type_trans(skb, dev); | ||
1220 | skb_checksum_none_assert(skb); | ||
1221 | netif_rx(skb); | ||
1222 | sp->rx_buff[entry] = NULL; | ||
1223 | } | ||
1224 | |||
1225 | static void ipg_nic_rx_with_start(struct net_device *dev, | ||
1226 | struct ipg_nic_private *sp, | ||
1227 | struct ipg_rx *rxfd, unsigned entry) | ||
1228 | { | ||
1229 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1230 | struct pci_dev *pdev = sp->pdev; | ||
1231 | struct sk_buff *skb; | ||
1232 | |||
1233 | /* Bail out if an Rx error was detected. */ | ||
1234 | if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) | ||
1235 | return; | ||
1236 | |||
1237 | /* accept this frame and send to upper layer */ | ||
1238 | skb = sp->rx_buff[entry]; | ||
1239 | if (!skb) | ||
1240 | return; | ||
1241 | |||
1242 | if (jumbo->found_start) | ||
1243 | dev_kfree_skb_irq(jumbo->skb); | ||
1244 | |||
1245 | pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1246 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1247 | |||
1248 | skb_put(skb, sp->rxfrag_size); | ||
1249 | |||
1250 | jumbo->found_start = 1; | ||
1251 | jumbo->current_size = sp->rxfrag_size; | ||
1252 | jumbo->skb = skb; | ||
1253 | |||
1254 | sp->rx_buff[entry] = NULL; | ||
1255 | } | ||
1256 | |||
1257 | static void ipg_nic_rx_with_end(struct net_device *dev, | ||
1258 | struct ipg_nic_private *sp, | ||
1259 | struct ipg_rx *rxfd, unsigned entry) | ||
1260 | { | ||
1261 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1262 | |||
1263 | /* Process the fragment only if no Rx error was detected. */ | ||
1264 | if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { | ||
1265 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1266 | |||
1267 | if (!skb) | ||
1268 | return; | ||
1269 | |||
1270 | if (jumbo->found_start) { | ||
1271 | int framelen, endframelen; | ||
1272 | |||
1273 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1274 | |||
1275 | endframelen = framelen - jumbo->current_size; | ||
1276 | if (framelen > sp->rxsupport_size) | ||
1277 | dev_kfree_skb_irq(jumbo->skb); | ||
1278 | else { | ||
1279 | memcpy(skb_put(jumbo->skb, endframelen), | ||
1280 | skb->data, endframelen); | ||
1281 | |||
1282 | jumbo->skb->protocol = | ||
1283 | eth_type_trans(jumbo->skb, dev); | ||
1284 | |||
1285 | skb_checksum_none_assert(jumbo->skb); | ||
1286 | netif_rx(jumbo->skb); | ||
1287 | } | ||
1288 | } | ||
1289 | |||
1290 | jumbo->found_start = 0; | ||
1291 | jumbo->current_size = 0; | ||
1292 | jumbo->skb = NULL; | ||
1293 | |||
1294 | ipg_nic_rx_free_skb(dev); | ||
1295 | } else { | ||
1296 | dev_kfree_skb_irq(jumbo->skb); | ||
1297 | jumbo->found_start = 0; | ||
1298 | jumbo->current_size = 0; | ||
1299 | jumbo->skb = NULL; | ||
1300 | } | ||
1301 | } | ||
1302 | |||
1303 | static void ipg_nic_rx_no_start_no_end(struct net_device *dev, | ||
1304 | struct ipg_nic_private *sp, | ||
1305 | struct ipg_rx *rxfd, unsigned entry) | ||
1306 | { | ||
1307 | struct ipg_jumbo *jumbo = &sp->jumbo; | ||
1308 | |||
1309 | /* Process the fragment only if no Rx error was detected. */ | ||
1310 | if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { | ||
1311 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1312 | |||
1313 | if (skb) { | ||
1314 | if (jumbo->found_start) { | ||
1315 | jumbo->current_size += sp->rxfrag_size; | ||
1316 | if (jumbo->current_size <= sp->rxsupport_size) { | ||
1317 | memcpy(skb_put(jumbo->skb, | ||
1318 | sp->rxfrag_size), | ||
1319 | skb->data, sp->rxfrag_size); | ||
1320 | } | ||
1321 | } | ||
1322 | ipg_nic_rx_free_skb(dev); | ||
1323 | } | ||
1324 | } else { | ||
1325 | dev_kfree_skb_irq(jumbo->skb); | ||
1326 | jumbo->found_start = 0; | ||
1327 | jumbo->current_size = 0; | ||
1328 | jumbo->skb = NULL; | ||
1329 | } | ||
1330 | } | ||
1331 | |||
1332 | static int ipg_nic_rx_jumbo(struct net_device *dev) | ||
1333 | { | ||
1334 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1335 | unsigned int curr = sp->rx_current; | ||
1336 | void __iomem *ioaddr = sp->ioaddr; | ||
1337 | unsigned int i; | ||
1338 | |||
1339 | IPG_DEBUG_MSG("_nic_rx\n"); | ||
1340 | |||
1341 | for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { | ||
1342 | unsigned int entry = curr % IPG_RFDLIST_LENGTH; | ||
1343 | struct ipg_rx *rxfd = sp->rxd + entry; | ||
1344 | |||
1345 | if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) | ||
1346 | break; | ||
1347 | |||
1348 | switch (ipg_nic_rx_check_frame_type(dev)) { | ||
1349 | case FRAME_WITH_START_WITH_END: | ||
1350 | ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); | ||
1351 | break; | ||
1352 | case FRAME_WITH_START: | ||
1353 | ipg_nic_rx_with_start(dev, sp, rxfd, entry); | ||
1354 | break; | ||
1355 | case FRAME_WITH_END: | ||
1356 | ipg_nic_rx_with_end(dev, sp, rxfd, entry); | ||
1357 | break; | ||
1358 | case FRAME_NO_START_NO_END: | ||
1359 | ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); | ||
1360 | break; | ||
1361 | } | ||
1362 | } | ||
1363 | |||
1364 | sp->rx_current = curr; | ||
1365 | |||
1366 | if (i == IPG_MAXRFDPROCESS_COUNT) { | ||
1367 | /* There are more RFDs to process, however the | ||
1368 | * allocated amount of RFD processing time has | ||
1369 | * expired. Assert Interrupt Requested to make | ||
1370 | * sure we come back to process the remaining RFDs. | ||
1371 | */ | ||
1372 | ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); | ||
1373 | } | ||
1374 | |||
1375 | ipg_nic_rxrestore(dev); | ||
1376 | |||
1377 | return 0; | ||
1378 | } | ||
1379 | |||
1380 | static int ipg_nic_rx(struct net_device *dev) | ||
1381 | { | ||
1382 | /* Transfer received Ethernet frames to higher network layers. */ | ||
1383 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1384 | unsigned int curr = sp->rx_current; | ||
1385 | void __iomem *ioaddr = sp->ioaddr; | ||
1386 | struct ipg_rx *rxfd; | ||
1387 | unsigned int i; | ||
1388 | |||
1389 | IPG_DEBUG_MSG("_nic_rx\n"); | ||
1390 | |||
1391 | #define __RFS_MASK \ | ||
1392 | cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) | ||
1393 | |||
1394 | for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { | ||
1395 | unsigned int entry = curr % IPG_RFDLIST_LENGTH; | ||
1396 | struct sk_buff *skb = sp->rx_buff[entry]; | ||
1397 | unsigned int framelen; | ||
1398 | |||
1399 | rxfd = sp->rxd + entry; | ||
1400 | |||
1401 | if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) | ||
1402 | break; | ||
1403 | |||
1404 | /* Get received frame length. */ | ||
1405 | framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; | ||
1406 | |||
1407 | /* Check for jumbo frame arrival with too small | ||
1408 | * RXFRAG_SIZE. | ||
1409 | */ | ||
1410 | if (framelen > sp->rxfrag_size) { | ||
1411 | IPG_DEBUG_MSG | ||
1412 | ("RFS FrameLen > allocated fragment size\n"); | ||
1413 | |||
1414 | framelen = sp->rxfrag_size; | ||
1415 | } | ||
1416 | |||
1417 | if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & | ||
1418 | (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | | ||
1419 | IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | | ||
1420 | IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { | ||
1421 | |||
1422 | IPG_DEBUG_MSG("Rx error, RFS = %016lx\n", | ||
1423 | (unsigned long int) rxfd->rfs); | ||
1424 | |||
1425 | /* Increment general receive error statistic. */ | ||
1426 | sp->stats.rx_errors++; | ||
1427 | |||
1428 | /* Increment detailed receive error statistics. */ | ||
1429 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { | ||
1430 | IPG_DEBUG_MSG("RX FIFO overrun occurred\n"); | ||
1431 | sp->stats.rx_fifo_errors++; | ||
1432 | } | ||
1433 | |||
1434 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { | ||
1435 | IPG_DEBUG_MSG("RX runt occurred\n"); | ||
1436 | sp->stats.rx_length_errors++; | ||
1437 | } | ||
1438 | |||
1439 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; | ||
1440 | /* Do nothing, error count handled by an IPG | ||
1441 | * statistic register. | ||
1442 | */ | ||
1443 | |||
1444 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { | ||
1445 | IPG_DEBUG_MSG("RX alignment error occurred\n"); | ||
1446 | sp->stats.rx_frame_errors++; | ||
1447 | } | ||
1448 | |||
1449 | if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; | ||
1450 | /* Do nothing, error count handled by an IPG | ||
1451 | * statistic register. | ||
1452 | */ | ||
1453 | |||
1454 | /* Free the memory associated with the RX | ||
1455 | * buffer since it is erroneous and we will | ||
1456 | * not pass it to higher layer processes. | ||
1457 | */ | ||
1458 | if (skb) { | ||
1459 | __le64 info = rxfd->frag_info; | ||
1460 | |||
1461 | pci_unmap_single(sp->pdev, | ||
1462 | le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, | ||
1463 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1464 | |||
1465 | dev_kfree_skb_irq(skb); | ||
1466 | } | ||
1467 | } else { | ||
1468 | |||
1469 | /* Adjust the new buffer length to accommodate the size | ||
1470 | * of the received frame. | ||
1471 | */ | ||
1472 | skb_put(skb, framelen); | ||
1473 | |||
1474 | /* Set the buffer's protocol field to Ethernet. */ | ||
1475 | skb->protocol = eth_type_trans(skb, dev); | ||
1476 | |||
1477 | /* The IPG encountered an error with (or | ||
1478 | * there were no) IP/TCP/UDP checksums. | ||
1479 | * This may or may not indicate an invalid | ||
1480 | * IP/TCP/UDP frame was received. Let the | ||
1481 | * upper layer decide. | ||
1482 | */ | ||
1483 | skb_checksum_none_assert(skb); | ||
1484 | |||
1485 | /* Hand off frame for higher layer processing. | ||
1486 | * The function netif_rx() releases the sk_buff | ||
1487 | * when processing completes. | ||
1488 | */ | ||
1489 | netif_rx(skb); | ||
1490 | } | ||
1491 | |||
1492 | /* Assure RX buffer is not reused by IPG. */ | ||
1493 | sp->rx_buff[entry] = NULL; | ||
1494 | } | ||
1495 | |||
1496 | /* | ||
1497 | * If there are more RFDs to process and the allocated amount of RFD | ||
1498 | * processing time has expired, assert Interrupt Requested to make | ||
1499 | * sure we come back to process the remaining RFDs. | ||
1500 | */ | ||
1501 | if (i == IPG_MAXRFDPROCESS_COUNT) | ||
1502 | ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); | ||
1503 | |||
1504 | #ifdef IPG_DEBUG | ||
1505 | /* Check if the RFD list contained no receive frame data. */ | ||
1506 | if (!i) | ||
1507 | sp->EmptyRFDListCount++; | ||
1508 | #endif | ||
1509 | while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && | ||
1510 | !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && | ||
1511 | (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { | ||
1512 | unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; | ||
1513 | |||
1514 | rxfd = sp->rxd + entry; | ||
1515 | |||
1516 | IPG_DEBUG_MSG("Frame requires multiple RFDs\n"); | ||
1517 | |||
1518 | /* This is an unexpected event; additional code is needed to | ||
1519 | * handle it properly. For the time being, just disregard the | ||
1520 | * frame. | ||
1521 | */ | ||
1522 | |||
1523 | /* Free the memory associated with the RX | ||
1524 | * buffer since it is erroneous and we will | ||
1525 | * not pass it to higher layer processes. | ||
1526 | */ | ||
1527 | if (sp->rx_buff[entry]) { | ||
1528 | pci_unmap_single(sp->pdev, | ||
1529 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1530 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1531 | dev_kfree_skb_irq(sp->rx_buff[entry]); | ||
1532 | } | ||
1533 | |||
1534 | /* Assure RX buffer is not reused by IPG. */ | ||
1535 | sp->rx_buff[entry] = NULL; | ||
1536 | } | ||
1537 | |||
1538 | sp->rx_current = curr; | ||
1539 | |||
1540 | /* Check to see if there are a minimum number of used | ||
1541 | * RFDs before restoring any (should improve performance.) | ||
1542 | */ | ||
1543 | if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) | ||
1544 | ipg_nic_rxrestore(dev); | ||
1545 | |||
1546 | return 0; | ||
1547 | } | ||
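Aside on the receive path above: each completed RFD carries a 64-bit RFS word whose fields are described by the IPG_RFS_* masks in ipg.h. The sketch below is not part of the driver and the helper name is hypothetical; it only shows how a single-RFD frame is recognized and its length extracted, mirroring the checks made in the loop above. The caller would pass le64_to_cpu(rxfd->rfs).

/* Sketch only: decode the RFS word of a receive frame descriptor.
 * Returns true when the descriptor describes one complete frame.
 */
static bool rfs_single_frame_done(u64 rfs, unsigned int *framelen)
{
	if (!(rfs & IPG_RFS_RFDDONE))
		return false;			/* descriptor not yet completed by the NIC */
	if (!(rfs & IPG_RFS_FRAMESTART) || !(rfs & IPG_RFS_FRAMEEND))
		return false;			/* frame spans multiple RFDs */

	*framelen = rfs & IPG_RFS_RXFRAMELEN;	/* length in the low 16 bits */
	return true;
}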
1548 | |||
1549 | static void ipg_reset_after_host_error(struct work_struct *work) | ||
1550 | { | ||
1551 | struct ipg_nic_private *sp = | ||
1552 | container_of(work, struct ipg_nic_private, task.work); | ||
1553 | struct net_device *dev = sp->dev; | ||
1554 | |||
1555 | /* | ||
1556 | * Acknowledge HostError interrupt by resetting | ||
1557 | * IPG DMA and HOST. | ||
1558 | */ | ||
1559 | ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); | ||
1560 | |||
1561 | init_rfdlist(dev); | ||
1562 | init_tfdlist(dev); | ||
1563 | |||
1564 | if (ipg_io_config(dev) < 0) { | ||
1565 | netdev_info(dev, "Cannot recover from PCI error\n"); | ||
1566 | schedule_delayed_work(&sp->task, HZ); | ||
1567 | } | ||
1568 | } | ||
1569 | |||
1570 | static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) | ||
1571 | { | ||
1572 | struct net_device *dev = dev_inst; | ||
1573 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1574 | void __iomem *ioaddr = sp->ioaddr; | ||
1575 | unsigned int handled = 0; | ||
1576 | u16 status; | ||
1577 | |||
1578 | IPG_DEBUG_MSG("_interrupt_handler\n"); | ||
1579 | |||
1580 | if (sp->is_jumbo) | ||
1581 | ipg_nic_rxrestore(dev); | ||
1582 | |||
1583 | spin_lock(&sp->lock); | ||
1584 | |||
1585 | /* Get interrupt source information, and acknowledge | ||
1586 | * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, | ||
1587 | * IntRequested, MacControlFrame, LinkEvent) interrupts | ||
1588 | * if issued. Also, all IPG interrupts are disabled by | ||
1589 | * reading IntStatusAck. | ||
1590 | */ | ||
1591 | status = ipg_r16(INT_STATUS_ACK); | ||
1592 | |||
1593 | IPG_DEBUG_MSG("IntStatusAck = %04x\n", status); | ||
1594 | |||
1595 | /* Not our interrupt: shared IRQ or device remove event. */ | ||
1596 | if (!(status & IPG_IS_RSVD_MASK)) | ||
1597 | goto out_enable; | ||
1598 | |||
1599 | handled = 1; | ||
1600 | |||
1601 | if (unlikely(!netif_running(dev))) | ||
1602 | goto out_unlock; | ||
1603 | |||
1604 | /* If RFDListEnd interrupt, restore all used RFDs. */ | ||
1605 | if (status & IPG_IS_RFD_LIST_END) { | ||
1606 | IPG_DEBUG_MSG("RFDListEnd Interrupt\n"); | ||
1607 | |||
1608 | /* The RFD list end indicates an RFD was encountered | ||
1609 | * with a 0 NextPtr, or with an RFDDone bit set to 1 | ||
1610 | * (indicating the RFD is not ready for use by the | ||
1611 | * IPG.) Try to restore all RFDs. | ||
1612 | */ | ||
1613 | ipg_nic_rxrestore(dev); | ||
1614 | |||
1615 | #ifdef IPG_DEBUG | ||
1616 | /* Increment the RFDlistendCount counter. */ | ||
1617 | sp->RFDlistendCount++; | ||
1618 | #endif | ||
1619 | } | ||
1620 | |||
1621 | /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or | ||
1622 | * IntRequested interrupt, process received frames. */ | ||
1623 | if ((status & IPG_IS_RX_DMA_PRIORITY) || | ||
1624 | (status & IPG_IS_RFD_LIST_END) || | ||
1625 | (status & IPG_IS_RX_DMA_COMPLETE) || | ||
1626 | (status & IPG_IS_INT_REQUESTED)) { | ||
1627 | #ifdef IPG_DEBUG | ||
1628 | /* Increment the RFD list checked counter if interrupted | ||
1629 | * only to check the RFD list. */ | ||
1630 | if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | | ||
1631 | IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & | ||
1632 | (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | | ||
1633 | IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | | ||
1634 | IPG_IS_UPDATE_STATS))) | ||
1635 | sp->RFDListCheckedCount++; | ||
1636 | #endif | ||
1637 | |||
1638 | if (sp->is_jumbo) | ||
1639 | ipg_nic_rx_jumbo(dev); | ||
1640 | else | ||
1641 | ipg_nic_rx(dev); | ||
1642 | } | ||
1643 | |||
1644 | /* If TxDMAComplete interrupt, free used TFDs. */ | ||
1645 | if (status & IPG_IS_TX_DMA_COMPLETE) | ||
1646 | ipg_nic_txfree(dev); | ||
1647 | |||
1648 | /* TxComplete interrupts indicate one of numerous actions. | ||
1649 | * Determine what action to take based on TXSTATUS register. | ||
1650 | */ | ||
1651 | if (status & IPG_IS_TX_COMPLETE) | ||
1652 | ipg_nic_txcleanup(dev); | ||
1653 | |||
1654 | /* If UpdateStats interrupt, update Linux Ethernet statistics */ | ||
1655 | if (status & IPG_IS_UPDATE_STATS) | ||
1656 | ipg_nic_get_stats(dev); | ||
1657 | |||
1658 | /* If HostError interrupt, reset IPG. */ | ||
1659 | if (status & IPG_IS_HOST_ERROR) { | ||
1660 | IPG_DDEBUG_MSG("HostError Interrupt\n"); | ||
1661 | |||
1662 | schedule_delayed_work(&sp->task, 0); | ||
1663 | } | ||
1664 | |||
1665 | /* If LinkEvent interrupt, resolve autonegotiation. */ | ||
1666 | if (status & IPG_IS_LINK_EVENT) { | ||
1667 | if (ipg_config_autoneg(dev) < 0) | ||
1668 | netdev_info(dev, "Auto-negotiation error\n"); | ||
1669 | } | ||
1670 | |||
1671 | /* If MACCtrlFrame interrupt, do nothing. */ | ||
1672 | if (status & IPG_IS_MAC_CTRL_FRAME) | ||
1673 | IPG_DEBUG_MSG("MACCtrlFrame interrupt\n"); | ||
1674 | |||
1675 | /* If RxComplete interrupt, do nothing. */ | ||
1676 | if (status & IPG_IS_RX_COMPLETE) | ||
1677 | IPG_DEBUG_MSG("RxComplete interrupt\n"); | ||
1678 | |||
1679 | /* If RxEarly interrupt, do nothing. */ | ||
1680 | if (status & IPG_IS_RX_EARLY) | ||
1681 | IPG_DEBUG_MSG("RxEarly interrupt\n"); | ||
1682 | |||
1683 | out_enable: | ||
1684 | /* Re-enable IPG interrupts. */ | ||
1685 | ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | | ||
1686 | IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | | ||
1687 | IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); | ||
1688 | out_unlock: | ||
1689 | spin_unlock(&sp->lock); | ||
1690 | |||
1691 | return IRQ_RETVAL(handled); | ||
1692 | } | ||
1693 | |||
1694 | static void ipg_rx_clear(struct ipg_nic_private *sp) | ||
1695 | { | ||
1696 | unsigned int i; | ||
1697 | |||
1698 | for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { | ||
1699 | if (sp->rx_buff[i]) { | ||
1700 | struct ipg_rx *rxfd = sp->rxd + i; | ||
1701 | |||
1702 | dev_kfree_skb_irq(sp->rx_buff[i]); | ||
1703 | sp->rx_buff[i] = NULL; | ||
1704 | pci_unmap_single(sp->pdev, | ||
1705 | le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, | ||
1706 | sp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
1707 | } | ||
1708 | } | ||
1709 | } | ||
1710 | |||
1711 | static void ipg_tx_clear(struct ipg_nic_private *sp) | ||
1712 | { | ||
1713 | unsigned int i; | ||
1714 | |||
1715 | for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { | ||
1716 | if (sp->tx_buff[i]) { | ||
1717 | struct ipg_tx *txfd = sp->txd + i; | ||
1718 | |||
1719 | pci_unmap_single(sp->pdev, | ||
1720 | le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, | ||
1721 | sp->tx_buff[i]->len, PCI_DMA_TODEVICE); | ||
1722 | |||
1723 | dev_kfree_skb_irq(sp->tx_buff[i]); | ||
1724 | |||
1725 | sp->tx_buff[i] = NULL; | ||
1726 | } | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | static int ipg_nic_open(struct net_device *dev) | ||
1731 | { | ||
1732 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1733 | void __iomem *ioaddr = sp->ioaddr; | ||
1734 | struct pci_dev *pdev = sp->pdev; | ||
1735 | int rc; | ||
1736 | |||
1737 | IPG_DEBUG_MSG("_nic_open\n"); | ||
1738 | |||
1739 | sp->rx_buf_sz = sp->rxsupport_size; | ||
1740 | |||
1741 | /* Check for interrupt line conflicts, and request interrupt | ||
1742 | * line for IPG. | ||
1743 | * | ||
1744 | * IMPORTANT: Disable IPG interrupts prior to registering | ||
1745 | * IRQ. | ||
1746 | */ | ||
1747 | ipg_w16(0x0000, INT_ENABLE); | ||
1748 | |||
1749 | /* Register the interrupt line to be used by the IPG within | ||
1750 | * the Linux system. | ||
1751 | */ | ||
1752 | rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, | ||
1753 | dev->name, dev); | ||
1754 | if (rc < 0) { | ||
1755 | netdev_info(dev, "Error when requesting interrupt\n"); | ||
1756 | goto out; | ||
1757 | } | ||
1758 | |||
1759 | dev->irq = pdev->irq; | ||
1760 | |||
1761 | rc = -ENOMEM; | ||
1762 | |||
1763 | sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, | ||
1764 | &sp->rxd_map, GFP_KERNEL); | ||
1765 | if (!sp->rxd) | ||
1766 | goto err_free_irq_0; | ||
1767 | |||
1768 | sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, | ||
1769 | &sp->txd_map, GFP_KERNEL); | ||
1770 | if (!sp->txd) | ||
1771 | goto err_free_rx_1; | ||
1772 | |||
1773 | rc = init_rfdlist(dev); | ||
1774 | if (rc < 0) { | ||
1775 | netdev_info(dev, "Error during configuration\n"); | ||
1776 | goto err_free_tx_2; | ||
1777 | } | ||
1778 | |||
1779 | init_tfdlist(dev); | ||
1780 | |||
1781 | rc = ipg_io_config(dev); | ||
1782 | if (rc < 0) { | ||
1783 | netdev_info(dev, "Error during configuration\n"); | ||
1784 | goto err_release_tfdlist_3; | ||
1785 | } | ||
1786 | |||
1787 | /* Resolve autonegotiation. */ | ||
1788 | if (ipg_config_autoneg(dev) < 0) | ||
1789 | netdev_info(dev, "Auto-negotiation error\n"); | ||
1790 | |||
1791 | /* Initialize the jumbo frame control variables. */ | ||
1792 | sp->jumbo.found_start = 0; | ||
1793 | sp->jumbo.current_size = 0; | ||
1794 | sp->jumbo.skb = NULL; | ||
1795 | |||
1796 | /* Enable transmit and receive operation of the IPG. */ | ||
1797 | ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & | ||
1798 | IPG_MC_RSVD_MASK, MAC_CTRL); | ||
1799 | |||
1800 | netif_start_queue(dev); | ||
1801 | out: | ||
1802 | return rc; | ||
1803 | |||
1804 | err_release_tfdlist_3: | ||
1805 | ipg_tx_clear(sp); | ||
1806 | ipg_rx_clear(sp); | ||
1807 | err_free_tx_2: | ||
1808 | dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); | ||
1809 | err_free_rx_1: | ||
1810 | dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); | ||
1811 | err_free_irq_0: | ||
1812 | free_irq(pdev->irq, dev); | ||
1813 | goto out; | ||
1814 | } | ||
1815 | |||
1816 | static int ipg_nic_stop(struct net_device *dev) | ||
1817 | { | ||
1818 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1819 | void __iomem *ioaddr = sp->ioaddr; | ||
1820 | struct pci_dev *pdev = sp->pdev; | ||
1821 | |||
1822 | IPG_DEBUG_MSG("_nic_stop\n"); | ||
1823 | |||
1824 | netif_stop_queue(dev); | ||
1825 | |||
1826 | IPG_DUMPTFDLIST(dev); | ||
1827 | |||
1828 | do { | ||
1829 | (void) ipg_r16(INT_STATUS_ACK); | ||
1830 | |||
1831 | ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); | ||
1832 | |||
1833 | synchronize_irq(pdev->irq); | ||
1834 | } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); | ||
1835 | |||
1836 | ipg_rx_clear(sp); | ||
1837 | |||
1838 | ipg_tx_clear(sp); | ||
1839 | |||
1840 | dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); | ||
1841 | dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); | ||
1842 | |||
1843 | free_irq(pdev->irq, dev); | ||
1844 | |||
1845 | return 0; | ||
1846 | } | ||
1847 | |||
1848 | static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, | ||
1849 | struct net_device *dev) | ||
1850 | { | ||
1851 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
1852 | void __iomem *ioaddr = sp->ioaddr; | ||
1853 | unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; | ||
1854 | unsigned long flags; | ||
1855 | struct ipg_tx *txfd; | ||
1856 | |||
1857 | IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); | ||
1858 | |||
1859 | /* If in 10Mbps mode, stop the transmit queue so | ||
1860 | * no more transmit frames are accepted. | ||
1861 | */ | ||
1862 | if (sp->tenmbpsmode) | ||
1863 | netif_stop_queue(dev); | ||
1864 | |||
1865 | if (sp->reset_current_tfd) { | ||
1866 | sp->reset_current_tfd = 0; | ||
1867 | entry = 0; | ||
1868 | } | ||
1869 | |||
1870 | txfd = sp->txd + entry; | ||
1871 | |||
1872 | sp->tx_buff[entry] = skb; | ||
1873 | |||
1874 | /* Clear all TFC fields, except TFDDONE. */ | ||
1875 | txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); | ||
1876 | |||
1877 | /* Specify the TFC field within the TFD. */ | ||
1878 | txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | | ||
1879 | (IPG_TFC_FRAMEID & sp->tx_current) | | ||
1880 | (IPG_TFC_FRAGCOUNT & (1 << 24))); | ||
1881 | /* | ||
1882 | * 16--17 (WordAlign) <- 3 (disable), | ||
1883 | * 0--15 (FrameId) <- sp->tx_current, | ||
1884 | * 24--27 (FragCount) <- 1 | ||
1885 | */ | ||
1886 | |||
1887 | /* Request TxComplete interrupts at an interval defined | ||
1888 | * by the constant IPG_FRAMESBETWEENTXCOMPLETES. | ||
1889 | * Request TxComplete interrupt for every frame | ||
1890 | * if in 10Mbps mode to accommodate problem with 10Mbps | ||
1891 | * processing. | ||
1892 | */ | ||
1893 | if (sp->tenmbpsmode) | ||
1894 | txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); | ||
1895 | txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); | ||
1896 | /* Based on compilation option, determine if the FCS is to be | ||
1897 | * appended to the transmit frame by the IPG. | ||
1898 | */ | ||
1899 | if (!(IPG_APPEND_FCS_ON_TX)) | ||
1900 | txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); | ||
1901 | |||
1902 | /* Based on compilation option, determine if IP, TCP and/or | ||
1903 | * UDP checksums are to be added to the transmit frame by the IPG. | ||
1904 | */ | ||
1905 | if (IPG_ADD_IPCHECKSUM_ON_TX) | ||
1906 | txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); | ||
1907 | |||
1908 | if (IPG_ADD_TCPCHECKSUM_ON_TX) | ||
1909 | txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); | ||
1910 | |||
1911 | if (IPG_ADD_UDPCHECKSUM_ON_TX) | ||
1912 | txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); | ||
1913 | |||
1914 | /* Based on compilation option, determine if VLAN tag info is to be | ||
1915 | * inserted into transmit frame by IPG. | ||
1916 | */ | ||
1917 | if (IPG_INSERT_MANUAL_VLAN_TAG) { | ||
1918 | txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | | ||
1919 | ((u64) IPG_MANUAL_VLAN_VID << 32) | | ||
1920 | ((u64) IPG_MANUAL_VLAN_CFI << 44) | | ||
1921 | ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); | ||
1922 | } | ||
1923 | |||
1924 | /* The fragment start location within system memory is defined | ||
1925 | * by the sk_buff structure's data field. The DMA (bus) address | ||
1926 | * of this location is obtained by mapping the buffer with | ||
1927 | * pci_map_single(). | ||
1928 | */ | ||
1929 | txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, | ||
1930 | skb->len, PCI_DMA_TODEVICE)); | ||
1931 | |||
1932 | /* The length of the fragment within system memory is defined by | ||
1933 | * the sk_buff structure's len field. | ||
1934 | */ | ||
1935 | txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & | ||
1936 | ((u64) (skb->len & 0xffff) << 48)); | ||
1937 | |||
1938 | /* Clear the TFDDone bit last to indicate the TFD is ready | ||
1939 | * for transfer to the IPG. | ||
1940 | */ | ||
1941 | txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); | ||
1942 | |||
1943 | spin_lock_irqsave(&sp->lock, flags); | ||
1944 | |||
1945 | sp->tx_current++; | ||
1946 | |||
1947 | mmiowb(); | ||
1948 | |||
1949 | ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); | ||
1950 | |||
1951 | if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) | ||
1952 | netif_stop_queue(dev); | ||
1953 | |||
1954 | spin_unlock_irqrestore(&sp->lock, flags); | ||
1955 | |||
1956 | return NETDEV_TX_OK; | ||
1957 | } | ||
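For reference, the bit layout described in the comment inside ipg_nic_hard_start_xmit (FrameId in bits 0-15, WordAlign in bits 16-17, FragCount in bits 24-27) can be illustrated by assembling a minimal single-fragment TFC word in one expression. This is an illustrative sketch, not driver code; the function name is made up, and only the IPG_TFC_* masks from ipg.h are assumed.

/* Sketch only: compose the TFC word for a single-fragment transmit
 * descriptor with word alignment disabled and a TxDMAComplete request.
 */
static u64 example_single_frag_tfc(unsigned int frame_id)
{
	return (IPG_TFC_FRAMEID & frame_id) |		/* bits 0-15: frame id */
	       IPG_TFC_WORDALIGNDISABLED |		/* bits 16-17: 3 = disabled */
	       (IPG_TFC_FRAGCOUNT & (1ULL << 24)) |	/* bits 24-27: one fragment */
	       IPG_TFC_TXDMAINDICATE;			/* request TxDMAComplete */
}

The result would then be converted with cpu_to_le64() before being written to txfd->tfc, as the function above does.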
1958 | |||
1959 | static void ipg_set_phy_default_param(unsigned char rev, | ||
1960 | struct net_device *dev, int phy_address) | ||
1961 | { | ||
1962 | unsigned short length; | ||
1963 | unsigned char revision; | ||
1964 | const unsigned short *phy_param; | ||
1965 | unsigned short address, value; | ||
1966 | |||
1967 | phy_param = &DefaultPhyParam[0]; | ||
1968 | length = *phy_param & 0x00FF; | ||
1969 | revision = (unsigned char)((*phy_param) >> 8); | ||
1970 | phy_param++; | ||
1971 | while (length != 0) { | ||
1972 | if (rev == revision) { | ||
1973 | while (length > 1) { | ||
1974 | address = *phy_param; | ||
1975 | value = *(phy_param + 1); | ||
1976 | phy_param += 2; | ||
1977 | mdio_write(dev, phy_address, address, value); | ||
1978 | length -= 4; | ||
1979 | } | ||
1980 | break; | ||
1981 | } else { | ||
1982 | phy_param += length / 2; | ||
1983 | length = *phy_param & 0x00FF; | ||
1984 | revision = (unsigned char)((*phy_param) >> 8); | ||
1985 | phy_param++; | ||
1986 | } | ||
1987 | } | ||
1988 | } | ||
1989 | |||
1990 | static int read_eeprom(struct net_device *dev, int eep_addr) | ||
1991 | { | ||
1992 | void __iomem *ioaddr = ipg_ioaddr(dev); | ||
1993 | unsigned int i; | ||
1994 | int ret = 0; | ||
1995 | u16 value; | ||
1996 | |||
1997 | value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); | ||
1998 | ipg_w16(value, EEPROM_CTRL); | ||
1999 | |||
2000 | for (i = 0; i < 1000; i++) { | ||
2001 | u16 data; | ||
2002 | |||
2003 | mdelay(10); | ||
2004 | data = ipg_r16(EEPROM_CTRL); | ||
2005 | if (!(data & IPG_EC_EEPROM_BUSY)) { | ||
2006 | ret = ipg_r16(EEPROM_DATA); | ||
2007 | break; | ||
2008 | } | ||
2009 | } | ||
2010 | return ret; | ||
2011 | } | ||
2012 | |||
2013 | static void ipg_init_mii(struct net_device *dev) | ||
2014 | { | ||
2015 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2016 | struct mii_if_info *mii_if = &sp->mii_if; | ||
2017 | int phyaddr; | ||
2018 | |||
2019 | mii_if->dev = dev; | ||
2020 | mii_if->mdio_read = mdio_read; | ||
2021 | mii_if->mdio_write = mdio_write; | ||
2022 | mii_if->phy_id_mask = 0x1f; | ||
2023 | mii_if->reg_num_mask = 0x1f; | ||
2024 | |||
2025 | mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); | ||
2026 | |||
2027 | if (phyaddr != 0x1f) { | ||
2028 | u16 mii_phyctrl, mii_1000cr; | ||
2029 | |||
2030 | mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); | ||
2031 | mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | | ||
2032 | GMII_PHY_1000BASETCONTROL_PreferMaster; | ||
2033 | mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); | ||
2034 | |||
2035 | mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); | ||
2036 | |||
2037 | /* Set default phyparam */ | ||
2038 | ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr); | ||
2039 | |||
2040 | /* Reset PHY */ | ||
2041 | mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; | ||
2042 | mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); | ||
2043 | |||
2044 | } | ||
2045 | } | ||
2046 | |||
2047 | static int ipg_hw_init(struct net_device *dev) | ||
2048 | { | ||
2049 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2050 | void __iomem *ioaddr = sp->ioaddr; | ||
2051 | unsigned int i; | ||
2052 | int rc; | ||
2053 | |||
2054 | /* Read/Write and Reset EEPROM Value */ | ||
2055 | /* Read LED Mode Configuration from EEPROM */ | ||
2056 | sp->led_mode = read_eeprom(dev, 6); | ||
2057 | |||
2058 | /* Reset all functions within the IPG. Do not assert | ||
2059 | * RST_OUT as not compatible with some PHYs. | ||
2060 | */ | ||
2061 | rc = ipg_reset(dev, IPG_RESET_MASK); | ||
2062 | if (rc < 0) | ||
2063 | goto out; | ||
2064 | |||
2065 | ipg_init_mii(dev); | ||
2066 | |||
2067 | /* Read MAC Address from EEPROM */ | ||
2068 | for (i = 0; i < 3; i++) | ||
2069 | sp->station_addr[i] = read_eeprom(dev, 16 + i); | ||
2070 | |||
2071 | for (i = 0; i < 3; i++) | ||
2072 | ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); | ||
2073 | |||
2074 | /* Set station address in ethernet_device structure. */ | ||
2075 | dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; | ||
2076 | dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; | ||
2077 | dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; | ||
2078 | dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; | ||
2079 | dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; | ||
2080 | dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; | ||
2081 | out: | ||
2082 | return rc; | ||
2083 | } | ||
2084 | |||
2085 | static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2086 | { | ||
2087 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2088 | int rc; | ||
2089 | |||
2090 | mutex_lock(&sp->mii_mutex); | ||
2091 | rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); | ||
2092 | mutex_unlock(&sp->mii_mutex); | ||
2093 | |||
2094 | return rc; | ||
2095 | } | ||
2096 | |||
2097 | static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) | ||
2098 | { | ||
2099 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2100 | int err; | ||
2101 | |||
2102 | /* Function to accommodate changes to the Maximum Transmission Unit | ||
2103 | * (MTU) of the IPG NIC. Cannot use the default function since | ||
2104 | * the default will not allow for MTU > 1500 bytes. | ||
2105 | */ | ||
2106 | |||
2107 | IPG_DEBUG_MSG("_nic_change_mtu\n"); | ||
2108 | |||
2109 | /* | ||
2110 | * Check that the new MTU value is between 68 (the conventional minimum | ||
2111 | * MTU for IPv4) and 10240 bytes, the largest MTU supported by this driver. | ||
2112 | */ | ||
2113 | if (new_mtu < 68 || new_mtu > 10240) | ||
2114 | return -EINVAL; | ||
2115 | |||
2116 | err = ipg_nic_stop(dev); | ||
2117 | if (err) | ||
2118 | return err; | ||
2119 | |||
2120 | dev->mtu = new_mtu; | ||
2121 | |||
2122 | sp->max_rxframe_size = new_mtu; | ||
2123 | |||
2124 | sp->rxfrag_size = new_mtu; | ||
2125 | if (sp->rxfrag_size > 4088) | ||
2126 | sp->rxfrag_size = 4088; | ||
2127 | |||
2128 | sp->rxsupport_size = sp->max_rxframe_size; | ||
2129 | |||
2130 | if (new_mtu > 0x0600) | ||
2131 | sp->is_jumbo = true; | ||
2132 | else | ||
2133 | sp->is_jumbo = false; | ||
2134 | |||
2135 | return ipg_nic_open(dev); | ||
2136 | } | ||
2137 | |||
2138 | static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2139 | { | ||
2140 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2141 | int rc; | ||
2142 | |||
2143 | mutex_lock(&sp->mii_mutex); | ||
2144 | rc = mii_ethtool_gset(&sp->mii_if, cmd); | ||
2145 | mutex_unlock(&sp->mii_mutex); | ||
2146 | |||
2147 | return rc; | ||
2148 | } | ||
2149 | |||
2150 | static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
2151 | { | ||
2152 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2153 | int rc; | ||
2154 | |||
2155 | mutex_lock(&sp->mii_mutex); | ||
2156 | rc = mii_ethtool_sset(&sp->mii_if, cmd); | ||
2157 | mutex_unlock(&sp->mii_mutex); | ||
2158 | |||
2159 | return rc; | ||
2160 | } | ||
2161 | |||
2162 | static int ipg_nway_reset(struct net_device *dev) | ||
2163 | { | ||
2164 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2165 | int rc; | ||
2166 | |||
2167 | mutex_lock(&sp->mii_mutex); | ||
2168 | rc = mii_nway_restart(&sp->mii_if); | ||
2169 | mutex_unlock(&sp->mii_mutex); | ||
2170 | |||
2171 | return rc; | ||
2172 | } | ||
2173 | |||
2174 | static const struct ethtool_ops ipg_ethtool_ops = { | ||
2175 | .get_settings = ipg_get_settings, | ||
2176 | .set_settings = ipg_set_settings, | ||
2177 | .nway_reset = ipg_nway_reset, | ||
2178 | }; | ||
2179 | |||
2180 | static void __devexit ipg_remove(struct pci_dev *pdev) | ||
2181 | { | ||
2182 | struct net_device *dev = pci_get_drvdata(pdev); | ||
2183 | struct ipg_nic_private *sp = netdev_priv(dev); | ||
2184 | |||
2185 | IPG_DEBUG_MSG("_remove\n"); | ||
2186 | |||
2187 | /* Un-register Ethernet device. */ | ||
2188 | unregister_netdev(dev); | ||
2189 | |||
2190 | pci_iounmap(pdev, sp->ioaddr); | ||
2191 | |||
2192 | pci_release_regions(pdev); | ||
2193 | |||
2194 | free_netdev(dev); | ||
2195 | pci_disable_device(pdev); | ||
2196 | pci_set_drvdata(pdev, NULL); | ||
2197 | } | ||
2198 | |||
2199 | static const struct net_device_ops ipg_netdev_ops = { | ||
2200 | .ndo_open = ipg_nic_open, | ||
2201 | .ndo_stop = ipg_nic_stop, | ||
2202 | .ndo_start_xmit = ipg_nic_hard_start_xmit, | ||
2203 | .ndo_get_stats = ipg_nic_get_stats, | ||
2204 | .ndo_set_multicast_list = ipg_nic_set_multicast_list, | ||
2205 | .ndo_do_ioctl = ipg_ioctl, | ||
2206 | .ndo_tx_timeout = ipg_tx_timeout, | ||
2207 | .ndo_change_mtu = ipg_nic_change_mtu, | ||
2208 | .ndo_set_mac_address = eth_mac_addr, | ||
2209 | .ndo_validate_addr = eth_validate_addr, | ||
2210 | }; | ||
2211 | |||
2212 | static int __devinit ipg_probe(struct pci_dev *pdev, | ||
2213 | const struct pci_device_id *id) | ||
2214 | { | ||
2215 | unsigned int i = id->driver_data; | ||
2216 | struct ipg_nic_private *sp; | ||
2217 | struct net_device *dev; | ||
2218 | void __iomem *ioaddr; | ||
2219 | int rc; | ||
2220 | |||
2221 | rc = pci_enable_device(pdev); | ||
2222 | if (rc < 0) | ||
2223 | goto out; | ||
2224 | |||
2225 | pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]); | ||
2226 | |||
2227 | pci_set_master(pdev); | ||
2228 | |||
2229 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); | ||
2230 | if (rc < 0) { | ||
2231 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
2232 | if (rc < 0) { | ||
2233 | pr_err("%s: DMA config failed\n", pci_name(pdev)); | ||
2234 | goto err_disable_0; | ||
2235 | } | ||
2236 | } | ||
2237 | |||
2238 | /* | ||
2239 | * Initialize net device. | ||
2240 | */ | ||
2241 | dev = alloc_etherdev(sizeof(struct ipg_nic_private)); | ||
2242 | if (!dev) { | ||
2243 | pr_err("%s: alloc_etherdev failed\n", pci_name(pdev)); | ||
2244 | rc = -ENOMEM; | ||
2245 | goto err_disable_0; | ||
2246 | } | ||
2247 | |||
2248 | sp = netdev_priv(dev); | ||
2249 | spin_lock_init(&sp->lock); | ||
2250 | mutex_init(&sp->mii_mutex); | ||
2251 | |||
2252 | sp->is_jumbo = IPG_IS_JUMBO; | ||
2253 | sp->rxfrag_size = IPG_RXFRAG_SIZE; | ||
2254 | sp->rxsupport_size = IPG_RXSUPPORT_SIZE; | ||
2255 | sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; | ||
2256 | |||
2257 | /* Declare IPG NIC functions for Ethernet device methods. | ||
2258 | */ | ||
2259 | dev->netdev_ops = &ipg_netdev_ops; | ||
2260 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
2261 | SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); | ||
2262 | |||
2263 | rc = pci_request_regions(pdev, DRV_NAME); | ||
2264 | if (rc) | ||
2265 | goto err_free_dev_1; | ||
2266 | |||
2267 | ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); | ||
2268 | if (!ioaddr) { | ||
2269 | pr_err("%s: cannot map MMIO\n", pci_name(pdev)); | ||
2270 | rc = -EIO; | ||
2271 | goto err_release_regions_2; | ||
2272 | } | ||
2273 | |||
2274 | /* Save the pointer to the PCI device information. */ | ||
2275 | sp->ioaddr = ioaddr; | ||
2276 | sp->pdev = pdev; | ||
2277 | sp->dev = dev; | ||
2278 | |||
2279 | INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); | ||
2280 | |||
2281 | pci_set_drvdata(pdev, dev); | ||
2282 | |||
2283 | rc = ipg_hw_init(dev); | ||
2284 | if (rc < 0) | ||
2285 | goto err_unmap_3; | ||
2286 | |||
2287 | rc = register_netdev(dev); | ||
2288 | if (rc < 0) | ||
2289 | goto err_unmap_3; | ||
2290 | |||
2291 | netdev_info(dev, "Ethernet device registered\n"); | ||
2292 | out: | ||
2293 | return rc; | ||
2294 | |||
2295 | err_unmap_3: | ||
2296 | pci_iounmap(pdev, ioaddr); | ||
2297 | err_release_regions_2: | ||
2298 | pci_release_regions(pdev); | ||
2299 | err_free_dev_1: | ||
2300 | free_netdev(dev); | ||
2301 | err_disable_0: | ||
2302 | pci_disable_device(pdev); | ||
2303 | goto out; | ||
2304 | } | ||
2305 | |||
2306 | static struct pci_driver ipg_pci_driver = { | ||
2307 | .name = IPG_DRIVER_NAME, | ||
2308 | .id_table = ipg_pci_tbl, | ||
2309 | .probe = ipg_probe, | ||
2310 | .remove = __devexit_p(ipg_remove), | ||
2311 | }; | ||
2312 | |||
2313 | static int __init ipg_init_module(void) | ||
2314 | { | ||
2315 | return pci_register_driver(&ipg_pci_driver); | ||
2316 | } | ||
2317 | |||
2318 | static void __exit ipg_exit_module(void) | ||
2319 | { | ||
2320 | pci_unregister_driver(&ipg_pci_driver); | ||
2321 | } | ||
2322 | |||
2323 | module_init(ipg_init_module); | ||
2324 | module_exit(ipg_exit_module); | ||
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h new file mode 100644 index 000000000000..6ce027355fcf --- /dev/null +++ b/drivers/net/ethernet/icplus/ipg.h | |||
@@ -0,0 +1,749 @@ | |||
1 | /* | ||
2 | * Include file for Gigabit Ethernet device driver for Network | ||
3 | * Interface Cards (NICs) utilizing the Tamarack Microelectronics | ||
4 | * Inc. IPG Gigabit or Triple Speed Ethernet Media Access | ||
5 | * Controller. | ||
6 | */ | ||
7 | #ifndef __LINUX_IPG_H | ||
8 | #define __LINUX_IPG_H | ||
9 | |||
10 | #include <linux/module.h> | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <asm/io.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/netdevice.h> | ||
20 | #include <linux/etherdevice.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/skbuff.h> | ||
23 | #include <asm/bitops.h> | ||
24 | |||
25 | /* | ||
26 | * Constants | ||
27 | */ | ||
28 | |||
29 | /* GMII based PHY IDs */ | ||
30 | #define NS 0x2000 | ||
31 | #define MARVELL 0x0141 | ||
32 | #define ICPLUS_PHY 0x243 | ||
33 | |||
34 | /* NIC Physical Layer Device MII register fields. */ | ||
35 | #define MII_PHY_SELECTOR_IEEE8023 0x0001 | ||
36 | #define MII_PHY_TECHABILITYFIELD 0x1FE0 | ||
37 | |||
38 | /* GMII_PHY_1000BASETCONTROL needs to be set to prefer master */ | ||
39 | #define GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400 | ||
40 | |||
41 | /* NIC Physical Layer Device GMII constants. */ | ||
42 | #define GMII_PREAMBLE 0xFFFFFFFF | ||
43 | #define GMII_ST 0x1 | ||
44 | #define GMII_READ 0x2 | ||
45 | #define GMII_WRITE 0x1 | ||
46 | #define GMII_TA_READ_MASK 0x1 | ||
47 | #define GMII_TA_WRITE 0x2 | ||
48 | |||
49 | /* I/O register offsets. */ | ||
50 | enum ipg_regs { | ||
51 | DMA_CTRL = 0x00, | ||
52 | RX_DMA_STATUS = 0x08, /* Unused + reserved */ | ||
53 | TFD_LIST_PTR_0 = 0x10, | ||
54 | TFD_LIST_PTR_1 = 0x14, | ||
55 | TX_DMA_BURST_THRESH = 0x18, | ||
56 | TX_DMA_URGENT_THRESH = 0x19, | ||
57 | TX_DMA_POLL_PERIOD = 0x1a, | ||
58 | RFD_LIST_PTR_0 = 0x1c, | ||
59 | RFD_LIST_PTR_1 = 0x20, | ||
60 | RX_DMA_BURST_THRESH = 0x24, | ||
61 | RX_DMA_URGENT_THRESH = 0x25, | ||
62 | RX_DMA_POLL_PERIOD = 0x26, | ||
63 | DEBUG_CTRL = 0x2c, | ||
64 | ASIC_CTRL = 0x30, | ||
65 | FIFO_CTRL = 0x38, /* Unused */ | ||
66 | FLOW_OFF_THRESH = 0x3c, | ||
67 | FLOW_ON_THRESH = 0x3e, | ||
68 | EEPROM_DATA = 0x48, | ||
69 | EEPROM_CTRL = 0x4a, | ||
70 | EXPROM_ADDR = 0x4c, /* Unused */ | ||
71 | EXPROM_DATA = 0x50, /* Unused */ | ||
72 | WAKE_EVENT = 0x51, /* Unused */ | ||
73 | COUNTDOWN = 0x54, /* Unused */ | ||
74 | INT_STATUS_ACK = 0x5a, | ||
75 | INT_ENABLE = 0x5c, | ||
76 | INT_STATUS = 0x5e, /* Unused */ | ||
77 | TX_STATUS = 0x60, | ||
78 | MAC_CTRL = 0x6c, | ||
79 | VLAN_TAG = 0x70, /* Unused */ | ||
80 | PHY_SET = 0x75, | ||
81 | PHY_CTRL = 0x76, | ||
82 | STATION_ADDRESS_0 = 0x78, | ||
83 | STATION_ADDRESS_1 = 0x7a, | ||
84 | STATION_ADDRESS_2 = 0x7c, | ||
85 | MAX_FRAME_SIZE = 0x86, | ||
86 | RECEIVE_MODE = 0x88, | ||
87 | HASHTABLE_0 = 0x8c, | ||
88 | HASHTABLE_1 = 0x90, | ||
89 | RMON_STATISTICS_MASK = 0x98, | ||
90 | STATISTICS_MASK = 0x9c, | ||
91 | RX_JUMBO_FRAMES = 0xbc, /* Unused */ | ||
92 | TCP_CHECKSUM_ERRORS = 0xc0, /* Unused */ | ||
93 | IP_CHECKSUM_ERRORS = 0xc2, /* Unused */ | ||
94 | UDP_CHECKSUM_ERRORS = 0xc4, /* Unused */ | ||
95 | TX_JUMBO_FRAMES = 0xf4 /* Unused */ | ||
96 | }; | ||
97 | |||
98 | /* Ethernet MIB statistic register offsets. */ | ||
99 | #define IPG_OCTETRCVOK 0xA8 | ||
100 | #define IPG_MCSTOCTETRCVDOK 0xAC | ||
101 | #define IPG_BCSTOCTETRCVOK 0xB0 | ||
102 | #define IPG_FRAMESRCVDOK 0xB4 | ||
103 | #define IPG_MCSTFRAMESRCVDOK 0xB8 | ||
104 | #define IPG_BCSTFRAMESRCVDOK 0xBE | ||
105 | #define IPG_MACCONTROLFRAMESRCVD 0xC6 | ||
106 | #define IPG_FRAMETOOLONGERRRORS 0xC8 | ||
107 | #define IPG_INRANGELENGTHERRORS 0xCA | ||
108 | #define IPG_FRAMECHECKSEQERRORS 0xCC | ||
109 | #define IPG_FRAMESLOSTRXERRORS 0xCE | ||
110 | #define IPG_OCTETXMTOK 0xD0 | ||
111 | #define IPG_MCSTOCTETXMTOK 0xD4 | ||
112 | #define IPG_BCSTOCTETXMTOK 0xD8 | ||
113 | #define IPG_FRAMESXMTDOK 0xDC | ||
114 | #define IPG_MCSTFRAMESXMTDOK 0xE0 | ||
115 | #define IPG_FRAMESWDEFERREDXMT 0xE4 | ||
116 | #define IPG_LATECOLLISIONS 0xE8 | ||
117 | #define IPG_MULTICOLFRAMES 0xEC | ||
118 | #define IPG_SINGLECOLFRAMES 0xF0 | ||
119 | #define IPG_BCSTFRAMESXMTDOK 0xF6 | ||
120 | #define IPG_CARRIERSENSEERRORS 0xF8 | ||
121 | #define IPG_MACCONTROLFRAMESXMTDOK 0xFA | ||
122 | #define IPG_FRAMESABORTXSCOLLS 0xFC | ||
123 | #define IPG_FRAMESWEXDEFERRAL 0xFE | ||
124 | |||
125 | /* RMON statistic register offsets. */ | ||
126 | #define IPG_ETHERSTATSCOLLISIONS 0x100 | ||
127 | #define IPG_ETHERSTATSOCTETSTRANSMIT 0x104 | ||
128 | #define IPG_ETHERSTATSPKTSTRANSMIT 0x108 | ||
129 | #define IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT 0x10C | ||
130 | #define IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT 0x110 | ||
131 | #define IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT 0x114 | ||
132 | #define IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT 0x118 | ||
133 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT 0x11C | ||
134 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 | ||
135 | #define IPG_ETHERSTATSCRCALIGNERRORS 0x124 | ||
136 | #define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 | ||
137 | #define IPG_ETHERSTATSFRAGMENTS 0x12C | ||
138 | #define IPG_ETHERSTATSJABBERS 0x130 | ||
139 | #define IPG_ETHERSTATSOCTETS 0x134 | ||
140 | #define IPG_ETHERSTATSPKTS 0x138 | ||
141 | #define IPG_ETHERSTATSPKTS64OCTESTS 0x13C | ||
142 | #define IPG_ETHERSTATSPKTS65TO127OCTESTS 0x140 | ||
143 | #define IPG_ETHERSTATSPKTS128TO255OCTESTS 0x144 | ||
144 | #define IPG_ETHERSTATSPKTS256TO511OCTESTS 0x148 | ||
145 | #define IPG_ETHERSTATSPKTS512TO1023OCTESTS 0x14C | ||
146 | #define IPG_ETHERSTATSPKTS1024TO1518OCTESTS 0x150 | ||
147 | |||
148 | /* RMON statistic register equivalents. */ | ||
149 | #define IPG_ETHERSTATSMULTICASTPKTSTRANSMIT 0xE0 | ||
150 | #define IPG_ETHERSTATSBROADCASTPKTSTRANSMIT 0xF6 | ||
151 | #define IPG_ETHERSTATSMULTICASTPKTS 0xB8 | ||
152 | #define IPG_ETHERSTATSBROADCASTPKTS 0xBE | ||
153 | #define IPG_ETHERSTATSOVERSIZEPKTS 0xC8 | ||
154 | #define IPG_ETHERSTATSDROPEVENTS 0xCE | ||
155 | |||
156 | /* Serial EEPROM offsets */ | ||
157 | #define IPG_EEPROM_CONFIGPARAM 0x00 | ||
158 | #define IPG_EEPROM_ASICCTRL 0x01 | ||
159 | #define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 | ||
160 | #define IPG_EEPROM_SUBSYSTEMID 0x03 | ||
161 | #define IPG_EEPROM_STATIONADDRESS0 0x10 | ||
162 | #define IPG_EEPROM_STATIONADDRESS1 0x11 | ||
163 | #define IPG_EEPROM_STATIONADDRESS2 0x12 | ||
164 | |||
165 | /* Register & data structure bit masks */ | ||
166 | |||
167 | /* PCI register masks. */ | ||
168 | |||
169 | /* IOBaseAddress */ | ||
170 | #define IPG_PIB_RSVD_MASK 0xFFFFFE01 | ||
171 | #define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 | ||
172 | #define IPG_PIB_IOBASEADDRIND 0x00000001 | ||
173 | |||
174 | /* MemBaseAddress */ | ||
175 | #define IPG_PMB_RSVD_MASK 0xFFFFFE07 | ||
176 | #define IPG_PMB_MEMBASEADDRIND 0x00000001 | ||
177 | #define IPG_PMB_MEMMAPTYPE 0x00000006 | ||
178 | #define IPG_PMB_MEMMAPTYPE0 0x00000002 | ||
179 | #define IPG_PMB_MEMMAPTYPE1 0x00000004 | ||
180 | #define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 | ||
181 | |||
182 | /* ConfigStatus */ | ||
183 | #define IPG_CS_RSVD_MASK 0xFFB0 | ||
184 | #define IPG_CS_CAPABILITIES 0x0010 | ||
185 | #define IPG_CS_66MHZCAPABLE 0x0020 | ||
186 | #define IPG_CS_FASTBACK2BACK 0x0080 | ||
187 | #define IPG_CS_DATAPARITYREPORTED 0x0100 | ||
188 | #define IPG_CS_DEVSELTIMING 0x0600 | ||
189 | #define IPG_CS_SIGNALEDTARGETABORT 0x0800 | ||
190 | #define IPG_CS_RECEIVEDTARGETABORT 0x1000 | ||
191 | #define IPG_CS_RECEIVEDMASTERABORT 0x2000 | ||
192 | #define IPG_CS_SIGNALEDSYSTEMERROR 0x4000 | ||
193 | #define IPG_CS_DETECTEDPARITYERROR 0x8000 | ||
194 | |||
195 | /* TFD data structure masks. */ | ||
196 | |||
197 | /* TFDList, TFC */ | ||
198 | #define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF | ||
199 | #define IPG_TFC_FRAMEID 0x000000000000FFFF | ||
200 | #define IPG_TFC_WORDALIGN 0x0000000000030000 | ||
201 | #define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 | ||
202 | #define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 | ||
203 | #define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 | ||
204 | #define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 | ||
205 | #define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 | ||
206 | #define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 | ||
207 | #define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 | ||
208 | #define IPG_TFC_TXINDICATE 0x0000000000400000 | ||
209 | #define IPG_TFC_TXDMAINDICATE 0x0000000000800000 | ||
210 | #define IPG_TFC_FRAGCOUNT 0x000000000F000000 | ||
211 | #define IPG_TFC_VLANTAGINSERT 0x0000000010000000 | ||
212 | #define IPG_TFC_TFDDONE 0x0000000080000000 | ||
213 | #define IPG_TFC_VID 0x00000FFF00000000 | ||
214 | #define IPG_TFC_CFI 0x0000100000000000 | ||
215 | #define IPG_TFC_USERPRIORITY 0x0000E00000000000 | ||
216 | |||
217 | /* TFDList, FragInfo */ | ||
218 | #define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF | ||
219 | #define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF | ||
220 | #define IPG_TFI_FRAGLEN 0xFFFF000000000000LL | ||
221 | |||
222 | /* RFD data structure masks. */ | ||
223 | |||
224 | /* RFDList, RFS */ | ||
225 | #define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF | ||
226 | #define IPG_RFS_RXFRAMELEN 0x000000000000FFFF | ||
227 | #define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 | ||
228 | #define IPG_RFS_RXRUNTFRAME 0x0000000000020000 | ||
229 | #define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 | ||
230 | #define IPG_RFS_RXFCSERROR 0x0000000000080000 | ||
231 | #define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 | ||
232 | #define IPG_RFS_RXLENGTHERROR 0x0000000000200000 | ||
233 | #define IPG_RFS_VLANDETECTED 0x0000000000400000 | ||
234 | #define IPG_RFS_TCPDETECTED 0x0000000000800000 | ||
235 | #define IPG_RFS_TCPERROR 0x0000000001000000 | ||
236 | #define IPG_RFS_UDPDETECTED 0x0000000002000000 | ||
237 | #define IPG_RFS_UDPERROR 0x0000000004000000 | ||
238 | #define IPG_RFS_IPDETECTED 0x0000000008000000 | ||
239 | #define IPG_RFS_IPERROR 0x0000000010000000 | ||
240 | #define IPG_RFS_FRAMESTART 0x0000000020000000 | ||
241 | #define IPG_RFS_FRAMEEND 0x0000000040000000 | ||
242 | #define IPG_RFS_RFDDONE 0x0000000080000000 | ||
243 | #define IPG_RFS_TCI 0x0000FFFF00000000 | ||
244 | |||
245 | /* RFDList, FragInfo */ | ||
246 | #define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF | ||
247 | #define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF | ||
248 | #define IPG_RFI_FRAGLEN 0xFFFF000000000000LL | ||
249 | |||
250 | /* I/O Register masks. */ | ||
251 | |||
252 | /* RMON Statistics Mask */ | ||
253 | #define IPG_RZ_ALL 0x0FFFFFFF | ||
254 | |||
255 | /* Statistics Mask */ | ||
256 | #define IPG_SM_ALL 0x0FFFFFFF | ||
257 | #define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 | ||
258 | #define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 | ||
259 | #define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 | ||
260 | #define IPG_SM_RXJUMBOFRAMES 0x00000008 | ||
261 | #define IPG_SM_TCPCHECKSUMERRORS 0x00000010 | ||
262 | #define IPG_SM_IPCHECKSUMERRORS 0x00000020 | ||
263 | #define IPG_SM_UDPCHECKSUMERRORS 0x00000040 | ||
264 | #define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 | ||
265 | #define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 | ||
266 | #define IPG_SM_INRANGELENGTHERRORS 0x00000200 | ||
267 | #define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 | ||
268 | #define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 | ||
269 | #define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 | ||
270 | #define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 | ||
271 | #define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 | ||
272 | #define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 | ||
273 | #define IPG_SM_LATECOLLISIONS 0x00010000 | ||
274 | #define IPG_SM_MULTICOLFRAMES 0x00020000 | ||
275 | #define IPG_SM_SINGLECOLFRAMES 0x00040000 | ||
276 | #define IPG_SM_TXJUMBOFRAMES 0x00080000 | ||
277 | #define IPG_SM_CARRIERSENSEERRORS 0x00100000 | ||
278 | #define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 | ||
279 | #define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 | ||
280 | #define IPG_SM_FRAMESWEXDEFERAL 0x00800000 | ||
281 | |||
282 | /* Countdown */ | ||
283 | #define IPG_CD_RSVD_MASK 0x0700FFFF | ||
284 | #define IPG_CD_COUNT 0x0000FFFF | ||
285 | #define IPG_CD_COUNTDOWNSPEED 0x01000000 | ||
286 | #define IPG_CD_COUNTDOWNMODE 0x02000000 | ||
287 | #define IPG_CD_COUNTINTENABLED 0x04000000 | ||
288 | |||
289 | /* TxDMABurstThresh */ | ||
290 | #define IPG_TB_RSVD_MASK 0xFF | ||
291 | |||
292 | /* TxDMAUrgentThresh */ | ||
293 | #define IPG_TU_RSVD_MASK 0xFF | ||
294 | |||
295 | /* TxDMAPollPeriod */ | ||
296 | #define IPG_TP_RSVD_MASK 0xFF | ||
297 | |||
298 | /* RxDMAUrgentThresh */ | ||
299 | #define IPG_RU_RSVD_MASK 0xFF | ||
300 | |||
301 | /* RxDMAPollPeriod */ | ||
302 | #define IPG_RP_RSVD_MASK 0xFF | ||
303 | |||
304 | /* ReceiveMode */ | ||
305 | #define IPG_RM_RSVD_MASK 0x3F | ||
306 | #define IPG_RM_RECEIVEUNICAST 0x01 | ||
307 | #define IPG_RM_RECEIVEMULTICAST 0x02 | ||
308 | #define IPG_RM_RECEIVEBROADCAST 0x04 | ||
309 | #define IPG_RM_RECEIVEALLFRAMES 0x08 | ||
310 | #define IPG_RM_RECEIVEMULTICASTHASH 0x10 | ||
311 | #define IPG_RM_RECEIVEIPMULTICAST 0x20 | ||
312 | |||
313 | /* PhySet */ | ||
314 | #define IPG_PS_MEM_LENB9B 0x01 | ||
315 | #define IPG_PS_MEM_LEN9 0x02 | ||
316 | #define IPG_PS_NON_COMPDET 0x04 | ||
317 | |||
318 | /* PhyCtrl */ | ||
319 | #define IPG_PC_RSVD_MASK 0xFF | ||
320 | #define IPG_PC_MGMTCLK_LO 0x00 | ||
321 | #define IPG_PC_MGMTCLK_HI 0x01 | ||
322 | #define IPG_PC_MGMTCLK 0x01 | ||
323 | #define IPG_PC_MGMTDATA 0x02 | ||
324 | #define IPG_PC_MGMTDIR 0x04 | ||
325 | #define IPG_PC_DUPLEX_POLARITY 0x08 | ||
326 | #define IPG_PC_DUPLEX_STATUS 0x10 | ||
327 | #define IPG_PC_LINK_POLARITY 0x20 | ||
328 | #define IPG_PC_LINK_SPEED 0xC0 | ||
329 | #define IPG_PC_LINK_SPEED_10MBPS 0x40 | ||
330 | #define IPG_PC_LINK_SPEED_100MBPS 0x80 | ||
331 | #define IPG_PC_LINK_SPEED_1000MBPS 0xC0 | ||
332 | |||
333 | /* DMACtrl */ | ||
334 | #define IPG_DC_RSVD_MASK 0xC07D9818 | ||
335 | #define IPG_DC_RX_DMA_COMPLETE 0x00000008 | ||
336 | #define IPG_DC_RX_DMA_POLL_NOW 0x00000010 | ||
337 | #define IPG_DC_TX_DMA_COMPLETE 0x00000800 | ||
338 | #define IPG_DC_TX_DMA_POLL_NOW 0x00001000 | ||
339 | #define IPG_DC_TX_DMA_IN_PROG 0x00008000 | ||
340 | #define IPG_DC_RX_EARLY_DISABLE 0x00010000 | ||
341 | #define IPG_DC_MWI_DISABLE 0x00040000 | ||
342 | #define IPG_DC_TX_WRITE_BACK_DISABLE 0x00080000 | ||
343 | #define IPG_DC_TX_BURST_LIMIT 0x00700000 | ||
344 | #define IPG_DC_TARGET_ABORT 0x40000000 | ||
345 | #define IPG_DC_MASTER_ABORT 0x80000000 | ||
346 | |||
347 | /* ASICCtrl */ | ||
348 | #define IPG_AC_RSVD_MASK 0x07FFEFF2 | ||
349 | #define IPG_AC_EXP_ROM_SIZE 0x00000002 | ||
350 | #define IPG_AC_PHY_SPEED10 0x00000010 | ||
351 | #define IPG_AC_PHY_SPEED100 0x00000020 | ||
352 | #define IPG_AC_PHY_SPEED1000 0x00000040 | ||
353 | #define IPG_AC_PHY_MEDIA 0x00000080 | ||
354 | #define IPG_AC_FORCED_CFG 0x00000700 | ||
355 | #define IPG_AC_D3RESETDISABLE 0x00000800 | ||
356 | #define IPG_AC_SPEED_UP_MODE 0x00002000 | ||
357 | #define IPG_AC_LED_MODE 0x00004000 | ||
358 | #define IPG_AC_RST_OUT_POLARITY 0x00008000 | ||
359 | #define IPG_AC_GLOBAL_RESET 0x00010000 | ||
360 | #define IPG_AC_RX_RESET 0x00020000 | ||
361 | #define IPG_AC_TX_RESET 0x00040000 | ||
362 | #define IPG_AC_DMA 0x00080000 | ||
363 | #define IPG_AC_FIFO 0x00100000 | ||
364 | #define IPG_AC_NETWORK 0x00200000 | ||
365 | #define IPG_AC_HOST 0x00400000 | ||
366 | #define IPG_AC_AUTO_INIT 0x00800000 | ||
367 | #define IPG_AC_RST_OUT 0x01000000 | ||
368 | #define IPG_AC_INT_REQUEST 0x02000000 | ||
369 | #define IPG_AC_RESET_BUSY 0x04000000 | ||
370 | #define IPG_AC_LED_SPEED 0x08000000 | ||
371 | #define IPG_AC_LED_MODE_BIT_1 0x20000000 | ||
372 | |||
373 | /* EepromCtrl */ | ||
374 | #define IPG_EC_RSVD_MASK 0x83FF | ||
375 | #define IPG_EC_EEPROM_ADDR 0x00FF | ||
376 | #define IPG_EC_EEPROM_OPCODE 0x0300 | ||
377 | #define IPG_EC_EEPROM_SUBCOMMAD 0x0000 | ||
378 | #define IPG_EC_EEPROM_WRITEOPCODE 0x0100 | ||
379 | #define IPG_EC_EEPROM_READOPCODE 0x0200 | ||
380 | #define IPG_EC_EEPROM_ERASEOPCODE 0x0300 | ||
381 | #define IPG_EC_EEPROM_BUSY 0x8000 | ||
382 | |||
383 | /* FIFOCtrl */ | ||
384 | #define IPG_FC_RSVD_MASK 0xC001 | ||
385 | #define IPG_FC_RAM_TEST_MODE 0x0001 | ||
386 | #define IPG_FC_TRANSMITTING 0x4000 | ||
387 | #define IPG_FC_RECEIVING 0x8000 | ||
388 | |||
389 | /* TxStatus */ | ||
390 | #define IPG_TS_RSVD_MASK 0xFFFF00DD | ||
391 | #define IPG_TS_TX_ERROR 0x00000001 | ||
392 | #define IPG_TS_LATE_COLLISION 0x00000004 | ||
393 | #define IPG_TS_TX_MAX_COLL 0x00000008 | ||
394 | #define IPG_TS_TX_UNDERRUN 0x00000010 | ||
395 | #define IPG_TS_TX_IND_REQD 0x00000040 | ||
396 | #define IPG_TS_TX_COMPLETE 0x00000080 | ||
397 | #define IPG_TS_TX_FRAMEID 0xFFFF0000 | ||
398 | |||
399 | /* WakeEvent */ | ||
400 | #define IPG_WE_WAKE_PKT_ENABLE 0x01 | ||
401 | #define IPG_WE_MAGIC_PKT_ENABLE 0x02 | ||
402 | #define IPG_WE_LINK_EVT_ENABLE 0x04 | ||
403 | #define IPG_WE_WAKE_POLARITY 0x08 | ||
404 | #define IPG_WE_WAKE_PKT_EVT 0x10 | ||
405 | #define IPG_WE_MAGIC_PKT_EVT 0x20 | ||
406 | #define IPG_WE_LINK_EVT 0x40 | ||
407 | #define IPG_WE_WOL_ENABLE 0x80 | ||
408 | |||
409 | /* IntEnable */ | ||
410 | #define IPG_IE_RSVD_MASK 0x1FFE | ||
411 | #define IPG_IE_HOST_ERROR 0x0002 | ||
412 | #define IPG_IE_TX_COMPLETE 0x0004 | ||
413 | #define IPG_IE_MAC_CTRL_FRAME 0x0008 | ||
414 | #define IPG_IE_RX_COMPLETE 0x0010 | ||
415 | #define IPG_IE_RX_EARLY 0x0020 | ||
416 | #define IPG_IE_INT_REQUESTED 0x0040 | ||
417 | #define IPG_IE_UPDATE_STATS 0x0080 | ||
418 | #define IPG_IE_LINK_EVENT 0x0100 | ||
419 | #define IPG_IE_TX_DMA_COMPLETE 0x0200 | ||
420 | #define IPG_IE_RX_DMA_COMPLETE 0x0400 | ||
421 | #define IPG_IE_RFD_LIST_END 0x0800 | ||
422 | #define IPG_IE_RX_DMA_PRIORITY 0x1000 | ||
423 | |||
424 | /* IntStatus */ | ||
425 | #define IPG_IS_RSVD_MASK 0x1FFF | ||
426 | #define IPG_IS_INTERRUPT_STATUS 0x0001 | ||
427 | #define IPG_IS_HOST_ERROR 0x0002 | ||
428 | #define IPG_IS_TX_COMPLETE 0x0004 | ||
429 | #define IPG_IS_MAC_CTRL_FRAME 0x0008 | ||
430 | #define IPG_IS_RX_COMPLETE 0x0010 | ||
431 | #define IPG_IS_RX_EARLY 0x0020 | ||
432 | #define IPG_IS_INT_REQUESTED 0x0040 | ||
433 | #define IPG_IS_UPDATE_STATS 0x0080 | ||
434 | #define IPG_IS_LINK_EVENT 0x0100 | ||
435 | #define IPG_IS_TX_DMA_COMPLETE 0x0200 | ||
436 | #define IPG_IS_RX_DMA_COMPLETE 0x0400 | ||
437 | #define IPG_IS_RFD_LIST_END 0x0800 | ||
438 | #define IPG_IS_RX_DMA_PRIORITY 0x1000 | ||
439 | |||
440 | /* MACCtrl */ | ||
441 | #define IPG_MC_RSVD_MASK 0x7FE33FA3 | ||
442 | #define IPG_MC_IFS_SELECT 0x00000003 | ||
443 | #define IPG_MC_IFS_4352BIT 0x00000003 | ||
444 | #define IPG_MC_IFS_1792BIT 0x00000002 | ||
445 | #define IPG_MC_IFS_1024BIT 0x00000001 | ||
446 | #define IPG_MC_IFS_96BIT 0x00000000 | ||
447 | #define IPG_MC_DUPLEX_SELECT 0x00000020 | ||
448 | #define IPG_MC_DUPLEX_SELECT_FD 0x00000020 | ||
449 | #define IPG_MC_DUPLEX_SELECT_HD 0x00000000 | ||
450 | #define IPG_MC_TX_FLOW_CONTROL_ENABLE 0x00000080 | ||
451 | #define IPG_MC_RX_FLOW_CONTROL_ENABLE 0x00000100 | ||
452 | #define IPG_MC_RCV_FCS 0x00000200 | ||
453 | #define IPG_MC_FIFO_LOOPBACK 0x00000400 | ||
454 | #define IPG_MC_MAC_LOOPBACK 0x00000800 | ||
455 | #define IPG_MC_AUTO_VLAN_TAGGING 0x00001000 | ||
456 | #define IPG_MC_AUTO_VLAN_UNTAGGING 0x00002000 | ||
457 | #define IPG_MC_COLLISION_DETECT 0x00010000 | ||
458 | #define IPG_MC_CARRIER_SENSE 0x00020000 | ||
459 | #define IPG_MC_STATISTICS_ENABLE 0x00200000 | ||
460 | #define IPG_MC_STATISTICS_DISABLE 0x00400000 | ||
461 | #define IPG_MC_STATISTICS_ENABLED 0x00800000 | ||
462 | #define IPG_MC_TX_ENABLE 0x01000000 | ||
463 | #define IPG_MC_TX_DISABLE 0x02000000 | ||
464 | #define IPG_MC_TX_ENABLED 0x04000000 | ||
465 | #define IPG_MC_RX_ENABLE 0x08000000 | ||
466 | #define IPG_MC_RX_DISABLE 0x10000000 | ||
467 | #define IPG_MC_RX_ENABLED 0x20000000 | ||
468 | #define IPG_MC_PAUSED 0x40000000 | ||
469 | |||
470 | /* | ||
471 | * Tune | ||
472 | */ | ||
473 | |||
474 | /* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */ | ||
475 | #define IPG_APPEND_FCS_ON_TX 1 | ||
476 | |||
477 | /* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */ | ||
478 | #define IPG_STRIP_FCS_ON_RX 1 | ||
479 | |||
480 | /* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with | ||
481 | * Ethernet errors. | ||
482 | */ | ||
483 | #define IPG_DROP_ON_RX_ETH_ERRORS 1 | ||
484 | |||
485 | /* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually | ||
486 | * (via TFC). | ||
487 | */ | ||
488 | #define IPG_INSERT_MANUAL_VLAN_TAG 0 | ||
489 | |||
490 | /* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */ | ||
491 | #define IPG_ADD_IPCHECKSUM_ON_TX 0 | ||
492 | |||
493 | /* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX. | ||
494 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
495 | */ | ||
496 | #define IPG_ADD_TCPCHECKSUM_ON_TX 0 | ||
497 | |||
498 | /* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX. | ||
499 | * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER. | ||
500 | */ | ||
501 | #define IPG_ADD_UDPCHECKSUM_ON_TX 0 | ||
502 | |||
503 | /* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx | ||
504 | * constants as desired. | ||
505 | */ | ||
506 | #define IPG_MANUAL_VLAN_VID 0xABC | ||
507 | #define IPG_MANUAL_VLAN_CFI 0x1 | ||
508 | #define IPG_MANUAL_VLAN_USERPRIORITY 0x5 | ||
509 | |||
510 | #define IPG_IO_REG_RANGE 0xFF | ||
511 | #define IPG_MEM_REG_RANGE 0x154 | ||
512 | #define IPG_DRIVER_NAME "Sundance Technology IPG Triple-Speed Ethernet" | ||
513 | #define IPG_NIC_PHY_ADDRESS 0x01 | ||
514 | #define IPG_DMALIST_ALIGN_PAD 0x07 | ||
515 | #define IPG_MULTICAST_HASHTABLE_SIZE 0x40 | ||
516 | |||
517 | /* Number of milliseconds to wait after issuing a software reset. | ||
518 | * Use IPG_AC_RESETWAIT >= 0x05 to allow for proper 10Mbps operation. | ||
519 | */ | ||
520 | #define IPG_AC_RESETWAIT 0x05 | ||
521 | |||
522 | /* Number of IPG_AC_RESETWAIT time periods before declaring a timeout. */ | ||
523 | #define IPG_AC_RESET_TIMEOUT 0x0A | ||
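These two constants size a poll-and-timeout loop around the self-clearing reset bits in ASIC_CTRL. The sketch below shows the kind of loop they imply; it is not the driver's actual ipg_reset() implementation, and the function name is hypothetical (only the ipg_r32()/mdelay() helpers and the IPG_AC_* constants are assumed).

/* Sketch only: wait for the ASIC to clear its reset-busy flag. */
static int example_wait_for_reset(void __iomem *ioaddr)
{
	unsigned int i;

	for (i = 0; i < IPG_AC_RESET_TIMEOUT; i++) {
		mdelay(IPG_AC_RESETWAIT);
		if (!(ipg_r32(ASIC_CTRL) & IPG_AC_RESET_BUSY))
			return 0;	/* reset completed */
	}
	return -ETIMEDOUT;		/* reset never completed */
}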
524 | |||
525 | /* Minimum number of nanoseconds used to toggle MDC clock during | ||
526 | * MII/GMII register access. | ||
527 | */ | ||
528 | #define IPG_PC_PHYCTRLWAIT_NS 200 | ||
529 | |||
530 | #define IPG_TFDLIST_LENGTH 0x100 | ||
531 | |||
532 | /* Number of frames between TxDMAComplete interrupts. | ||
533 | * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH | ||
534 | */ | ||
535 | #define IPG_FRAMESBETWEENTXDMACOMPLETES 0x1 | ||
536 | |||
537 | #define IPG_RFDLIST_LENGTH 0x100 | ||
538 | |||
539 | /* Maximum number of RFDs to process per interrupt. | ||
540 | * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH | ||
541 | */ | ||
542 | #define IPG_MAXRFDPROCESS_COUNT 0x80 | ||
543 | |||
544 | /* Minimum margin between the last freed RFD and the current RFD. | ||
545 | * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH | ||
546 | */ | ||
547 | #define IPG_MINUSEDRFDSTOFREE 0x80 | ||
548 | |||
549 | /* Specify the maximum jumbo frame size in units of 0x600 bytes | ||
550 | * (the RX buffer size that one RFD can carry); 8 units give | ||
551 | * 8 * 0x600 = 12288 bytes, i.e. roughly 12K. */ | ||
552 | #define MAX_JUMBOSIZE 0x8 /* max is 12K */ | ||
553 | |||
554 | /* Key register values loaded at driver start up. */ | ||
555 | |||
556 | /* TXDMAPollPeriod is specified in 320ns increments. | ||
557 | * | ||
558 | * Value Time | ||
559 | * --------------------- | ||
560 | * 0x00-0x01 320ns | ||
561 | * 0x03 ~1us | ||
562 | * 0x1F ~10us | ||
563 | * 0xFF ~82us | ||
564 | */ | ||
565 | #define IPG_TXDMAPOLLPERIOD_VALUE 0x26 | ||
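As a rough aside on the default chosen above (not from the driver): 0x26 is 38 decimal and each unit is 320 ns, so the transmit DMA poll period works out to about 38 * 320 ns = 12160 ns, i.e. roughly 12 us. Expressed as hypothetical helper macros:

/* Illustration only; these macros do not exist in the driver. */
#define IPG_TXDMAPOLL_UNIT_NS		320
#define IPG_TXDMAPOLL_DEFAULT_NS	(0x26 * IPG_TXDMAPOLL_UNIT_NS)	/* 12160 ns */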
566 | |||
567 | /* TxDMAUrgentThresh specifies the minimum amount of | ||
568 | * data in the transmit FIFO before asserting an | ||
569 | * urgent transmit DMA request. | ||
570 | * | ||
571 | * Value Min TxFIFO occupied space before urgent TX request | ||
572 | * --------------------------------------------------------------- | ||
573 | * 0x00-0x04 128 bytes (1024 bits) | ||
574 | * 0x27 1248 bytes (~10000 bits) | ||
575 | * 0x30 1536 bytes (12288 bits) | ||
576 | * 0xFF 8192 bytes (65536 bits) | ||
577 | */ | ||
578 | #define IPG_TXDMAURGENTTHRESH_VALUE 0x04 | ||
579 | |||
580 | /* TxDMABurstThresh specifies the minimum amount of | ||
581 | * free space in the transmit FIFO before asserting an | ||
582 | * transmit DMA request. | ||
583 | * | ||
584 | * Value Min TxFIFO free space before TX request | ||
585 | * ---------------------------------------------------- | ||
586 | * 0x00-0x08 256 bytes | ||
587 | * 0x30 1536 bytes | ||
588 | * 0xFF 8192 bytes | ||
589 | */ | ||
590 | #define IPG_TXDMABURSTTHRESH_VALUE 0x30 | ||
591 | |||
592 | /* RXDMAPollPeriod is specified in 320ns increments. | ||
593 | * | ||
594 | * Value Time | ||
595 | * --------------------- | ||
596 | * 0x00-0x01 320ns | ||
597 | * 0x03 ~1us | ||
598 | * 0x1F ~10us | ||
599 | * 0xFF ~82us | ||
600 | */ | ||
601 | #define IPG_RXDMAPOLLPERIOD_VALUE 0x01 | ||
602 | |||
603 | /* RxDMAUrgentThresh specifies the minimum amount of | ||
604 | * free space within the receive FIFO before asserting | ||
605 | * an urgent receive DMA request. | ||
606 | * | ||
607 | * Value Min RxFIFO free space before urgent RX request | ||
608 | * --------------------------------------------------------------- | ||
609 | * 0x00-0x04 128 bytes (1024 bits) | ||
610 | * 0x27 1248 bytes (~10000 bits) | ||
611 | * 0x30 1536 bytes (12288 bits) | ||
612 | * 0xFF 8192 bytes (65536 bits) | ||
613 | */ | ||
614 | #define IPG_RXDMAURGENTTHRESH_VALUE 0x30 | ||
615 | |||
616 | /* RxDMABurstThresh specifies the minimum amount of | ||
617 | * occupied space within the receive FIFO before asserting | ||
618 | * a receive DMA request. | ||
619 | * | ||
620 | * Value Min RxFIFO occupied space before RX request | ||
621 | * ---------------------------------------------------- | ||
622 | * 0x00-0x08 256 bytes | ||
623 | * 0x30 1536 bytes | ||
624 | * 0xFF 8192 bytes | ||
625 | */ | ||
626 | #define IPG_RXDMABURSTTHRESH_VALUE 0x30 | ||
627 | |||
628 | /* FlowOnThresh specifies the maximum amount of occupied | ||
629 | * space in the receive FIFO before a PAUSE frame with | ||
630 | * maximum pause time is transmitted. | ||
631 | * | ||
632 | * Value Max RxFIFO occupied space before PAUSE | ||
633 | * --------------------------------------------------- | ||
634 | * 0x0000 0 bytes | ||
635 | * 0x0740 29,696 bytes | ||
636 | * 0x07FF 32,752 bytes | ||
637 | */ | ||
638 | #define IPG_FLOWONTHRESH_VALUE 0x0740 | ||
639 | |||
640 | /* FlowOffThresh specifies the minimum amount of occupied | ||
641 | * space in the receive FIFO before a PAUSE frame with | ||
642 | * zero pause time is transmitted. | ||
643 | * | ||
644 | * Value Max RxFIFO occupied space before PAUSE | ||
645 | * --------------------------------------------------- | ||
646 | * 0x0000 0 bytes | ||
647 | * 0x00BF 3056 bytes | ||
648 | * 0x07FF 32,752 bytes | ||
649 | */ | ||
650 | #define IPG_FLOWOFFTHRESH_VALUE 0x00BF | ||
651 | |||
652 | /* | ||
653 | * Miscellaneous macros. | ||
654 | */ | ||
655 | |||
656 | /* Macros for printing debug statements. */ | ||
657 | #ifdef IPG_DEBUG | ||
658 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
659 | do { \ | ||
660 | if (0) \ | ||
661 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
662 | } while (0) | ||
663 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
664 | printk(KERN_DEBUG "IPG: " fmt, ##args) | ||
665 | # define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) | ||
666 | # define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) | ||
667 | #else | ||
668 | # define IPG_DEBUG_MSG(fmt, args...) \ | ||
669 | do { \ | ||
670 | if (0) \ | ||
671 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
672 | } while (0) | ||
673 | # define IPG_DDEBUG_MSG(fmt, args...) \ | ||
674 | do { \ | ||
675 | if (0) \ | ||
676 | printk(KERN_DEBUG "IPG: " fmt, ##args); \ | ||
677 | } while (0) | ||
678 | # define IPG_DUMPRFDLIST(args) | ||
679 | # define IPG_DUMPTFDLIST(args) | ||
680 | #endif | ||
681 | |||
682 | /* | ||
683 | * End miscellaneous macros. | ||
684 | */ | ||
685 | |||
686 | /* Transmit Frame Descriptor. The IPG supports 15 fragments, | ||
687 | * however Linux requires only a single fragment. Note, each | ||
688 | * TFD field is 64 bits wide. | ||
689 | */ | ||
690 | struct ipg_tx { | ||
691 | __le64 next_desc; | ||
692 | __le64 tfc; | ||
693 | __le64 frag_info; | ||
694 | }; | ||
695 | |||
696 | /* Receive Frame Descriptor. Note, each RFD field is 64 bits wide. | ||
697 | */ | ||
698 | struct ipg_rx { | ||
699 | __le64 next_desc; | ||
700 | __le64 rfs; | ||
701 | __le64 frag_info; | ||
702 | }; | ||
703 | |||
704 | struct ipg_jumbo { | ||
705 | int found_start; | ||
706 | int current_size; | ||
707 | struct sk_buff *skb; | ||
708 | }; | ||
709 | |||
710 | /* Structure of IPG NIC specific data. */ | ||
711 | struct ipg_nic_private { | ||
712 | void __iomem *ioaddr; | ||
713 | struct ipg_tx *txd; | ||
714 | struct ipg_rx *rxd; | ||
715 | dma_addr_t txd_map; | ||
716 | dma_addr_t rxd_map; | ||
717 | struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH]; | ||
718 | struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH]; | ||
719 | unsigned int tx_current; | ||
720 | unsigned int tx_dirty; | ||
721 | unsigned int rx_current; | ||
722 | unsigned int rx_dirty; | ||
723 | bool is_jumbo; | ||
724 | struct ipg_jumbo jumbo; | ||
725 | unsigned long rxfrag_size; | ||
726 | unsigned long rxsupport_size; | ||
727 | unsigned long max_rxframe_size; | ||
728 | unsigned int rx_buf_sz; | ||
729 | struct pci_dev *pdev; | ||
730 | struct net_device *dev; | ||
731 | struct net_device_stats stats; | ||
732 | spinlock_t lock; | ||
733 | int tenmbpsmode; | ||
734 | |||
735 | u16 led_mode; | ||
736 | u16 station_addr[3]; /* Station Address in EEPROM Reg 0x10..0x12 */ | ||
737 | |||
738 | struct mutex mii_mutex; | ||
739 | struct mii_if_info mii_if; | ||
740 | int reset_current_tfd; | ||
741 | #ifdef IPG_DEBUG | ||
742 | int RFDlistendCount; | ||
743 | int RFDListCheckedCount; | ||
744 | int EmptyRFDListCount; | ||
745 | #endif | ||
746 | struct delayed_work task; | ||
747 | }; | ||
748 | |||
749 | #endif /* __LINUX_IPG_H */ | ||