author	Ralf Baechle <ralf@linux-mips.org>	2006-09-22 20:15:30 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-22 20:15:30 -0400
commit	ff2a2e0172fd11b1bfb9b45ee1b996d3f10669af (patch)
tree	aa73a797810fb389c27af29b9eea2648bc979ee4 /drivers/net
parent	06ede91017d015a03cf8c1c87b3ff668f9a846e0 (diff)
[NET] GT96100: Delete bitrotting ethernet driver
Code for the EV96100 evaluation board hasn't compiled since at least
November 15, 2003, so it is being deleted as of 2.6.18 due to lack of a
user base.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/Kconfig      |    6 -
-rw-r--r--	drivers/net/Makefile     |    1 -
-rw-r--r--	drivers/net/gt96100eth.c | 1568 ------------------------------
-rw-r--r--	drivers/net/gt96100eth.h |  346 --------
4 files changed, 0 insertions(+), 1921 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index c49a1cc76c2a..63154774c257 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -438,12 +438,6 @@ config MIPS_JAZZ_SONIC
 	  This is the driver for the onboard card of MIPS Magnum 4000,
 	  Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
 
-config MIPS_GT96100ETH
-	bool "MIPS GT96100 Ethernet support"
-	depends on NET_ETHERNET && MIPS_GT96100
-	help
-	  Say Y here to support the Ethernet subsystem on your GT96100 card.
-
 config MIPS_AU1X00_ENET
 	bool "MIPS AU1000 Ethernet support"
 	depends on NET_ETHERNET && SOC_AU1X00
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0f329e56345e..f270bc49e571 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -170,7 +170,6 @@ obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
-obj-$(CONFIG_MIPS_GT96100ETH) += gt96100eth.o
 obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
 obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
 obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
diff --git a/drivers/net/gt96100eth.c b/drivers/net/gt96100eth.c
deleted file mode 100644
index 785c5bf4c6ea..000000000000
--- a/drivers/net/gt96100eth.c
+++ /dev/null
@@ -1,1568 +0,0 @@
/*
 * Copyright 2000, 2001 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         stevel@mvista.com or source@mvista.com
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Ethernet driver for the MIPS GT96100 Advanced Communication Controller.
 *
 * Revision history
 *
 * 11.11.2001  Moved to 2.4.14, ppopov@mvista.com.  Modified driver to add
 *             proper gt96100A support.
 * 12.05.2001  Moved eth port 0 to irq 3 (mapped to GT_SERINT0 on EV96100A)
 *             in order for both ports to work.  Also cleaned up boot
 *             option support (mac address string parsing), fleshed out
 *             gt96100_cleanup_module(), and other general code cleanups
 *             <stevel@mvista.com>.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/io.h>

#define DESC_BE 1
#define DESC_DATA_BE 1

#define GT96100_DEBUG 2

#include "gt96100eth.h"

// prototypes
static void* dmaalloc(size_t size, dma_addr_t *dma_handle);
static void dmafree(size_t size, void *vaddr);
static void gt96100_delay(int msec);
static int gt96100_add_hash_entry(struct net_device *dev,
				  unsigned char* addr);
static void read_mib_counters(struct gt96100_private *gp);
static int read_MII(int phy_addr, u32 reg);
static int write_MII(int phy_addr, u32 reg, u16 data);
static int gt96100_init_module(void);
static void gt96100_cleanup_module(void);
static void dump_MII(int dbg_lvl, struct net_device *dev);
static void dump_tx_desc(int dbg_lvl, struct net_device *dev, int i);
static void dump_rx_desc(int dbg_lvl, struct net_device *dev, int i);
static void dump_skb(int dbg_lvl, struct net_device *dev,
		     struct sk_buff *skb);
static void update_stats(struct gt96100_private *gp);
static void abort(struct net_device *dev, u32 abort_bits);
static void hard_stop(struct net_device *dev);
static void enable_ether_irq(struct net_device *dev);
static void disable_ether_irq(struct net_device *dev);
static int gt96100_probe1(struct pci_dev *pci, int port_num);
static void reset_tx(struct net_device *dev);
static void reset_rx(struct net_device *dev);
static int gt96100_check_tx_consistent(struct gt96100_private *gp);
static int gt96100_init(struct net_device *dev);
static int gt96100_open(struct net_device *dev);
static int gt96100_close(struct net_device *dev);
static int gt96100_tx(struct sk_buff *skb, struct net_device *dev);
static int gt96100_rx(struct net_device *dev, u32 status);
static irqreturn_t gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs);
static void gt96100_tx_timeout(struct net_device *dev);
static void gt96100_set_rx_mode(struct net_device *dev);
static struct net_device_stats* gt96100_get_stats(struct net_device *dev);

extern char * __init prom_getcmdline(void);

static int max_interrupt_work = 32;

#define nibswap(x) ((((x) >> 4) & 0x0f) | (((x) << 4) & 0xf0))

#define RUN_AT(x) (jiffies + (x))

// For reading/writing 32-bit words and half-words from/to DMA memory
#ifdef DESC_BE
#define cpu_to_dma32 cpu_to_be32
#define dma32_to_cpu be32_to_cpu
#define cpu_to_dma16 cpu_to_be16
#define dma16_to_cpu be16_to_cpu
#else
#define cpu_to_dma32 cpu_to_le32
#define dma32_to_cpu le32_to_cpu
#define cpu_to_dma16 cpu_to_le16
#define dma16_to_cpu le16_to_cpu
#endif

static char mac0[18] = "00.02.03.04.05.06";
static char mac1[18] = "00.01.02.03.04.05";
module_param_string(mac0, mac0, 18, 0);
module_param_string(mac1, mac1, 18, 0);
MODULE_PARM_DESC(mac0, "MAC address for GT96100 ethernet port 0");
MODULE_PARM_DESC(mac1, "MAC address for GT96100 ethernet port 1");

/*
 * Info for the GT96100 ethernet controller's ports.
 */
static struct gt96100_if_t {
	struct net_device *dev;
	unsigned int  iobase;	// IO Base address of this port
	int           irq;	// IRQ number of this port
	char         *mac_str;
} gt96100_iflist[NUM_INTERFACES] = {
	{
		NULL,
		GT96100_ETH0_BASE, GT96100_ETHER0_IRQ,
		mac0
	},
	{
		NULL,
		GT96100_ETH1_BASE, GT96100_ETHER1_IRQ,
		mac1
	}
};

static inline const char*
chip_name(int chip_rev)
{
	switch (chip_rev) {
	case REV_GT96100:
		return "GT96100";
	case REV_GT96100A_1:
	case REV_GT96100A:
		return "GT96100A";
	default:
		return "Unknown GT96100";
	}
}

/*
  DMA memory allocation, derived from pci_alloc_consistent.
*/
static void * dmaalloc(size_t size, dma_addr_t *dma_handle)
{
	void *ret;

	ret = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, get_order(size));

	if (ret != NULL) {
		dma_cache_inv((unsigned long)ret, size);
		if (dma_handle != NULL)
			*dma_handle = virt_to_phys(ret);

		/* bump virtual address up to non-cached area */
		ret = (void*)KSEG1ADDR(ret);
	}

	return ret;
}

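/*
 * Note: dmaalloc() hands back a KSEG1 (uncached) alias of the pages so
 * that descriptor accesses bypass the CPU cache, while *dma_handle gets
 * the physical address that the GT96100 DMA engine needs.  dmafree()
 * must therefore convert back to the cached KSEG0 alias before calling
 * free_pages(), which is what it does below.
 */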
static void dmafree(size_t size, void *vaddr)
{
	vaddr = (void*)KSEG0ADDR(vaddr);
	free_pages((unsigned long)vaddr, get_order(size));
}

static void gt96100_delay(int ms)
{
	if (in_interrupt())
		return;
	else
		msleep_interruptible(ms);
}

static int
parse_mac_addr(struct net_device *dev, char* macstr)
{
	int i, j;
	unsigned char result, value;

	for (i=0; i<6; i++) {
		result = 0;
		if (i != 5 && *(macstr+2) != '.') {
			err(__FILE__ ": invalid mac address format: %d %c\n",
			    i, *(macstr+2));
			return -EINVAL;
		}

		for (j=0; j<2; j++) {
			if (isxdigit(*macstr) &&
			    (value = isdigit(*macstr) ? *macstr-'0' :
			     toupper(*macstr)-'A'+10) < 16) {
				result = result*16 + value;
				macstr++;
			} else {
				err(__FILE__ ": invalid mac address "
				    "character: %c\n", *macstr);
				return -EINVAL;
			}
		}

		macstr++; // step over '.'
		dev->dev_addr[i] = result;
	}

	return 0;
}

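/*
 * The MAC strings parsed above are dot-separated hex octets, matching
 * the mac0/mac1 defaults.  An illustrative boot line (handled by
 * gt96100_setup() at the bottom of this file):
 *
 *     gt96100eth=mac0:00.02.03.04.05.06,mac1:00.01.02.03.04.05
 */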

static int
read_MII(int phy_addr, u32 reg)
{
	int timedout = 20;
	u32 smir = smirOpCode | (phy_addr << smirPhyAdBit) |
		   (reg << smirRegAdBit);

	// wait for last operation to complete
	while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
			return -ENODEV;
		}
	}

	GT96100_WRITE(GT96100_ETH_SMI_REG, smir);

	timedout = 20;
	// wait for read to complete
	while (!((smir = GT96100_READ(GT96100_ETH_SMI_REG)) & smirReadValid)) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: timeout!!\n", __FUNCTION__);
			return -ENODEV;
		}
	}

	return (int)(smir & smirDataMask);
}

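/*
 * An SMI read, as implemented above, is a three-step sequence: wait for
 * smirBusy to clear, write the opcode/PHY-address/register word to the
 * SMI register, then poll until smirReadValid is set and the 16 data
 * bits can be masked out with smirDataMask.  write_MII() below is the
 * same sequence minus the final polling step.
 */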
static void
dump_tx_desc(int dbg_lvl, struct net_device *dev, int i)
{
	struct gt96100_private *gp = netdev_priv(dev);
	gt96100_td_t *td = &gp->tx_ring[i];

	dbg(dbg_lvl, "Tx descriptor at 0x%08lx:\n", virt_to_phys(td));
	dbg(dbg_lvl,
	    "    cmdstat=%04x, byte_cnt=%04x, buff_ptr=%04x, next=%04x\n",
	    dma32_to_cpu(td->cmdstat),
	    dma16_to_cpu(td->byte_cnt),
	    dma32_to_cpu(td->buff_ptr),
	    dma32_to_cpu(td->next));
}

static void
dump_rx_desc(int dbg_lvl, struct net_device *dev, int i)
{
	struct gt96100_private *gp = netdev_priv(dev);
	gt96100_rd_t *rd = &gp->rx_ring[i];

	dbg(dbg_lvl, "Rx descriptor at 0x%08lx:\n", virt_to_phys(rd));
	dbg(dbg_lvl, "    cmdstat=%04x, buff_sz=%04x, byte_cnt=%04x, "
	    "buff_ptr=%04x, next=%04x\n",
	    dma32_to_cpu(rd->cmdstat),
	    dma16_to_cpu(rd->buff_sz),
	    dma16_to_cpu(rd->byte_cnt),
	    dma32_to_cpu(rd->buff_ptr),
	    dma32_to_cpu(rd->next));
}

static int
write_MII(int phy_addr, u32 reg, u16 data)
{
	int timedout = 20;
	u32 smir = (phy_addr << smirPhyAdBit) |
		   (reg << smirRegAdBit) | data;

	// wait for last operation to complete
	while (GT96100_READ(GT96100_ETH_SMI_REG) & smirBusy) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			printk(KERN_ERR "%s: busy timeout!!\n", __FUNCTION__);
			return -1;
		}
	}

	GT96100_WRITE(GT96100_ETH_SMI_REG, smir);
	return 0;
}

static void
dump_MII(int dbg_lvl, struct net_device *dev)
{
	int i, val;
	struct gt96100_private *gp = netdev_priv(dev);

	if (dbg_lvl <= GT96100_DEBUG) {
		for (i=0; i<7; i++) {
			if ((val = read_MII(gp->phy_addr, i)) >= 0)
				printk("MII Reg %d=%x\n", i, val);
		}
		for (i=16; i<21; i++) {
			if ((val = read_MII(gp->phy_addr, i)) >= 0)
				printk("MII Reg %d=%x\n", i, val);
		}
	}
}

static void
dump_hw_addr(int dbg_lvl, struct net_device *dev, const char* pfx,
	     const char* func, unsigned char* addr_str)
{
	int i;
	char buf[100], octet[5];

	if (dbg_lvl <= GT96100_DEBUG) {
		sprintf(buf, pfx, func);
		for (i = 0; i < 6; i++) {
			sprintf(octet, "%2.2x%s",
				addr_str[i], i<5 ? ":" : "\n");
			strcat(buf, octet);
		}
		info("%s", buf);
	}
}


static void
dump_skb(int dbg_lvl, struct net_device *dev, struct sk_buff *skb)
{
	int i;
	unsigned char* skbdata;

	if (dbg_lvl <= GT96100_DEBUG) {
		dbg(dbg_lvl, "%s: skb=%p, skb->data=%p, skb->len=%d\n",
		    __FUNCTION__, skb, skb->data, skb->len);

		skbdata = (unsigned char*)KSEG1ADDR(skb->data);

		for (i=0; i<skb->len; i++) {
			if (!(i % 16))
				printk(KERN_DEBUG "\n   %3.3x: %2.2x,",
				       i, skbdata[i]);
			else
				printk(KERN_DEBUG "%2.2x,", skbdata[i]);
		}
		printk(KERN_DEBUG "\n");
	}
}


static int
gt96100_add_hash_entry(struct net_device *dev, unsigned char* addr)
{
	struct gt96100_private *gp = netdev_priv(dev);
	//u16 hashResult, stmp;
	//unsigned char ctmp, hash_ea[6];
	u32 tblEntry1, tblEntry0, *tblEntryAddr;
	int i;

	tblEntry1 = hteValid | hteRD;
	tblEntry1 |= (u32)addr[5] << 3;
	tblEntry1 |= (u32)addr[4] << 11;
	tblEntry1 |= (u32)addr[3] << 19;
	tblEntry1 |= ((u32)addr[2] & 0x1f) << 27;
	dbg(3, "%s: tblEntry1=%x\n", __FUNCTION__, tblEntry1);
	tblEntry0 = ((u32)addr[2] >> 5) & 0x07;
	tblEntry0 |= (u32)addr[1] << 3;
	tblEntry0 |= (u32)addr[0] << 11;
	dbg(3, "%s: tblEntry0=%x\n", __FUNCTION__, tblEntry0);

#if 0

	for (i=0; i<6; i++) {
		// nibble swap
		ctmp = nibswap(addr[i]);
		// invert every nibble
		hash_ea[i] = ((ctmp&1)<<3) | ((ctmp&8)>>3) |
			((ctmp&2)<<1) | ((ctmp&4)>>1);
		hash_ea[i] |= ((ctmp&0x10)<<3) | ((ctmp&0x80)>>3) |
			((ctmp&0x20)<<1) | ((ctmp&0x40)>>1);
	}

	dump_hw_addr(3, dev, "%s: nib swap/invt addr=", __FUNCTION__, hash_ea);

	if (gp->hash_mode == 0) {
		hashResult = ((u16)hash_ea[0] & 0xfc) << 7;
		stmp = ((u16)hash_ea[0] & 0x03) |
			(((u16)hash_ea[1] & 0x7f) << 2);
		stmp ^= (((u16)hash_ea[1] >> 7) & 0x01) |
			((u16)hash_ea[2] << 1);
		stmp ^= (u16)hash_ea[3] | (((u16)hash_ea[4] & 1) << 8);
		hashResult |= stmp;
	} else {
		return -1; // don't support hash mode 1
	}

	dbg(3, "%s: hashResult=%x\n", __FUNCTION__, hashResult);

	tblEntryAddr =
		(u32 *)(&gp->hash_table[((u32)hashResult & 0x7ff) << 3]);

	dbg(3, "%s: tblEntryAddr=%p\n", __FUNCTION__, tblEntryAddr);

	for (i=0; i<HASH_HOP_NUMBER; i++) {
		if ((*tblEntryAddr & hteValid) &&
		    !(*tblEntryAddr & hteSkip)) {
			// This entry is already occupied, go to next entry
			tblEntryAddr += 2;
			dbg(3, "%s: skipping to %p\n", __FUNCTION__,
			    tblEntryAddr);
		} else {
			memset(tblEntryAddr, 0, 8);
			tblEntryAddr[1] = cpu_to_dma32(tblEntry1);
			tblEntryAddr[0] = cpu_to_dma32(tblEntry0);
			break;
		}
	}

	if (i >= HASH_HOP_NUMBER) {
		err("%s: expired!\n", __FUNCTION__);
		return -1; // Couldn't find an unused entry
	}

#else

	tblEntryAddr = (u32 *)gp->hash_table;
	for (i=0; i<RX_HASH_TABLE_SIZE/4; i+=2) {
		tblEntryAddr[i+1] = cpu_to_dma32(tblEntry1);
		tblEntryAddr[i] = cpu_to_dma32(tblEntry0);
	}

#endif

	return 0;
}

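/*
 * Note that the compiled-in (#else) branch above does not actually
 * hash: it replicates the single table entry across the whole 16KB
 * table, so whichever bucket the hardware computes will contain our
 * address.  The real hash/hop-walking logic stays under #if 0 until
 * the hash table is known to work (see the FIXME in
 * gt96100_set_rx_mode).
 */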

static void
read_mib_counters(struct gt96100_private *gp)
{
	u32* mib_regs = (u32*)&gp->mib;
	int i;

	for (i=0; i<sizeof(mib_counters_t)/sizeof(u32); i++)
		mib_regs[i] = GT96100ETH_READ(gp, GT96100_ETH_MIB_COUNT_BASE +
					      i*sizeof(u32));
}


static void
update_stats(struct gt96100_private *gp)
{
	mib_counters_t *mib = &gp->mib;
	struct net_device_stats *stats = &gp->stats;

	read_mib_counters(gp);

	stats->rx_packets = mib->totalFramesReceived;
	stats->tx_packets = mib->framesSent;
	stats->rx_bytes = mib->totalByteReceived;
	stats->tx_bytes = mib->byteSent;
	stats->rx_errors = mib->totalFramesReceived - mib->framesReceived;
	//the tx error counters are incremented by the ISR
	//rx_dropped incremented by gt96100_rx
	//tx_dropped incremented by gt96100_tx
	stats->multicast = mib->multicastFramesReceived;
	// collisions incremented by gt96100_tx_complete
	stats->rx_length_errors = mib->oversizeFrames + mib->fragments;
	// The RxError condition means the Rx DMA encountered a
	// CPU owned descriptor, which, if things are working as
	// they should, means the Rx ring has overflowed.
	stats->rx_over_errors = mib->macRxError;
	stats->rx_crc_errors = mib->cRCError;
}

static void
abort(struct net_device *dev, u32 abort_bits)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int timedout = 100; // wait up to 100 msec for hard stop to complete

	dbg(3, "%s\n", __FUNCTION__);

	// Return if neither Rx nor Tx abort bits are set
	if (!(abort_bits & (sdcmrAR | sdcmrAT)))
		return;

	// make sure only the Rx/Tx abort bits are set
	abort_bits &= (sdcmrAR | sdcmrAT);

	spin_lock(&gp->lock);

	// abort any Rx/Tx DMA immediately
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, abort_bits);

	dbg(3, "%s: SDMA comm = %x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

	// wait for abort to complete
	while (GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM) & abort_bits) {
		// snooze for 1 msec and check again
		gt96100_delay(1);

		if (--timedout == 0) {
			err("%s: timeout!!\n", __FUNCTION__);
			break;
		}
	}

	spin_unlock(&gp->lock);
}


static void
hard_stop(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);

	dbg(3, "%s\n", __FUNCTION__);

	disable_ether_irq(dev);

	abort(dev, sdcmrAR | sdcmrAT);

	// disable port
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, 0);
}


static void
enable_ether_irq(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 intMask;
	/*
	 * route ethernet interrupt to GT_SERINT0 for port 0,
	 * GT_INT0 for port 1.
	 */
	int intr_mask_reg = (gp->port_num == 0) ?
		GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;

	if (gp->chip_rev >= REV_GT96100A_1) {
		intMask = icrTxBufferLow | icrTxEndLow |
			icrTxErrorLow | icrRxOVR | icrTxUdr |
			icrRxBufferQ0 | icrRxErrorQ0 |
			icrMIIPhySTC | icrEtherIntSum;
	}
	else {
		intMask = icrTxBufferLow | icrTxEndLow |
			icrTxErrorLow | icrRxOVR | icrTxUdr |
			icrRxBuffer | icrRxError |
			icrMIIPhySTC | icrEtherIntSum;
	}

	// unmask interrupts
	GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, intMask);

	intMask = GT96100_READ(intr_mask_reg);
	intMask |= 1<<gp->port_num;
	GT96100_WRITE(intr_mask_reg, intMask);
}

static void
disable_ether_irq(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 intMask;
	int intr_mask_reg = (gp->port_num == 0) ?
		GT96100_SERINT0_MASK : GT96100_INT0_HIGH_MASK;

	intMask = GT96100_READ(intr_mask_reg);
	intMask &= ~(1<<gp->port_num);
	GT96100_WRITE(intr_mask_reg, intMask);

	GT96100ETH_WRITE(gp, GT96100_ETH_INT_MASK, 0);
}


/*
 * Init GT96100 ethernet controller driver
 */
static int gt96100_init_module(void)
{
	struct pci_dev *pci;
	int i, retval=0;
	u32 cpuConfig;

	/*
	 * Stupid probe because this really isn't a PCI device
	 */
	if (!(pci = pci_get_device(PCI_VENDOR_ID_MARVELL,
				   PCI_DEVICE_ID_MARVELL_GT96100, NULL)) &&
	    !(pci = pci_get_device(PCI_VENDOR_ID_MARVELL,
				   PCI_DEVICE_ID_MARVELL_GT96100A, NULL))) {
		printk(KERN_ERR __FILE__ ": GT96100 not found!\n");
		return -ENODEV;
	}

	cpuConfig = GT96100_READ(GT96100_CPU_INTERF_CONFIG);
	if (cpuConfig & (1<<12)) {
		printk(KERN_ERR __FILE__
		       ": must be in Big Endian mode!\n");
		return -ENODEV;
	}

	for (i=0; i < NUM_INTERFACES; i++)
		retval |= gt96100_probe1(pci, i);

	pci_dev_put(pci);

	return retval;
}

static int __init gt96100_probe1(struct pci_dev *pci, int port_num)
{
	struct gt96100_private *gp = NULL;
	struct gt96100_if_t *gtif = &gt96100_iflist[port_num];
	int phy_addr, phy_id1, phy_id2;
	u32 phyAD;
	int retval;
	unsigned char chip_rev;
	struct net_device *dev = NULL;

	if (gtif->irq < 0) {
		printk(KERN_ERR "%s: irq unknown - probing not supported\n",
		       __FUNCTION__);
		return -ENODEV;
	}

	pci_read_config_byte(pci, PCI_REVISION_ID, &chip_rev);

	if (chip_rev >= REV_GT96100A_1) {
		phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
		phy_addr = (phyAD >> (5*port_num)) & 0x1f;
	} else {
		/*
		 * not sure what this is about -- probably a gt bug
		 */
		phy_addr = port_num;
		phyAD = GT96100_READ(GT96100_ETH_PHY_ADDR_REG);
		phyAD &= ~(0x1f << (port_num*5));
		phyAD |= phy_addr << (port_num*5);
		GT96100_WRITE(GT96100_ETH_PHY_ADDR_REG, phyAD);
	}

	// probe for the external PHY
	if ((phy_id1 = read_MII(phy_addr, 2)) <= 0 ||
	    (phy_id2 = read_MII(phy_addr, 3)) <= 0) {
		printk(KERN_ERR "%s: no PHY found on MII%d\n", __FUNCTION__, port_num);
		return -ENODEV;
	}

	if (!request_region(gtif->iobase, GT96100_ETH_IO_SIZE, "GT96100ETH")) {
		printk(KERN_ERR "%s: request_region failed\n", __FUNCTION__);
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct gt96100_private));
	if (!dev) {
		retval = -ENOMEM;
		goto out;
	}
	gtif->dev = dev;

	/* private struct aligned and zeroed by alloc_etherdev */
	/* Fill in the 'dev' fields. */
	dev->base_addr = gtif->iobase;
	dev->irq = gtif->irq;

	if ((retval = parse_mac_addr(dev, gtif->mac_str))) {
		err("%s: MAC address parse failed\n", __FUNCTION__);
		retval = -EINVAL;
		goto out1;
	}

	gp = netdev_priv(dev);

	memset(gp, 0, sizeof(*gp)); // clear it

	gp->port_num = port_num;
	gp->port_offset = port_num * GT96100_ETH_IO_SIZE;
	gp->phy_addr = phy_addr;
	gp->chip_rev = chip_rev;

	info("%s found at 0x%x, irq %d\n",
	     chip_name(gp->chip_rev), gtif->iobase, gtif->irq);
	dump_hw_addr(0, dev, "%s: HW Address ", __FUNCTION__, dev->dev_addr);
	info("%s chip revision=%d\n", chip_name(gp->chip_rev), gp->chip_rev);
	info("%s ethernet port %d\n", chip_name(gp->chip_rev), gp->port_num);
	info("external PHY ID1=0x%04x, ID2=0x%04x\n", phy_id1, phy_id2);

	// Allocate Rx and Tx descriptor rings
	if (gp->rx_ring == NULL) {
		// All descriptors in ring must be 16-byte aligned
		gp->rx_ring = dmaalloc(sizeof(gt96100_rd_t) * RX_RING_SIZE
				       + sizeof(gt96100_td_t) * TX_RING_SIZE,
				       &gp->rx_ring_dma);
		if (gp->rx_ring == NULL) {
			retval = -ENOMEM;
			goto out1;
		}

		gp->tx_ring = (gt96100_td_t *)(gp->rx_ring + RX_RING_SIZE);
		gp->tx_ring_dma =
			gp->rx_ring_dma + sizeof(gt96100_rd_t) * RX_RING_SIZE;
	}

	// Allocate the Rx Data Buffers
	if (gp->rx_buff == NULL) {
		gp->rx_buff = dmaalloc(PKT_BUF_SZ*RX_RING_SIZE,
				       &gp->rx_buff_dma);
		if (gp->rx_buff == NULL) {
			retval = -ENOMEM;
			goto out2;
		}
	}

	dbg(3, "%s: rx_ring=%p, tx_ring=%p\n", __FUNCTION__,
	    gp->rx_ring, gp->tx_ring);

	// Allocate Rx Hash Table
	if (gp->hash_table == NULL) {
		gp->hash_table = (char*)dmaalloc(RX_HASH_TABLE_SIZE,
						 &gp->hash_table_dma);
		if (gp->hash_table == NULL) {
			retval = -ENOMEM;
			goto out3;
		}
	}

	dbg(3, "%s: hash=%p\n", __FUNCTION__, gp->hash_table);

	spin_lock_init(&gp->lock);

	dev->open = gt96100_open;
	dev->hard_start_xmit = gt96100_tx;
	dev->stop = gt96100_close;
	dev->get_stats = gt96100_get_stats;
	//dev->do_ioctl = gt96100_ioctl;
	dev->set_multicast_list = gt96100_set_rx_mode;
	dev->tx_timeout = gt96100_tx_timeout;
	dev->watchdog_timeo = GT96100ETH_TX_TIMEOUT;

	retval = register_netdev(dev);
	if (retval)
		goto out4;
	return 0;

out4:
	dmafree(RX_HASH_TABLE_SIZE, gp->hash_table);
out3:
	dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
out2:
	dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
		+ sizeof(gt96100_td_t) * TX_RING_SIZE,
		gp->rx_ring);
out1:
	free_netdev(dev);
out:
	release_region(gtif->iobase, GT96100_ETH_IO_SIZE);

	printk(KERN_ERR "%s failed.  Returns %d\n", __FUNCTION__, retval);
	return retval;
}


static void
reset_tx(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAT);

	for (i=0; i<TX_RING_SIZE; i++) {
		if (gp->tx_skbuff[i]) {
			if (in_interrupt())
				dev_kfree_skb_irq(gp->tx_skbuff[i]);
			else
				dev_kfree_skb(gp->tx_skbuff[i]);
			gp->tx_skbuff[i] = NULL;
		}

		gp->tx_ring[i].cmdstat = 0; // CPU owns
		gp->tx_ring[i].byte_cnt = 0;
		gp->tx_ring[i].buff_ptr = 0;
		gp->tx_ring[i].next =
			cpu_to_dma32(gp->tx_ring_dma +
				     sizeof(gt96100_td_t) * (i+1));
		dump_tx_desc(4, dev, i);
	}
	/* Wrap the ring. */
	gp->tx_ring[i-1].next = cpu_to_dma32(gp->tx_ring_dma);

	// setup only the lowest priority TxCDP reg
	GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR0, gp->tx_ring_dma);
	GT96100ETH_WRITE(gp, GT96100_ETH_CURR_TX_DESC_PTR1, 0);

	// init Tx indices and pkt counter
	gp->tx_next_in = gp->tx_next_out = 0;
	gp->tx_count = 0;

}

static void
reset_rx(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int i;

	abort(dev, sdcmrAR);

	for (i=0; i<RX_RING_SIZE; i++) {
		gp->rx_ring[i].next =
			cpu_to_dma32(gp->rx_ring_dma +
				     sizeof(gt96100_rd_t) * (i+1));
		gp->rx_ring[i].buff_ptr =
			cpu_to_dma32(gp->rx_buff_dma + i*PKT_BUF_SZ);
		gp->rx_ring[i].buff_sz = cpu_to_dma16(PKT_BUF_SZ);
		// Give ownership to device, set first and last, enable intr
		gp->rx_ring[i].cmdstat =
			cpu_to_dma32((u32)(rxFirst | rxLast | rxOwn | rxEI));
		dump_rx_desc(4, dev, i);
	}
	/* Wrap the ring. */
	gp->rx_ring[i-1].next = cpu_to_dma32(gp->rx_ring_dma);

	// Setup only the lowest priority RxFDP and RxCDP regs
	for (i=0; i<4; i++) {
		if (i == 0) {
			GT96100ETH_WRITE(gp, GT96100_ETH_1ST_RX_DESC_PTR0,
					 gp->rx_ring_dma);
			GT96100ETH_WRITE(gp, GT96100_ETH_CURR_RX_DESC_PTR0,
					 gp->rx_ring_dma);
		} else {
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_1ST_RX_DESC_PTR0 + i*4,
					 0);
			GT96100ETH_WRITE(gp,
					 GT96100_ETH_CURR_RX_DESC_PTR0 + i*4,
					 0);
		}
	}

	// init Rx NextOut index
	gp->rx_next_out = 0;
}


// Returns 1 if the Tx counter and indices don't gel
static int
gt96100_check_tx_consistent(struct gt96100_private *gp)
{
	int diff = gp->tx_next_in - gp->tx_next_out;

	diff = diff<0 ? TX_RING_SIZE + diff : diff;
	diff = gp->tx_count == TX_RING_SIZE ? diff + TX_RING_SIZE : diff;

	return (diff != gp->tx_count);
}

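/*
 * Worked example of the consistency check: with TX_RING_SIZE 16,
 * tx_next_in 2 and tx_next_out 14, diff is -12 and wraps to 4, so
 * tx_count must also be 4.  When the ring is completely full the two
 * indices are equal, diff wraps to 0, and the extra TX_RING_SIZE term
 * makes the expected value match tx_count == 16.
 */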
static int
gt96100_init(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	u32 tmp;
	u16 mii_reg;

	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);
	dbg(3, "%s: scs10_lo=%4x, scs10_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x8), GT96100_READ(0x10));
	dbg(3, "%s: scs32_lo=%4x, scs32_hi=%4x\n", __FUNCTION__,
	    GT96100_READ(0x18), GT96100_READ(0x20));

	// Stop and disable Port
	hard_stop(dev);

	// Setup CIU Arbiter
	tmp = GT96100_READ(GT96100_CIU_ARBITER_CONFIG);
	tmp |= (0x0c << (gp->port_num*2)); // set Ether DMA req priority to hi
#ifndef DESC_BE
	tmp &= ~(1<<31); // set desc endianess to little
#else
	tmp |= (1<<31);
#endif
	GT96100_WRITE(GT96100_CIU_ARBITER_CONFIG, tmp);
	dbg(3, "%s: CIU Config=%x/%x\n", __FUNCTION__,
	    tmp, GT96100_READ(GT96100_CIU_ARBITER_CONFIG));

	// Set routing.
	tmp = GT96100_READ(GT96100_ROUTE_MAIN) & (0x3f << 18);
	tmp |= (0x07 << (18 + gp->port_num*3));
	GT96100_WRITE(GT96100_ROUTE_MAIN, tmp);

	/* set MII as peripheral func */
	tmp = GT96100_READ(GT96100_GPP_CONFIG2);
	tmp |= 0x7fff << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_CONFIG2, tmp);

	/* Set up MII port pin directions */
	tmp = GT96100_READ(GT96100_GPP_IO2);
	tmp |= 0x003d << (gp->port_num*16);
	GT96100_WRITE(GT96100_GPP_IO2, tmp);

	// Set-up hash table
	memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE); // clear it
	gp->hash_mode = 0;
	// Add a single entry to hash table - our ethernet address
	gt96100_add_hash_entry(dev, dev->dev_addr);
	// Set-up DMA ptr to hash table
	GT96100ETH_WRITE(gp, GT96100_ETH_HASH_TBL_PTR, gp->hash_table_dma);
	dbg(3, "%s: Hash Tbl Ptr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_HASH_TBL_PTR));

	// Setup Tx
	reset_tx(dev);

	dbg(3, "%s: Curr Tx Desc Ptr0=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0));

	// Setup Rx
	reset_rx(dev);

	dbg(3, "%s: 1st/Curr Rx Desc Ptr0=%x/%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0),
	    GT96100ETH_READ(gp, GT96100_ETH_CURR_RX_DESC_PTR0));

	// eth port config register
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP | pcxrDPLXen);

	mii_reg = read_MII(gp->phy_addr, 0x11); /* int enable register */
	mii_reg |= 2; /* enable mii interrupt */
	write_MII(gp->phy_addr, 0x11, mii_reg);

	dbg(3, "%s: PhyAD=%x\n", __FUNCTION__,
	    GT96100_READ(GT96100_ETH_PHY_ADDR_REG));

	// setup DMA

	// We want the Rx/Tx DMA to write/read data to/from memory in
	// Big Endian mode.  Also set DMA Burst Size to 8 64Bit words.
#ifdef DESC_DATA_BE
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#else
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_CONFIG,
			 sdcrBLMR | sdcrBLMT |
			 (0xf<<sdcrRCBit) | sdcrRIFB | (3<<sdcrBSZBit));
#endif
	dbg(3, "%s: SDMA Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_CONFIG));

	// start Rx DMA
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);
	dbg(3, "%s: SDMA Comm=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_SDMA_COMM));

	// enable this port (set hash size to 1/2K)
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG, pcrEN | pcrHS);
	dbg(3, "%s: Port Config=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG));

	/*
	 * Disable all Type-of-Service queueing.  All Rx packets will be
	 * treated normally and will be sent to the lowest priority
	 * queue.
	 *
	 * Disable flow-control for now.  FIXME: support flow control?
	 */

	// clear all the MIB ctr regs
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride);
	read_mib_counters(gp);
	GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG_EXT,
			 pcxrFCTL | pcxrFCTLen | pcxrFLP |
			 pcxrPRIOrxOverride | pcxrMIBclrMode);

	dbg(3, "%s: Port Config Ext=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_PORT_CONFIG_EXT));

	netif_start_queue(dev);

	dump_MII(4, dev);

	// enable interrupts
	enable_ether_irq(dev);

	// we should now be receiving frames
	return 0;
}


static int
gt96100_open(struct net_device *dev)
{
	int retval;

	dbg(2, "%s: dev=%p\n", __FUNCTION__, dev);

	if ((retval = request_irq(dev->irq, &gt96100_interrupt,
				  IRQF_SHARED, dev->name, dev))) {
		err("unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	// Initialize and startup the GT-96100 ethernet port
	if ((retval = gt96100_init(dev))) {
		err("error in gt96100_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	dbg(2, "%s: Initialization done.\n", __FUNCTION__);

	return 0;
}

static int
gt96100_close(struct net_device *dev)
{
	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);

	// stop the device
	if (netif_device_present(dev)) {
		netif_stop_queue(dev);
		hard_stop(dev);
	}

	free_irq(dev->irq, dev);

	return 0;
}


static int
gt96100_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;
	int nextIn;

	spin_lock_irqsave(&gp->lock, flags);

	nextIn = gp->tx_next_in;

	dbg(3, "%s: nextIn=%d\n", __FUNCTION__, nextIn);

	if (gp->tx_count >= TX_RING_SIZE) {
		warn("Tx Ring full, pkt dropped.\n");
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (!(gp->last_psr & psrLink)) {
		err("%s: Link down, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	if (dma32_to_cpu(gp->tx_ring[nextIn].cmdstat) & txOwn) {
		err("%s: device owns descriptor, pkt dropped.\n", __FUNCTION__);
		gp->stats.tx_dropped++;
		// stop the queue, so Tx timeout can fix it
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		return 1;
	}

	// Prepare the Descriptor at tx_next_in
	gp->tx_skbuff[nextIn] = skb;
	gp->tx_ring[nextIn].byte_cnt = cpu_to_dma16(skb->len);
	gp->tx_ring[nextIn].buff_ptr = cpu_to_dma32(virt_to_phys(skb->data));
	// make sure packet gets written back to memory
	dma_cache_wback_inv((unsigned long)(skb->data), skb->len);
	// Give ownership to device, set first and last desc, enable interrupt
	// Setting of ownership bit must be *last*!
	gp->tx_ring[nextIn].cmdstat =
		cpu_to_dma32((u32)(txOwn | txGenCRC | txEI |
				   txPad | txFirst | txLast));

	dump_tx_desc(4, dev, nextIn);
	dump_skb(4, dev, skb);

	// increment tx_next_in with wrap
	gp->tx_next_in = (nextIn + 1) % TX_RING_SIZE;
	// If DMA is stopped, restart
	if (!(GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS) & psrTxLow))
		GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);

	// increment count and stop queue if full
	if (++gp->tx_count == TX_RING_SIZE) {
		gp->tx_full = 1;
		netif_stop_queue(dev);
		dbg(2, "Tx Ring now full, queue stopped.\n");
	}

	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&gp->lock, flags);

	return 0;
}


static int
gt96100_rx(struct net_device *dev, u32 status)
{
	struct gt96100_private *gp = netdev_priv(dev);
	struct sk_buff *skb;
	int pkt_len, nextOut, cdp;
	gt96100_rd_t *rd;
	u32 cmdstat;

	dbg(3, "%s: dev=%p, status=%x\n", __FUNCTION__, dev, status);

	cdp = (GT96100ETH_READ(gp, GT96100_ETH_1ST_RX_DESC_PTR0)
	       - gp->rx_ring_dma) / sizeof(gt96100_rd_t);

	// Continue until we reach 1st descriptor pointer
	for (nextOut = gp->rx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % RX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		rd = &gp->rx_ring[nextOut];
		cmdstat = dma32_to_cpu(rd->cmdstat);

		dbg(4, "%s: Rx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
		    cmdstat, nextOut);

		if (cmdstat & (u32)rxOwn) {
			//err("%s: device owns descriptor!\n", __FUNCTION__);
			// DMA is not finished updating descriptor???
			// Leave and come back later to pick-up where
			// we left off.
			break;
		}

		// Drop this received pkt if there were any errors
		if (((cmdstat & (u32)(rxErrorSummary)) &&
		     (cmdstat & (u32)(rxFirst))) || (status & icrRxError)) {
			// update the detailed rx error counters that
			// are not covered by the MIB counters.
			if (cmdstat & (u32)rxOverrun)
				gp->stats.rx_fifo_errors++;
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}

		/*
		 * Must be first and last (ie only) descriptor of packet.  We
		 * ignore (drop) any packets that do not fit in one descriptor.
		 * Every descriptor's receive buffer is large enough to hold
		 * the maximum 802.3 frame size, so a multi-descriptor packet
		 * indicates an error.  Most if not all corrupted packets will
		 * have already been dropped by the above check for the
		 * rxErrorSummary status bit.
		 */
		if (!(cmdstat & (u32)rxFirst) || !(cmdstat & (u32)rxLast)) {
			if (cmdstat & (u32)rxFirst) {
				/*
				 * This is the first descriptor of a
				 * multi-descriptor packet.  It isn't corrupted
				 * because the above check for rxErrorSummary
				 * would have dropped it already, so what's
				 * the deal with this packet?  Good question,
				 * let's dump it out.
				 */
				err("%s: desc not first and last!\n", __FUNCTION__);
				dump_rx_desc(0, dev, nextOut);
			}
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			// continue to drop every descriptor of this packet
			continue;
		}

		pkt_len = dma16_to_cpu(rd->byte_cnt);

		/* Create new skb. */
		skb = dev_alloc_skb(pkt_len+2);
		if (skb == NULL) {
			err("%s: Memory squeeze, dropping packet.\n", __FUNCTION__);
			gp->stats.rx_dropped++;
			cmdstat |= (u32)rxOwn;
			rd->cmdstat = cpu_to_dma32(cmdstat);
			continue;
		}
		skb->dev = dev;
		skb_reserve(skb, 2); /* 16 byte IP header align */
		memcpy(skb_put(skb, pkt_len),
		       &gp->rx_buff[nextOut*PKT_BUF_SZ], pkt_len);
		skb->protocol = eth_type_trans(skb, dev);
		dump_skb(4, dev, skb);

		netif_rx(skb); /* pass the packet to upper layers */
		dev->last_rx = jiffies;

		// now we can release ownership of this desc back to device
		cmdstat |= (u32)rxOwn;
		rd->cmdstat = cpu_to_dma32(cmdstat);
	}

	if (nextOut == gp->rx_next_out)
		dbg(3, "%s: RxCDP did not increment?\n", __FUNCTION__);

	gp->rx_next_out = nextOut;
	return 0;
}


static void
gt96100_tx_complete(struct net_device *dev, u32 status)
{
	struct gt96100_private *gp = netdev_priv(dev);
	int nextOut, cdp;
	gt96100_td_t *td;
	u32 cmdstat;

	cdp = (GT96100ETH_READ(gp, GT96100_ETH_CURR_TX_DESC_PTR0)
	       - gp->tx_ring_dma) / sizeof(gt96100_td_t);

	// Continue until we reach the current descriptor pointer
	for (nextOut = gp->tx_next_out; nextOut != cdp;
	     nextOut = (nextOut + 1) % TX_RING_SIZE) {

		if (--gp->intr_work_done == 0)
			break;

		td = &gp->tx_ring[nextOut];
		cmdstat = dma32_to_cpu(td->cmdstat);

		dbg(3, "%s: Tx desc cmdstat=%x, nextOut=%d\n", __FUNCTION__,
		    cmdstat, nextOut);

		if (cmdstat & (u32)txOwn) {
			/*
			 * DMA is not finished writing descriptor???
			 * Leave and come back later to pick-up where
			 * we left off.
			 */
			break;
		}

		// increment Tx error stats
		if (cmdstat & (u32)txErrorSummary) {
			dbg(2, "%s: Tx error, cmdstat = %x\n", __FUNCTION__,
			    cmdstat);
			gp->stats.tx_errors++;
			if (cmdstat & (u32)txReTxLimit)
				gp->stats.tx_aborted_errors++;
			if (cmdstat & (u32)txUnderrun)
				gp->stats.tx_fifo_errors++;
			if (cmdstat & (u32)txLateCollision)
				gp->stats.tx_window_errors++;
		}

		if (cmdstat & (u32)txCollision)
			gp->stats.collisions +=
				(u32)((cmdstat & txReTxCntMask) >>
				      txReTxCntBit);

		// Wake the queue if the ring was full
		if (gp->tx_full) {
			gp->tx_full = 0;
			if (gp->last_psr & psrLink) {
				netif_wake_queue(dev);
				dbg(2, "%s: Tx Ring was full, queue waked\n",
				    __FUNCTION__);
			}
		}

		// decrement tx ring buffer count
		if (gp->tx_count) gp->tx_count--;

		// free the skb
		if (gp->tx_skbuff[nextOut]) {
			dbg(3, "%s: good Tx, skb=%p\n", __FUNCTION__,
			    gp->tx_skbuff[nextOut]);
			dev_kfree_skb_irq(gp->tx_skbuff[nextOut]);
			gp->tx_skbuff[nextOut] = NULL;
		} else {
			err("%s: no skb!\n", __FUNCTION__);
		}
	}

	gp->tx_next_out = nextOut;

	if (gt96100_check_tx_consistent(gp)) {
		err("%s: Tx queue inconsistent!\n", __FUNCTION__);
	}

	if ((status & icrTxEndLow) && gp->tx_count != 0) {
		// we must restart the DMA
		dbg(3, "%s: Restarting Tx DMA\n", __FUNCTION__);
		GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM,
				 sdcmrERD | sdcmrTXDL);
	}
}


static irqreturn_t
gt96100_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct gt96100_private *gp;
	u32 status;
	int handled = 0;

	if (dev == NULL) {
		printk(KERN_ERR "%s: null dev ptr\n", __FUNCTION__);
		return IRQ_NONE;
	}

	gp = netdev_priv(dev);

	dbg(3, "%s: entry, icr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));

	spin_lock(&gp->lock);

	gp->intr_work_done = max_interrupt_work;

	while (gp->intr_work_done > 0) {

		status = GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE);
		// ACK interrupts
		GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, ~status);

		if ((status & icrEtherIntSum) == 0 &&
		    !(status & (icrTxBufferLow|icrTxBufferHigh|icrRxBuffer)))
			break;

		handled = 1;

		if (status & icrMIIPhySTC) {
			u32 psr = GT96100ETH_READ(gp, GT96100_ETH_PORT_STATUS);
			if (gp->last_psr != psr) {
				dbg(0, "port status:\n");
				dbg(0, "    %s MBit/s, %s-duplex, "
				    "flow-control %s, link is %s,\n",
				    psr & psrSpeed ? "100":"10",
				    psr & psrDuplex ? "full":"half",
				    psr & psrFctl ? "disabled":"enabled",
				    psr & psrLink ? "up":"down");
				dbg(0, "    TxLowQ is %s, TxHighQ is %s, "
				    "Transmitter is %s\n",
				    psr & psrTxLow ? "running":"stopped",
				    psr & psrTxHigh ? "running":"stopped",
				    psr & psrTxInProg ? "on":"off");

				if ((psr & psrLink) && !gp->tx_full &&
				    netif_queue_stopped(dev)) {
					dbg(0, "%s: Link up, waking queue.\n",
					    __FUNCTION__);
					netif_wake_queue(dev);
				} else if (!(psr & psrLink) &&
					   !netif_queue_stopped(dev)) {
					dbg(0, "%s: Link down, stopping queue.\n",
					    __FUNCTION__);
					netif_stop_queue(dev);
				}

				gp->last_psr = psr;
			}

			if (--gp->intr_work_done == 0)
				break;
		}

		if (status & (icrTxBufferLow | icrTxEndLow))
			gt96100_tx_complete(dev, status);

		if (status & (icrRxBuffer | icrRxError)) {
			gt96100_rx(dev, status);
		}

		// Now check TX errors (RX errors were handled in gt96100_rx)
		if (status & icrTxErrorLow) {
			err("%s: Tx resource error\n", __FUNCTION__);
			if (--gp->intr_work_done == 0)
				break;
		}

		if (status & icrTxUdr) {
			err("%s: Tx underrun error\n", __FUNCTION__);
			if (--gp->intr_work_done == 0)
				break;
		}
	}

	if (gp->intr_work_done == 0) {
		// ACK any remaining pending interrupts
		GT96100ETH_WRITE(gp, GT96100_ETH_INT_CAUSE, 0);
		dbg(3, "%s: hit max work\n", __FUNCTION__);
	}

	dbg(3, "%s: exit, icr=%x\n", __FUNCTION__,
	    GT96100ETH_READ(gp, GT96100_ETH_INT_CAUSE));

	spin_unlock(&gp->lock);
	return IRQ_RETVAL(handled);
}


static void
gt96100_tx_timeout(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&gp->lock, flags);

	if (!(gp->last_psr & psrLink)) {
		err("tx_timeout: link down.\n");
		spin_unlock_irqrestore(&gp->lock, flags);
	} else {
		if (gt96100_check_tx_consistent(gp))
			err("tx_timeout: Tx ring error.\n");

		disable_ether_irq(dev);
		spin_unlock_irqrestore(&gp->lock, flags);
		reset_tx(dev);
		enable_ether_irq(dev);

		netif_wake_queue(dev);
	}
}


static void
gt96100_set_rx_mode(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;
	//struct dev_mc_list *mcptr;

	dbg(3, "%s: dev=%p, flags=%x\n", __FUNCTION__, dev, dev->flags);

	// stop the Receiver DMA
	abort(dev, sdcmrAR);

	spin_lock_irqsave(&gp->lock, flags);

	if (dev->flags & IFF_PROMISC) {
		GT96100ETH_WRITE(gp, GT96100_ETH_PORT_CONFIG,
				 pcrEN | pcrHS | pcrPM);
	}

#if 0
	/*
	  FIXME: currently multicast doesn't work - need to get hash table
	  working first.
	*/
	if (dev->mc_count) {
		// clear hash table
		memset(gp->hash_table, 0, RX_HASH_TABLE_SIZE);
		// Add our ethernet address
		gt96100_add_hash_entry(dev, dev->dev_addr);

		for (mcptr = dev->mc_list; mcptr; mcptr = mcptr->next) {
			dump_hw_addr(2, dev, "%s: addr=", __FUNCTION__,
				     mcptr->dmi_addr);
			gt96100_add_hash_entry(dev, mcptr->dmi_addr);
		}
	}
#endif

	// restart Rx DMA
	GT96100ETH_WRITE(gp, GT96100_ETH_SDMA_COMM, sdcmrERD);

	spin_unlock_irqrestore(&gp->lock, flags);
}

static struct net_device_stats *
gt96100_get_stats(struct net_device *dev)
{
	struct gt96100_private *gp = netdev_priv(dev);
	unsigned long flags;

	dbg(3, "%s: dev=%p\n", __FUNCTION__, dev);

	if (netif_device_present(dev)) {
		spin_lock_irqsave (&gp->lock, flags);
		update_stats(gp);
		spin_unlock_irqrestore (&gp->lock, flags);
	}

	return &gp->stats;
}

static void gt96100_cleanup_module(void)
{
	int i;
	for (i=0; i<NUM_INTERFACES; i++) {
		struct gt96100_if_t *gtif = &gt96100_iflist[i];
		if (gtif->dev != NULL) {
			struct gt96100_private *gp = (struct gt96100_private *)
				netdev_priv(gtif->dev);
			unregister_netdev(gtif->dev);
			dmafree(RX_HASH_TABLE_SIZE, gp->hash_table);
			dmafree(PKT_BUF_SZ*RX_RING_SIZE, gp->rx_buff);
			dmafree(sizeof(gt96100_rd_t) * RX_RING_SIZE
				+ sizeof(gt96100_td_t) * TX_RING_SIZE,
				gp->rx_ring);
			free_netdev(gtif->dev);
			release_region(gtif->iobase, GT96100_ETH_IO_SIZE);
		}
	}
}

static int __init gt96100_setup(char *options)
{
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep (&options, ",")) != NULL) {
		if (!*this_opt)
			continue;
		if (!strncmp(this_opt, "mac0:", 5)) {
			memcpy(mac0, this_opt+5, 17);
			mac0[17] = '\0';
		} else if (!strncmp(this_opt, "mac1:", 5)) {
			memcpy(mac1, this_opt+5, 17);
			mac1[17] = '\0';
		}
	}

	return 1;
}

__setup("gt96100eth=", gt96100_setup);

module_init(gt96100_init_module);
module_exit(gt96100_cleanup_module);

MODULE_AUTHOR("Steve Longerbeam <stevel@mvista.com>");
MODULE_DESCRIPTION("GT96100 Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h
deleted file mode 100644
index 509d8166bb59..000000000000
--- a/drivers/net/gt96100eth.h
+++ /dev/null
@@ -1,346 +0,0 @@
/*
 * Copyright 2000 MontaVista Software Inc.
 * Author: MontaVista Software, Inc.
 *         stevel@mvista.com or source@mvista.com
 *
 * ########################################################################
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * ########################################################################
 *
 * Ethernet driver definitions for the MIPS GT96100 Advanced
 * Communication Controller.
 *
 */
#ifndef _GT96100ETH_H
#define _GT96100ETH_H

#include <asm/galileo-boards/gt96100.h>

#define dbg(lvl, format, arg...) \
	do { \
		if (lvl <= GT96100_DEBUG) \
			printk(KERN_DEBUG "%s: " format, dev->name , ## arg); \
	} while (0)
#define err(format, arg...) \
	printk(KERN_ERR "%s: " format, dev->name , ## arg)
#define info(format, arg...) \
	printk(KERN_INFO "%s: " format, dev->name , ## arg)
#define warn(format, arg...) \
	printk(KERN_WARNING "%s: " format, dev->name , ## arg)

/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

#define RX_HASH_TABLE_SIZE 16384
#define HASH_HOP_NUMBER 12

#define NUM_INTERFACES 2

#define GT96100ETH_TX_TIMEOUT (HZ/4)

#define GT96100_ETH0_BASE (MIPS_GT96100_BASE + GT96100_ETH_PORT_CONFIG)
#define GT96100_ETH1_BASE (GT96100_ETH0_BASE + GT96100_ETH_IO_SIZE)

#ifdef CONFIG_MIPS_EV96100
#define GT96100_ETHER0_IRQ 3
#define GT96100_ETHER1_IRQ 4
#else
#define GT96100_ETHER0_IRQ -1
#define GT96100_ETHER1_IRQ -1
#endif

#define REV_GT96100 1
#define REV_GT96100A_1 2
#define REV_GT96100A 3

#define GT96100ETH_READ(gp, offset) \
	GT96100_READ((gp->port_offset + offset))

#define GT96100ETH_WRITE(gp, offset, data) \
	GT96100_WRITE((gp->port_offset + offset), data)

#define GT96100ETH_SETBIT(gp, offset, bits) {\
	u32 val = GT96100ETH_READ(gp, offset); val |= (u32)(bits); \
	GT96100ETH_WRITE(gp, offset, val); }

#define GT96100ETH_CLRBIT(gp, offset, bits) {\
	u32 val = GT96100ETH_READ(gp, offset); val &= (u32)(~(bits)); \
	GT96100ETH_WRITE(gp, offset, val); }

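/*
 * Illustrative use of the read-modify-write helpers above, e.g. to
 * re-enable a port without touching the other config bits:
 *
 *	GT96100ETH_SETBIT(gp, GT96100_ETH_PORT_CONFIG, pcrEN);
 *
 * (the driver itself writes whole register values instead, so these
 * two macros are currently unused convenience wrappers).
 */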

/* Bit definitions of the SMI Reg */
enum {
	smirDataMask = 0xffff,
	smirPhyAdMask = 0x1f<<16,
	smirPhyAdBit = 16,
	smirRegAdMask = 0x1f<<21,
	smirRegAdBit = 21,
	smirOpCode = 1<<26,
	smirReadValid = 1<<27,
	smirBusy = 1<<28
};

/* Bit definitions of the Port Config Reg */
enum pcr_bits {
	pcrPM = 1,
	pcrRBM = 2,
	pcrPBF = 4,
	pcrEN = 1<<7,
	pcrLPBKMask = 0x3<<8,
	pcrLPBKBit = 8,
	pcrFC = 1<<10,
	pcrHS = 1<<12,
	pcrHM = 1<<13,
	pcrHDM = 1<<14,
	pcrHD = 1<<15,
	pcrISLMask = 0x7<<28,
	pcrISLBit = 28,
	pcrACCS = 1<<31
};

/* Bit definitions of the Port Config Extend Reg */
enum pcxr_bits {
	pcxrIGMP = 1,
	pcxrSPAN = 2,
	pcxrPAR = 4,
	pcxrPRIOtxMask = 0x7<<3,
	pcxrPRIOtxBit = 3,
	pcxrPRIOrxMask = 0x3<<6,
	pcxrPRIOrxBit = 6,
	pcxrPRIOrxOverride = 1<<8,
	pcxrDPLXen = 1<<9,
	pcxrFCTLen = 1<<10,
	pcxrFLP = 1<<11,
	pcxrFCTL = 1<<12,
	pcxrMFLMask = 0x3<<14,
	pcxrMFLBit = 14,
	pcxrMIBclrMode = 1<<16,
	pcxrSpeed = 1<<18,
	pcxrSpeeden = 1<<19,
	pcxrRMIIen = 1<<20,
	pcxrDSCPen = 1<<21
};

/* Bit definitions of the Port Command Reg */
enum pcmr_bits {
	pcmrFJ = 1<<15
};


/* Bit definitions of the Port Status Reg */
enum psr_bits {
	psrSpeed = 1,
	psrDuplex = 2,
	psrFctl = 4,
	psrLink = 8,
	psrPause = 1<<4,
	psrTxLow = 1<<5,
	psrTxHigh = 1<<6,
	psrTxInProg = 1<<7
};

/* Bit definitions of the SDMA Config Reg */
enum sdcr_bits {
	sdcrRCMask = 0xf<<2,
	sdcrRCBit = 2,
	sdcrBLMR = 1<<6,
	sdcrBLMT = 1<<7,
	sdcrPOVR = 1<<8,
	sdcrRIFB = 1<<9,
	sdcrBSZMask = 0x3<<12,
	sdcrBSZBit = 12
};

/* Bit definitions of the SDMA Command Reg */
enum sdcmr_bits {
	sdcmrERD = 1<<7,
	sdcmrAR = 1<<15,
	sdcmrSTDH = 1<<16,
	sdcmrSTDL = 1<<17,
	sdcmrTXDH = 1<<23,
	sdcmrTXDL = 1<<24,
	sdcmrAT = 1<<31
};

/* Bit definitions of the Interrupt Cause Reg */
enum icr_bits {
	icrRxBuffer = 1,
	icrTxBufferHigh = 1<<2,
	icrTxBufferLow = 1<<3,
	icrTxEndHigh = 1<<6,
	icrTxEndLow = 1<<7,
	icrRxError = 1<<8,
	icrTxErrorHigh = 1<<10,
	icrTxErrorLow = 1<<11,
	icrRxOVR = 1<<12,
	icrTxUdr = 1<<13,
	icrRxBufferQ0 = 1<<16,
	icrRxBufferQ1 = 1<<17,
	icrRxBufferQ2 = 1<<18,
	icrRxBufferQ3 = 1<<19,
	icrRxErrorQ0 = 1<<20,
	icrRxErrorQ1 = 1<<21,
	icrRxErrorQ2 = 1<<22,
	icrRxErrorQ3 = 1<<23,
	icrMIIPhySTC = 1<<28,
	icrSMIdone = 1<<29,
	icrEtherIntSum = 1<<31
};


/* The Rx and Tx descriptor lists. */
typedef struct {
#ifdef DESC_BE
	u16 byte_cnt;
	u16 reserved;
#else
	u16 reserved;
	u16 byte_cnt;
#endif
	u32 cmdstat;
	u32 next;
	u32 buff_ptr;
} __attribute__ ((packed)) gt96100_td_t;

typedef struct {
#ifdef DESC_BE
	u16 buff_sz;
	u16 byte_cnt;
#else
	u16 byte_cnt;
	u16 buff_sz;
#endif
	u32 cmdstat;
	u32 next;
	u32 buff_ptr;
} __attribute__ ((packed)) gt96100_rd_t;

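/*
 * The #ifdef DESC_BE member swapping in the two descriptor structs
 * above exists because the GT96100 treats each descriptor as 32-bit
 * words: when descriptors are stored big-endian (DESC_BE), the two
 * 16-bit fields of the first word land in memory in the opposite order
 * from the little-endian layout, so the struct must mirror that.
 */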

/* Values for the Tx command-status descriptor entry. */
enum td_cmdstat {
	txOwn = 1<<31,
	txAutoMode = 1<<30,
	txEI = 1<<23,
	txGenCRC = 1<<22,
	txPad = 1<<18,
	txFirst = 1<<17,
	txLast = 1<<16,
	txErrorSummary = 1<<15,
	txReTxCntMask = 0x0f<<10,
	txReTxCntBit = 10,
	txCollision = 1<<9,
	txReTxLimit = 1<<8,
	txUnderrun = 1<<6,
	txLateCollision = 1<<5
};


/* Values for the Rx command-status descriptor entry. */
enum rd_cmdstat {
	rxOwn = 1<<31,
	rxAutoMode = 1<<30,
	rxEI = 1<<23,
	rxFirst = 1<<17,
	rxLast = 1<<16,
	rxErrorSummary = 1<<15,
	rxIGMP = 1<<14,
	rxHashExpired = 1<<13,
	rxMissedFrame = 1<<12,
	rxFrameType = 1<<11,
	rxShortFrame = 1<<8,
	rxMaxFrameLen = 1<<7,
	rxOverrun = 1<<6,
	rxCollision = 1<<4,
	rxCRCError = 1
};

/* Bit fields of a Hash Table Entry */
enum hash_table_entry {
	hteValid = 1,
	hteSkip = 2,
	hteRD = 4
};

// The MIB counters
typedef struct {
	u32 byteReceived;
	u32 byteSent;
	u32 framesReceived;
	u32 framesSent;
	u32 totalByteReceived;
	u32 totalFramesReceived;
	u32 broadcastFramesReceived;
	u32 multicastFramesReceived;
	u32 cRCError;
	u32 oversizeFrames;
	u32 fragments;
	u32 jabber;
	u32 collision;
	u32 lateCollision;
	u32 frames64;
	u32 frames65_127;
	u32 frames128_255;
	u32 frames256_511;
	u32 frames512_1023;
	u32 frames1024_MaxSize;
	u32 macRxError;
	u32 droppedFrames;
	u32 outMulticastFrames;
	u32 outBroadcastFrames;
	u32 undersizeFrames;
} mib_counters_t;


struct gt96100_private {
	gt96100_rd_t* rx_ring;
	gt96100_td_t* tx_ring;
	// The Rx and Tx rings must be 16-byte aligned
	dma_addr_t rx_ring_dma;
	dma_addr_t tx_ring_dma;
	char* hash_table;
	// The Hash Table must be 8-byte aligned
	dma_addr_t hash_table_dma;
	int hash_mode;

	// The Rx buffers must be 8-byte aligned
	char* rx_buff;
	dma_addr_t rx_buff_dma;
	// Tx buffers (tx_skbuff[i]->data) with less than 8 bytes
	// of payload must be 8-byte aligned
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	int rx_next_out; /* The next free ring entry to receive */
	int tx_next_in; /* The next free ring entry to send */
	int tx_next_out; /* The last ring entry the ISR processed */
	int tx_count; /* current # of pkts waiting to be sent in Tx ring */
	int intr_work_done; /* number of Rx and Tx pkts processed in the isr */
	int tx_full; /* Tx ring is full */

	mib_counters_t mib;
	struct net_device_stats stats;

	int port_num; // 0 or 1
	int chip_rev;
	u32 port_offset;

	int phy_addr; // PHY address
	u32 last_psr; // last value of the port status register

	int options; /* User-settable misc. driver options. */
	struct timer_list timer;
	spinlock_t lock; /* Serialise access to device */
};

#endif