Diffstat (limited to 'drivers/net/ethernet/amd')
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 662
-rw-r--r-- drivers/net/ethernet/amd/7990.h | 254
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 195
-rw-r--r-- drivers/net/ethernet/amd/Makefile | 20
-rw-r--r-- drivers/net/ethernet/amd/a2065.c | 781
-rw-r--r-- drivers/net/ethernet/amd/a2065.h | 173
-rw-r--r-- drivers/net/ethernet/amd/am79c961a.c | 770
-rw-r--r-- drivers/net/ethernet/amd/am79c961a.h | 145
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 1992
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.h | 816
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 793
-rw-r--r-- drivers/net/ethernet/amd/ariadne.h | 415
-rw-r--r-- drivers/net/ethernet/amd/atarilance.c | 1176
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 1356
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.h | 134
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 1381
-rw-r--r-- drivers/net/ethernet/amd/depca.c | 2111
-rw-r--r-- drivers/net/ethernet/amd/depca.h | 183
-rw-r--r-- drivers/net/ethernet/amd/hplance.c | 242
-rw-r--r-- drivers/net/ethernet/amd/hplance.h | 26
-rw-r--r-- drivers/net/ethernet/amd/lance.c | 1313
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 205
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 1254
-rw-r--r-- drivers/net/ethernet/amd/ni65.h | 121
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 1525
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 2937
-rw-r--r-- drivers/net/ethernet/amd/sun3lance.c | 961
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 1556
28 files changed, 23497 insertions, 0 deletions
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
new file mode 100644
index 000000000000..60b35fb5f524
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.c
@@ -0,0 +1,662 @@
1/*
2 * 7990.c -- LANCE ethernet IC generic routines.
3 * This is an attempt to separate out the bits of various ethernet
4 * drivers that are common because they all use the AMD 7990 LANCE
5 * (Local Area Network Controller for Ethernet) chip.
6 *
7 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
8 *
9 * Most of this stuff was obtained by looking at other LANCE drivers,
10 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
11 * NB: this was made easy by the fact that Jes Sorensen had cleaned up
12 * most of a2065 and sunlance with the aim of merging them, so the
13 * common code was pretty obvious.
14 */
15#include <linux/crc32.h>
16#include <linux/delay.h>
17#include <linux/errno.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h>
20#include <linux/init.h>
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/fcntl.h>
25#include <linux/interrupt.h>
26#include <linux/ioport.h>
27#include <linux/in.h>
28#include <linux/route.h>
29#include <linux/string.h>
30#include <linux/skbuff.h>
31#include <asm/irq.h>
32/* Used for the temporal inet entries and routing */
33#include <linux/socket.h>
34#include <linux/bitops.h>
35
36#include <asm/system.h>
37#include <asm/io.h>
38#include <asm/dma.h>
39#include <asm/pgtable.h>
40#ifdef CONFIG_HP300
41#include <asm/blinken.h>
42#endif
43
44#include "7990.h"
45
46#define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x))
47#define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x))
48#define READRDP(lp) in_be16(lp->base + LANCE_RDP)
49
50#if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE)
51#include "hplance.h"
52
53#undef WRITERAP
54#undef WRITERDP
55#undef READRDP
56
57#if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE)
58
59/* Lossage Factor Nine, Mr Sulu. */
60#define WRITERAP(lp,x) (lp->writerap(lp,x))
61#define WRITERDP(lp,x) (lp->writerdp(lp,x))
62#define READRDP(lp) (lp->readrdp(lp))
63
64#else
65
66/* These inlines can be used if only CONFIG_HPLANCE is defined */
67static inline void WRITERAP(struct lance_private *lp, __u16 value)
68{
69 do {
70 out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
71 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
72}
73
74static inline void WRITERDP(struct lance_private *lp, __u16 value)
75{
76 do {
77 out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
78 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
79}
80
81static inline __u16 READRDP(struct lance_private *lp)
82{
83 __u16 value;
84 do {
85 value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
86 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
87 return value;
88}
89
90#endif
91#endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */
92
93/* debugging output macros, various flavours */
94/* #define TEST_HITS */
95#ifdef UNDEF
96#define PRINT_RINGS() \
97do { \
98 int t; \
99 for (t=0; t < RX_RING_SIZE; t++) { \
100 printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\
101 t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\
102 ib->brx_ring[t].length,\
103 ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\
104 }\
105 for (t=0; t < TX_RING_SIZE; t++) { \
106 printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\
107 t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\
108 ib->btx_ring[t].length,\
109 ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\
110 }\
111} while (0)
112#else
113#define PRINT_RINGS()
114#endif
115
116/* Load the CSR registers. The LANCE has to be STOPped when we do this! */
117static void load_csrs (struct lance_private *lp)
118{
119 volatile struct lance_init_block *aib = lp->lance_init_block;
120 int leptr;
121
122 leptr = LANCE_ADDR (aib);
123
124 WRITERAP(lp, LE_CSR1); /* load address of init block */
125 WRITERDP(lp, leptr & 0xFFFF);
126 WRITERAP(lp, LE_CSR2);
127 WRITERDP(lp, leptr >> 16);
128 WRITERAP(lp, LE_CSR3);
129 WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */
130
131 /* Point back to csr0 */
132 WRITERAP(lp, LE_CSR0);
133}
134
135/* #define to 0 or 1 appropriately */
136#define DEBUG_IRING 0
137/* Set up the Lance Rx and Tx rings and the init block */
138static void lance_init_ring (struct net_device *dev)
139{
140 struct lance_private *lp = netdev_priv(dev);
141 volatile struct lance_init_block *ib = lp->init_block;
142 volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
143 int leptr;
144 int i;
145
146 aib = lp->lance_init_block;
147
148 lp->rx_new = lp->tx_new = 0;
149 lp->rx_old = lp->tx_old = 0;
150
151 ib->mode = LE_MO_PROM; /* start promiscuous; lance_set_multicast() sets the real mode */
152
153 /* Copy the ethernet address to the lance init block
154 * Notice that we do a byteswap if we're big endian.
155 * [I think this is the right criterion; at least, sunlance,
156 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
157 * However, the datasheet says that the BSWAP bit doesn't affect
158 * the init block, so surely it should be low byte first for
159 * everybody? Um.]
160 * We could define the ib->physaddr as three 16bit values and
161 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
162 */
163#ifdef __BIG_ENDIAN
164 ib->phys_addr [0] = dev->dev_addr [1];
165 ib->phys_addr [1] = dev->dev_addr [0];
166 ib->phys_addr [2] = dev->dev_addr [3];
167 ib->phys_addr [3] = dev->dev_addr [2];
168 ib->phys_addr [4] = dev->dev_addr [5];
169 ib->phys_addr [5] = dev->dev_addr [4];
170#else
171 for (i=0; i<6; i++)
172 ib->phys_addr[i] = dev->dev_addr[i];
173#endif
174
175 if (DEBUG_IRING)
176 printk ("TX rings:\n");
177
178 lp->tx_full = 0;
179 /* Setup the Tx ring entries */
180 for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) {
181 leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
182 ib->btx_ring [i].tmd0 = leptr;
183 ib->btx_ring [i].tmd1_hadr = leptr >> 16;
184 ib->btx_ring [i].tmd1_bits = 0;
185 ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
186 ib->btx_ring [i].misc = 0;
187 if (DEBUG_IRING)
188 printk ("%d: 0x%8.8x\n", i, leptr);
189 }
190
191 /* Setup the Rx ring entries */
192 if (DEBUG_IRING)
193 printk ("RX rings:\n");
194 for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
195 leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
196
197 ib->brx_ring [i].rmd0 = leptr;
198 ib->brx_ring [i].rmd1_hadr = leptr >> 16;
199 ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
200 /* 0xf000 == bits that must be one (reserved, presumably) */
201 ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
202 ib->brx_ring [i].mblength = 0;
203 if (DEBUG_IRING)
204 printk ("%d: 0x%8.8x\n", i, leptr);
205 }
206
207 /* Setup the initialization block */
208
209 /* Setup rx descriptor pointer */
210 leptr = LANCE_ADDR(&aib->brx_ring);
211 ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
212 ib->rx_ptr = leptr;
213 if (DEBUG_IRING)
214 printk ("RX ptr: %8.8x\n", leptr);
215
216 /* Setup tx descriptor pointer */
217 leptr = LANCE_ADDR(&aib->btx_ring);
218 ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
219 ib->tx_ptr = leptr;
220 if (DEBUG_IRING)
221 printk ("TX ptr: %8.8x\n", leptr);
222
223 /* Clear the multicast filter */
224 ib->filter [0] = 0;
225 ib->filter [1] = 0;
226 PRINT_RINGS();
227}
228
229/* LANCE must be STOPped before we do this, too... */
230static int init_restart_lance (struct lance_private *lp)
231{
232 int i;
233
234 WRITERAP(lp, LE_CSR0);
235 WRITERDP(lp, LE_C0_INIT);
236
237 /* Need a hook here for sunlance ledma stuff */
238
239 /* Wait for the lance to complete initialization */
240 for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
241 barrier();
242 if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
243 printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
244 return -1;
245 }
246
247 /* Clear IDON by writing a "1", enable interrupts and start lance */
248 WRITERDP(lp, LE_C0_IDON);
249 WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);
250
251 return 0;
252}
253
254static int lance_reset (struct net_device *dev)
255{
256 struct lance_private *lp = netdev_priv(dev);
257 int status;
258
259 /* Stop the lance */
260 WRITERAP(lp, LE_CSR0);
261 WRITERDP(lp, LE_C0_STOP);
262
263 load_csrs (lp);
264 lance_init_ring (dev);
265 dev->trans_start = jiffies; /* prevent tx timeout */
266 status = init_restart_lance (lp);
267#ifdef DEBUG_DRIVER
268 printk ("Lance restart=%d\n", status);
269#endif
270 return status;
271}
272
273static int lance_rx (struct net_device *dev)
274{
275 struct lance_private *lp = netdev_priv(dev);
276 volatile struct lance_init_block *ib = lp->init_block;
277 volatile struct lance_rx_desc *rd;
278 unsigned char bits;
279#ifdef TEST_HITS
280 int i;
281#endif
282
283#ifdef TEST_HITS
284 printk ("[");
285 for (i = 0; i < RX_RING_SIZE; i++) {
286 if (i == lp->rx_new)
287 printk ("%s",
288 ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
289 else
290 printk ("%s",
291 ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
292 }
293 printk ("]");
294#endif
295#ifdef CONFIG_HP300
296 blinken_leds(0x40, 0);
297#endif
298 WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */
299 for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */
300 !((bits = rd->rmd1_bits) & LE_R1_OWN);
301 rd = &ib->brx_ring [lp->rx_new]) {
302
303 /* We got an incomplete frame? */
304 if ((bits & LE_R1_POK) != LE_R1_POK) {
305 dev->stats.rx_over_errors++;
306 dev->stats.rx_errors++;
307 continue;
308 } else if (bits & LE_R1_ERR) {
309 /* Count only the end frame as a rx error,
310 * not the beginning
311 */
312 if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
313 if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
314 if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
315 if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
316 if (bits & LE_R1_EOP) dev->stats.rx_errors++;
317 } else {
318 int len = (rd->mblength & 0xfff) - 4;
319 struct sk_buff *skb = dev_alloc_skb (len+2);
320
321 if (!skb) {
322 printk ("%s: Memory squeeze, deferring packet.\n",
323 dev->name);
324 dev->stats.rx_dropped++;
325 rd->mblength = 0;
326 rd->rmd1_bits = LE_R1_OWN;
327 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
328 return 0;
329 }
330
331 skb_reserve (skb, 2); /* 16 byte align */
332 skb_put (skb, len); /* make room */
333 skb_copy_to_linear_data(skb,
334 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
335 len);
336 skb->protocol = eth_type_trans (skb, dev);
337 netif_rx (skb);
338 dev->stats.rx_packets++;
339 dev->stats.rx_bytes += len;
340 }
341
342 /* Return the packet to the pool */
343 rd->mblength = 0;
344 rd->rmd1_bits = LE_R1_OWN;
345 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
346 }
347 return 0;
348}
349
350static int lance_tx (struct net_device *dev)
351{
352 struct lance_private *lp = netdev_priv(dev);
353 volatile struct lance_init_block *ib = lp->init_block;
354 volatile struct lance_tx_desc *td;
355 int i, j;
356 int status;
357
358#ifdef CONFIG_HP300
359 blinken_leds(0x80, 0);
360#endif
361 /* csr0 is 2f3 */
362 WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
363 /* csr0 is 73 */
364
365 j = lp->tx_old;
366 for (i = j; i != lp->tx_new; i = j) {
367 td = &ib->btx_ring [i];
368
369 /* If we hit a packet not owned by us, stop */
370 if (td->tmd1_bits & LE_T1_OWN)
371 break;
372
373 if (td->tmd1_bits & LE_T1_ERR) {
374 status = td->misc;
375
376 dev->stats.tx_errors++;
377 if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
378 if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
379
380 if (status & LE_T3_CLOS) {
381 dev->stats.tx_carrier_errors++;
382 if (lp->auto_select) {
383 lp->tpe = 1 - lp->tpe;
384 printk("%s: Carrier Lost, trying %s\n",
385 dev->name, lp->tpe?"TPE":"AUI");
386 /* Stop the lance */
387 WRITERAP(lp, LE_CSR0);
388 WRITERDP(lp, LE_C0_STOP);
389 lance_init_ring (dev);
390 load_csrs (lp);
391 init_restart_lance (lp);
392 return 0;
393 }
394 }
395
396 /* buffer errors and underflows turn off the transmitter */
397 /* Restart the adapter */
398 if (status & (LE_T3_BUF|LE_T3_UFL)) {
399 dev->stats.tx_fifo_errors++;
400
401 printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
402 dev->name);
403 /* Stop the lance */
404 WRITERAP(lp, LE_CSR0);
405 WRITERDP(lp, LE_C0_STOP);
406 lance_init_ring (dev);
407 load_csrs (lp);
408 init_restart_lance (lp);
409 return 0;
410 }
411 } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
412 /*
413 * So we don't count the packet more than once.
414 */
415 td->tmd1_bits &= ~(LE_T1_POK);
416
417 /* One collision before packet was sent. */
418 if (td->tmd1_bits & LE_T1_EONE)
419 dev->stats.collisions++;
420
421 /* More than one collision, be optimistic. */
422 if (td->tmd1_bits & LE_T1_EMORE)
423 dev->stats.collisions += 2;
424
425 dev->stats.tx_packets++;
426 }
427
428 j = (j + 1) & lp->tx_ring_mod_mask;
429 }
430 lp->tx_old = j;
431 WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
432 return 0;
433}
434
435static irqreturn_t
436lance_interrupt (int irq, void *dev_id)
437{
438 struct net_device *dev = (struct net_device *)dev_id;
439 struct lance_private *lp = netdev_priv(dev);
440 int csr0;
441
442 spin_lock (&lp->devlock);
443
444 WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */
445 csr0 = READRDP(lp);
446
447 PRINT_RINGS();
448
449 if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */
450 spin_unlock (&lp->devlock);
451 return IRQ_NONE; /* been generated by the Lance. */
452 }
453
454 /* Acknowledge all the interrupt sources ASAP */
455 WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));
456
457 if ((csr0 & LE_C0_ERR)) {
458 /* Clear the error condition */
459 WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
460 }
461
462 if (csr0 & LE_C0_RINT)
463 lance_rx (dev);
464
465 if (csr0 & LE_C0_TINT)
466 lance_tx (dev);
467
468 /* Log misc errors. */
469 if (csr0 & LE_C0_BABL)
470 dev->stats.tx_errors++; /* Tx babble. */
471 if (csr0 & LE_C0_MISS)
472 dev->stats.rx_errors++; /* Missed a Rx frame. */
473 if (csr0 & LE_C0_MERR) {
474 printk("%s: Bus master arbitration failure, status %4.4x.\n",
475 dev->name, csr0);
476 /* Restart the chip. */
477 WRITERDP(lp, LE_C0_STRT);
478 }
479
480 if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
481 lp->tx_full = 0;
482 netif_wake_queue (dev);
483 }
484
485 WRITERAP(lp, LE_CSR0);
486 WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);
487
488 spin_unlock (&lp->devlock);
489 return IRQ_HANDLED;
490}
491
492int lance_open (struct net_device *dev)
493{
494 struct lance_private *lp = netdev_priv(dev);
495 int res;
496
497 /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
498 if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
499 return -EAGAIN;
500
501 res = lance_reset(dev);
502 spin_lock_init(&lp->devlock);
503 netif_start_queue (dev);
504
505 return res;
506}
507EXPORT_SYMBOL_GPL(lance_open);
508
509int lance_close (struct net_device *dev)
510{
511 struct lance_private *lp = netdev_priv(dev);
512
513 netif_stop_queue (dev);
514
515 /* Stop the LANCE */
516 WRITERAP(lp, LE_CSR0);
517 WRITERDP(lp, LE_C0_STOP);
518
519 free_irq(lp->irq, dev);
520
521 return 0;
522}
523EXPORT_SYMBOL_GPL(lance_close);
524
525void lance_tx_timeout(struct net_device *dev)
526{
527 printk("lance_tx_timeout\n");
528 lance_reset(dev);
529 dev->trans_start = jiffies; /* prevent tx timeout */
530 netif_wake_queue (dev);
531}
532EXPORT_SYMBOL_GPL(lance_tx_timeout);
533
534int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
535{
536 struct lance_private *lp = netdev_priv(dev);
537 volatile struct lance_init_block *ib = lp->init_block;
538 int entry, skblen, len;
539 static int outs;
540 unsigned long flags;
541
542 if (!TX_BUFFS_AVAIL)
543 return NETDEV_TX_LOCKED;
544
545 netif_stop_queue (dev);
546
547 skblen = skb->len;
548
549#ifdef DEBUG_DRIVER
550 /* dump the packet */
551 {
552 int i;
553
554 for (i = 0; i < 64; i++) {
555 if ((i % 16) == 0)
556 printk ("\n");
557 printk ("%2.2x ", skb->data [i]);
558 }
559 }
560#endif
561 len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
562 entry = lp->tx_new & lp->tx_ring_mod_mask;
563 ib->btx_ring [entry].length = (-len) | 0xf000;
564 ib->btx_ring [entry].misc = 0;
565
566 if (skb->len < ETH_ZLEN)
567 memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
568 skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
569
570 /* Now, give the packet to the lance */
571 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
572 lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
573
574 outs++;
575 /* Kick the lance: transmit now */
576 WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
577 dev_kfree_skb (skb);
578
579 spin_lock_irqsave (&lp->devlock, flags);
580 if (TX_BUFFS_AVAIL)
581 netif_start_queue (dev);
582 else
583 lp->tx_full = 1;
584 spin_unlock_irqrestore (&lp->devlock, flags);
585
586 return NETDEV_TX_OK;
587}
588EXPORT_SYMBOL_GPL(lance_start_xmit);
589
590/* taken from the depca driver via a2065.c */
591static void lance_load_multicast (struct net_device *dev)
592{
593 struct lance_private *lp = netdev_priv(dev);
594 volatile struct lance_init_block *ib = lp->init_block;
595 volatile u16 *mcast_table = (u16 *)&ib->filter;
596 struct netdev_hw_addr *ha;
597 u32 crc;
598
599 /* set all multicast bits */
600 if (dev->flags & IFF_ALLMULTI){
601 ib->filter [0] = 0xffffffff;
602 ib->filter [1] = 0xffffffff;
603 return;
604 }
605 /* clear the multicast filter */
606 ib->filter [0] = 0;
607 ib->filter [1] = 0;
608
609 /* Add addresses */
610 netdev_for_each_mc_addr(ha, dev) {
611 crc = ether_crc_le(6, ha->addr);
612 crc = crc >> 26;
613 mcast_table [crc >> 4] |= 1 << (crc & 0xf);
614 }
615}
616
617
618void lance_set_multicast (struct net_device *dev)
619{
620 struct lance_private *lp = netdev_priv(dev);
621 volatile struct lance_init_block *ib = lp->init_block;
622 int stopped;
623
624 stopped = netif_queue_stopped(dev);
625 if (!stopped)
626 netif_stop_queue (dev);
627
628 while (lp->tx_old != lp->tx_new)
629 schedule();
630
631 WRITERAP(lp, LE_CSR0);
632 WRITERDP(lp, LE_C0_STOP);
633 lance_init_ring (dev);
634
635 if (dev->flags & IFF_PROMISC) {
636 ib->mode |= LE_MO_PROM;
637 } else {
638 ib->mode &= ~LE_MO_PROM;
639 lance_load_multicast (dev);
640 }
641 load_csrs (lp);
642 init_restart_lance (lp);
643
644 if (!stopped)
645 netif_start_queue (dev);
646}
647EXPORT_SYMBOL_GPL(lance_set_multicast);
648
649#ifdef CONFIG_NET_POLL_CONTROLLER
650void lance_poll(struct net_device *dev)
651{
652 struct lance_private *lp = netdev_priv(dev);
653
654 spin_lock (&lp->devlock);
655 WRITERAP(lp, LE_CSR0);
656 WRITERDP(lp, LE_C0_STRT);
657 spin_unlock (&lp->devlock);
658 lance_interrupt(dev->irq, dev);
659}
660#endif
661
662MODULE_LICENSE("GPL");
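The multicast path in lance_load_multicast() above hashes each address with the little-endian Ethernet CRC and uses the top six bits of the result to pick one of the 64 bits in the init block's filter, addressed as four 16-bit words. A minimal userspace sketch of that mapping; ether_crc_le_sketch() is a local stand-in for the kernel's ether_crc_le(), assumed here to be the reflected CRC-32 seeded with ~0 and not post-inverted:

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 (poly 0xEDB88320), seed ~0, no final XOR --
 * the assumed behaviour of the kernel's ether_crc_le().
 */
static uint32_t ether_crc_le_sketch(size_t len, const uint8_t *p)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	uint16_t filter[4] = { 0 };	/* ib->filter viewed as four u16s */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	uint32_t crc = ether_crc_le_sketch(6, mac);
	unsigned bit = crc >> 26;	/* top 6 bits: one of 64 filter bits */

	/* Same indexing as lance_load_multicast() */
	filter[bit >> 4] |= 1u << (bit & 0xf);
	printf("bit %u -> filter[%u] = 0x%04x\n",
	       bit, bit >> 4, filter[bit >> 4]);
	return 0;
}

The LANCE then accepts a multicast frame only if the bit its own hash of the destination address selects is set, so the filter can pass unwanted addresses (hash collisions) but never drops a subscribed one.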
diff --git a/drivers/net/ethernet/amd/7990.h b/drivers/net/ethernet/amd/7990.h
new file mode 100644
index 000000000000..0a5837b96421
--- /dev/null
+++ b/drivers/net/ethernet/amd/7990.h
@@ -0,0 +1,254 @@
1/*
2 * 7990.h -- LANCE ethernet IC generic routines.
3 * This is an attempt to separate out the bits of various ethernet
4 * drivers that are common because they all use the AMD 7990 LANCE
5 * (Local Area Network Controller for Ethernet) chip.
6 *
7 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
8 *
9 * Most of this stuff was obtained by looking at other LANCE drivers,
10 * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
11 */
12
13#ifndef _7990_H
14#define _7990_H
15
16/* The lance only has two register locations. We communicate mostly via memory. */
17#define LANCE_RDP 0 /* Register Data Port */
18#define LANCE_RAP 2 /* Register Address Port */
19
20/* Transmit/receive ring definitions.
21 * We allow the specific drivers to override these defaults if they want to.
22 * NB: according to lance.c, increasing the number of buffers is a waste
23 * of space and reduces the chance that an upper layer will be able to
24 * reorder queued Tx packets based on priority. [Clearly there is a minimum
25 * limit too: too small and we drop rx packets and can't tx at full speed.]
26 * 4+4 seems to be the usual setting; the atarilance driver uses 3 and 5.
27 */
28
29/* Blast! This won't work. The problem is that we can't specify a default
30 * setting because that would cause the lance_init_block struct to be
31 * too long (and overflow the RAM on shared-memory cards like the HP LANCE).
32 */
33#ifndef LANCE_LOG_TX_BUFFERS
34#define LANCE_LOG_TX_BUFFERS 1
35#define LANCE_LOG_RX_BUFFERS 3
36#endif
37
38#define TX_RING_SIZE (1<<LANCE_LOG_TX_BUFFERS)
39#define RX_RING_SIZE (1<<LANCE_LOG_RX_BUFFERS)
40#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
41#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
42#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
43#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
44#define PKT_BUFF_SIZE (1544)
45#define RX_BUFF_SIZE PKT_BUFF_SIZE
46#define TX_BUFF_SIZE PKT_BUFF_SIZE
47
48/* Each receive buffer is described by a receive message descriptor (RMD) */
49struct lance_rx_desc {
50 volatile unsigned short rmd0; /* low address of packet */
51 volatile unsigned char rmd1_bits; /* descriptor bits */
52 volatile unsigned char rmd1_hadr; /* high address of packet */
53 volatile short length; /* This length is 2s complement (negative)!
54 * Buffer length
55 */
56 volatile unsigned short mblength; /* Actual number of bytes received */
57};
58
59/* Ditto for TMD: */
60struct lance_tx_desc {
61 volatile unsigned short tmd0; /* low address of packet */
62 volatile unsigned char tmd1_bits; /* descriptor bits */
63 volatile unsigned char tmd1_hadr; /* high address of packet */
64 volatile short length; /* Length is 2s complement (negative)! */
65 volatile unsigned short misc;
66};
67
68/* There are three memory structures accessed by the LANCE:
69 * the initialization block, the receive and transmit descriptor rings,
70 * and the data buffers themselves. In fact we might as well put the
71 * init block,the Tx and Rx rings and the buffers together in memory:
72 */
73struct lance_init_block {
74 volatile unsigned short mode; /* Pre-set mode (reg. 15) */
75 volatile unsigned char phys_addr[6]; /* Physical ethernet address */
76 volatile unsigned filter[2]; /* Multicast filter (64 bits) */
77
78 /* Receive and transmit ring base, along with extra bits. */
79 volatile unsigned short rx_ptr; /* receive descriptor addr */
80 volatile unsigned short rx_len; /* receive len and high addr */
81 volatile unsigned short tx_ptr; /* transmit descriptor addr */
82 volatile unsigned short tx_len; /* transmit len and high addr */
83
84 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries.
85 * This will be true if this whole struct is 8-byte aligned.
86 */
87 volatile struct lance_tx_desc btx_ring[TX_RING_SIZE];
88 volatile struct lance_rx_desc brx_ring[RX_RING_SIZE];
89
90 volatile char tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
91 volatile char rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
92 /* we use this just to make the struct big enough that we can move its startaddr
93 * in order to force alignment to an eight byte boundary.
94 */
95};
96
97/* This is where we keep all the stuff the driver needs to know about.
98 * I'm definitely unhappy about the mechanism for allowing specific
99 * drivers to add things...
100 */
101struct lance_private
102{
103 char *name;
104 unsigned long base;
105 volatile struct lance_init_block *init_block; /* CPU address of RAM */
106 volatile struct lance_init_block *lance_init_block; /* LANCE address of RAM */
107
108 int rx_new, tx_new;
109 int rx_old, tx_old;
110
111 int lance_log_rx_bufs, lance_log_tx_bufs;
112 int rx_ring_mod_mask, tx_ring_mod_mask;
113
114 int tpe; /* TPE is selected */
115 int auto_select; /* cable-selection is by carrier */
116 unsigned short busmaster_regval;
117
118 unsigned int irq; /* IRQ to register */
119
120 /* This is because the HP LANCE is disgusting and you have to check
121 * a DIO-specific register every time you read/write the LANCE regs :-<
122 * [could we get away with making these some sort of macro?]
123 */
124 void (*writerap)(void *, unsigned short);
125 void (*writerdp)(void *, unsigned short);
126 unsigned short (*readrdp)(void *);
127 spinlock_t devlock;
128 char tx_full;
129};
130
131/*
132 * Am7990 Control and Status Registers
133 */
134#define LE_CSR0 0x0000 /* LANCE Controller Status */
135#define LE_CSR1 0x0001 /* IADR[15:0] (bit0==0 ie word aligned) */
136#define LE_CSR2 0x0002 /* IADR[23:16] (high bits reserved) */
137#define LE_CSR3 0x0003 /* Misc */
138
139/*
140 * Bit definitions for CSR0 (LANCE Controller Status)
141 */
142#define LE_C0_ERR 0x8000 /* Error = BABL | CERR | MISS | MERR */
143#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
144#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
145#define LE_C0_MISS 0x1000 /* Missed Frame (no rx buffer to put it in) */
146#define LE_C0_MERR 0x0800 /* Memory Error */
147#define LE_C0_RINT 0x0400 /* Receive Interrupt */
148#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
149#define LE_C0_IDON 0x0100 /* Initialization Done */
150#define LE_C0_INTR 0x0080 /* Interrupt Flag
151 = BABL | MISS | MERR | RINT | TINT | IDON */
152#define LE_C0_INEA 0x0040 /* Interrupt Enable */
153#define LE_C0_RXON 0x0020 /* Receive On */
154#define LE_C0_TXON 0x0010 /* Transmit On */
155#define LE_C0_TDMD 0x0008 /* Transmit Demand */
156#define LE_C0_STOP 0x0004 /* Stop */
157#define LE_C0_STRT 0x0002 /* Start */
158#define LE_C0_INIT 0x0001 /* Initialize */
159
160
161/*
162 * Bit definitions for CSR3
163 */
164#define LE_C3_BSWP 0x0004 /* Byte Swap
165 (on for big endian byte order) */
166#define LE_C3_ACON 0x0002 /* ALE Control
167 (on for active low ALE) */
168#define LE_C3_BCON 0x0001 /* Byte Control */
169
170
171/*
172 * Mode Flags
173 */
174#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
175/* these next ones 0x4000 -- 0x0080 are not available on the LANCE 7990,
176 * but they are in NetBSD's am7990.h, presumably for backwards-compatible chips
177 */
178#define LE_MO_DRCVBC 0x4000 /* disable receive broadcast */
179#define LE_MO_DRCVPA 0x2000 /* disable physical address detection */
180#define LE_MO_DLNKTST 0x1000 /* disable link status */
181#define LE_MO_DAPC 0x0800 /* disable automatic polarity correction */
182#define LE_MO_MENDECL 0x0400 /* MENDEC loopback mode */
183#define LE_MO_LRTTSEL 0x0200 /* lower RX threshold / TX mode selection */
184#define LE_MO_PSEL1 0x0100 /* port selection bit1 */
185#define LE_MO_PSEL0 0x0080 /* port selection bit0 */
186/* and this one is from the C-LANCE data sheet... */
187#define LE_MO_EMBA 0x0080 /* Enable Modified Backoff Algorithm
188 (C-LANCE, not original LANCE) */
189#define LE_MO_INTL 0x0040 /* Internal Loopback */
190#define LE_MO_DRTY 0x0020 /* Disable Retry */
191#define LE_MO_FCOLL 0x0010 /* Force Collision */
192#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
193#define LE_MO_LOOP 0x0004 /* Loopback Enable */
194#define LE_MO_DTX 0x0002 /* Disable Transmitter */
195#define LE_MO_DRX 0x0001 /* Disable Receiver */
196
197
198/*
199 * Receive Flags
200 */
201#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
202#define LE_R1_ERR 0x40 /* Error */
203#define LE_R1_FRA 0x20 /* Framing Error */
204#define LE_R1_OFL 0x10 /* Overflow Error */
205#define LE_R1_CRC 0x08 /* CRC Error */
206#define LE_R1_BUF 0x04 /* Buffer Error */
207#define LE_R1_SOP 0x02 /* Start of Packet */
208#define LE_R1_EOP 0x01 /* End of Packet */
209#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
210
211
212/*
213 * Transmit Flags
214 */
215#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
216#define LE_T1_ERR 0x40 /* Error */
217#define LE_T1_RES 0x20 /* Reserved, LANCE writes this with a zero */
218#define LE_T1_EMORE 0x10 /* More than one retry needed */
219#define LE_T1_EONE 0x08 /* One retry needed */
220#define LE_T1_EDEF 0x04 /* Deferred */
221#define LE_T1_SOP 0x02 /* Start of Packet */
222#define LE_T1_EOP 0x01 /* End of Packet */
223#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
224
225/*
226 * Error Flags
227 */
228#define LE_T3_BUF 0x8000 /* Buffer Error */
229#define LE_T3_UFL 0x4000 /* Underflow Error */
230#define LE_T3_LCOL 0x1000 /* Late Collision */
231#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
232#define LE_T3_RTY 0x0400 /* Retry Error */
233#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
234
235/* Miscellaneous useful macros */
236
237#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
238 lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
239 lp->tx_old - lp->tx_new-1)
240
241/* The LANCE only uses 24 bit addresses. This does the obvious thing. */
242#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
243
244/* Now the prototypes we export */
245extern int lance_open(struct net_device *dev);
246extern int lance_close (struct net_device *dev);
247extern int lance_start_xmit (struct sk_buff *skb, struct net_device *dev);
248extern void lance_set_multicast (struct net_device *dev);
249extern void lance_tx_timeout(struct net_device *dev);
250#ifdef CONFIG_NET_POLL_CONTROLLER
251extern void lance_poll(struct net_device *dev);
252#endif
253
254#endif /* ndef _7990_H */
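Two pieces of arithmetic from this header are easy to get wrong: TX_BUFFS_AVAIL leaves one ring slot permanently unused so that tx_old == tx_new always means "empty", and descriptor buffer lengths are stored as negative 16-bit two's-complement values with the 0xf000 must-be-one bits OR-ed in (RX_BUFF_SIZE is 1544). A small self-checking sketch of both:

#include <assert.h>
#include <stdint.h>

/* Free Tx slots, as computed by the TX_BUFFS_AVAIL macro in 7990.h;
 * mod_mask is ring_size - 1.
 */
static int tx_buffs_avail(int tx_old, int tx_new, int mod_mask)
{
	if (tx_old <= tx_new)
		return tx_old + mod_mask - tx_new;
	return tx_old - tx_new - 1;
}

int main(void)
{
	int mask = (1 << 4) - 1;		/* a 16-entry ring */

	assert(tx_buffs_avail(0, 0, mask) == 15);  /* empty: size-1 free */
	assert(tx_buffs_avail(5, 4, mask) == 0);   /* full: one slot kept unused */

	/* -RX_BUFF_SIZE | 0xf000, as written into rmd2 by lance_init_ring():
	 * the negative length already has 0xF in the top nibble for any
	 * buffer under 4 KB, so the OR just guarantees the reserved bits.
	 */
	uint16_t len = (uint16_t)(-1544) | 0xf000;
	assert(len == 0xf9f8);
	return 0;
}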
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
new file mode 100644
index 000000000000..238b537b68fe
--- /dev/null
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -0,0 +1,195 @@
1#
2# AMD network device configuration
3#
4
5config NET_VENDOR_AMD
6 bool "AMD devices"
7 default y
8 depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
9 SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
10 (ARM && ARCH_EBSA110) || ISA || EISA || MCA || PCMCIA
11 ---help---
12 If you have a network (Ethernet) chipset belonging to this class,
13 say Y.
14
15 Note that the answer to this question does not directly affect
16 the kernel: saying N will just case the configurator to skip all
17 the questions regarding AMD chipsets. If you say Y, you will be asked
18 for your specific chipset/driver in the following questions.
19
20if NET_VENDOR_AMD
21
22config A2065
23 tristate "A2065 support"
24 depends on ZORRO
25 select CRC32
26 ---help---
27 If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
28 say N.
29
30 To compile this driver as a module, choose M here: the module
31 will be called a2065.
32
33config AMD8111_ETH
34 tristate "AMD 8111 (new PCI LANCE) support"
35 depends on PCI
36 select CRC32
37 select NET_CORE
38 select MII
39 ---help---
40 If you have an AMD 8111-based PCI LANCE ethernet card,
41 answer Y here and read the Ethernet-HOWTO, available from
42 <http://www.tldp.org/docs.html#howto>.
43
44 To compile this driver as a module, choose M here. The module
45 will be called amd8111e.
46
47config LANCE
48 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
49 depends on ISA && ISA_DMA_API
50 ---help---
51 If you have a network (Ethernet) card of this type, say Y and read
52 the Ethernet-HOWTO, available from
53 <http://www.tldp.org/docs.html#howto>. Some LinkSys cards are
54 of this type.
55
56 To compile this driver as a module, choose M here: the module
57 will be called lance. This is recommended.
58
59config PCNET32
60 tristate "AMD PCnet32 PCI support"
61 depends on PCI
62 select CRC32
63 select NET_CORE
64 select MII
65 ---help---
66 If you have a PCnet32 or PCnetPCI based network (Ethernet) card,
67 answer Y here and read the Ethernet-HOWTO, available from
68 <http://www.tldp.org/docs.html#howto>.
69
70 To compile this driver as a module, choose M here. The module
71 will be called pcnet32.
72
73config ARIADNE
74 tristate "Ariadne support"
75 depends on ZORRO
76 ---help---
77 If you have a Village Tronic Ariadne Ethernet adapter, say Y.
78 Otherwise, say N.
79
80 To compile this driver as a module, choose M here: the module
81 will be called ariadne.
82
83config ARM_AM79C961A
84 bool "ARM EBSA110 AM79C961A support"
85 depends on ARM && ARCH_EBSA110
86 select CRC32
87 ---help---
88 If you wish to compile a kernel for the EBSA-110, then you should
89 always answer Y to this.
90
91config ATARILANCE
92 tristate "Atari LANCE support"
93 depends on ATARI
94 ---help---
95 Say Y to include support for several Atari Ethernet adapters based
96 on the AMD LANCE chipset: RieblCard (with or without battery), or
97 PAMCard VME (also the version by Rhotron, with different addresses).
98
99config DECLANCE
100 tristate "DEC LANCE ethernet controller support"
101 depends on MACH_DECSTATION
102 select CRC32
103 ---help---
104 This driver is for the series of Ethernet controllers produced by
105 DEC (now Compaq) based on the AMD LANCE chipset, including the
106 DEPCA series. (This chipset is better known via the NE2100 cards.)
107
108config DEPCA
109 tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
110 depends on (ISA || EISA || MCA)
111 select CRC32
112 ---help---
113 If you have a network (Ethernet) card of this type, say Y and read
114 the Ethernet-HOWTO, available from
115 <http://www.tldp.org/docs.html#howto> as well as
116 <file:drivers/net/depca.c>.
117
118 To compile this driver as a module, choose M here. The module
119 will be called depca.
120
121config HPLANCE
122 bool "HP on-board LANCE support"
123 depends on DIO
124 select CRC32
125 ---help---
126 If you want to use the builtin "LANCE" Ethernet controller on an
127 HP300 machine, say Y here.
128
129config MIPS_AU1X00_ENET
130 tristate "MIPS AU1000 Ethernet support"
131 depends on MIPS_ALCHEMY
132 select PHYLIB
133 select CRC32
134 ---help---
135 If you have an Alchemy Semi AU1X00 based system
136 say Y. Otherwise, say N.
137
138config MVME147_NET
139 tristate "MVME147 (LANCE) Ethernet support"
140 depends on MVME147
141 select CRC32
142 ---help---
143 Support for the on-board Ethernet interface on the Motorola MVME147
144 single-board computer. Say Y here to include the
145 driver for this chip in your kernel.
146 To compile this driver as a module, choose M here.
147
148config PCMCIA_NMCLAN
149 tristate "New Media PCMCIA support"
150 depends on PCMCIA
151 help
152 Say Y here if you intend to attach a New Media Ethernet or LiveWire
153 PCMCIA (PC-card) Ethernet card to your computer.
154
155 To compile this driver as a module, choose M here: the module will be
156 called nmclan_cs. If unsure, say N.
157
158config NI65
159 tristate "NI6510 support"
160 depends on ISA && ISA_DMA_API
161 ---help---
162 If you have a network (Ethernet) card of this type, say Y and read
163 the Ethernet-HOWTO, available from
164 <http://www.tldp.org/docs.html#howto>.
165
166 To compile this driver as a module, choose M here. The module
167 will be called ni65.
168
169config SUN3LANCE
170 tristate "Sun3/Sun3x on-board LANCE support"
171 depends on (SUN3 || SUN3X)
172 ---help---
173 Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
174 featured an AMD LANCE 10Mbit Ethernet controller on board; say Y
175 here to compile in the Linux driver for this and enable Ethernet.
176 General Linux information on the Sun 3 and 3x series (now
177 discontinued) is at
178 <http://www.angelfire.com/ca2/tech68k/sun3.html>.
179
180 If you're not building a kernel for a Sun 3, say N.
181
182config SUNLANCE
183 tristate "Sun LANCE support"
184 depends on SBUS
185 select CRC32
186 ---help---
187 This driver supports the "le" interface present on all 32-bit Sparc
188 systems, on some older Ultra systems and as an Sbus option. These
189 cards are based on the AMD LANCE chipset, which is better known
190 via the NE2100 cards.
191
192 To compile this driver as a module, choose M here: the module
193 will be called sunlance.
194
195endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
new file mode 100644
index 000000000000..175caa5328c9
--- /dev/null
+++ b/drivers/net/ethernet/amd/Makefile
@@ -0,0 +1,20 @@
1#
2# Makefile for the AMD network device drivers.
3#
4
5obj-$(CONFIG_A2065) += a2065.o
6obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
7obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
8obj-$(CONFIG_ARIADNE) += ariadne.o
9obj-$(CONFIG_ATARILANCE) += atarilance.o
10obj-$(CONFIG_DECLANCE) += declance.o
11obj-$(CONFIG_DEPCA) += depca.o
12obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
13obj-$(CONFIG_LANCE) += lance.o
14obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
15obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
16obj-$(CONFIG_PCMCIA_NMCLAN) += nmclan_cs.o
17obj-$(CONFIG_NI65) += ni65.o
18obj-$(CONFIG_PCNET32) += pcnet32.o
19obj-$(CONFIG_SUN3LANCE) += sun3lance.o
20obj-$(CONFIG_SUNLANCE) += sunlance.o
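Note that hplance and mvme147 each link the shared 7990.o core alongside their own glue. A hypothetical sketch of how such a board driver wires the core in; the myboard_* names and probe signature are illustrative only, while the lance_private fields and exported lance_*() entry points come from 7990.h and the probe shape from a2065.c:

/* Hypothetical board glue for the shared 7990 core. */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include "7990.h"

static const struct net_device_ops myboard_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_tx_timeout		= lance_tx_timeout,
};

static int myboard_probe(unsigned long regs, unsigned long ram,
			 unsigned int irq)
{
	struct net_device *dev = alloc_etherdev(sizeof(struct lance_private));
	struct lance_private *lp;
	int err;

	if (!dev)
		return -ENOMEM;
	lp = netdev_priv(dev);
	lp->name = "myboard LANCE";
	lp->base = regs;			/* RAP/RDP register window */
	lp->init_block = (struct lance_init_block *)ram;	/* CPU view */
	lp->lance_init_block = (struct lance_init_block *)ram;	/* 24-bit LANCE view */
	lp->irq = irq;				/* lance_open() requests it */
	lp->busmaster_regval = LE_C3_BSWP;	/* big-endian host */
	lp->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	lp->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	lp->rx_ring_mod_mask = RX_RING_MOD_MASK;
	lp->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &myboard_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;

	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
	return err;
}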
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
new file mode 100644
index 000000000000..825e5d4ef4c3
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -0,0 +1,781 @@
1/*
2 * Amiga Linux/68k A2065 Ethernet Driver
3 *
4 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
5 *
6 * Fixes and tips by:
7 * - Janos Farkas (CHEXUM@sparta.banki.hu)
8 * - Jes Degn Soerensen (jds@kom.auc.dk)
9 * - Matt Domsch (Matt_Domsch@dell.com)
10 *
11 * ----------------------------------------------------------------------------
12 *
13 * This program is based on
14 *
15 * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
16 * (C) Copyright 1995 by Geert Uytterhoeven,
17 * Peter De Schrijver
18 *
19 * lance.c: An AMD LANCE ethernet driver for linux.
20 * Written 1993-94 by Donald Becker.
21 *
22 * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
23 * Advanced Micro Devices
24 * Publication #16907, Rev. B, Amendment/0, May 1994
25 *
26 * ----------------------------------------------------------------------------
27 *
28 * This file is subject to the terms and conditions of the GNU General Public
29 * License. See the file COPYING in the main directory of the Linux
30 * distribution for more details.
31 *
32 * ----------------------------------------------------------------------------
33 *
34 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
35 *
36 * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
37 * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
38 */
39
40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41
42/*#define DEBUG*/
43/*#define TEST_HITS*/
44
45#include <linux/errno.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/module.h>
49#include <linux/stddef.h>
50#include <linux/kernel.h>
51#include <linux/interrupt.h>
52#include <linux/ioport.h>
53#include <linux/skbuff.h>
54#include <linux/string.h>
55#include <linux/init.h>
56#include <linux/crc32.h>
57#include <linux/zorro.h>
58#include <linux/bitops.h>
59
60#include <asm/irq.h>
61#include <asm/amigaints.h>
62#include <asm/amigahw.h>
63
64#include "a2065.h"
65
66/* Transmit/Receive Ring Definitions */
67
68#define LANCE_LOG_TX_BUFFERS (2)
69#define LANCE_LOG_RX_BUFFERS (4)
70
71#define TX_RING_SIZE (1 << LANCE_LOG_TX_BUFFERS)
72#define RX_RING_SIZE (1 << LANCE_LOG_RX_BUFFERS)
73
74#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
75#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
76
77#define PKT_BUF_SIZE (1544)
78#define RX_BUFF_SIZE PKT_BUF_SIZE
79#define TX_BUFF_SIZE PKT_BUF_SIZE
80
81/* Layout of the Lance's RAM Buffer */
82
83struct lance_init_block {
84 unsigned short mode; /* Pre-set mode (reg. 15) */
85 unsigned char phys_addr[6]; /* Physical ethernet address */
86 unsigned filter[2]; /* Multicast filter. */
87
88 /* Receive and transmit ring base, along with extra bits. */
89 unsigned short rx_ptr; /* receive descriptor addr */
90 unsigned short rx_len; /* receive len and high addr */
91 unsigned short tx_ptr; /* transmit descriptor addr */
92 unsigned short tx_len; /* transmit len and high addr */
93
94 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
95 struct lance_rx_desc brx_ring[RX_RING_SIZE];
96 struct lance_tx_desc btx_ring[TX_RING_SIZE];
97
98 char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
99 char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
100};
101
102/* Private Device Data */
103
104struct lance_private {
105 char *name;
106 volatile struct lance_regs *ll;
107 volatile struct lance_init_block *init_block; /* Hosts view */
108 volatile struct lance_init_block *lance_init_block; /* Lance view */
109
110 int rx_new, tx_new;
111 int rx_old, tx_old;
112
113 int lance_log_rx_bufs, lance_log_tx_bufs;
114 int rx_ring_mod_mask, tx_ring_mod_mask;
115
116 int tpe; /* cable-selection is TPE */
117 int auto_select; /* cable-selection by carrier */
118 unsigned short busmaster_regval;
119
120#ifdef CONFIG_SUNLANCE
121 struct Linux_SBus_DMA *ledma; /* if set this points to ledma and arch=4m */
122 int burst_sizes; /* ledma SBus burst sizes */
123#endif
124 struct timer_list multicast_timer;
125};
126
127#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
128
129/* Load the CSR registers */
130static void load_csrs(struct lance_private *lp)
131{
132 volatile struct lance_regs *ll = lp->ll;
133 volatile struct lance_init_block *aib = lp->lance_init_block;
134 int leptr = LANCE_ADDR(aib);
135
136 ll->rap = LE_CSR1;
137 ll->rdp = (leptr & 0xFFFF);
138 ll->rap = LE_CSR2;
139 ll->rdp = leptr >> 16;
140 ll->rap = LE_CSR3;
141 ll->rdp = lp->busmaster_regval;
142
143 /* Point back to csr0 */
144 ll->rap = LE_CSR0;
145}
146
147/* Setup the Lance Rx and Tx rings */
148static void lance_init_ring(struct net_device *dev)
149{
150 struct lance_private *lp = netdev_priv(dev);
151 volatile struct lance_init_block *ib = lp->init_block;
152 volatile struct lance_init_block *aib = lp->lance_init_block;
153 /* for LANCE_ADDR computations */
154 int leptr;
155 int i;
156
157 /* Lock out other processes while setting up hardware */
158 netif_stop_queue(dev);
159 lp->rx_new = lp->tx_new = 0;
160 lp->rx_old = lp->tx_old = 0;
161
162 ib->mode = 0;
163
164 /* Copy the ethernet address to the lance init block
165 * Note that on the sparc you need to swap the ethernet address.
166 */
167 ib->phys_addr[0] = dev->dev_addr[1];
168 ib->phys_addr[1] = dev->dev_addr[0];
169 ib->phys_addr[2] = dev->dev_addr[3];
170 ib->phys_addr[3] = dev->dev_addr[2];
171 ib->phys_addr[4] = dev->dev_addr[5];
172 ib->phys_addr[5] = dev->dev_addr[4];
173
174 /* Setup the Tx ring entries */
175 netdev_dbg(dev, "TX rings:\n");
176 for (i = 0; i < 1 << lp->lance_log_tx_bufs; i++) {
177 leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
178 ib->btx_ring[i].tmd0 = leptr;
179 ib->btx_ring[i].tmd1_hadr = leptr >> 16;
180 ib->btx_ring[i].tmd1_bits = 0;
181 ib->btx_ring[i].length = 0xf000; /* The ones required by tmd2 */
182 ib->btx_ring[i].misc = 0;
183 if (i < 3)
184 netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
185 }
186
187 /* Setup the Rx ring entries */
188 netdev_dbg(dev, "RX rings:\n");
189 for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
190 leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
191
192 ib->brx_ring[i].rmd0 = leptr;
193 ib->brx_ring[i].rmd1_hadr = leptr >> 16;
194 ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
195 ib->brx_ring[i].length = -RX_BUFF_SIZE | 0xf000;
196 ib->brx_ring[i].mblength = 0;
197 if (i < 3)
198 netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
199 }
200
201 /* Setup the initialization block */
202
203 /* Setup rx descriptor pointer */
204 leptr = LANCE_ADDR(&aib->brx_ring);
205 ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
206 ib->rx_ptr = leptr;
207 netdev_dbg(dev, "RX ptr: %08x\n", leptr);
208
209 /* Setup tx descriptor pointer */
210 leptr = LANCE_ADDR(&aib->btx_ring);
211 ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
212 ib->tx_ptr = leptr;
213 netdev_dbg(dev, "TX ptr: %08x\n", leptr);
214
215 /* Clear the multicast filter */
216 ib->filter[0] = 0;
217 ib->filter[1] = 0;
218}
219
220static int init_restart_lance(struct lance_private *lp)
221{
222 volatile struct lance_regs *ll = lp->ll;
223 int i;
224
225 ll->rap = LE_CSR0;
226 ll->rdp = LE_C0_INIT;
227
228 /* Wait for the lance to complete initialization */
229 for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
230 barrier();
231 if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
232 pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
233 return -EIO;
234 }
235
236 /* Clear IDON by writing a "1", enable interrupts and start lance */
237 ll->rdp = LE_C0_IDON;
238 ll->rdp = LE_C0_INEA | LE_C0_STRT;
239
240 return 0;
241}
242
243static int lance_rx(struct net_device *dev)
244{
245 struct lance_private *lp = netdev_priv(dev);
246 volatile struct lance_init_block *ib = lp->init_block;
247 volatile struct lance_regs *ll = lp->ll;
248 volatile struct lance_rx_desc *rd;
249 unsigned char bits;
250
251#ifdef TEST_HITS
252 int i;
253 char buf[RX_RING_SIZE + 1];
254
255 for (i = 0; i < RX_RING_SIZE; i++) {
256 char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
257 if (i == lp->rx_new)
258 buf[i] = r1_own ? '_' : 'X';
259 else
260 buf[i] = r1_own ? '.' : '1';
261 }
262 buf[RX_RING_SIZE] = 0;
263
264 pr_debug("RxRing TestHits: [%s]\n", buf);
265#endif
266
267 ll->rdp = LE_C0_RINT | LE_C0_INEA;
268 for (rd = &ib->brx_ring[lp->rx_new];
269 !((bits = rd->rmd1_bits) & LE_R1_OWN);
270 rd = &ib->brx_ring[lp->rx_new]) {
271
272 /* We got an incomplete frame? */
273 if ((bits & LE_R1_POK) != LE_R1_POK) {
274 dev->stats.rx_over_errors++;
275 dev->stats.rx_errors++;
276 continue;
277 } else if (bits & LE_R1_ERR) {
278 /* Count only the end frame as a rx error,
279 * not the beginning
280 */
281 if (bits & LE_R1_BUF)
282 dev->stats.rx_fifo_errors++;
283 if (bits & LE_R1_CRC)
284 dev->stats.rx_crc_errors++;
285 if (bits & LE_R1_OFL)
286 dev->stats.rx_over_errors++;
287 if (bits & LE_R1_FRA)
288 dev->stats.rx_frame_errors++;
289 if (bits & LE_R1_EOP)
290 dev->stats.rx_errors++;
291 } else {
292 int len = (rd->mblength & 0xfff) - 4;
293 struct sk_buff *skb = dev_alloc_skb(len + 2);
294
295 if (!skb) {
296 netdev_warn(dev, "Memory squeeze, deferring packet\n");
297 dev->stats.rx_dropped++;
298 rd->mblength = 0;
299 rd->rmd1_bits = LE_R1_OWN;
300 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
301 return 0;
302 }
303
304 skb_reserve(skb, 2); /* 16 byte align */
305 skb_put(skb, len); /* make room */
306 skb_copy_to_linear_data(skb,
307 (unsigned char *)&ib->rx_buf[lp->rx_new][0],
308 len);
309 skb->protocol = eth_type_trans(skb, dev);
310 netif_rx(skb);
311 dev->stats.rx_packets++;
312 dev->stats.rx_bytes += len;
313 }
314
315 /* Return the packet to the pool */
316 rd->mblength = 0;
317 rd->rmd1_bits = LE_R1_OWN;
318 lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
319 }
320 return 0;
321}
322
323static int lance_tx(struct net_device *dev)
324{
325 struct lance_private *lp = netdev_priv(dev);
326 volatile struct lance_init_block *ib = lp->init_block;
327 volatile struct lance_regs *ll = lp->ll;
328 volatile struct lance_tx_desc *td;
329 int i, j;
330 int status;
331
332 /* csr0 is 2f3 */
333 ll->rdp = LE_C0_TINT | LE_C0_INEA;
334 /* csr0 is 73 */
335
336 j = lp->tx_old;
337 for (i = j; i != lp->tx_new; i = j) {
338 td = &ib->btx_ring[i];
339
340 /* If we hit a packet not owned by us, stop */
341 if (td->tmd1_bits & LE_T1_OWN)
342 break;
343
344 if (td->tmd1_bits & LE_T1_ERR) {
345 status = td->misc;
346
347 dev->stats.tx_errors++;
348 if (status & LE_T3_RTY)
349 dev->stats.tx_aborted_errors++;
350 if (status & LE_T3_LCOL)
351 dev->stats.tx_window_errors++;
352
353 if (status & LE_T3_CLOS) {
354 dev->stats.tx_carrier_errors++;
355 if (lp->auto_select) {
356 lp->tpe = 1 - lp->tpe;
357 netdev_err(dev, "Carrier Lost, trying %s\n",
358 lp->tpe ? "TPE" : "AUI");
359 /* Stop the lance */
360 ll->rap = LE_CSR0;
361 ll->rdp = LE_C0_STOP;
362 lance_init_ring(dev);
363 load_csrs(lp);
364 init_restart_lance(lp);
365 return 0;
366 }
367 }
368
369 /* buffer errors and underflows turn off
370 * the transmitter, so restart the adapter
371 */
372 if (status & (LE_T3_BUF | LE_T3_UFL)) {
373 dev->stats.tx_fifo_errors++;
374
375 netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
376 /* Stop the lance */
377 ll->rap = LE_CSR0;
378 ll->rdp = LE_C0_STOP;
379 lance_init_ring(dev);
380 load_csrs(lp);
381 init_restart_lance(lp);
382 return 0;
383 }
384 } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
385 /* So we don't count the packet more than once. */
386 td->tmd1_bits &= ~(LE_T1_POK);
387
388 /* One collision before packet was sent. */
389 if (td->tmd1_bits & LE_T1_EONE)
390 dev->stats.collisions++;
391
392 /* More than one collision, be optimistic. */
393 if (td->tmd1_bits & LE_T1_EMORE)
394 dev->stats.collisions += 2;
395
396 dev->stats.tx_packets++;
397 }
398
399 j = (j + 1) & lp->tx_ring_mod_mask;
400 }
401 lp->tx_old = j;
402 ll->rdp = LE_C0_TINT | LE_C0_INEA;
403 return 0;
404}
405
406static int lance_tx_buffs_avail(struct lance_private *lp)
407{
408 if (lp->tx_old <= lp->tx_new)
409 return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
410 return lp->tx_old - lp->tx_new - 1;
411}
412
413static irqreturn_t lance_interrupt(int irq, void *dev_id)
414{
415 struct net_device *dev = dev_id;
416 struct lance_private *lp = netdev_priv(dev);
417 volatile struct lance_regs *ll = lp->ll;
418 int csr0;
419
420 ll->rap = LE_CSR0; /* LANCE Controller Status */
421 csr0 = ll->rdp;
422
423 if (!(csr0 & LE_C0_INTR)) /* Check if any interrupt has */
424 return IRQ_NONE; /* been generated by the Lance. */
425
426 /* Acknowledge all the interrupt sources ASAP */
427 ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
428 LE_C0_INIT);
429
430 if (csr0 & LE_C0_ERR) {
431 /* Clear the error condition */
432 ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
433 }
434
435 if (csr0 & LE_C0_RINT)
436 lance_rx(dev);
437
438 if (csr0 & LE_C0_TINT)
439 lance_tx(dev);
440
441 /* Log misc errors. */
442 if (csr0 & LE_C0_BABL)
443 dev->stats.tx_errors++; /* Tx babble. */
444 if (csr0 & LE_C0_MISS)
445 dev->stats.rx_errors++; /* Missed a Rx frame. */
446 if (csr0 & LE_C0_MERR) {
447 netdev_err(dev, "Bus master arbitration failure, status %04x\n",
448 csr0);
449 /* Restart the chip. */
450 ll->rdp = LE_C0_STRT;
451 }
452
453 if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
454 netif_wake_queue(dev);
455
456 ll->rap = LE_CSR0;
457 ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
458 LE_C0_IDON | LE_C0_INEA);
459 return IRQ_HANDLED;
460}
461
462static int lance_open(struct net_device *dev)
463{
464 struct lance_private *lp = netdev_priv(dev);
465 volatile struct lance_regs *ll = lp->ll;
466 int ret;
467
468 /* Stop the Lance */
469 ll->rap = LE_CSR0;
470 ll->rdp = LE_C0_STOP;
471
472 /* Install the Interrupt handler */
473 ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
474 dev->name, dev);
475 if (ret)
476 return ret;
477
478 load_csrs(lp);
479 lance_init_ring(dev);
480
481 netif_start_queue(dev);
482
483 return init_restart_lance(lp);
484}
485
486static int lance_close(struct net_device *dev)
487{
488 struct lance_private *lp = netdev_priv(dev);
489 volatile struct lance_regs *ll = lp->ll;
490
491 netif_stop_queue(dev);
492 del_timer_sync(&lp->multicast_timer);
493
494 /* Stop the card */
495 ll->rap = LE_CSR0;
496 ll->rdp = LE_C0_STOP;
497
498 free_irq(IRQ_AMIGA_PORTS, dev);
499 return 0;
500}
501
502static inline int lance_reset(struct net_device *dev)
503{
504 struct lance_private *lp = netdev_priv(dev);
505 volatile struct lance_regs *ll = lp->ll;
506 int status;
507
508 /* Stop the lance */
509 ll->rap = LE_CSR0;
510 ll->rdp = LE_C0_STOP;
511
512 load_csrs(lp);
513
514 lance_init_ring(dev);
515 dev->trans_start = jiffies; /* prevent tx timeout */
516 netif_start_queue(dev);
517
518 status = init_restart_lance(lp);
519 netdev_dbg(dev, "Lance restart=%d\n", status);
520
521 return status;
522}
523
524static void lance_tx_timeout(struct net_device *dev)
525{
526 struct lance_private *lp = netdev_priv(dev);
527 volatile struct lance_regs *ll = lp->ll;
528
529 netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
530 lance_reset(dev);
531 netif_wake_queue(dev);
532}
533
534static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
535 struct net_device *dev)
536{
537 struct lance_private *lp = netdev_priv(dev);
538 volatile struct lance_regs *ll = lp->ll;
539 volatile struct lance_init_block *ib = lp->init_block;
540 int entry, skblen;
541 int status = NETDEV_TX_OK;
542 unsigned long flags;
543
544 if (skb_padto(skb, ETH_ZLEN))
545 return NETDEV_TX_OK;
546 skblen = max_t(unsigned, skb->len, ETH_ZLEN);
547
548 local_irq_save(flags);
549
550 if (!lance_tx_buffs_avail(lp)) {
551 local_irq_restore(flags);
552 return NETDEV_TX_LOCKED;
553 }
554
555#ifdef DEBUG
556 /* dump the packet */
557 print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
558 16, 1, skb->data, 64, true);
559#endif
560 entry = lp->tx_new & lp->tx_ring_mod_mask;
561 ib->btx_ring[entry].length = (-skblen) | 0xf000;
562 ib->btx_ring[entry].misc = 0;
563
564 skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);
565
566 /* Now, give the packet to the lance */
567 ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
568 lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
569 dev->stats.tx_bytes += skblen;
570
571 if (lance_tx_buffs_avail(lp) <= 0)
572 netif_stop_queue(dev);
573
574 /* Kick the lance: transmit now */
575 ll->rdp = LE_C0_INEA | LE_C0_TDMD;
576 dev_kfree_skb(skb);
577
578 local_irq_restore(flags);
579
580 return status;
581}
582
583/* taken from the depca driver */
584static void lance_load_multicast(struct net_device *dev)
585{
586 struct lance_private *lp = netdev_priv(dev);
587 volatile struct lance_init_block *ib = lp->init_block;
588 volatile u16 *mcast_table = (u16 *)&ib->filter;
589 struct netdev_hw_addr *ha;
590 u32 crc;
591
592 /* set all multicast bits */
593 if (dev->flags & IFF_ALLMULTI) {
594 ib->filter[0] = 0xffffffff;
595 ib->filter[1] = 0xffffffff;
596 return;
597 }
598 /* clear the multicast filter */
599 ib->filter[0] = 0;
600 ib->filter[1] = 0;
601
602 /* Add addresses */
603 netdev_for_each_mc_addr(ha, dev) {
604 crc = ether_crc_le(6, ha->addr);
605 crc = crc >> 26;
606 mcast_table[crc >> 4] |= 1 << (crc & 0xf);
607 }
608}
609
610static void lance_set_multicast(struct net_device *dev)
611{
612 struct lance_private *lp = netdev_priv(dev);
613 volatile struct lance_init_block *ib = lp->init_block;
614 volatile struct lance_regs *ll = lp->ll;
615
616 if (!netif_running(dev))
617 return;
618
619 if (lp->tx_old != lp->tx_new) {
620 mod_timer(&lp->multicast_timer, jiffies + 4);
621 netif_wake_queue(dev);
622 return;
623 }
624
625 netif_stop_queue(dev);
626
627 ll->rap = LE_CSR0;
628 ll->rdp = LE_C0_STOP;
629 lance_init_ring(dev);
630
631 if (dev->flags & IFF_PROMISC) {
632 ib->mode |= LE_MO_PROM;
633 } else {
634 ib->mode &= ~LE_MO_PROM;
635 lance_load_multicast(dev);
636 }
637 load_csrs(lp);
638 init_restart_lance(lp);
639 netif_wake_queue(dev);
640}
641
642static int __devinit a2065_init_one(struct zorro_dev *z,
643 const struct zorro_device_id *ent);
644static void __devexit a2065_remove_one(struct zorro_dev *z);
645
646
647static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
648 { ZORRO_PROD_CBM_A2065_1 },
649 { ZORRO_PROD_CBM_A2065_2 },
650 { ZORRO_PROD_AMERISTAR_A2065 },
651 { 0 }
652};
653MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
654
655static struct zorro_driver a2065_driver = {
656 .name = "a2065",
657 .id_table = a2065_zorro_tbl,
658 .probe = a2065_init_one,
659 .remove = __devexit_p(a2065_remove_one),
660};
661
662static const struct net_device_ops lance_netdev_ops = {
663 .ndo_open = lance_open,
664 .ndo_stop = lance_close,
665 .ndo_start_xmit = lance_start_xmit,
666 .ndo_tx_timeout = lance_tx_timeout,
667 .ndo_set_rx_mode = lance_set_multicast,
668 .ndo_validate_addr = eth_validate_addr,
669 .ndo_change_mtu = eth_change_mtu,
670 .ndo_set_mac_address = eth_mac_addr,
671};
672
673static int __devinit a2065_init_one(struct zorro_dev *z,
674 const struct zorro_device_id *ent)
675{
676 struct net_device *dev;
677 struct lance_private *priv;
678 unsigned long board = z->resource.start;
679 unsigned long base_addr = board + A2065_LANCE;
680 unsigned long mem_start = board + A2065_RAM;
681 struct resource *r1, *r2;
682 int err;
683
684 r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
685 "Am7990");
686 if (!r1)
687 return -EBUSY;
688 r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
689 if (!r2) {
690 release_mem_region(base_addr, sizeof(struct lance_regs));
691 return -EBUSY;
692 }
693
694 dev = alloc_etherdev(sizeof(struct lance_private));
695 if (dev == NULL) {
696 release_mem_region(base_addr, sizeof(struct lance_regs));
697 release_mem_region(mem_start, A2065_RAM_SIZE);
698 return -ENOMEM;
699 }
700
701 priv = netdev_priv(dev);
702
703 r1->name = dev->name;
704 r2->name = dev->name;
705
706 dev->dev_addr[0] = 0x00;
707 if (z->id != ZORRO_PROD_AMERISTAR_A2065) { /* Commodore */
708 dev->dev_addr[1] = 0x80;
709 dev->dev_addr[2] = 0x10;
710 } else { /* Ameristar */
711 dev->dev_addr[1] = 0x00;
712 dev->dev_addr[2] = 0x9f;
713 }
714 dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
715 dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
716 dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
717 dev->base_addr = ZTWO_VADDR(base_addr);
718 dev->mem_start = ZTWO_VADDR(mem_start);
719 dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
720
721 priv->ll = (volatile struct lance_regs *)dev->base_addr;
722 priv->init_block = (struct lance_init_block *)dev->mem_start;
723 priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
724 priv->auto_select = 0;
725 priv->busmaster_regval = LE_C3_BSWP;
726
727 priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
728 priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
729 priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
730 priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
731
732 dev->netdev_ops = &lance_netdev_ops;
733 dev->watchdog_timeo = 5*HZ;
734 dev->dma = 0;
735
736 init_timer(&priv->multicast_timer);
737 priv->multicast_timer.data = (unsigned long) dev;
738 priv->multicast_timer.function =
739 (void (*)(unsigned long))lance_set_multicast;
740
741 err = register_netdev(dev);
742 if (err) {
743 release_mem_region(base_addr, sizeof(struct lance_regs));
744 release_mem_region(mem_start, A2065_RAM_SIZE);
745 free_netdev(dev);
746 return err;
747 }
748 zorro_set_drvdata(z, dev);
749
750 netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
751 board, dev->dev_addr);
752
753 return 0;
754}
755
756
757static void __devexit a2065_remove_one(struct zorro_dev *z)
758{
759 struct net_device *dev = zorro_get_drvdata(z);
760
761 unregister_netdev(dev);
762 release_mem_region(ZTWO_PADDR(dev->base_addr),
763 sizeof(struct lance_regs));
764 release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
765 free_netdev(dev);
766}
767
768static int __init a2065_init_module(void)
769{
770 return zorro_register_driver(&a2065_driver);
771}
772
773static void __exit a2065_cleanup_module(void)
774{
775 zorro_unregister_driver(&a2065_driver);
776}
777
778module_init(a2065_init_module);
779module_exit(a2065_cleanup_module);
780
781MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/a2065.h b/drivers/net/ethernet/amd/a2065.h
new file mode 100644
index 000000000000..5117759d4e9c
--- /dev/null
+++ b/drivers/net/ethernet/amd/a2065.h
@@ -0,0 +1,173 @@
1/*
2 * Amiga Linux/68k A2065 Ethernet Driver
3 *
4 * (C) Copyright 1995 by Geert Uytterhoeven <geert@linux-m68k.org>
5 *
6 * ---------------------------------------------------------------------------
7 *
8 * This program is based on
9 *
10 * ariadne.?: Amiga Linux/68k Ariadne Ethernet Driver
11 * (C) Copyright 1995 by Geert Uytterhoeven,
12 * Peter De Schrijver
13 *
14 * lance.c: An AMD LANCE ethernet driver for linux.
15 * Written 1993-94 by Donald Becker.
16 *
17 * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
18 * Advanced Micro Devices
19 * Publication #16907, Rev. B, Amendment/0, May 1994
20 *
21 * ---------------------------------------------------------------------------
22 *
23 * This file is subject to the terms and conditions of the GNU General Public
24 * License. See the file COPYING in the main directory of the Linux
25 * distribution for more details.
26 *
27 * ---------------------------------------------------------------------------
28 *
29 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
30 *
31 * - an Am7990 Local Area Network Controller for Ethernet (LANCE) with
32 * both 10BASE-2 (thin coax) and AUI (DB-15) connectors
33 */
34
35
36/*
37 * Am7990 Local Area Network Controller for Ethernet (LANCE)
38 */
39
40struct lance_regs {
41 unsigned short rdp; /* Register Data Port */
42 unsigned short rap; /* Register Address Port */
43};
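/* Editor's note: every LANCE register access goes indirectly through this
 * pair -- the CSR number is written to RAP, then data moves through RDP.
 * A sketch of the idiom used throughout the driver (the lance_regs pointer
 * is assumed to be already mapped by the caller):
 */
static void lance_write_csr(volatile struct lance_regs *ll,
			    unsigned short csr, unsigned short val)
{
	ll->rap = csr;	/* select the CSR */
	ll->rdp = val;	/* write it through the data port */
}

static unsigned short lance_read_csr(volatile struct lance_regs *ll,
				     unsigned short csr)
{
	ll->rap = csr;	/* select the CSR */
	return ll->rdp;	/* read it back through the data port */
}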
44
45
46/*
47 * Am7990 Control and Status Registers
48 */
49
50#define LE_CSR0 0x0000 /* LANCE Controller Status */
51#define LE_CSR1 0x0001 /* IADR[15:0] */
52#define LE_CSR2 0x0002 /* IADR[23:16] */
53#define LE_CSR3 0x0003 /* Misc */
54
55
56/*
57 * Bit definitions for CSR0 (LANCE Controller Status)
58 */
59
60#define LE_C0_ERR 0x8000 /* Error */
61#define LE_C0_BABL 0x4000 /* Babble: Transmitted too many bits */
62#define LE_C0_CERR 0x2000 /* No Heartbeat (10BASE-T) */
63#define LE_C0_MISS 0x1000 /* Missed Frame */
64#define LE_C0_MERR 0x0800 /* Memory Error */
65#define LE_C0_RINT 0x0400 /* Receive Interrupt */
66#define LE_C0_TINT 0x0200 /* Transmit Interrupt */
67#define LE_C0_IDON 0x0100 /* Initialization Done */
68#define LE_C0_INTR 0x0080 /* Interrupt Flag */
69#define LE_C0_INEA 0x0040 /* Interrupt Enable */
70#define LE_C0_RXON 0x0020 /* Receive On */
71#define LE_C0_TXON 0x0010 /* Transmit On */
72#define LE_C0_TDMD 0x0008 /* Transmit Demand */
73#define LE_C0_STOP 0x0004 /* Stop */
74#define LE_C0_STRT 0x0002 /* Start */
75#define LE_C0_INIT 0x0001 /* Initialize */
76
77
78/*
79 * Bit definitions for CSR3
80 */
81
82#define LE_C3_BSWP 0x0004 /* Byte Swap
83 (on for big endian byte order) */
84#define LE_C3_ACON 0x0002 /* ALE Control
85 (on for active low ALE) */
86#define LE_C3_BCON 0x0001 /* Byte Control */
87
88
89/*
90 * Mode Flags
91 */
92
93#define LE_MO_PROM 0x8000 /* Promiscuous Mode */
94#define LE_MO_INTL 0x0040 /* Internal Loopback */
95#define LE_MO_DRTY 0x0020 /* Disable Retry */
96#define LE_MO_FCOLL 0x0010 /* Force Collision */
97#define LE_MO_DXMTFCS 0x0008 /* Disable Transmit CRC */
98#define LE_MO_LOOP 0x0004 /* Loopback Enable */
99#define LE_MO_DTX 0x0002 /* Disable Transmitter */
100#define LE_MO_DRX 0x0001 /* Disable Receiver */
101
102
103struct lance_rx_desc {
104 unsigned short rmd0; /* low address of packet */
105 unsigned char rmd1_bits; /* descriptor bits */
106 unsigned char rmd1_hadr; /* high address of packet */
107 short length; /* This length is 2s complement (negative)!
108 * Buffer length
109 */
110	unsigned short mblength;	/* Actual number of bytes received */
111};
112
113struct lance_tx_desc {
114 unsigned short tmd0; /* low address of packet */
115 unsigned char tmd1_bits; /* descriptor bits */
116 unsigned char tmd1_hadr; /* high address of packet */
117 short length; /* Length is 2s complement (negative)! */
118 unsigned short misc;
119};
120
121
122/*
123 * Receive Flags
124 */
125
126#define LE_R1_OWN 0x80 /* LANCE owns the descriptor */
127#define LE_R1_ERR 0x40 /* Error */
128#define LE_R1_FRA 0x20 /* Framing Error */
129#define LE_R1_OFL 0x10 /* Overflow Error */
130#define LE_R1_CRC 0x08 /* CRC Error */
131#define LE_R1_BUF 0x04 /* Buffer Error */
132#define LE_R1_SOP 0x02 /* Start of Packet */
133#define LE_R1_EOP 0x01 /* End of Packet */
134#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
135
136
137/*
138 * Transmit Flags
139 */
140
141#define LE_T1_OWN 0x80 /* LANCE owns the descriptor */
142#define LE_T1_ERR 0x40 /* Error */
143#define LE_T1_RES 0x20 /* Reserved,
144 LANCE writes this with a zero */
145#define LE_T1_EMORE 0x10 /* More than one retry needed */
146#define LE_T1_EONE 0x08 /* One retry needed */
147#define LE_T1_EDEF 0x04 /* Deferred */
148#define LE_T1_SOP 0x02 /* Start of Packet */
149#define LE_T1_EOP 0x01 /* End of Packet */
150#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
151
152
153/*
154 * Error Flags
155 */
156
157#define LE_T3_BUF 0x8000 /* Buffer Error */
158#define LE_T3_UFL 0x4000 /* Underflow Error */
159#define LE_T3_LCOL 0x1000 /* Late Collision */
160#define LE_T3_CLOS 0x0800 /* Loss of Carrier */
161#define LE_T3_RTY 0x0400 /* Retry Error */
162#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry */
163
164
165/*
166 * A2065 Expansion Board Structure
167 */
168
169#define A2065_LANCE 0x4000
170
171#define A2065_RAM 0x8000
172#define A2065_RAM_SIZE 0x8000
173
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
new file mode 100644
index 000000000000..7d5ded80d2d7
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -0,0 +1,770 @@
1/*
2 * linux/drivers/net/am79c961.c
3 *
4 * by Russell King <rmk@arm.linux.org.uk> 1995-2001.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Derived from various things including skeleton.c
11 *
12 * This is a special driver for the am79c961A Lance chip used in the
13 * Intel (formerly Digital Equipment Corp) EBSA110 platform. Please
14 * note that this cannot be built as a module (it doesn't make sense).
15 */
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/interrupt.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <linux/errno.h>
23#include <linux/netdevice.h>
24#include <linux/etherdevice.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/crc32.h>
28#include <linux/bitops.h>
29#include <linux/platform_device.h>
30#include <linux/io.h>
31
32#include <mach/hardware.h>
33#include <asm/system.h>
34
35#define TX_BUFFERS 15
36#define RX_BUFFERS 25
37
38#include "am79c961a.h"
39
40static irqreturn_t
41am79c961_interrupt (int irq, void *dev_id);
42
43static unsigned int net_debug = NET_DEBUG;
44
45static const char version[] =
46 "am79c961 ethernet driver (C) 1995-2001 Russell King v0.04\n";
47
48/* --------------------------------------------------------------------------- */
49
50#ifdef __arm__
51static void write_rreg(u_long base, u_int reg, u_int val)
52{
53 asm volatile(
54 "str%?h %1, [%2] @ NET_RAP\n\t"
55 "str%?h %0, [%2, #-4] @ NET_RDP"
56 :
57 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
58}
59
60static inline unsigned short read_rreg(u_long base_addr, u_int reg)
61{
62 unsigned short v;
63 asm volatile(
64 "str%?h %1, [%2] @ NET_RAP\n\t"
65 "ldr%?h %0, [%2, #-4] @ NET_RDP"
66 : "=r" (v)
67 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
68 return v;
69}
70
71static inline void write_ireg(u_long base, u_int reg, u_int val)
72{
73 asm volatile(
74 "str%?h %1, [%2] @ NET_RAP\n\t"
75 "str%?h %0, [%2, #8] @ NET_IDP"
76 :
77 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
78}
79
80static inline unsigned short read_ireg(u_long base_addr, u_int reg)
81{
82 u_short v;
83 asm volatile(
84	 "str%?h %1, [%2] @ NET_RAP\n\t"
85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
86 : "=r" (v)
87 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
88 return v;
89}
90
91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
93
94static void
95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
96{
97 offset = ISAMEM_BASE + (offset << 1);
98 length = (length + 1) & ~1;
99 if ((int)buf & 2) {
100 asm volatile("str%?h %2, [%0], #4"
101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
102 buf += 2;
103 length -= 2;
104 }
105 while (length > 8) {
106 register unsigned int tmp asm("r2"), tmp2 asm("r3");
107 asm volatile(
108 "ldm%?ia %0!, {%1, %2}"
109 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
110 length -= 8;
111 asm volatile(
112 "str%?h %1, [%0], #4\n\t"
113 "mov%? %1, %1, lsr #16\n\t"
114 "str%?h %1, [%0], #4\n\t"
115 "str%?h %2, [%0], #4\n\t"
116 "mov%? %2, %2, lsr #16\n\t"
117 "str%?h %2, [%0], #4"
118 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
119 }
120 while (length > 0) {
121 asm volatile("str%?h %2, [%0], #4"
122 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
123 buf += 2;
124 length -= 2;
125 }
126}
127
128static void
129am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
130{
131 offset = ISAMEM_BASE + (offset << 1);
132 length = (length + 1) & ~1;
133 if ((int)buf & 2) {
134 unsigned int tmp;
135 asm volatile(
136 "ldr%?h %2, [%0], #4\n\t"
137 "str%?b %2, [%1], #1\n\t"
138 "mov%? %2, %2, lsr #8\n\t"
139 "str%?b %2, [%1], #1"
140 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
141 length -= 2;
142 }
143 while (length > 8) {
144 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
145 asm volatile(
146 "ldr%?h %2, [%0], #4\n\t"
147 "ldr%?h %4, [%0], #4\n\t"
148 "ldr%?h %3, [%0], #4\n\t"
149 "orr%? %2, %2, %4, lsl #16\n\t"
150 "ldr%?h %4, [%0], #4\n\t"
151 "orr%? %3, %3, %4, lsl #16\n\t"
152 "stm%?ia %1!, {%2, %3}"
153 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
154 : "0" (offset), "1" (buf));
155 length -= 8;
156 }
157 while (length > 0) {
158 unsigned int tmp;
159 asm volatile(
160 "ldr%?h %2, [%0], #4\n\t"
161 "str%?b %2, [%1], #1\n\t"
162 "mov%? %2, %2, lsr #8\n\t"
163 "str%?b %2, [%1], #1"
164 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
165 length -= 2;
166 }
167}
168#else
169#error Not compatible
170#endif
171
172static int
173am79c961_ramtest(struct net_device *dev, unsigned int val)
174{
175 unsigned char *buffer = kmalloc (65536, GFP_KERNEL);
176 int i, error = 0, errorcount = 0;
177
178 if (!buffer)
179 return 0;
180 memset (buffer, val, 65536);
181 am_writebuffer(dev, 0, buffer, 65536);
182 memset (buffer, val ^ 255, 65536);
183 am_readbuffer(dev, 0, buffer, 65536);
184 for (i = 0; i < 65536; i++) {
185 if (buffer[i] != val && !error) {
186 printk ("%s: buffer error (%02X %02X) %05X - ", dev->name, val, buffer[i], i);
187 error = 1;
188 errorcount ++;
189 } else if (error && buffer[i] == val) {
190 printk ("%05X\n", i);
191 error = 0;
192 }
193 }
194 if (error)
195 printk ("10000\n");
196 kfree (buffer);
197 return errorcount;
198}
199
200static void am79c961_mc_hash(char *addr, u16 *hash)
201{
202 int idx, bit;
203 u32 crc;
204
205 crc = ether_crc_le(ETH_ALEN, addr);
206
207 idx = crc >> 30;
208 bit = (crc >> 26) & 15;
209
210 hash[idx] |= 1 << bit;
211}
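/* Editor's note: a worked example with an assumed CRC value. If
 * ether_crc_le() returned 0xb6c43e5a, its top six bits are 101101b:
 * idx = crc >> 30 = 2 selects hash word 2 (LADRM2) and
 * bit = (crc >> 26) & 15 = 13 sets bit 13 within it -- the same
 * two-bit word / four-bit bit split the 7990 code derives from
 * crc >> 26.
 */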
212
213static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
214{
215 unsigned int mode = MODE_PORT_10BT;
216
217 if (dev->flags & IFF_PROMISC) {
218 mode |= MODE_PROMISC;
219 memset(hash, 0xff, 4 * sizeof(*hash));
220 } else if (dev->flags & IFF_ALLMULTI) {
221 memset(hash, 0xff, 4 * sizeof(*hash));
222 } else {
223 struct netdev_hw_addr *ha;
224
225 memset(hash, 0, 4 * sizeof(*hash));
226
227 netdev_for_each_mc_addr(ha, dev)
228 am79c961_mc_hash(ha->addr, hash);
229 }
230
231 return mode;
232}
233
234static void
235am79c961_init_for_open(struct net_device *dev)
236{
237 struct dev_priv *priv = netdev_priv(dev);
238 unsigned long flags;
239 unsigned char *p;
240 u_int hdr_addr, first_free_addr;
241 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
242 int i;
243
244 /*
245 * Stop the chip.
246 */
247 spin_lock_irqsave(&priv->chip_lock, flags);
248 write_rreg (dev->base_addr, CSR0, CSR0_BABL|CSR0_CERR|CSR0_MISS|CSR0_MERR|CSR0_TINT|CSR0_RINT|CSR0_STOP);
249 spin_unlock_irqrestore(&priv->chip_lock, flags);
250
251 write_ireg (dev->base_addr, 5, 0x00a0); /* Receive address LED */
252 write_ireg (dev->base_addr, 6, 0x0081); /* Collision LED */
253 write_ireg (dev->base_addr, 7, 0x0090); /* XMIT LED */
254 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
255
256 for (i = LADRL; i <= LADRH; i++)
257 write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
258
259 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
260 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
261
262 write_rreg (dev->base_addr, MODE, mode);
263 write_rreg (dev->base_addr, POLLINT, 0);
264 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
265 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
266
267 first_free_addr = RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16;
268 hdr_addr = 0;
269
270 priv->rxhead = 0;
271 priv->rxtail = 0;
272 priv->rxhdr = hdr_addr;
273
274 for (i = 0; i < RX_BUFFERS; i++) {
275 priv->rxbuffer[i] = first_free_addr;
276 am_writeword (dev, hdr_addr, first_free_addr);
277 am_writeword (dev, hdr_addr + 2, RMD_OWN);
278 am_writeword (dev, hdr_addr + 4, (-1600));
279 am_writeword (dev, hdr_addr + 6, 0);
280 first_free_addr += 1600;
281 hdr_addr += 8;
282 }
283 priv->txhead = 0;
284 priv->txtail = 0;
285 priv->txhdr = hdr_addr;
286 for (i = 0; i < TX_BUFFERS; i++) {
287 priv->txbuffer[i] = first_free_addr;
288 am_writeword (dev, hdr_addr, first_free_addr);
289 am_writeword (dev, hdr_addr + 2, TMD_STP|TMD_ENP);
290 am_writeword (dev, hdr_addr + 4, 0xf000);
291 am_writeword (dev, hdr_addr + 6, 0);
292 first_free_addr += 1600;
293 hdr_addr += 8;
294 }
295
296 write_rreg (dev->base_addr, BASERXL, priv->rxhdr);
297 write_rreg (dev->base_addr, BASERXH, 0);
298 write_rreg (dev->base_addr, BASETXL, priv->txhdr);
299	write_rreg (dev->base_addr, BASETXH, 0);
300 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
301 write_rreg (dev->base_addr, CSR3, CSR3_IDONM|CSR3_BABLM|CSR3_DXSUFLO);
302 write_rreg (dev->base_addr, CSR4, CSR4_APAD_XMIT|CSR4_MFCOM|CSR4_RCVCCOM|CSR4_TXSTRTM|CSR4_JABM);
303 write_rreg (dev->base_addr, CSR0, CSR0_IENA|CSR0_STRT);
304}
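/* Editor's note: the resulting layout of the shared packet RAM set up
 * above (offsets as passed to am_writeword()):
 *
 *     0                                    RX descriptor ring, 8 bytes each
 *     RX_BUFFERS * 8                       TX descriptor ring, 8 bytes each
 *     RX_BUFFERS * 8 + TX_BUFFERS * 8 + 16 packet buffers, 1600 bytes each,
 *                                          RX buffers first, then TX
 */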
305
306static void am79c961_timer(unsigned long data)
307{
308 struct net_device *dev = (struct net_device *)data;
309 struct dev_priv *priv = netdev_priv(dev);
310 unsigned int lnkstat, carrier;
311 unsigned long flags;
312
313 spin_lock_irqsave(&priv->chip_lock, flags);
314 lnkstat = read_ireg(dev->base_addr, ISALED0) & ISALED0_LNKST;
315 spin_unlock_irqrestore(&priv->chip_lock, flags);
316 carrier = netif_carrier_ok(dev);
317
318 if (lnkstat && !carrier) {
319 netif_carrier_on(dev);
320 printk("%s: link up\n", dev->name);
321 } else if (!lnkstat && carrier) {
322 netif_carrier_off(dev);
323 printk("%s: link down\n", dev->name);
324 }
325
326 mod_timer(&priv->timer, jiffies + msecs_to_jiffies(500));
327}
328
329/*
330 * Open/initialize the board.
331 */
332static int
333am79c961_open(struct net_device *dev)
334{
335 struct dev_priv *priv = netdev_priv(dev);
336 int ret;
337
338 ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
339 if (ret)
340 return ret;
341
342 am79c961_init_for_open(dev);
343
344 netif_carrier_off(dev);
345
346 priv->timer.expires = jiffies;
347 add_timer(&priv->timer);
348
349 netif_start_queue(dev);
350
351 return 0;
352}
353
354/*
355 * The inverse routine to am79c961_open().
356 */
357static int
358am79c961_close(struct net_device *dev)
359{
360 struct dev_priv *priv = netdev_priv(dev);
361 unsigned long flags;
362
363 del_timer_sync(&priv->timer);
364
365 netif_stop_queue(dev);
366 netif_carrier_off(dev);
367
368 spin_lock_irqsave(&priv->chip_lock, flags);
369 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
370 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
371 spin_unlock_irqrestore(&priv->chip_lock, flags);
372
373 free_irq (dev->irq, dev);
374
375 return 0;
376}
377
378/*
379 * Set or clear promiscuous/multicast mode filter for this adapter.
380 */
381static void am79c961_setmulticastlist (struct net_device *dev)
382{
383 struct dev_priv *priv = netdev_priv(dev);
384 unsigned long flags;
385 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
386 int i, stopped;
387
388 spin_lock_irqsave(&priv->chip_lock, flags);
389
390 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
391
392 if (!stopped) {
393 /*
394 * Put the chip into suspend mode
395 */
396 write_rreg(dev->base_addr, CTRL1, CTRL1_SPND);
397
398 /*
399 * Spin waiting for chip to report suspend mode
400 */
401 while ((read_rreg(dev->base_addr, CTRL1) & CTRL1_SPND) == 0) {
402 spin_unlock_irqrestore(&priv->chip_lock, flags);
403 nop();
404 spin_lock_irqsave(&priv->chip_lock, flags);
405 }
406 }
407
408 /*
409 * Update the multicast hash table
410 */
411 for (i = 0; i < ARRAY_SIZE(multi_hash); i++)
412 write_rreg(dev->base_addr, i + LADRL, multi_hash[i]);
413
414 /*
415 * Write the mode register
416 */
417 write_rreg(dev->base_addr, MODE, mode);
418
419 if (!stopped) {
420 /*
421 * Put the chip back into running mode
422 */
423 write_rreg(dev->base_addr, CTRL1, 0);
424 }
425
426 spin_unlock_irqrestore(&priv->chip_lock, flags);
427}
428
429static void am79c961_timeout(struct net_device *dev)
430{
431 printk(KERN_WARNING "%s: transmit timed out, network cable problem?\n",
432 dev->name);
433
434 /*
435 * ought to do some setup of the tx side here
436 */
437
438 netif_wake_queue(dev);
439}
440
441/*
442 * Transmit a packet
443 */
444static int
445am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
446{
447 struct dev_priv *priv = netdev_priv(dev);
448 unsigned int hdraddr, bufaddr;
449 unsigned int head;
450 unsigned long flags;
451
452 head = priv->txhead;
453 hdraddr = priv->txhdr + (head << 3);
454 bufaddr = priv->txbuffer[head];
455 head += 1;
456 if (head >= TX_BUFFERS)
457 head = 0;
458
459 am_writebuffer (dev, bufaddr, skb->data, skb->len);
460 am_writeword (dev, hdraddr + 4, -skb->len);
461 am_writeword (dev, hdraddr + 2, TMD_OWN|TMD_STP|TMD_ENP);
462 priv->txhead = head;
463
464 spin_lock_irqsave(&priv->chip_lock, flags);
465 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
466 spin_unlock_irqrestore(&priv->chip_lock, flags);
467
468 /*
469 * If the next packet is owned by the ethernet device,
470 * then the tx ring is full and we can't add another
471 * packet.
472 */
473 if (am_readword(dev, priv->txhdr + (priv->txhead << 3) + 2) & TMD_OWN)
474 netif_stop_queue(dev);
475
476 dev_kfree_skb(skb);
477
478 return NETDEV_TX_OK;
479}
480
481/*
482 * If we have a good packet(s), get it/them out of the buffers.
483 */
484static void
485am79c961_rx(struct net_device *dev, struct dev_priv *priv)
486{
487 do {
488 struct sk_buff *skb;
489 u_int hdraddr;
490 u_int pktaddr;
491 u_int status;
492 int len;
493
494 hdraddr = priv->rxhdr + (priv->rxtail << 3);
495 pktaddr = priv->rxbuffer[priv->rxtail];
496
497 status = am_readword (dev, hdraddr + 2);
498 if (status & RMD_OWN) /* do we own it? */
499 break;
500
501 priv->rxtail ++;
502 if (priv->rxtail >= RX_BUFFERS)
503 priv->rxtail = 0;
504
505 if ((status & (RMD_ERR|RMD_STP|RMD_ENP)) != (RMD_STP|RMD_ENP)) {
506 am_writeword (dev, hdraddr + 2, RMD_OWN);
507 dev->stats.rx_errors++;
508 if (status & RMD_ERR) {
509 if (status & RMD_FRAM)
510 dev->stats.rx_frame_errors++;
511 if (status & RMD_CRC)
512 dev->stats.rx_crc_errors++;
513 } else if (status & RMD_STP)
514 dev->stats.rx_length_errors++;
515 continue;
516 }
517
518 len = am_readword(dev, hdraddr + 6);
519 skb = dev_alloc_skb(len + 2);
520
521 if (skb) {
522 skb_reserve(skb, 2);
523
524 am_readbuffer(dev, pktaddr, skb_put(skb, len), len);
525 am_writeword(dev, hdraddr + 2, RMD_OWN);
526 skb->protocol = eth_type_trans(skb, dev);
527 netif_rx(skb);
528 dev->stats.rx_bytes += len;
529 dev->stats.rx_packets++;
530 } else {
531 am_writeword (dev, hdraddr + 2, RMD_OWN);
532 printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
533 dev->stats.rx_dropped++;
534 break;
535 }
536 } while (1);
537}
538
539/*
540 * Update stats for the transmitted packet
541 */
542static void
543am79c961_tx(struct net_device *dev, struct dev_priv *priv)
544{
545 do {
546 short len;
547 u_int hdraddr;
548 u_int status;
549
550 hdraddr = priv->txhdr + (priv->txtail << 3);
551 status = am_readword (dev, hdraddr + 2);
552 if (status & TMD_OWN)
553 break;
554
555 priv->txtail ++;
556 if (priv->txtail >= TX_BUFFERS)
557 priv->txtail = 0;
558
559 if (status & TMD_ERR) {
560 u_int status2;
561
562 dev->stats.tx_errors++;
563
564 status2 = am_readword (dev, hdraddr + 6);
565
566 /*
567 * Clear the error byte
568 */
569 am_writeword (dev, hdraddr + 6, 0);
570
571 if (status2 & TST_RTRY)
572 dev->stats.collisions += 16;
573 if (status2 & TST_LCOL)
574 dev->stats.tx_window_errors++;
575 if (status2 & TST_LCAR)
576 dev->stats.tx_carrier_errors++;
577 if (status2 & TST_UFLO)
578 dev->stats.tx_fifo_errors++;
579 continue;
580 }
581 dev->stats.tx_packets++;
582 len = am_readword (dev, hdraddr + 4);
583 dev->stats.tx_bytes += -len;
584 } while (priv->txtail != priv->txhead);
585
586 netif_wake_queue(dev);
587}
588
589static irqreturn_t
590am79c961_interrupt(int irq, void *dev_id)
591{
592 struct net_device *dev = (struct net_device *)dev_id;
593 struct dev_priv *priv = netdev_priv(dev);
594 u_int status, n = 100;
595 int handled = 0;
596
597 do {
598 status = read_rreg(dev->base_addr, CSR0);
599 write_rreg(dev->base_addr, CSR0, status &
600 (CSR0_IENA|CSR0_TINT|CSR0_RINT|
601 CSR0_MERR|CSR0_MISS|CSR0_CERR|CSR0_BABL));
602
603 if (status & CSR0_RINT) {
604 handled = 1;
605 am79c961_rx(dev, priv);
606 }
607 if (status & CSR0_TINT) {
608 handled = 1;
609 am79c961_tx(dev, priv);
610 }
611 if (status & CSR0_MISS) {
612 handled = 1;
613 dev->stats.rx_dropped++;
614 }
615 if (status & CSR0_CERR) {
616 handled = 1;
617 mod_timer(&priv->timer, jiffies);
618 }
619 } while (--n && status & (CSR0_RINT | CSR0_TINT));
620
621 return IRQ_RETVAL(handled);
622}
623
624#ifdef CONFIG_NET_POLL_CONTROLLER
625static void am79c961_poll_controller(struct net_device *dev)
626{
627 unsigned long flags;
628 local_irq_save(flags);
629 am79c961_interrupt(dev->irq, dev);
630 local_irq_restore(flags);
631}
632#endif
633
634/*
635 * Initialise the chip. Note that we always expect
636 * to be entered with interrupts enabled.
637 */
638static int
639am79c961_hw_init(struct net_device *dev)
640{
641 struct dev_priv *priv = netdev_priv(dev);
642
643 spin_lock_irq(&priv->chip_lock);
644 write_rreg (dev->base_addr, CSR0, CSR0_STOP);
645 write_rreg (dev->base_addr, CSR3, CSR3_MASKALL);
646 spin_unlock_irq(&priv->chip_lock);
647
648 am79c961_ramtest(dev, 0x66);
649 am79c961_ramtest(dev, 0x99);
650
651 return 0;
652}
653
654static void __init am79c961_banner(void)
655{
656 static unsigned version_printed;
657
658 if (net_debug && version_printed++ == 0)
659 printk(KERN_INFO "%s", version);
660}
661static const struct net_device_ops am79c961_netdev_ops = {
662 .ndo_open = am79c961_open,
663 .ndo_stop = am79c961_close,
664 .ndo_start_xmit = am79c961_sendpacket,
665 .ndo_set_rx_mode = am79c961_setmulticastlist,
666 .ndo_tx_timeout = am79c961_timeout,
667 .ndo_validate_addr = eth_validate_addr,
668 .ndo_change_mtu = eth_change_mtu,
669 .ndo_set_mac_address = eth_mac_addr,
670#ifdef CONFIG_NET_POLL_CONTROLLER
671 .ndo_poll_controller = am79c961_poll_controller,
672#endif
673};
674
675static int __devinit am79c961_probe(struct platform_device *pdev)
676{
677 struct resource *res;
678 struct net_device *dev;
679 struct dev_priv *priv;
680 int i, ret;
681
682 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
683 if (!res)
684 return -ENODEV;
685
686 dev = alloc_etherdev(sizeof(struct dev_priv));
687 ret = -ENOMEM;
688 if (!dev)
689 goto out;
690
691 SET_NETDEV_DEV(dev, &pdev->dev);
692
693 priv = netdev_priv(dev);
694
695 /*
696 * Fixed address and IRQ lines here.
697 * The PNP initialisation should have been
698 * done by the ether bootp loader.
699 */
700 dev->base_addr = res->start;
701 ret = platform_get_irq(pdev, 0);
702
703 if (ret < 0) {
704 ret = -ENODEV;
705 goto nodev;
706 }
707 dev->irq = ret;
708
709 ret = -ENODEV;
710 if (!request_region(dev->base_addr, 0x18, dev->name))
711 goto nodev;
712
713 /*
714 * Reset the device.
715 */
716 inb(dev->base_addr + NET_RESET);
717 udelay(5);
718
719 /*
720 * Check the manufacturer part of the
721 * ether address.
722 */
723 if (inb(dev->base_addr) != 0x08 ||
724 inb(dev->base_addr + 2) != 0x00 ||
725 inb(dev->base_addr + 4) != 0x2b)
726 goto release;
727
728 for (i = 0; i < 6; i++)
729 dev->dev_addr[i] = inb(dev->base_addr + i * 2) & 0xff;
730
731 am79c961_banner();
732
733 spin_lock_init(&priv->chip_lock);
734 init_timer(&priv->timer);
735 priv->timer.data = (unsigned long)dev;
736 priv->timer.function = am79c961_timer;
737
738 if (am79c961_hw_init(dev))
739 goto release;
740
741 dev->netdev_ops = &am79c961_netdev_ops;
742
743 ret = register_netdev(dev);
744 if (ret == 0) {
745 printk(KERN_INFO "%s: ether address %pM\n",
746 dev->name, dev->dev_addr);
747 return 0;
748 }
749
750release:
751 release_region(dev->base_addr, 0x18);
752nodev:
753 free_netdev(dev);
754out:
755 return ret;
756}
757
758static struct platform_driver am79c961_driver = {
759 .probe = am79c961_probe,
760 .driver = {
761 .name = "am79c961",
762 },
763};
764
765static int __init am79c961_init(void)
766{
767 return platform_driver_register(&am79c961_driver);
768}
769
770__initcall(am79c961_init);
diff --git a/drivers/net/ethernet/amd/am79c961a.h b/drivers/net/ethernet/amd/am79c961a.h
new file mode 100644
index 000000000000..fd634d32756b
--- /dev/null
+++ b/drivers/net/ethernet/amd/am79c961a.h
@@ -0,0 +1,145 @@
1/*
2 * linux/drivers/net/arm/am79c961a.h
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef _LINUX_am79c961a_H
10#define _LINUX_am79c961a_H
11
12/* use 0 for production, 1 for verification, >2 for debug. debug flags: */
13#define DEBUG_TX 2
14#define DEBUG_RX 4
15#define DEBUG_INT 8
16#define DEBUG_IC 16
17#ifndef NET_DEBUG
18#define NET_DEBUG 0
19#endif
20
21#define NET_UID 0
22#define NET_RDP 0x10
23#define NET_RAP 0x12
24#define NET_RESET 0x14
25#define NET_IDP 0x16
26
27/*
28 * RAP registers
29 */
30#define CSR0 0
31#define CSR0_INIT 0x0001
32#define CSR0_STRT 0x0002
33#define CSR0_STOP 0x0004
34#define CSR0_TDMD 0x0008
35#define CSR0_TXON 0x0010
36#define CSR0_RXON 0x0020
37#define CSR0_IENA 0x0040
38#define CSR0_INTR 0x0080
39#define CSR0_IDON 0x0100
40#define CSR0_TINT 0x0200
41#define CSR0_RINT 0x0400
42#define CSR0_MERR 0x0800
43#define CSR0_MISS 0x1000
44#define CSR0_CERR 0x2000
45#define CSR0_BABL 0x4000
46#define CSR0_ERR 0x8000
47
48#define CSR3 3
49#define CSR3_EMBA 0x0008
50#define CSR3_DXMT2PD 0x0010
51#define CSR3_LAPPEN 0x0020
52#define CSR3_DXSUFLO 0x0040
53#define CSR3_IDONM 0x0100
54#define CSR3_TINTM 0x0200
55#define CSR3_RINTM 0x0400
56#define CSR3_MERRM 0x0800
57#define CSR3_MISSM 0x1000
58#define CSR3_BABLM 0x4000
59#define CSR3_MASKALL 0x5F00
60
61#define CSR4 4
62#define CSR4_JABM 0x0001
63#define CSR4_JAB 0x0002
64#define CSR4_TXSTRTM 0x0004
65#define CSR4_TXSTRT 0x0008
66#define CSR4_RCVCCOM 0x0010
67#define CSR4_RCVCCO 0x0020
68#define CSR4_MFCOM 0x0100
69#define CSR4_MFCO 0x0200
70#define CSR4_ASTRP_RCV 0x0400
71#define CSR4_APAD_XMIT 0x0800
72
73#define CTRL1 5
74#define CTRL1_SPND 0x0001
75
76#define LADRL 8
77#define LADRM1 9
78#define LADRM2 10
79#define LADRH 11
80#define PADRL 12
81#define PADRM 13
82#define PADRH 14
83
84#define MODE 15
85#define MODE_DISRX 0x0001
86#define MODE_DISTX 0x0002
87#define MODE_LOOP 0x0004
88#define MODE_DTCRC 0x0008
89#define MODE_COLL 0x0010
90#define MODE_DRETRY 0x0020
91#define MODE_INTLOOP 0x0040
92#define MODE_PORT_AUI 0x0000
93#define MODE_PORT_10BT 0x0080
94#define MODE_DRXPA 0x2000
95#define MODE_DRXBA 0x4000
96#define MODE_PROMISC 0x8000
97
98#define BASERXL 24
99#define BASERXH 25
100#define BASETXL 30
101#define BASETXH 31
102
103#define POLLINT 47
104
105#define SIZERXR 76
106#define SIZETXR 78
107
108#define CSR_MFC 112
109
110#define RMD_ENP 0x0100
111#define RMD_STP 0x0200
112#define RMD_CRC 0x0800
113#define RMD_FRAM 0x2000
114#define RMD_ERR 0x4000
115#define RMD_OWN 0x8000
116
117#define TMD_ENP 0x0100
118#define TMD_STP 0x0200
119#define TMD_MORE 0x1000
120#define TMD_ERR 0x4000
121#define TMD_OWN 0x8000
122
123#define TST_RTRY 0x0400
124#define TST_LCAR 0x0800
125#define TST_LCOL 0x1000
126#define TST_UFLO 0x4000
127#define TST_BUFF 0x8000
128
129#define ISALED0 0x0004
130#define ISALED0_LNKST 0x8000
131
132struct dev_priv {
133 unsigned long rxbuffer[RX_BUFFERS];
134 unsigned long txbuffer[TX_BUFFERS];
135 unsigned char txhead;
136 unsigned char txtail;
137 unsigned char rxhead;
138 unsigned char rxtail;
139 unsigned long rxhdr;
140 unsigned long txhdr;
141 spinlock_t chip_lock;
142 struct timer_list timer;
143};
144
145#endif
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
new file mode 100644
index 000000000000..a9745f4ddbfe
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -0,0 +1,1992 @@
1
2/* Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2004 Advanced Micro Devices
4 *
5 *
6 * Copyright 2001,2002 Jeff Garzik <jgarzik@mandrakesoft.com> [ 8139cp.c,tg3.c ]
7 * Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com)[ tg3.c]
8 * Copyright 1996-1999 Thomas Bogendoerfer [ pcnet32.c ]
9 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
10 * Copyright 1993 United States Government as represented by the
11 * Director, National Security Agency.[ pcnet32.c ]
12 * Carsten Langgaard, carstenl@mips.com [ pcnet32.c ]
13 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
14 *
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
29 * USA
30
31Module Name:
32
33 amd8111e.c
34
35Abstract:
36
37 AMD8111 based 10/100 Ethernet Controller Driver.
38
39Environment:
40
41 Kernel Mode
42
43Revision History:
44 3.0.0
45 Initial Revision.
46 3.0.1
47 1. Dynamic interrupt coalescing.
48 2. Removed prev_stats.
49 3. MII support.
50 4. Dynamic IPG support
51 3.0.2 05/29/2003
52 1. Bug fix: Fixed failure to send jumbo packets larger than 4k.
53 2. Bug fix: Fixed VLAN support failure.
54 3. Bug fix: Fixed receive interrupt coalescing bug.
55 4. Dynamic IPG support is disabled by default.
56 3.0.3 06/05/2003
57 1. Bug fix: Fixed failure to close the interface if SMP is enabled.
58 3.0.4 12/09/2003
59 1. Added set_mac_address routine for bonding driver support.
60 2. Tested the driver for bonding support
61	3. Bug fix: Fixed mismatch between the actual receive buffer length
62	and the length indicated to the h/w.
63 4. Modified amd8111e_rx() routine to receive all the received packets
64 in the first interrupt.
65 5. Bug fix: Corrected rx_errors reported in get_stats() function.
66 3.0.5 03/22/2004
67 1. Added NAPI support
68
69*/
70
71
72#include <linux/module.h>
73#include <linux/kernel.h>
74#include <linux/types.h>
75#include <linux/compiler.h>
76#include <linux/delay.h>
77#include <linux/init.h>
78#include <linux/interrupt.h>
79#include <linux/ioport.h>
80#include <linux/pci.h>
81#include <linux/netdevice.h>
82#include <linux/etherdevice.h>
83#include <linux/skbuff.h>
84#include <linux/ethtool.h>
85#include <linux/mii.h>
86#include <linux/if_vlan.h>
87#include <linux/ctype.h>
88#include <linux/crc32.h>
89#include <linux/dma-mapping.h>
90
91#include <asm/system.h>
92#include <asm/io.h>
93#include <asm/byteorder.h>
94#include <asm/uaccess.h>
95
96#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
97#define AMD8111E_VLAN_TAG_USED 1
98#else
99#define AMD8111E_VLAN_TAG_USED 0
100#endif
101
102#include "amd8111e.h"
103#define MODULE_NAME "amd8111e"
104#define MODULE_VERS "3.0.7"
105MODULE_AUTHOR("Advanced Micro Devices, Inc.");
106MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
107MODULE_LICENSE("GPL");
108MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
109module_param_array(speed_duplex, int, NULL, 0);
110MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
111module_param_array(coalesce, bool, NULL, 0);
112MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
113module_param_array(dynamic_ipg, bool, NULL, 0);
114MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
115
116static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
117
118 { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
120 { 0, }
121
122};
123/*
124This function will read the PHY registers.
125*/
126static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
127{
128 void __iomem *mmio = lp->mmio;
129 unsigned int reg_val;
130 unsigned int repeat= REPEAT_CNT;
131
132 reg_val = readl(mmio + PHY_ACCESS);
133 while (reg_val & PHY_CMD_ACTIVE)
134 reg_val = readl( mmio + PHY_ACCESS );
135
136 writel( PHY_RD_CMD | ((phy_id & 0x1f) << 21) |
137 ((reg & 0x1f) << 16), mmio +PHY_ACCESS);
138 do{
139 reg_val = readl(mmio + PHY_ACCESS);
140 udelay(30); /* It takes 30 us to read/write data */
141 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
142 if(reg_val & PHY_RD_ERR)
143 goto err_phy_read;
144
145 *val = reg_val & 0xffff;
146 return 0;
147err_phy_read:
148 *val = 0;
149 return -EINVAL;
150
151}
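/* Editor's note: a sketch of the PHY_ACCESS command word assembled above.
 * The 5-bit PHY address lands in bits 25:21 and the 5-bit register number
 * in bits 20:16; for a write, the data occupies the low 16 bits. The
 * helper is illustrative only and reuses the driver's own field layout.
 */
static inline u32 amd8111e_phy_cmd(u32 cmd, int phy_id, int reg, u16 data)
{
	return cmd | ((phy_id & 0x1f) << 21) | ((reg & 0x1f) << 16) | data;
}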
152
153/*
154This function will write into PHY registers.
155*/
156static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
157{
158 unsigned int repeat = REPEAT_CNT;
159 void __iomem *mmio = lp->mmio;
160 unsigned int reg_val;
161
162 reg_val = readl(mmio + PHY_ACCESS);
163 while (reg_val & PHY_CMD_ACTIVE)
164 reg_val = readl( mmio + PHY_ACCESS );
165
166 writel( PHY_WR_CMD | ((phy_id & 0x1f) << 21) |
167 ((reg & 0x1f) << 16)|val, mmio + PHY_ACCESS);
168
169 do{
170 reg_val = readl(mmio + PHY_ACCESS);
171 udelay(30); /* It takes 30 us to read/write the data */
172 } while (--repeat && (reg_val & PHY_CMD_ACTIVE));
173
174 if(reg_val & PHY_RD_ERR)
175 goto err_phy_write;
176
177 return 0;
178
179err_phy_write:
180 return -EINVAL;
181
182}
183/*
184This is the mii register read function provided to the mii interface.
185*/
186static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
187{
188 struct amd8111e_priv* lp = netdev_priv(dev);
189 unsigned int reg_val;
190
191 amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
192 return reg_val;
193
194}
195
196/*
197This is the mii register write function provided to the mii interface.
198*/
199static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
200{
201 struct amd8111e_priv* lp = netdev_priv(dev);
202
203 amd8111e_write_phy(lp, phy_id, reg_num, val);
204}
205
206/*
207This function will set the PHY speed. During initialization it sets the speed to 100 full.
208*/
209static void amd8111e_set_ext_phy(struct net_device *dev)
210{
211 struct amd8111e_priv *lp = netdev_priv(dev);
212 u32 bmcr,advert,tmp;
213
214 /* Determine mii register values to set the speed */
215 advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
216 tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
217 switch (lp->ext_phy_option){
218
219 default:
220 case SPEED_AUTONEG: /* advertise all values */
221 tmp |= ( ADVERTISE_10HALF|ADVERTISE_10FULL|
222 ADVERTISE_100HALF|ADVERTISE_100FULL) ;
223 break;
224 case SPEED10_HALF:
225 tmp |= ADVERTISE_10HALF;
226 break;
227 case SPEED10_FULL:
228 tmp |= ADVERTISE_10FULL;
229 break;
230 case SPEED100_HALF:
231 tmp |= ADVERTISE_100HALF;
232 break;
233 case SPEED100_FULL:
234 tmp |= ADVERTISE_100FULL;
235 break;
236 }
237
238 if(advert != tmp)
239 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_ADVERTISE, tmp);
240 /* Restart auto negotiation */
241 bmcr = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_BMCR);
242 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
243 amd8111e_mdio_write(dev, lp->ext_phy_addr, MII_BMCR, bmcr);
244
245}
246
247/*
248This function will unmap skb->data space and will free
249all transmit and receive skbuffs.
250*/
251static int amd8111e_free_skbs(struct net_device *dev)
252{
253 struct amd8111e_priv *lp = netdev_priv(dev);
254 struct sk_buff* rx_skbuff;
255 int i;
256
257 /* Freeing transmit skbs */
258 for(i = 0; i < NUM_TX_BUFFERS; i++){
259 if(lp->tx_skbuff[i]){
260 pci_unmap_single(lp->pci_dev,lp->tx_dma_addr[i], lp->tx_skbuff[i]->len,PCI_DMA_TODEVICE);
261 dev_kfree_skb (lp->tx_skbuff[i]);
262 lp->tx_skbuff[i] = NULL;
263 lp->tx_dma_addr[i] = 0;
264 }
265 }
266 /* Freeing previously allocated receive buffers */
267 for (i = 0; i < NUM_RX_BUFFERS; i++){
268 rx_skbuff = lp->rx_skbuff[i];
269 if(rx_skbuff != NULL){
270 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[i],
271 lp->rx_buff_len - 2,PCI_DMA_FROMDEVICE);
272 dev_kfree_skb(lp->rx_skbuff[i]);
273 lp->rx_skbuff[i] = NULL;
274 lp->rx_dma_addr[i] = 0;
275 }
276 }
277
278 return 0;
279}
280
281/*
282This will set the receive buffer length corresponding to the MTU size of the network interface.
283*/
284static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
285{
286 struct amd8111e_priv* lp = netdev_priv(dev);
287 unsigned int mtu = dev->mtu;
288
289 if (mtu > ETH_DATA_LEN){
290		/* MTU + ethernet header + FCS (4 bytes)
291		   + optional VLAN tag (4 bytes) + skb reserve space (2 bytes)
292		   = MTU + ETH_HLEN + 10 */
292
293 lp->rx_buff_len = mtu + ETH_HLEN + 10;
294 lp->options |= OPTION_JUMBO_ENABLE;
295 } else{
296 lp->rx_buff_len = PKT_BUFF_SZ;
297 lp->options &= ~OPTION_JUMBO_ENABLE;
298 }
299}
300
301/*
302This function will free all previously allocated buffers, determine the new receive buffer length and allocate new receive buffers. It also allocates and initializes both the transmit and receive hardware descriptors.
303 */
304static int amd8111e_init_ring(struct net_device *dev)
305{
306 struct amd8111e_priv *lp = netdev_priv(dev);
307 int i;
308
309 lp->rx_idx = lp->tx_idx = 0;
310 lp->tx_complete_idx = 0;
311 lp->tx_ring_idx = 0;
312
313
314 if(lp->opened)
315 /* Free previously allocated transmit and receive skbs */
316 amd8111e_free_skbs(dev);
317
318 else{
319 /* allocate the tx and rx descriptors */
320 if((lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
321 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
322 &lp->tx_ring_dma_addr)) == NULL)
323
324 goto err_no_mem;
325
326 if((lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
327 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
328 &lp->rx_ring_dma_addr)) == NULL)
329
330 goto err_free_tx_ring;
331
332 }
333 /* Set new receive buff size */
334 amd8111e_set_rx_buff_len(dev);
335
336 /* Allocating receive skbs */
337 for (i = 0; i < NUM_RX_BUFFERS; i++) {
338
339 if (!(lp->rx_skbuff[i] = dev_alloc_skb(lp->rx_buff_len))) {
340			/* Release previously allocated skbs */
341 for(--i; i >= 0 ;i--)
342 dev_kfree_skb(lp->rx_skbuff[i]);
343 goto err_free_rx_ring;
344 }
345 skb_reserve(lp->rx_skbuff[i],2);
346 }
347	/* Initializing receive descriptors */
348 for (i = 0; i < NUM_RX_BUFFERS; i++) {
349 lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev,
350 lp->rx_skbuff[i]->data,lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
351
352 lp->rx_ring[i].buff_phy_addr = cpu_to_le32(lp->rx_dma_addr[i]);
353 lp->rx_ring[i].buff_count = cpu_to_le16(lp->rx_buff_len-2);
354 wmb();
355 lp->rx_ring[i].rx_flags = cpu_to_le16(OWN_BIT);
356 }
357
358 /* Initializing transmit descriptors */
359 for (i = 0; i < NUM_TX_RING_DR; i++) {
360 lp->tx_ring[i].buff_phy_addr = 0;
361 lp->tx_ring[i].tx_flags = 0;
362 lp->tx_ring[i].buff_count = 0;
363 }
364
365 return 0;
366
367err_free_rx_ring:
368
369 pci_free_consistent(lp->pci_dev,
370 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,lp->rx_ring,
371 lp->rx_ring_dma_addr);
372
373err_free_tx_ring:
374
375 pci_free_consistent(lp->pci_dev,
376 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,lp->tx_ring,
377 lp->tx_ring_dma_addr);
378
379err_no_mem:
380 return -ENOMEM;
381}
382/* This function will set the interrupt coalescing according to the input arguments */
383static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
384{
385 unsigned int timeout;
386 unsigned int event_count;
387
388 struct amd8111e_priv *lp = netdev_priv(dev);
389 void __iomem *mmio = lp->mmio;
390 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
391
392
393 switch(cmod)
394 {
395 case RX_INTR_COAL :
396 timeout = coal_conf->rx_timeout;
397 event_count = coal_conf->rx_event_count;
398 if( timeout > MAX_TIMEOUT ||
399 event_count > MAX_EVENT_COUNT )
400 return -EINVAL;
401
402 timeout = timeout * DELAY_TIMER_CONV;
403 writel(VAL0|STINTEN, mmio+INTEN0);
404 writel((u32)DLY_INT_A_R0|( event_count<< 16 )|timeout,
405 mmio+DLY_INT_A);
406 break;
407
408 case TX_INTR_COAL :
409 timeout = coal_conf->tx_timeout;
410 event_count = coal_conf->tx_event_count;
411 if( timeout > MAX_TIMEOUT ||
412 event_count > MAX_EVENT_COUNT )
413 return -EINVAL;
414
415
416 timeout = timeout * DELAY_TIMER_CONV;
417 writel(VAL0|STINTEN,mmio+INTEN0);
418 writel((u32)DLY_INT_B_T0|( event_count<< 16 )|timeout,
419 mmio+DLY_INT_B);
420 break;
421
422 case DISABLE_COAL:
423 writel(0,mmio+STVAL);
424 writel(STINTEN, mmio+INTEN0);
425 writel(0, mmio +DLY_INT_B);
426 writel(0, mmio+DLY_INT_A);
427 break;
428 case ENABLE_COAL:
429 /* Start the timer */
430 writel((u32)SOFT_TIMER_FREQ, mmio+STVAL); /* 0.5 sec */
431 writel(VAL0|STINTEN, mmio+INTEN0);
432 break;
433 default:
434 break;
435
436 }
437 return 0;
438
439}
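/* Editor's note: a sketch of the delay-interrupt words written above. The
 * event count is shifted into the bits above 16 and the timeout (already
 * converted via DELAY_TIMER_CONV) fills the low bits, with the ring-select
 * flag (DLY_INT_A_R0 or DLY_INT_B_T0) on top. Illustrative only, reusing
 * the driver's own macros.
 */
static inline u32 amd8111e_dly_int(u32 ring_flag, u32 event_count, u32 timeout)
{
	return ring_flag | (event_count << 16) | (timeout * DELAY_TIMER_CONV);
}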
440
441/*
442This function initializes the device registers and starts the device.
443*/
444static int amd8111e_restart(struct net_device *dev)
445{
446 struct amd8111e_priv *lp = netdev_priv(dev);
447 void __iomem *mmio = lp->mmio;
448 int i,reg_val;
449
450 /* stop the chip */
451 writel(RUN, mmio + CMD0);
452
453 if(amd8111e_init_ring(dev))
454 return -ENOMEM;
455
456 /* enable the port manager and set auto negotiation always */
457 writel((u32) VAL1|EN_PMGR, mmio + CMD3 );
458 writel((u32)XPHYANE|XPHYRST , mmio + CTRL2);
459
460 amd8111e_set_ext_phy(dev);
461
462 /* set control registers */
463 reg_val = readl(mmio + CTRL1);
464 reg_val &= ~XMTSP_MASK;
465 writel( reg_val| XMTSP_128 | CACHE_ALIGN, mmio + CTRL1 );
466
467 /* enable interrupt */
468 writel( APINT5EN | APINT4EN | APINT3EN | APINT2EN | APINT1EN |
469 APINT0EN | MIIPDTINTEN | MCCIINTEN | MCCINTEN | MREINTEN |
470 SPNDINTEN | MPINTEN | SINTEN | STINTEN, mmio + INTEN0);
471
472 writel(VAL3 | LCINTEN | VAL1 | TINTEN0 | VAL0 | RINTEN0, mmio + INTEN0);
473
474 /* initialize tx and rx ring base addresses */
475 writel((u32)lp->tx_ring_dma_addr,mmio + XMT_RING_BASE_ADDR0);
476 writel((u32)lp->rx_ring_dma_addr,mmio+ RCV_RING_BASE_ADDR0);
477
478	writew((u16)NUM_TX_RING_DR, mmio + XMT_RING_LEN0);
479 writew((u16)NUM_RX_RING_DR, mmio + RCV_RING_LEN0);
480
481 /* set default IPG to 96 */
482 writew((u32)DEFAULT_IPG,mmio+IPG);
483 writew((u32)(DEFAULT_IPG-IFS1_DELTA), mmio + IFS1);
484
485 if(lp->options & OPTION_JUMBO_ENABLE){
486 writel((u32)VAL2|JUMBO, mmio + CMD3);
487 /* Reset REX_UFLO */
488 writel( REX_UFLO, mmio + CMD2);
489 /* Should not set REX_UFLO for jumbo frames */
490 writel( VAL0 | APAD_XMT|REX_RTRY , mmio + CMD2);
491 }else{
492 writel( VAL0 | APAD_XMT | REX_RTRY|REX_UFLO, mmio + CMD2);
493 writel((u32)JUMBO, mmio + CMD3);
494 }
495
496#if AMD8111E_VLAN_TAG_USED
497 writel((u32) VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3);
498#endif
499 writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
500
501 /* Setting the MAC address to the device */
502 for(i = 0; i < ETH_ADDR_LEN; i++)
503 writeb( dev->dev_addr[i], mmio + PADR + i );
504
505 /* Enable interrupt coalesce */
506 if(lp->options & OPTION_INTR_COAL_ENABLE){
507 printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
508 dev->name);
509 amd8111e_set_coalesce(dev,ENABLE_COAL);
510 }
511
512 /* set RUN bit to start the chip */
513 writel(VAL2 | RDMD0, mmio + CMD0);
514 writel(VAL0 | INTREN | RUN, mmio + CMD0);
515
516 /* To avoid PCI posting bug */
517 readl(mmio+CMD0);
518 return 0;
519}
520/*
521This function clears the necessary device registers.
522*/
523static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
524{
525 unsigned int reg_val;
526 unsigned int logic_filter[2] ={0,};
527 void __iomem *mmio = lp->mmio;
528
529
530 /* stop the chip */
531 writel(RUN, mmio + CMD0);
532
533	/* AUTOPOLL0 Register. TBD: default value is 8100 in FPS */
534 writew( 0x8100 | lp->ext_phy_addr, mmio + AUTOPOLL0);
535
536 /* Clear RCV_RING_BASE_ADDR */
537 writel(0, mmio + RCV_RING_BASE_ADDR0);
538
539 /* Clear XMT_RING_BASE_ADDR */
540 writel(0, mmio + XMT_RING_BASE_ADDR0);
541 writel(0, mmio + XMT_RING_BASE_ADDR1);
542 writel(0, mmio + XMT_RING_BASE_ADDR2);
543 writel(0, mmio + XMT_RING_BASE_ADDR3);
544
545 /* Clear CMD0 */
546 writel(CMD0_CLEAR,mmio + CMD0);
547
548 /* Clear CMD2 */
549 writel(CMD2_CLEAR, mmio +CMD2);
550
551 /* Clear CMD7 */
552 writel(CMD7_CLEAR , mmio + CMD7);
553
554 /* Clear DLY_INT_A and DLY_INT_B */
555 writel(0x0, mmio + DLY_INT_A);
556 writel(0x0, mmio + DLY_INT_B);
557
558 /* Clear FLOW_CONTROL */
559 writel(0x0, mmio + FLOW_CONTROL);
560
561 /* Clear INT0 write 1 to clear register */
562 reg_val = readl(mmio + INT0);
563 writel(reg_val, mmio + INT0);
564
565 /* Clear STVAL */
566 writel(0x0, mmio + STVAL);
567
568 /* Clear INTEN0 */
569 writel( INTEN0_CLEAR, mmio + INTEN0);
570
571 /* Clear LADRF */
572 writel(0x0 , mmio + LADRF);
573
574 /* Set SRAM_SIZE & SRAM_BOUNDARY registers */
575 writel( 0x80010,mmio + SRAM_SIZE);
576
577 /* Clear RCV_RING0_LEN */
578 writel(0x0, mmio + RCV_RING_LEN0);
579
580 /* Clear XMT_RING0/1/2/3_LEN */
581 writel(0x0, mmio + XMT_RING_LEN0);
582 writel(0x0, mmio + XMT_RING_LEN1);
583 writel(0x0, mmio + XMT_RING_LEN2);
584 writel(0x0, mmio + XMT_RING_LEN3);
585
586 /* Clear XMT_RING_LIMIT */
587 writel(0x0, mmio + XMT_RING_LIMIT);
588
589 /* Clear MIB */
590 writew(MIB_CLEAR, mmio + MIB_ADDR);
591
592	/* Clear LADRF */
593 amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
594
595 /* SRAM_SIZE register */
596 reg_val = readl(mmio + SRAM_SIZE);
597
598 if(lp->options & OPTION_JUMBO_ENABLE)
599 writel( VAL2|JUMBO, mmio + CMD3);
600#if AMD8111E_VLAN_TAG_USED
601 writel(VAL2|VSIZE|VL_TAG_DEL, mmio + CMD3 );
602#endif
603 /* Set default value to CTRL1 Register */
604 writel(CTRL1_DEFAULT, mmio + CTRL1);
605
606 /* To avoid PCI posting bug */
607 readl(mmio + CMD2);
608
609}
610
611/*
612This function disables the interrupt and clears all the pending
613interrupts in INT0
614 */
615static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
616{
617 u32 intr0;
618
619 /* Disable interrupt */
620 writel(INTREN, lp->mmio + CMD0);
621
622 /* Clear INT0 */
623 intr0 = readl(lp->mmio + INT0);
624 writel(intr0, lp->mmio + INT0);
625
626 /* To avoid PCI posting bug */
627 readl(lp->mmio + INT0);
628
629}
630
631/*
632This function stops the chip.
633*/
634static void amd8111e_stop_chip(struct amd8111e_priv* lp)
635{
636 writel(RUN, lp->mmio + CMD0);
637
638 /* To avoid PCI posting bug */
639 readl(lp->mmio + CMD0);
640}
641
642/*
643This function frees the transmitter and receiver descriptor rings.
644*/
645static void amd8111e_free_ring(struct amd8111e_priv* lp)
646{
647 /* Free transmit and receive descriptor rings */
648 if(lp->rx_ring){
649 pci_free_consistent(lp->pci_dev,
650 sizeof(struct amd8111e_rx_dr)*NUM_RX_RING_DR,
651 lp->rx_ring, lp->rx_ring_dma_addr);
652 lp->rx_ring = NULL;
653 }
654
655 if(lp->tx_ring){
656 pci_free_consistent(lp->pci_dev,
657 sizeof(struct amd8111e_tx_dr)*NUM_TX_RING_DR,
658 lp->tx_ring, lp->tx_ring_dma_addr);
659
660 lp->tx_ring = NULL;
661 }
662
663}
664
665/*
666This function will free all the transmit skbs that have actually been transmitted by the device. It checks descriptor ownership before freeing each skb.
667*/
668static int amd8111e_tx(struct net_device *dev)
669{
670 struct amd8111e_priv* lp = netdev_priv(dev);
671 int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
672 int status;
673 /* Complete all the transmit packet */
674 while (lp->tx_complete_idx != lp->tx_idx){
675 tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
676 status = le16_to_cpu(lp->tx_ring[tx_index].tx_flags);
677
678 if(status & OWN_BIT)
679 break; /* It still hasn't been Txed */
680
681 lp->tx_ring[tx_index].buff_phy_addr = 0;
682
683 /* We must free the original skb */
684 if (lp->tx_skbuff[tx_index]) {
685 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index],
686 lp->tx_skbuff[tx_index]->len,
687 PCI_DMA_TODEVICE);
688 dev_kfree_skb_irq (lp->tx_skbuff[tx_index]);
689 lp->tx_skbuff[tx_index] = NULL;
690 lp->tx_dma_addr[tx_index] = 0;
691 }
692 lp->tx_complete_idx++;
693 /*COAL update tx coalescing parameters */
694 lp->coal_conf.tx_packets++;
695 lp->coal_conf.tx_bytes +=
696 le16_to_cpu(lp->tx_ring[tx_index].buff_count);
697
698 if (netif_queue_stopped(dev) &&
699 lp->tx_complete_idx > lp->tx_idx - NUM_TX_BUFFERS +2){
700 /* The ring is no longer full, clear tbusy. */
701 /* lp->tx_full = 0; */
702 netif_wake_queue (dev);
703 }
704 }
705 return 0;
706}
707
708/* This function handles the driver receive operation in polling mode */
709static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
710{
711 struct amd8111e_priv *lp = container_of(napi, struct amd8111e_priv, napi);
712 struct net_device *dev = lp->amd8111e_net_dev;
713 int rx_index = lp->rx_idx & RX_RING_DR_MOD_MASK;
714 void __iomem *mmio = lp->mmio;
715 struct sk_buff *skb,*new_skb;
716 int min_pkt_len, status;
717 unsigned int intr0;
718 int num_rx_pkt = 0;
719 short pkt_len;
720#if AMD8111E_VLAN_TAG_USED
721 short vtag;
722#endif
723 int rx_pkt_limit = budget;
724 unsigned long flags;
725
726 do{
727		/* process receive packets until we use up the quota */
728 /* If we own the next entry, it's a new packet. Send it up. */
729 while(1) {
730 status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
731 if (status & OWN_BIT)
732 break;
733
734 /*
735 * There is a tricky error noted by John Murphy,
736 * <murf@perftech.com> to Russ Nelson: Even with
737			 * full-sized buffers it's possible for a
738 * jabber packet to use two buffers, with only
739 * the last correctly noting the error.
740 */
741
742 if(status & ERR_BIT) {
743				/* resetting flags */
744 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
745 goto err_next_pkt;
746 }
747 /* check for STP and ENP */
748 if(!((status & STP_BIT) && (status & ENP_BIT))){
749				/* resetting flags */
750 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
751 goto err_next_pkt;
752 }
753 pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4;
754
755#if AMD8111E_VLAN_TAG_USED
756 vtag = status & TT_MASK;
757			/* MAC will strip the VLAN tag */
758 if (vtag != 0)
759 min_pkt_len =MIN_PKT_LEN - 4;
760 else
761#endif
762 min_pkt_len =MIN_PKT_LEN;
763
764 if (pkt_len < min_pkt_len) {
765 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
766 lp->drv_rx_errors++;
767 goto err_next_pkt;
768 }
769 if(--rx_pkt_limit < 0)
770 goto rx_not_empty;
771 if(!(new_skb = dev_alloc_skb(lp->rx_buff_len))){
772				/* if allocation fails,
773				   ignore that pkt and go to the next one */
774 lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
775 lp->drv_rx_errors++;
776 goto err_next_pkt;
777 }
778
779 skb_reserve(new_skb, 2);
780 skb = lp->rx_skbuff[rx_index];
781 pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index],
782 lp->rx_buff_len-2, PCI_DMA_FROMDEVICE);
783 skb_put(skb, pkt_len);
784 lp->rx_skbuff[rx_index] = new_skb;
785 lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev,
786 new_skb->data,
787 lp->rx_buff_len-2,
788 PCI_DMA_FROMDEVICE);
789
790 skb->protocol = eth_type_trans(skb, dev);
791
792#if AMD8111E_VLAN_TAG_USED
793 if (vtag == TT_VLAN_TAGGED){
794 u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info);
795 __vlan_hwaccel_put_tag(skb, vlan_tag);
796 }
797#endif
798 netif_receive_skb(skb);
799			/* COAL: update rx coalescing parameters */
800 lp->coal_conf.rx_packets++;
801 lp->coal_conf.rx_bytes += pkt_len;
802 num_rx_pkt++;
803
804 err_next_pkt:
805 lp->rx_ring[rx_index].buff_phy_addr
806 = cpu_to_le32(lp->rx_dma_addr[rx_index]);
807 lp->rx_ring[rx_index].buff_count =
808 cpu_to_le16(lp->rx_buff_len-2);
809 wmb();
810 lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT);
811 rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
812 }
813		/* Check the interrupt status register for more packets in the
814		   meantime. Process them since we have not used up our quota. */
815
816 intr0 = readl(mmio + INT0);
817		/* Ack receive interrupts */
818 writel(intr0 & RINT0,mmio + INT0);
819
820 } while(intr0 & RINT0);
821
822 if (rx_pkt_limit > 0) {
823 /* Receive descriptor is empty now */
824 spin_lock_irqsave(&lp->lock, flags);
825 __napi_complete(napi);
826 writel(VAL0|RINTEN0, mmio + INTEN0);
827 writel(VAL2 | RDMD0, mmio + CMD0);
828 spin_unlock_irqrestore(&lp->lock, flags);
829 }
830
831rx_not_empty:
832 return num_rx_pkt;
833}
834
835/*
836 * This function reports the link status to the kernel.
837 */
838static int amd8111e_link_change(struct net_device* dev)
839{
840 struct amd8111e_priv *lp = netdev_priv(dev);
841 int status0,speed;
842
843 /* read the link change */
844 status0 = readl(lp->mmio + STAT0);
845
846 if(status0 & LINK_STATS){
847 if(status0 & AUTONEG_COMPLETE)
848 lp->link_config.autoneg = AUTONEG_ENABLE;
849 else
850 lp->link_config.autoneg = AUTONEG_DISABLE;
851
852 if(status0 & FULL_DPLX)
853 lp->link_config.duplex = DUPLEX_FULL;
854 else
855 lp->link_config.duplex = DUPLEX_HALF;
856 speed = (status0 & SPEED_MASK) >> 7;
857 if(speed == PHY_SPEED_10)
858 lp->link_config.speed = SPEED_10;
859 else if(speed == PHY_SPEED_100)
860 lp->link_config.speed = SPEED_100;
861
862 printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n", dev->name,
863 (lp->link_config.speed == SPEED_100) ? "100": "10",
864 (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
865 netif_carrier_on(dev);
866 }
867 else{
868 lp->link_config.speed = SPEED_INVALID;
869 lp->link_config.duplex = DUPLEX_INVALID;
870 lp->link_config.autoneg = AUTONEG_INVALID;
871 printk(KERN_INFO "%s: Link is Down.\n",dev->name);
872 netif_carrier_off(dev);
873 }
874
875 return 0;
876}
877/*
878 * This function reads the MIB counters.
879 */
880static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
881{
882 unsigned int status;
883 unsigned int data;
884 unsigned int repeat = REPEAT_CNT;
885
886 writew( MIB_RD_CMD | MIB_COUNTER, mmio + MIB_ADDR);
887 do {
888 status = readw(mmio + MIB_ADDR);
889 udelay(2); /* controller takes MAX 2 us to get mib data */
890 }
891 while (--repeat && (status & MIB_CMD_ACTIVE));
892
893 data = readl(mmio + MIB_DATA);
894 return data;
895}
896
897/*
898 * This function reads the mib registers and returns the hardware statistics.
899 * It updates previous internal driver statistics with new values.
900 */
901static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
902{
903 struct amd8111e_priv *lp = netdev_priv(dev);
904 void __iomem *mmio = lp->mmio;
905 unsigned long flags;
906 struct net_device_stats *new_stats = &dev->stats;
907
908 if (!lp->opened)
909 return new_stats;
910 spin_lock_irqsave (&lp->lock, flags);
911
912 /* stats.rx_packets */
913 new_stats->rx_packets = amd8111e_read_mib(mmio, rcv_broadcast_pkts)+
914 amd8111e_read_mib(mmio, rcv_multicast_pkts)+
915 amd8111e_read_mib(mmio, rcv_unicast_pkts);
916
917 /* stats.tx_packets */
918 new_stats->tx_packets = amd8111e_read_mib(mmio, xmt_packets);
919
920 /*stats.rx_bytes */
921 new_stats->rx_bytes = amd8111e_read_mib(mmio, rcv_octets);
922
923 /* stats.tx_bytes */
924 new_stats->tx_bytes = amd8111e_read_mib(mmio, xmt_octets);
925
926 /* stats.rx_errors */
927 /* hw errors + errors driver reported */
928 new_stats->rx_errors = amd8111e_read_mib(mmio, rcv_undersize_pkts)+
929 amd8111e_read_mib(mmio, rcv_fragments)+
930 amd8111e_read_mib(mmio, rcv_jabbers)+
931 amd8111e_read_mib(mmio, rcv_alignment_errors)+
932 amd8111e_read_mib(mmio, rcv_fcs_errors)+
933 amd8111e_read_mib(mmio, rcv_miss_pkts)+
934 lp->drv_rx_errors;
935
936 /* stats.tx_errors */
937 new_stats->tx_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
938
939 /* stats.rx_dropped*/
940 new_stats->rx_dropped = amd8111e_read_mib(mmio, rcv_miss_pkts);
941
942 /* stats.tx_dropped*/
943 new_stats->tx_dropped = amd8111e_read_mib(mmio, xmt_underrun_pkts);
944
945 /* stats.multicast*/
946 new_stats->multicast = amd8111e_read_mib(mmio, rcv_multicast_pkts);
947
948 /* stats.collisions*/
949 new_stats->collisions = amd8111e_read_mib(mmio, xmt_collisions);
950
951 /* stats.rx_length_errors*/
952 new_stats->rx_length_errors =
953 amd8111e_read_mib(mmio, rcv_undersize_pkts)+
954 amd8111e_read_mib(mmio, rcv_oversize_pkts);
955
956 /* stats.rx_over_errors*/
957 new_stats->rx_over_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
958
959 /* stats.rx_crc_errors*/
960 new_stats->rx_crc_errors = amd8111e_read_mib(mmio, rcv_fcs_errors);
961
962 /* stats.rx_frame_errors*/
963 new_stats->rx_frame_errors =
964 amd8111e_read_mib(mmio, rcv_alignment_errors);
965
966 /* stats.rx_fifo_errors */
967 new_stats->rx_fifo_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
968
969 /* stats.rx_missed_errors */
970 new_stats->rx_missed_errors = amd8111e_read_mib(mmio, rcv_miss_pkts);
971
972 /* stats.tx_aborted_errors*/
973 new_stats->tx_aborted_errors =
974 amd8111e_read_mib(mmio, xmt_excessive_collision);
975
976 /* stats.tx_carrier_errors*/
977 new_stats->tx_carrier_errors =
978 amd8111e_read_mib(mmio, xmt_loss_carrier);
979
980 /* stats.tx_fifo_errors*/
981 new_stats->tx_fifo_errors = amd8111e_read_mib(mmio, xmt_underrun_pkts);
982
983 /* stats.tx_window_errors*/
984 new_stats->tx_window_errors =
985 amd8111e_read_mib(mmio, xmt_late_collision);
986
987 /* Reset the mibs for collecting new statistics */
988 /* writew(MIB_CLEAR, mmio + MIB_ADDR);*/
989
990 spin_unlock_irqrestore (&lp->lock, flags);
991
992 return new_stats;
993}
994/* This function recalculates the interrupt coalescing mode on every
995 * interrupt according to the data rate and the packet rate.
996 */
997static int amd8111e_calc_coalesce(struct net_device *dev)
998{
999 struct amd8111e_priv *lp = netdev_priv(dev);
1000 struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
1001 int tx_pkt_rate;
1002 int rx_pkt_rate;
1003 int tx_data_rate;
1004 int rx_data_rate;
1005 int rx_pkt_size;
1006 int tx_pkt_size;
1007
1008 tx_pkt_rate = coal_conf->tx_packets - coal_conf->tx_prev_packets;
1009 coal_conf->tx_prev_packets = coal_conf->tx_packets;
1010
1011 tx_data_rate = coal_conf->tx_bytes - coal_conf->tx_prev_bytes;
1012 coal_conf->tx_prev_bytes = coal_conf->tx_bytes;
1013
1014 rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
1015 coal_conf->rx_prev_packets = coal_conf->rx_packets;
1016
1017 rx_data_rate = coal_conf->rx_bytes - coal_conf->rx_prev_bytes;
1018 coal_conf->rx_prev_bytes = coal_conf->rx_bytes;
1019
1020 if(rx_pkt_rate < 800){
1021 if(coal_conf->rx_coal_type != NO_COALESCE){
1022
1023 coal_conf->rx_timeout = 0x0;
1024 coal_conf->rx_event_count = 0;
1025 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1026 coal_conf->rx_coal_type = NO_COALESCE;
1027 }
1028 }
1029 else{
1030
1031 rx_pkt_size = rx_data_rate/rx_pkt_rate;
1032 if (rx_pkt_size < 128){
1033 if(coal_conf->rx_coal_type != NO_COALESCE){
1034
1035 coal_conf->rx_timeout = 0;
1036 coal_conf->rx_event_count = 0;
1037 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1038 coal_conf->rx_coal_type = NO_COALESCE;
1039 }
1040
1041 }
1042 else if ( (rx_pkt_size >= 128) && (rx_pkt_size < 512) ){
1043
1044 if(coal_conf->rx_coal_type != LOW_COALESCE){
1045 coal_conf->rx_timeout = 1;
1046 coal_conf->rx_event_count = 4;
1047 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1048 coal_conf->rx_coal_type = LOW_COALESCE;
1049 }
1050 }
1051 else if ((rx_pkt_size >= 512) && (rx_pkt_size < 1024)){
1052
1053 if(coal_conf->rx_coal_type != MEDIUM_COALESCE){
1054 coal_conf->rx_timeout = 1;
1055 coal_conf->rx_event_count = 4;
1056 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1057 coal_conf->rx_coal_type = MEDIUM_COALESCE;
1058 }
1059
1060 }
1061 else if(rx_pkt_size >= 1024){
1062 if(coal_conf->rx_coal_type != HIGH_COALESCE){
1063 coal_conf->rx_timeout = 2;
1064 coal_conf->rx_event_count = 3;
1065 amd8111e_set_coalesce(dev,RX_INTR_COAL);
1066 coal_conf->rx_coal_type = HIGH_COALESCE;
1067 }
1068 }
1069 }
1070	/* Now for TX interrupt coalescing */
1071 if(tx_pkt_rate < 800){
1072 if(coal_conf->tx_coal_type != NO_COALESCE){
1073
1074 coal_conf->tx_timeout = 0x0;
1075 coal_conf->tx_event_count = 0;
1076 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1077 coal_conf->tx_coal_type = NO_COALESCE;
1078 }
1079 }
1080 else{
1081
1082 tx_pkt_size = tx_data_rate/tx_pkt_rate;
1083 if (tx_pkt_size < 128){
1084
1085 if(coal_conf->tx_coal_type != NO_COALESCE){
1086
1087 coal_conf->tx_timeout = 0;
1088 coal_conf->tx_event_count = 0;
1089 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1090 coal_conf->tx_coal_type = NO_COALESCE;
1091 }
1092
1093 }
1094 else if ( (tx_pkt_size >= 128) && (tx_pkt_size < 512) ){
1095
1096 if(coal_conf->tx_coal_type != LOW_COALESCE){
1097 coal_conf->tx_timeout = 1;
1098 coal_conf->tx_event_count = 2;
1099 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1100 coal_conf->tx_coal_type = LOW_COALESCE;
1101
1102 }
1103 }
1104 else if ((tx_pkt_size >= 512) && (tx_pkt_size < 1024)){
1105
1106 if(coal_conf->tx_coal_type != MEDIUM_COALESCE){
1107 coal_conf->tx_timeout = 2;
1108 coal_conf->tx_event_count = 5;
1109 amd8111e_set_coalesce(dev,TX_INTR_COAL);
1110 coal_conf->tx_coal_type = MEDIUM_COALESCE;
1111 }
1112
1113 }
1114		else if (tx_pkt_size >= 1024) {
1115			/* largest packets get the heaviest coalescing */
1116			if (coal_conf->tx_coal_type != HIGH_COALESCE) {
1117				coal_conf->tx_timeout = 4;
1118				coal_conf->tx_event_count = 8;
1119				amd8111e_set_coalesce(dev, TX_INTR_COAL);
1120				coal_conf->tx_coal_type = HIGH_COALESCE;
1121			}
1122		}
1123
1124 }
1125 return 0;
1126
1127}
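
For intuition, here is a small standalone sketch of the band selection above; the helper names and sample rates are illustrative, not part of the driver:

    #include <stdio.h>

    /* Mirror the RX band selection of amd8111e_calc_coalesce(): below
     * 800 pkts/s coalescing is disabled, otherwise the band is chosen
     * by the average packet size (byte rate / packet rate). */
    static const char *rx_band(int pkt_rate, int byte_rate)
    {
    	int pkt_size;

    	if (pkt_rate < 800)
    		return "NO_COALESCE";
    	pkt_size = byte_rate / pkt_rate;
    	if (pkt_size < 128)
    		return "NO_COALESCE";
    	if (pkt_size < 512)
    		return "LOW_COALESCE";
    	if (pkt_size < 1024)
    		return "MEDIUM_COALESCE";
    	return "HIGH_COALESCE";
    }

    int main(void)
    {
    	/* 1000 pkts/s at 600000 bytes/s -> 600-byte packets -> MEDIUM */
    	printf("%s\n", rx_band(1000, 600000));
    	return 0;
    }
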
1128/*
1129 * This is the device interrupt handler. It handles transmit, receive, link change and hardware-timer interrupts.
1130 */
1131static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
1132{
1133
1134 struct net_device * dev = (struct net_device *) dev_id;
1135 struct amd8111e_priv *lp = netdev_priv(dev);
1136 void __iomem *mmio = lp->mmio;
1137 unsigned int intr0, intren0;
1138 unsigned int handled = 1;
1139
1140 if(unlikely(dev == NULL))
1141 return IRQ_NONE;
1142
1143 spin_lock(&lp->lock);
1144
1145 /* disabling interrupt */
1146 writel(INTREN, mmio + CMD0);
1147
1148 /* Read interrupt status */
1149 intr0 = readl(mmio + INT0);
1150 intren0 = readl(mmio + INTEN0);
1151
1152	/* Process all the INT events until the INTR bit is clear. */
1153
1154 if (!(intr0 & INTR)){
1155 handled = 0;
1156 goto err_no_interrupt;
1157 }
1158
1159	/* The driver processes 4 interrupts: RINT, TINT, LCINT, STINT */
1160 writel(intr0, mmio + INT0);
1161
1162 /* Check if Receive Interrupt has occurred. */
1163 if (intr0 & RINT0) {
1164 if (napi_schedule_prep(&lp->napi)) {
1165			/* Disable receive interrupts */
1166 writel(RINTEN0, mmio + INTEN0);
1167 /* Schedule a polling routine */
1168 __napi_schedule(&lp->napi);
1169 } else if (intren0 & RINTEN0) {
1170			printk(KERN_WARNING "%s: driver bug! interrupt while in poll\n", dev->name);
1171			/* Fix by disabling receive interrupts */
1172 writel(RINTEN0, mmio + INTEN0);
1173 }
1174 }
1175
1176 /* Check if Transmit Interrupt has occurred. */
1177 if (intr0 & TINT0)
1178 amd8111e_tx(dev);
1179
1180 /* Check if Link Change Interrupt has occurred. */
1181 if (intr0 & LCINT)
1182 amd8111e_link_change(dev);
1183
1184 /* Check if Hardware Timer Interrupt has occurred. */
1185 if (intr0 & STINT)
1186 amd8111e_calc_coalesce(dev);
1187
1188err_no_interrupt:
1189 writel( VAL0 | INTREN,mmio + CMD0);
1190
1191 spin_unlock(&lp->lock);
1192
1193 return IRQ_RETVAL(handled);
1194}
1195
1196#ifdef CONFIG_NET_POLL_CONTROLLER
1197static void amd8111e_poll(struct net_device *dev)
1198{
1199 unsigned long flags;
1200 local_irq_save(flags);
1201 amd8111e_interrupt(0, dev);
1202 local_irq_restore(flags);
1203}
1204#endif
1205
1206
1207/*
1208 * This function closes the network interface and updates the statistics so that the most recent statistics are still available after the interface is down.
1209 */
1210static int amd8111e_close(struct net_device * dev)
1211{
1212 struct amd8111e_priv *lp = netdev_priv(dev);
1213 netif_stop_queue(dev);
1214
1215 napi_disable(&lp->napi);
1216
1217 spin_lock_irq(&lp->lock);
1218
1219 amd8111e_disable_interrupt(lp);
1220 amd8111e_stop_chip(lp);
1221
1222 /* Free transmit and receive skbs */
1223 amd8111e_free_skbs(lp->amd8111e_net_dev);
1224
1225 netif_carrier_off(lp->amd8111e_net_dev);
1226
1227 /* Delete ipg timer */
1228 if(lp->options & OPTION_DYN_IPG_ENABLE)
1229 del_timer_sync(&lp->ipg_data.ipg_timer);
1230
1231 spin_unlock_irq(&lp->lock);
1232 free_irq(dev->irq, dev);
1233 amd8111e_free_ring(lp);
1234
1235 /* Update the statistics before closing */
1236 amd8111e_get_stats(dev);
1237 lp->opened = 0;
1238 return 0;
1239}
1240/* This function opens a new interface. It requests an IRQ for the device, initializes the device, buffers and descriptors, and starts the device.
1241 */
1242static int amd8111e_open(struct net_device * dev )
1243{
1244 struct amd8111e_priv *lp = netdev_priv(dev);
1245
1246 if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, IRQF_SHARED,
1247 dev->name, dev))
1248 return -EAGAIN;
1249
1250 napi_enable(&lp->napi);
1251
1252 spin_lock_irq(&lp->lock);
1253
1254 amd8111e_init_hw_default(lp);
1255
1256 if(amd8111e_restart(dev)){
1257 spin_unlock_irq(&lp->lock);
1258 napi_disable(&lp->napi);
1259 if (dev->irq)
1260 free_irq(dev->irq, dev);
1261 return -ENOMEM;
1262 }
1263 /* Start ipg timer */
1264 if(lp->options & OPTION_DYN_IPG_ENABLE){
1265 add_timer(&lp->ipg_data.ipg_timer);
1266 printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
1267 }
1268
1269 lp->opened = 1;
1270
1271 spin_unlock_irq(&lp->lock);
1272
1273 netif_start_queue(dev);
1274
1275 return 0;
1276}
1277/*
1278 * This function checks whether any transmit descriptors are available to queue more packets.
1279 */
1280static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
1281{
1282 int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
1283 if (lp->tx_skbuff[tx_index])
1284 return -1;
1285 else
1286 return 0;
1287
1288}
1289/*
1290 * This function queues transmit packets into the descriptors and triggers the send operation. It also initializes the transmit descriptors with the buffer physical address, byte count, ownership-to-hardware flag, etc.
1291 */
1292
1293static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1294 struct net_device * dev)
1295{
1296 struct amd8111e_priv *lp = netdev_priv(dev);
1297 int tx_index;
1298 unsigned long flags;
1299
1300 spin_lock_irqsave(&lp->lock, flags);
1301
1302 tx_index = lp->tx_idx & TX_RING_DR_MOD_MASK;
1303
1304 lp->tx_ring[tx_index].buff_count = cpu_to_le16(skb->len);
1305
1306 lp->tx_skbuff[tx_index] = skb;
1307 lp->tx_ring[tx_index].tx_flags = 0;
1308
1309#if AMD8111E_VLAN_TAG_USED
1310 if (vlan_tx_tag_present(skb)) {
1311 lp->tx_ring[tx_index].tag_ctrl_cmd |=
1312 cpu_to_le16(TCC_VLAN_INSERT);
1313 lp->tx_ring[tx_index].tag_ctrl_info =
1314 cpu_to_le16(vlan_tx_tag_get(skb));
1315
1316 }
1317#endif
1318 lp->tx_dma_addr[tx_index] =
1319 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
1320 lp->tx_ring[tx_index].buff_phy_addr =
1321 cpu_to_le32(lp->tx_dma_addr[tx_index]);
1322
1323 /* Set FCS and LTINT bits */
1324 wmb();
1325 lp->tx_ring[tx_index].tx_flags |=
1326 cpu_to_le16(OWN_BIT | STP_BIT | ENP_BIT|ADD_FCS_BIT|LTINT_BIT);
1327
1328 lp->tx_idx++;
1329
1330 /* Trigger an immediate send poll. */
1331 writel( VAL1 | TDMD0, lp->mmio + CMD0);
1332 writel( VAL2 | RDMD0,lp->mmio + CMD0);
1333
1334 if(amd8111e_tx_queue_avail(lp) < 0){
1335 netif_stop_queue(dev);
1336 }
1337 spin_unlock_irqrestore(&lp->lock, flags);
1338 return NETDEV_TX_OK;
1339}
1340/*
1341 * This function dumps the relevant memory-mapped registers of the device into a buffer.
1342 */
1343static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
1344{
1345 void __iomem *mmio = lp->mmio;
1346 /* Read only necessary registers */
1347 buf[0] = readl(mmio + XMT_RING_BASE_ADDR0);
1348 buf[1] = readl(mmio + XMT_RING_LEN0);
1349 buf[2] = readl(mmio + RCV_RING_BASE_ADDR0);
1350 buf[3] = readl(mmio + RCV_RING_LEN0);
1351 buf[4] = readl(mmio + CMD0);
1352 buf[5] = readl(mmio + CMD2);
1353 buf[6] = readl(mmio + CMD3);
1354 buf[7] = readl(mmio + CMD7);
1355 buf[8] = readl(mmio + INT0);
1356 buf[9] = readl(mmio + INTEN0);
1357 buf[10] = readl(mmio + LADRF);
1358 buf[11] = readl(mmio + LADRF+4);
1359 buf[12] = readl(mmio + STAT0);
1360}
1361
1362
1363/*
1364 * This function sets promiscuous mode, all-multicast mode or the
1365 * multicast address list on the device.
1366 */
1367static void amd8111e_set_multicast_list(struct net_device *dev)
1368{
1369 struct netdev_hw_addr *ha;
1370 struct amd8111e_priv *lp = netdev_priv(dev);
1371	u32 mc_filter[2];
1372 int bit_num;
1373
1374 if(dev->flags & IFF_PROMISC){
1375 writel( VAL2 | PROM, lp->mmio + CMD2);
1376 return;
1377 }
1378 else
1379 writel( PROM, lp->mmio + CMD2);
1380 if (dev->flags & IFF_ALLMULTI ||
1381 netdev_mc_count(dev) > MAX_FILTER_SIZE) {
1382		/* receive all multicast packets */
1383 mc_filter[1] = mc_filter[0] = 0xffffffff;
1384 lp->options |= OPTION_MULTICAST_ENABLE;
1385 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1386 return;
1387 }
1388 if (netdev_mc_empty(dev)) {
1389 /* get only own packets */
1390 mc_filter[1] = mc_filter[0] = 0;
1391 lp->options &= ~OPTION_MULTICAST_ENABLE;
1392 amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
1393 /* disable promiscuous mode */
1394 writel(PROM, lp->mmio + CMD2);
1395 return;
1396 }
1397 /* load all the multicast addresses in the logic filter */
1398 lp->options |= OPTION_MULTICAST_ENABLE;
1399 mc_filter[1] = mc_filter[0] = 0;
1400 netdev_for_each_mc_addr(ha, dev) {
1401 bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1402 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1403 }
1404 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
1405
1406 /* To eliminate PCI posting bug */
1407 readl(lp->mmio + CMD2);
1408
1409}
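
The filter above hashes each multicast MAC with a little-endian CRC-32 and uses the top 6 bits to select one of the 64 LADRF bits. A minimal userspace sketch of that computation, assuming the standard bit-reversed polynomial 0xEDB88320 (the MAC below is an arbitrary example):

    #include <stdio.h>
    #include <stdint.h>

    /* Bit-reversed CRC-32 over the address bytes, as ether_crc_le() does. */
    static uint32_t ether_crc_le(int len, const uint8_t *addr)
    {
    	uint32_t crc = 0xffffffff;
    	int i, bit;

    	for (i = 0; i < len; i++) {
    		crc ^= addr[i];
    		for (bit = 0; bit < 8; bit++)
    			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
    	}
    	return crc;
    }

    int main(void)
    {
    	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    	uint32_t filter[2] = { 0, 0 };

    	/* Top 6 CRC bits pick one of 64 filter bits, as in the driver. */
    	int bit_num = (ether_crc_le(6, mac) >> 26) & 0x3f;

    	filter[bit_num >> 5] |= 1u << (bit_num & 31);
    	printf("bit %d -> LADRF word %d = 0x%08x\n",
    	       bit_num, bit_num >> 5, filter[bit_num >> 5]);
    	return 0;
    }
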
1410
1411static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
1412{
1413 struct amd8111e_priv *lp = netdev_priv(dev);
1414 struct pci_dev *pci_dev = lp->pci_dev;
1415 strcpy (info->driver, MODULE_NAME);
1416 strcpy (info->version, MODULE_VERS);
1417 sprintf(info->fw_version,"%u",chip_version);
1418 strcpy (info->bus_info, pci_name(pci_dev));
1419}
1420
1421static int amd8111e_get_regs_len(struct net_device *dev)
1422{
1423 return AMD8111E_REG_DUMP_LEN;
1424}
1425
1426static void amd8111e_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
1427{
1428 struct amd8111e_priv *lp = netdev_priv(dev);
1429 regs->version = 0;
1430 amd8111e_read_regs(lp, buf);
1431}
1432
1433static int amd8111e_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1434{
1435 struct amd8111e_priv *lp = netdev_priv(dev);
1436 spin_lock_irq(&lp->lock);
1437 mii_ethtool_gset(&lp->mii_if, ecmd);
1438 spin_unlock_irq(&lp->lock);
1439 return 0;
1440}
1441
1442static int amd8111e_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1443{
1444 struct amd8111e_priv *lp = netdev_priv(dev);
1445 int res;
1446 spin_lock_irq(&lp->lock);
1447 res = mii_ethtool_sset(&lp->mii_if, ecmd);
1448 spin_unlock_irq(&lp->lock);
1449 return res;
1450}
1451
1452static int amd8111e_nway_reset(struct net_device *dev)
1453{
1454 struct amd8111e_priv *lp = netdev_priv(dev);
1455 return mii_nway_restart(&lp->mii_if);
1456}
1457
1458static u32 amd8111e_get_link(struct net_device *dev)
1459{
1460 struct amd8111e_priv *lp = netdev_priv(dev);
1461 return mii_link_ok(&lp->mii_if);
1462}
1463
1464static void amd8111e_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1465{
1466 struct amd8111e_priv *lp = netdev_priv(dev);
1467 wol_info->supported = WAKE_MAGIC|WAKE_PHY;
1468 if (lp->options & OPTION_WOL_ENABLE)
1469 wol_info->wolopts = WAKE_MAGIC;
1470}
1471
1472static int amd8111e_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol_info)
1473{
1474 struct amd8111e_priv *lp = netdev_priv(dev);
1475 if (wol_info->wolopts & ~(WAKE_MAGIC|WAKE_PHY))
1476 return -EINVAL;
1477 spin_lock_irq(&lp->lock);
1478 if (wol_info->wolopts & WAKE_MAGIC)
1479 lp->options |=
1480 (OPTION_WOL_ENABLE | OPTION_WAKE_MAGIC_ENABLE);
1481 else if(wol_info->wolopts & WAKE_PHY)
1482 lp->options |=
1483 (OPTION_WOL_ENABLE | OPTION_WAKE_PHY_ENABLE);
1484 else
1485 lp->options &= ~OPTION_WOL_ENABLE;
1486 spin_unlock_irq(&lp->lock);
1487 return 0;
1488}
1489
1490static const struct ethtool_ops ops = {
1491 .get_drvinfo = amd8111e_get_drvinfo,
1492 .get_regs_len = amd8111e_get_regs_len,
1493 .get_regs = amd8111e_get_regs,
1494 .get_settings = amd8111e_get_settings,
1495 .set_settings = amd8111e_set_settings,
1496 .nway_reset = amd8111e_nway_reset,
1497 .get_link = amd8111e_get_link,
1498 .get_wol = amd8111e_get_wol,
1499 .set_wol = amd8111e_set_wol,
1500};
1501
1502/*
1503 * This implements the ethtool interface. It gives driver info, gets/sets link settings, dumps memory-mapped register values, forces autonegotiation, and sets/gets WOL options for the ethtool application.
1504 */
1505
1506static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
1507{
1508 struct mii_ioctl_data *data = if_mii(ifr);
1509 struct amd8111e_priv *lp = netdev_priv(dev);
1510 int err;
1511 u32 mii_regval;
1512
1513 switch(cmd) {
1514 case SIOCGMIIPHY:
1515 data->phy_id = lp->ext_phy_addr;
1516
1517 /* fallthru */
1518 case SIOCGMIIREG:
1519
1520 spin_lock_irq(&lp->lock);
1521 err = amd8111e_read_phy(lp, data->phy_id,
1522 data->reg_num & PHY_REG_ADDR_MASK, &mii_regval);
1523 spin_unlock_irq(&lp->lock);
1524
1525 data->val_out = mii_regval;
1526 return err;
1527
1528 case SIOCSMIIREG:
1529
1530 spin_lock_irq(&lp->lock);
1531 err = amd8111e_write_phy(lp, data->phy_id,
1532 data->reg_num & PHY_REG_ADDR_MASK, data->val_in);
1533 spin_unlock_irq(&lp->lock);
1534
1535 return err;
1536
1537 default:
1538 /* do nothing */
1539 break;
1540 }
1541 return -EOPNOTSUPP;
1542}
1543static int amd8111e_set_mac_address(struct net_device *dev, void *p)
1544{
1545 struct amd8111e_priv *lp = netdev_priv(dev);
1546 int i;
1547 struct sockaddr *addr = p;
1548
1549 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1550 spin_lock_irq(&lp->lock);
1551 /* Setting the MAC address to the device */
1552 for(i = 0; i < ETH_ADDR_LEN; i++)
1553 writeb( dev->dev_addr[i], lp->mmio + PADR + i );
1554
1555 spin_unlock_irq(&lp->lock);
1556
1557 return 0;
1558}
1559
1560/*
1561 * This function changes the MTU of the device. It restarts the device to reinitialize the descriptors with new receive buffers.
1562 */
1563static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
1564{
1565 struct amd8111e_priv *lp = netdev_priv(dev);
1566 int err;
1567
1568 if ((new_mtu < AMD8111E_MIN_MTU) || (new_mtu > AMD8111E_MAX_MTU))
1569 return -EINVAL;
1570
1571 if (!netif_running(dev)) {
1572		/* new_mtu will be used
1573		   when the device is started next time */
1574 dev->mtu = new_mtu;
1575 return 0;
1576 }
1577
1578 spin_lock_irq(&lp->lock);
1579
1580 /* stop the chip */
1581 writel(RUN, lp->mmio + CMD0);
1582
1583 dev->mtu = new_mtu;
1584
1585 err = amd8111e_restart(dev);
1586 spin_unlock_irq(&lp->lock);
1587 if(!err)
1588 netif_start_queue(dev);
1589 return err;
1590}
1591
1592static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
1593{
1594 writel( VAL1|MPPLBA, lp->mmio + CMD3);
1595 writel( VAL0|MPEN_SW, lp->mmio + CMD7);
1596
1597 /* To eliminate PCI posting bug */
1598 readl(lp->mmio + CMD7);
1599 return 0;
1600}
1601
1602static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
1603{
1604
1605	/* Adapter is already stopped/suspended/interrupt-disabled */
1606 writel(VAL0|LCMODE_SW,lp->mmio + CMD7);
1607
1608 /* To eliminate PCI posting bug */
1609 readl(lp->mmio + CMD7);
1610 return 0;
1611}
1612
1613/*
1614 * This function is called when a packet transmission fails to complete
1615 * within a reasonable period, on the assumption that an interrupt has
1616 * failed or the interface is locked up. This function will reinitialize
1617 * the hardware.
1618 */
1619static void amd8111e_tx_timeout(struct net_device *dev)
1620{
1621 struct amd8111e_priv* lp = netdev_priv(dev);
1622 int err;
1623
1624 printk(KERN_ERR "%s: transmit timed out, resetting\n",
1625 dev->name);
1626 spin_lock_irq(&lp->lock);
1627 err = amd8111e_restart(dev);
1628 spin_unlock_irq(&lp->lock);
1629 if(!err)
1630 netif_wake_queue(dev);
1631}
1632static int amd8111e_suspend(struct pci_dev *pci_dev, pm_message_t state)
1633{
1634 struct net_device *dev = pci_get_drvdata(pci_dev);
1635 struct amd8111e_priv *lp = netdev_priv(dev);
1636
1637 if (!netif_running(dev))
1638 return 0;
1639
1640 /* disable the interrupt */
1641 spin_lock_irq(&lp->lock);
1642 amd8111e_disable_interrupt(lp);
1643 spin_unlock_irq(&lp->lock);
1644
1645 netif_device_detach(dev);
1646
1647 /* stop chip */
1648 spin_lock_irq(&lp->lock);
1649 if(lp->options & OPTION_DYN_IPG_ENABLE)
1650 del_timer_sync(&lp->ipg_data.ipg_timer);
1651 amd8111e_stop_chip(lp);
1652 spin_unlock_irq(&lp->lock);
1653
1654 if(lp->options & OPTION_WOL_ENABLE){
1655 /* enable wol */
1656 if(lp->options & OPTION_WAKE_MAGIC_ENABLE)
1657 amd8111e_enable_magicpkt(lp);
1658 if(lp->options & OPTION_WAKE_PHY_ENABLE)
1659 amd8111e_enable_link_change(lp);
1660
1661 pci_enable_wake(pci_dev, PCI_D3hot, 1);
1662 pci_enable_wake(pci_dev, PCI_D3cold, 1);
1663
1664 }
1665 else{
1666 pci_enable_wake(pci_dev, PCI_D3hot, 0);
1667 pci_enable_wake(pci_dev, PCI_D3cold, 0);
1668 }
1669
1670 pci_save_state(pci_dev);
1671 pci_set_power_state(pci_dev, PCI_D3hot);
1672
1673 return 0;
1674}
1675static int amd8111e_resume(struct pci_dev *pci_dev)
1676{
1677 struct net_device *dev = pci_get_drvdata(pci_dev);
1678 struct amd8111e_priv *lp = netdev_priv(dev);
1679
1680 if (!netif_running(dev))
1681 return 0;
1682
1683 pci_set_power_state(pci_dev, PCI_D0);
1684 pci_restore_state(pci_dev);
1685
1686 pci_enable_wake(pci_dev, PCI_D3hot, 0);
1687 pci_enable_wake(pci_dev, PCI_D3cold, 0); /* D3 cold */
1688
1689 netif_device_attach(dev);
1690
1691 spin_lock_irq(&lp->lock);
1692 amd8111e_restart(dev);
1693 /* Restart ipg timer */
1694 if(lp->options & OPTION_DYN_IPG_ENABLE)
1695 mod_timer(&lp->ipg_data.ipg_timer,
1696 jiffies + IPG_CONVERGE_JIFFIES);
1697 spin_unlock_irq(&lp->lock);
1698
1699 return 0;
1700}
1701
1702
1703static void __devexit amd8111e_remove_one(struct pci_dev *pdev)
1704{
1705 struct net_device *dev = pci_get_drvdata(pdev);
1706 if (dev) {
1707 unregister_netdev(dev);
1708 iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
1709 free_netdev(dev);
1710 pci_release_regions(pdev);
1711 pci_disable_device(pdev);
1712 pci_set_drvdata(pdev, NULL);
1713 }
1714}
1715static void amd8111e_config_ipg(struct net_device* dev)
1716{
1717 struct amd8111e_priv *lp = netdev_priv(dev);
1718 struct ipg_info* ipg_data = &lp->ipg_data;
1719 void __iomem *mmio = lp->mmio;
1720 unsigned int prev_col_cnt = ipg_data->col_cnt;
1721 unsigned int total_col_cnt;
1722 unsigned int tmp_ipg;
1723
1724 if(lp->link_config.duplex == DUPLEX_FULL){
1725 ipg_data->ipg = DEFAULT_IPG;
1726 return;
1727 }
1728
1729 if(ipg_data->ipg_state == SSTATE){
1730
1731 if(ipg_data->timer_tick == IPG_STABLE_TIME){
1732
1733 ipg_data->timer_tick = 0;
1734 ipg_data->ipg = MIN_IPG - IPG_STEP;
1735 ipg_data->current_ipg = MIN_IPG;
1736 ipg_data->diff_col_cnt = 0xFFFFFFFF;
1737 ipg_data->ipg_state = CSTATE;
1738 }
1739 else
1740 ipg_data->timer_tick++;
1741 }
1742
1743 if(ipg_data->ipg_state == CSTATE){
1744
1745 /* Get the current collision count */
1746
1747 total_col_cnt = ipg_data->col_cnt =
1748 amd8111e_read_mib(mmio, xmt_collisions);
1749
1750 if ((total_col_cnt - prev_col_cnt) <
1751 (ipg_data->diff_col_cnt)){
1752
1753 ipg_data->diff_col_cnt =
1754 total_col_cnt - prev_col_cnt ;
1755
1756 ipg_data->ipg = ipg_data->current_ipg;
1757 }
1758
1759 ipg_data->current_ipg += IPG_STEP;
1760
1761 if (ipg_data->current_ipg <= MAX_IPG)
1762 tmp_ipg = ipg_data->current_ipg;
1763 else{
1764 tmp_ipg = ipg_data->ipg;
1765 ipg_data->ipg_state = SSTATE;
1766 }
1767 writew((u32)tmp_ipg, mmio + IPG);
1768 writew((u32)(tmp_ipg - IFS1_DELTA), mmio + IFS1);
1769 }
1770 mod_timer(&lp->ipg_data.ipg_timer, jiffies + IPG_CONVERGE_JIFFIES);
1771 return;
1772
1773}
1774
1775static void __devinit amd8111e_probe_ext_phy(struct net_device* dev)
1776{
1777 struct amd8111e_priv *lp = netdev_priv(dev);
1778 int i;
1779
1780 for (i = 0x1e; i >= 0; i--) {
1781 u32 id1, id2;
1782
1783 if (amd8111e_read_phy(lp, i, MII_PHYSID1, &id1))
1784 continue;
1785 if (amd8111e_read_phy(lp, i, MII_PHYSID2, &id2))
1786 continue;
1787 lp->ext_phy_id = (id1 << 16) | id2;
1788 lp->ext_phy_addr = i;
1789 return;
1790 }
1791 lp->ext_phy_id = 0;
1792 lp->ext_phy_addr = 1;
1793}
1794
1795static const struct net_device_ops amd8111e_netdev_ops = {
1796 .ndo_open = amd8111e_open,
1797 .ndo_stop = amd8111e_close,
1798 .ndo_start_xmit = amd8111e_start_xmit,
1799 .ndo_tx_timeout = amd8111e_tx_timeout,
1800 .ndo_get_stats = amd8111e_get_stats,
1801 .ndo_set_rx_mode = amd8111e_set_multicast_list,
1802 .ndo_validate_addr = eth_validate_addr,
1803 .ndo_set_mac_address = amd8111e_set_mac_address,
1804 .ndo_do_ioctl = amd8111e_ioctl,
1805 .ndo_change_mtu = amd8111e_change_mtu,
1806#ifdef CONFIG_NET_POLL_CONTROLLER
1807 .ndo_poll_controller = amd8111e_poll,
1808#endif
1809};
1810
1811static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
1812 const struct pci_device_id *ent)
1813{
1814 int err,i,pm_cap;
1815 unsigned long reg_addr,reg_len;
1816 struct amd8111e_priv* lp;
1817 struct net_device* dev;
1818
1819 err = pci_enable_device(pdev);
1820 if(err){
1821 printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
1822 "exiting.\n");
1823 return err;
1824 }
1825
1826 if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
1827 printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
1828 "exiting.\n");
1829 err = -ENODEV;
1830 goto err_disable_pdev;
1831 }
1832
1833 err = pci_request_regions(pdev, MODULE_NAME);
1834 if(err){
1835 printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
1836 "exiting.\n");
1837 goto err_disable_pdev;
1838 }
1839
1840 pci_set_master(pdev);
1841
1842 /* Find power-management capability. */
1843	if ((pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM)) == 0) {
1844		printk(KERN_ERR "amd8111e: No Power Management capability, "
1845			"exiting.\n");
1846		err = -ENODEV; goto err_free_reg;
1847	}
1848
1849 /* Initialize DMA */
1850	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
1851		printk(KERN_ERR "amd8111e: DMA not supported, "
1852			"exiting.\n");
1853		err = -ENODEV; goto err_free_reg;
1854	}
1855
1856 reg_addr = pci_resource_start(pdev, 0);
1857 reg_len = pci_resource_len(pdev, 0);
1858
1859 dev = alloc_etherdev(sizeof(struct amd8111e_priv));
1860 if (!dev) {
1861 printk(KERN_ERR "amd8111e: Etherdev alloc failed, exiting.\n");
1862 err = -ENOMEM;
1863 goto err_free_reg;
1864 }
1865
1866 SET_NETDEV_DEV(dev, &pdev->dev);
1867
1868#if AMD8111E_VLAN_TAG_USED
1869 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX ;
1870#endif
1871
1872 lp = netdev_priv(dev);
1873 lp->pci_dev = pdev;
1874 lp->amd8111e_net_dev = dev;
1875 lp->pm_cap = pm_cap;
1876
1877 spin_lock_init(&lp->lock);
1878
1879 lp->mmio = ioremap(reg_addr, reg_len);
1880 if (!lp->mmio) {
1881 printk(KERN_ERR "amd8111e: Cannot map device registers, "
1882 "exiting\n");
1883 err = -ENOMEM;
1884 goto err_free_dev;
1885 }
1886
1887 /* Initializing MAC address */
1888 for(i = 0; i < ETH_ADDR_LEN; i++)
1889 dev->dev_addr[i] = readb(lp->mmio + PADR + i);
1890
1891	/* Set the user-defined parameters */
1892 lp->ext_phy_option = speed_duplex[card_idx];
1893 if(coalesce[card_idx])
1894 lp->options |= OPTION_INTR_COAL_ENABLE;
1895 if(dynamic_ipg[card_idx++])
1896 lp->options |= OPTION_DYN_IPG_ENABLE;
1897
1898
1899 /* Initialize driver entry points */
1900 dev->netdev_ops = &amd8111e_netdev_ops;
1901 SET_ETHTOOL_OPS(dev, &ops);
1902 dev->irq =pdev->irq;
1903 dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
1904 netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
1905
1906#if AMD8111E_VLAN_TAG_USED
1907 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1908#endif
1909 /* Probe the external PHY */
1910 amd8111e_probe_ext_phy(dev);
1911
1912 /* setting mii default values */
1913 lp->mii_if.dev = dev;
1914 lp->mii_if.mdio_read = amd8111e_mdio_read;
1915 lp->mii_if.mdio_write = amd8111e_mdio_write;
1916 lp->mii_if.phy_id = lp->ext_phy_addr;
1917
1918 /* Set receive buffer length and set jumbo option*/
1919 amd8111e_set_rx_buff_len(dev);
1920
1921
1922 err = register_netdev(dev);
1923 if (err) {
1924 printk(KERN_ERR "amd8111e: Cannot register net device, "
1925 "exiting.\n");
1926 goto err_iounmap;
1927 }
1928
1929 pci_set_drvdata(pdev, dev);
1930
1931 /* Initialize software ipg timer */
1932 if(lp->options & OPTION_DYN_IPG_ENABLE){
1933 init_timer(&lp->ipg_data.ipg_timer);
1934 lp->ipg_data.ipg_timer.data = (unsigned long) dev;
1935 lp->ipg_data.ipg_timer.function = (void *)&amd8111e_config_ipg;
1936 lp->ipg_data.ipg_timer.expires = jiffies +
1937 IPG_CONVERGE_JIFFIES;
1938 lp->ipg_data.ipg = DEFAULT_IPG;
1939 lp->ipg_data.ipg_state = CSTATE;
1940 }
1941
1942 /* display driver and device information */
1943
1944 chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
1945 printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
1946 dev->name,MODULE_VERS);
1947 printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
1948 dev->name, chip_version, dev->dev_addr);
1949 if (lp->ext_phy_id)
1950 printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
1951 dev->name, lp->ext_phy_id, lp->ext_phy_addr);
1952 else
1953 printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
1954 dev->name);
1955 return 0;
1956err_iounmap:
1957 iounmap(lp->mmio);
1958
1959err_free_dev:
1960 free_netdev(dev);
1961
1962err_free_reg:
1963 pci_release_regions(pdev);
1964
1965err_disable_pdev:
1966 pci_disable_device(pdev);
1967 pci_set_drvdata(pdev, NULL);
1968 return err;
1969
1970}
1971
1972static struct pci_driver amd8111e_driver = {
1973 .name = MODULE_NAME,
1974 .id_table = amd8111e_pci_tbl,
1975 .probe = amd8111e_probe_one,
1976 .remove = __devexit_p(amd8111e_remove_one),
1977 .suspend = amd8111e_suspend,
1978 .resume = amd8111e_resume
1979};
1980
1981static int __init amd8111e_init(void)
1982{
1983 return pci_register_driver(&amd8111e_driver);
1984}
1985
1986static void __exit amd8111e_cleanup(void)
1987{
1988 pci_unregister_driver(&amd8111e_driver);
1989}
1990
1991module_init(amd8111e_init);
1992module_exit(amd8111e_cleanup);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
new file mode 100644
index 000000000000..2ff2e7a12dd0
--- /dev/null
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -0,0 +1,816 @@
1/*
2 * Advanced Micro Devices Inc. AMD8111E Linux Network Driver
3 * Copyright (C) 2003 Advanced Micro Devices
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
18 * USA
19
20Module Name:
21
22 amd8111e.h
23
24Abstract:
25
26 AMD8111 based 10/100 Ethernet Controller driver definitions.
27
28Environment:
29
30 Kernel Mode
31
32Revision History:
33 3.0.0
34 Initial Revision.
35 3.0.1
36*/
37
38#ifndef _AMD811E_H
39#define _AMD811E_H
40
41/* Command style register access
42
43Registers CMD0, CMD2, CMD3, CMD7 and INTEN0 use a write-access technique called command-style access. It allows writing to selected bits of a register without altering the bits that are not selected. Command-style registers are divided into 4 bytes that can be written independently. The high-order bit of each byte is the value bit; it specifies the value that will be written into the selected bits of the register.
44
45e.g., if the value 10011010b is written into the least significant byte of a command-style register, bits 1, 3 and 4 of the register will be set to 1, and the other bits will not be altered. If the value 00011010b is written into the same byte, bits 1, 3 and 4 will be cleared to 0 and the other bits will not be altered.
46
47*/
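
A software model of the semantics just described (illustrative only; the hardware applies this itself on a byte write): bit 7 of the written byte supplies the value, bits 0-6 select which register bits change.

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t cmd_style_write(uint8_t reg, uint8_t wr)
    {
    	uint8_t select = wr & 0x7f;			/* bits to modify */
    	uint8_t value = (wr & 0x80) ? 0xff : 0x00;	/* value for them */

    	return (reg & ~select) | (value & select);
    }

    int main(void)
    {
    	/* 10011010b sets bits 1, 3 and 4; 00011010b clears them. */
    	printf("0x%02x\n", cmd_style_write(0x00, 0x9a));	/* 0x1a */
    	printf("0x%02x\n", cmd_style_write(0xff, 0x1a));	/* 0xe5 */
    	return 0;
    }
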
48
49/* Offset for Memory Mapped Registers. */
50/* 32 bit registers */
51
52#define ASF_STAT 0x00 /* ASF status register */
53#define CHIPID			0x04	/* Chip ID register */
54#define MIB_DATA 0x10 /* MIB data register */
55#define MIB_ADDR 0x14 /* MIB address register */
56#define STAT0 0x30 /* Status0 register */
57#define INT0 0x38 /* Interrupt0 register */
58#define INTEN0 0x40 /* Interrupt0 enable register*/
59#define CMD0 0x48 /* Command0 register */
60#define CMD2 0x50 /* Command2 register */
61#define CMD3			0x54	/* Command3 register */
62#define CMD7 0x64 /* Command7 register */
63
64#define CTRL1 0x6C /* Control1 register */
65#define CTRL2 0x70 /* Control2 register */
66
67#define XMT_RING_LIMIT 0x7C /* Transmit ring limit register */
68
69#define AUTOPOLL0 0x88 /* Auto-poll0 register */
70#define AUTOPOLL1 0x8A /* Auto-poll1 register */
71#define AUTOPOLL2 0x8C /* Auto-poll2 register */
72#define AUTOPOLL3 0x8E /* Auto-poll3 register */
73#define AUTOPOLL4 0x90 /* Auto-poll4 register */
74#define AUTOPOLL5 0x92 /* Auto-poll5 register */
75
76#define AP_VALUE 0x98 /* Auto-poll value register */
77#define DLY_INT_A 0xA8 /* Group A delayed interrupt register */
78#define DLY_INT_B 0xAC /* Group B delayed interrupt register */
79
80#define FLOW_CONTROL 0xC8 /* Flow control register */
81#define PHY_ACCESS 0xD0 /* PHY access register */
82
83#define STVAL 0xD8 /* Software timer value register */
84
85#define XMT_RING_BASE_ADDR0 0x100 /* Transmit ring0 base addr register */
86#define XMT_RING_BASE_ADDR1 0x108 /* Transmit ring1 base addr register */
87#define XMT_RING_BASE_ADDR2 0x110 /* Transmit ring2 base addr register */
88#define XMT_RING_BASE_ADDR3	0x118	/* Transmit ring3 base addr register */
89
90#define RCV_RING_BASE_ADDR0	0x120	/* Receive ring0 base addr register */
91
92#define PMAT0 0x190 /* OnNow pattern register0 */
93#define PMAT1 0x194 /* OnNow pattern register1 */
94
95/* 16bit registers */
96
97#define XMT_RING_LEN0 0x140 /* Transmit Ring0 length register */
98#define XMT_RING_LEN1 0x144 /* Transmit Ring1 length register */
99#define XMT_RING_LEN2 0x148 /* Transmit Ring2 length register */
100#define XMT_RING_LEN3 0x14C /* Transmit Ring3 length register */
101
102#define RCV_RING_LEN0 0x150 /* Receive Ring0 length register */
103
104#define SRAM_SIZE 0x178 /* SRAM size register */
105#define SRAM_BOUNDARY 0x17A /* SRAM boundary register */
106
107/* 48bit register */
108
109#define PADR 0x160 /* Physical address register */
110
111#define IFS1 0x18C /* Inter-frame spacing Part1 register */
112#define IFS 0x18D /* Inter-frame spacing register */
113#define IPG 0x18E /* Inter-frame gap register */
114/* 64bit register */
115
116#define LADRF 0x168 /* Logical address filter register */
117
118
119/* Register Bit Definitions */
120typedef enum {
121
122 ASF_INIT_DONE = (1 << 1),
123 ASF_INIT_PRESENT = (1 << 0),
124
125}STAT_ASF_BITS;
126
127typedef enum {
128
129 MIB_CMD_ACTIVE = (1 << 15 ),
130 MIB_RD_CMD = (1 << 13 ),
131 MIB_CLEAR = (1 << 12 ),
132 MIB_ADDRESS = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)|
133 (1 << 4) | (1 << 5),
134}MIB_ADDR_BITS;
135
136
137typedef enum {
138
139 PMAT_DET = (1 << 12),
140 MP_DET = (1 << 11),
141 LC_DET = (1 << 10),
142 SPEED_MASK = (1 << 9)|(1 << 8)|(1 << 7),
143 FULL_DPLX = (1 << 6),
144 LINK_STATS = (1 << 5),
145 AUTONEG_COMPLETE = (1 << 4),
146 MIIPD = (1 << 3),
147 RX_SUSPENDED = (1 << 2),
148 TX_SUSPENDED = (1 << 1),
149 RUNNING = (1 << 0),
150
151}STAT0_BITS;
152
153#define PHY_SPEED_10 0x2
154#define PHY_SPEED_100 0x3
155
156/* INT0 0x38, 32bit register */
157typedef enum {
158
159 INTR = (1 << 31),
160 PCSINT = (1 << 28),
161 LCINT = (1 << 27),
162 APINT5 = (1 << 26),
163 APINT4 = (1 << 25),
164 APINT3 = (1 << 24),
165 TINT_SUM = (1 << 23),
166 APINT2 = (1 << 22),
167 APINT1 = (1 << 21),
168 APINT0 = (1 << 20),
169 MIIPDTINT = (1 << 19),
170 MCCINT = (1 << 17),
171 MREINT = (1 << 16),
172 RINT_SUM = (1 << 15),
173 SPNDINT = (1 << 14),
174 MPINT = (1 << 13),
175 SINT = (1 << 12),
176 TINT3 = (1 << 11),
177 TINT2 = (1 << 10),
178 TINT1 = (1 << 9),
179 TINT0 = (1 << 8),
180 UINT = (1 << 7),
181 STINT = (1 << 4),
182 RINT0 = (1 << 0),
183
184}INT0_BITS;
185
186typedef enum {
187
188 VAL3 = (1 << 31), /* VAL bit for byte 3 */
189 VAL2 = (1 << 23), /* VAL bit for byte 2 */
190 VAL1 = (1 << 15), /* VAL bit for byte 1 */
191 VAL0 = (1 << 7), /* VAL bit for byte 0 */
192
193}VAL_BITS;
194
195typedef enum {
196
197 /* VAL3 */
198 LCINTEN = (1 << 27),
199 APINT5EN = (1 << 26),
200 APINT4EN = (1 << 25),
201 APINT3EN = (1 << 24),
202 /* VAL2 */
203 APINT2EN = (1 << 22),
204 APINT1EN = (1 << 21),
205 APINT0EN = (1 << 20),
206 MIIPDTINTEN = (1 << 19),
207 MCCIINTEN = (1 << 18),
208 MCCINTEN = (1 << 17),
209 MREINTEN = (1 << 16),
210 /* VAL1 */
211 SPNDINTEN = (1 << 14),
212 MPINTEN = (1 << 13),
213 TINTEN3 = (1 << 11),
214 SINTEN = (1 << 12),
215 TINTEN2 = (1 << 10),
216 TINTEN1 = (1 << 9),
217 TINTEN0 = (1 << 8),
218 /* VAL0 */
219 STINTEN = (1 << 4),
220 RINTEN0 = (1 << 0),
221
222 INTEN0_CLEAR = 0x1F7F7F1F, /* Command style register */
223
224}INTEN0_BITS;
225
226typedef enum {
227 /* VAL2 */
228 RDMD0 = (1 << 16),
229 /* VAL1 */
230 TDMD3 = (1 << 11),
231 TDMD2 = (1 << 10),
232 TDMD1 = (1 << 9),
233 TDMD0 = (1 << 8),
234 /* VAL0 */
235 UINTCMD = (1 << 6),
236 RX_FAST_SPND = (1 << 5),
237 TX_FAST_SPND = (1 << 4),
238 RX_SPND = (1 << 3),
239 TX_SPND = (1 << 2),
240 INTREN = (1 << 1),
241 RUN = (1 << 0),
242
243 CMD0_CLEAR = 0x000F0F7F, /* Command style register */
244
245}CMD0_BITS;
246
247typedef enum {
248
249 /* VAL3 */
250 CONDUIT_MODE = (1 << 29),
251 /* VAL2 */
252 RPA = (1 << 19),
253 DRCVPA = (1 << 18),
254 DRCVBC = (1 << 17),
255 PROM = (1 << 16),
256 /* VAL1 */
257 ASTRP_RCV = (1 << 13),
258 RCV_DROP0 = (1 << 12),
259 EMBA = (1 << 11),
260 DXMT2PD = (1 << 10),
261 LTINTEN = (1 << 9),
262 DXMTFCS = (1 << 8),
263 /* VAL0 */
264 APAD_XMT = (1 << 6),
265 DRTY = (1 << 5),
266 INLOOP = (1 << 4),
267 EXLOOP = (1 << 3),
268 REX_RTRY = (1 << 2),
269 REX_UFLO = (1 << 1),
270 REX_LCOL = (1 << 0),
271
272 CMD2_CLEAR = 0x3F7F3F7F, /* Command style register */
273
274}CMD2_BITS;
275
276typedef enum {
277
278 /* VAL3 */
279 ASF_INIT_DONE_ALIAS = (1 << 29),
280 /* VAL2 */
281 JUMBO = (1 << 21),
282 VSIZE = (1 << 20),
283 VLONLY = (1 << 19),
284 VL_TAG_DEL = (1 << 18),
285 /* VAL1 */
286 EN_PMGR = (1 << 14),
287 INTLEVEL = (1 << 13),
288 FORCE_FULL_DUPLEX = (1 << 12),
289 FORCE_LINK_STATUS = (1 << 11),
290 APEP = (1 << 10),
291 MPPLBA = (1 << 9),
292 /* VAL0 */
293 RESET_PHY_PULSE = (1 << 2),
294 RESET_PHY = (1 << 1),
295 PHY_RST_POL = (1 << 0),
296
297}CMD3_BITS;
298
299
300typedef enum {
301
302 /* VAL0 */
303 PMAT_SAVE_MATCH = (1 << 4),
304 PMAT_MODE = (1 << 3),
305 MPEN_SW = (1 << 1),
306 LCMODE_SW = (1 << 0),
307
308 CMD7_CLEAR = 0x0000001B /* Command style register */
309
310}CMD7_BITS;
311
312
313typedef enum {
314
315 RESET_PHY_WIDTH = (0xF << 16) | (0xF<< 20), /* 0x00FF0000 */
316 XMTSP_MASK = (1 << 9) | (1 << 8), /* 9:8 */
317 XMTSP_128 = (1 << 9), /* 9 */
318 XMTSP_64 = (1 << 8),
319 CACHE_ALIGN = (1 << 4),
320 BURST_LIMIT_MASK = (0xF << 0 ),
321 CTRL1_DEFAULT = 0x00010111,
322
323}CTRL1_BITS;
324
325typedef enum {
326
327 FMDC_MASK = (1 << 9)|(1 << 8), /* 9:8 */
328 XPHYRST = (1 << 7),
329 XPHYANE = (1 << 6),
330 XPHYFD = (1 << 5),
331 XPHYSP = (1 << 4) | (1 << 3), /* 4:3 */
332 APDW_MASK = (1 << 2) | (1 << 1) | (1 << 0), /* 2:0 */
333
334}CTRL2_BITS;
335
336/* XMT_RING_LIMIT 0x7C, 32bit register */
337typedef enum {
338
339 XMT_RING2_LIMIT = (0xFF << 16), /* 23:16 */
340 XMT_RING1_LIMIT = (0xFF << 8), /* 15:8 */
341 XMT_RING0_LIMIT = (0xFF << 0), /* 7:0 */
342
343}XMT_RING_LIMIT_BITS;
344
345typedef enum {
346
347 AP_REG0_EN = (1 << 15),
348 AP_REG0_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
349 AP_PHY0_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
350
351}AUTOPOLL0_BITS;
352
353/* AUTOPOLL1 0x8A, 16bit register */
354typedef enum {
355
356 AP_REG1_EN = (1 << 15),
357 AP_REG1_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
358 AP_PRE_SUP1 = (1 << 6),
359 AP_PHY1_DFLT = (1 << 5),
360 AP_PHY1_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
361
362}AUTOPOLL1_BITS;
363
364
365typedef enum {
366
367 AP_REG2_EN = (1 << 15),
368 AP_REG2_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
369 AP_PRE_SUP2 = (1 << 6),
370 AP_PHY2_DFLT = (1 << 5),
371 AP_PHY2_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
372
373}AUTOPOLL2_BITS;
374
375typedef enum {
376
377 AP_REG3_EN = (1 << 15),
378 AP_REG3_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
379 AP_PRE_SUP3 = (1 << 6),
380 AP_PHY3_DFLT = (1 << 5),
381 AP_PHY3_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
382
383}AUTOPOLL3_BITS;
384
385
386typedef enum {
387
388 AP_REG4_EN = (1 << 15),
389 AP_REG4_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
390 AP_PRE_SUP4 = (1 << 6),
391 AP_PHY4_DFLT = (1 << 5),
392 AP_PHY4_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
393
394}AUTOPOLL4_BITS;
395
396
397typedef enum {
398
399 AP_REG5_EN = (1 << 15),
400 AP_REG5_ADDR_MASK = (0xF << 8) |(1 << 12),/* 12:8 */
401 AP_PRE_SUP5 = (1 << 6),
402 AP_PHY5_DFLT = (1 << 5),
403 AP_PHY5_ADDR_MASK = (0xF << 0) |(1 << 4),/* 4:0 */
404
405}AUTOPOLL5_BITS;
406
407
408
409
410/* AP_VALUE 0x98, 32bit register */
411typedef enum {
412
413 AP_VAL_ACTIVE = (1 << 31),
414 AP_VAL_RD_CMD = ( 1 << 29),
415 AP_ADDR = (1 << 18)|(1 << 17)|(1 << 16), /* 18:16 */
416 AP_VAL = (0xF << 0) | (0xF << 4) |( 0xF << 8) |
417 (0xF << 12), /* 15:0 */
418
419}AP_VALUE_BITS;
420
421typedef enum {
422
423 DLY_INT_A_R3 = (1 << 31),
424 DLY_INT_A_R2 = (1 << 30),
425 DLY_INT_A_R1 = (1 << 29),
426 DLY_INT_A_R0 = (1 << 28),
427 DLY_INT_A_T3 = (1 << 27),
428 DLY_INT_A_T2 = (1 << 26),
429 DLY_INT_A_T1 = (1 << 25),
430 DLY_INT_A_T0 = ( 1 << 24),
431 EVENT_COUNT_A = (0xF << 16) | (0x1 << 20),/* 20:16 */
432 MAX_DELAY_TIME_A = (0xF << 0) | (0xF << 4) | (1 << 8)|
433 (1 << 9) | (1 << 10), /* 10:0 */
434
435}DLY_INT_A_BITS;
436
437typedef enum {
438
439 DLY_INT_B_R3 = (1 << 31),
440 DLY_INT_B_R2 = (1 << 30),
441 DLY_INT_B_R1 = (1 << 29),
442 DLY_INT_B_R0 = (1 << 28),
443 DLY_INT_B_T3 = (1 << 27),
444 DLY_INT_B_T2 = (1 << 26),
445 DLY_INT_B_T1 = (1 << 25),
446 DLY_INT_B_T0 = ( 1 << 24),
447 EVENT_COUNT_B = (0xF << 16) | (0x1 << 20),/* 20:16 */
448 MAX_DELAY_TIME_B = (0xF << 0) | (0xF << 4) | (1 << 8)|
449 (1 << 9) | (1 << 10), /* 10:0 */
450}DLY_INT_B_BITS;
451
452
453/* FLOW_CONTROL 0xC8, 32bit register */
454typedef enum {
455
456 PAUSE_LEN_CHG = (1 << 30),
457 FTPE = (1 << 22),
458 FRPE = (1 << 21),
459 NAPA = (1 << 20),
460 NPA = (1 << 19),
461 FIXP = ( 1 << 18),
462 FCCMD = ( 1 << 16),
463 PAUSE_LEN = (0xF << 0) | (0xF << 4) |( 0xF << 8) | (0xF << 12), /* 15:0 */
464
465}FLOW_CONTROL_BITS;
466
467/* PHY_ ACCESS 0xD0, 32bit register */
468typedef enum {
469
470 PHY_CMD_ACTIVE = (1 << 31),
471 PHY_WR_CMD = (1 << 30),
472 PHY_RD_CMD = (1 << 29),
473 PHY_RD_ERR = (1 << 28),
474 PHY_PRE_SUP = (1 << 27),
475 PHY_ADDR = (1 << 21) | (1 << 22) | (1 << 23)|
476 (1 << 24) |(1 << 25),/* 25:21 */
477 PHY_REG_ADDR = (1 << 16) | (1 << 17) | (1 << 18)| (1 << 19) | (1 << 20),/* 20:16 */
478 PHY_DATA = (0xF << 0)|(0xF << 4) |(0xF << 8)|
479 (0xF << 12),/* 15:0 */
480
481}PHY_ACCESS_BITS;
482
483
484/* PMAT0 0x190, 32bit register */
485typedef enum {
486 PMR_ACTIVE = (1 << 31),
487 PMR_WR_CMD = (1 << 30),
488 PMR_RD_CMD = (1 << 29),
489 PMR_BANK = (1 <<28),
490 PMR_ADDR = (0xF << 16)|(1 << 20)|(1 << 21)|
491 (1 << 22),/* 22:16 */
492 PMR_B4 = (0xF << 0) | (0xF << 4),/* 15:0 */
493}PMAT0_BITS;
494
495
496/* PMAT1 0x194, 32bit register */
497typedef enum {
498 PMR_B3 = (0xF << 24) | (0xF <<28),/* 31:24 */
499 PMR_B2 = (0xF << 16) |(0xF << 20),/* 23:16 */
500 PMR_B1 = (0xF << 8) | (0xF <<12), /* 15:8 */
501 PMR_B0 = (0xF << 0)|(0xF << 4),/* 7:0 */
502}PMAT1_BITS;
503
504/************************************************************************/
505/* */
506/* MIB counter definitions */
507/* */
508/************************************************************************/
509
510#define rcv_miss_pkts 0x00
511#define rcv_octets 0x01
512#define rcv_broadcast_pkts 0x02
513#define rcv_multicast_pkts 0x03
514#define rcv_undersize_pkts 0x04
515#define rcv_oversize_pkts 0x05
516#define rcv_fragments 0x06
517#define rcv_jabbers 0x07
518#define rcv_unicast_pkts 0x08
519#define rcv_alignment_errors 0x09
520#define rcv_fcs_errors 0x0A
521#define rcv_good_octets 0x0B
522#define rcv_mac_ctrl 0x0C
523#define rcv_flow_ctrl 0x0D
524#define rcv_pkts_64_octets 0x0E
525#define rcv_pkts_65to127_octets 0x0F
526#define rcv_pkts_128to255_octets 0x10
527#define rcv_pkts_256to511_octets 0x11
528#define rcv_pkts_512to1023_octets 0x12
529#define rcv_pkts_1024to1518_octets 0x13
530#define rcv_unsupported_opcode 0x14
531#define rcv_symbol_errors 0x15
532#define rcv_drop_pkts_ring1 0x16
533#define rcv_drop_pkts_ring2 0x17
534#define rcv_drop_pkts_ring3 0x18
535#define rcv_drop_pkts_ring4 0x19
536#define rcv_jumbo_pkts 0x1A
537
538#define xmt_underrun_pkts 0x20
539#define xmt_octets 0x21
540#define xmt_packets 0x22
541#define xmt_broadcast_pkts 0x23
542#define xmt_multicast_pkts 0x24
543#define xmt_collisions 0x25
544#define xmt_unicast_pkts 0x26
545#define xmt_one_collision 0x27
546#define xmt_multiple_collision 0x28
547#define xmt_deferred_transmit 0x29
548#define xmt_late_collision 0x2A
549#define xmt_excessive_defer 0x2B
550#define xmt_loss_carrier 0x2C
551#define xmt_excessive_collision 0x2D
552#define xmt_back_pressure 0x2E
553#define xmt_flow_ctrl 0x2F
554#define xmt_pkts_64_octets 0x30
555#define xmt_pkts_65to127_octets 0x31
556#define xmt_pkts_128to255_octets 0x32
557#define xmt_pkts_256to511_octets 0x33
558#define xmt_pkts_512to1023_octets 0x34
559#define xmt_pkts_1024to1518_octet 0x35
560#define xmt_oversize_pkts 0x36
561#define xmt_jumbo_pkts 0x37
562
563
564/* Driver definitions */
565
566#define PCI_VENDOR_ID_AMD 0x1022
567#define PCI_DEVICE_ID_AMD8111E_7462 0x7462
568
569#define MAX_UNITS 8 /* Maximum number of devices possible */
570
571#define NUM_TX_BUFFERS 32 /* Number of transmit buffers */
572#define NUM_RX_BUFFERS 32 /* Number of receive buffers */
573
574#define TX_BUFF_MOD_MASK 31 /* (NUM_TX_BUFFERS -1) */
575#define RX_BUFF_MOD_MASK 31 /* (NUM_RX_BUFFERS -1) */
576
577#define NUM_TX_RING_DR 32
578#define NUM_RX_RING_DR 32
579
580#define TX_RING_DR_MOD_MASK 31 /* (NUM_TX_RING_DR -1) */
581#define RX_RING_DR_MOD_MASK 31 /* (NUM_RX_RING_DR -1) */
582
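
These masks depend on the ring sizes being powers of two, so that idx & (N - 1) equals idx % N for a free-running index; a quick illustrative check:

    #include <assert.h>

    int main(void)
    {
    	unsigned long idx;

    	/* For a power-of-two ring size, masking equals modulo. */
    	for (idx = 0; idx < 1000; idx++)
    		assert((idx & 31) == (idx % 32));
    	return 0;
    }
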
583#define MAX_FILTER_SIZE	64	/* Maximum number of multicast addresses */
584#define AMD8111E_MIN_MTU 60
585#define AMD8111E_MAX_MTU 9000
586
587#define PKT_BUFF_SZ 1536
588#define MIN_PKT_LEN 60
589#define ETH_ADDR_LEN 6
590
591#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
592#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
593#define DELAY_TIMER_CONV 50 /* msec to 10 usec conversion.
594 Only 500 usec resolution */
595#define OPTION_VLAN_ENABLE 0x0001
596#define OPTION_JUMBO_ENABLE 0x0002
597#define OPTION_MULTICAST_ENABLE 0x0004
598#define OPTION_WOL_ENABLE 0x0008
599#define OPTION_WAKE_MAGIC_ENABLE 0x0010
600#define OPTION_WAKE_PHY_ENABLE 0x0020
601#define OPTION_INTR_COAL_ENABLE 0x0040
602#define OPTION_DYN_IPG_ENABLE 0x0080
603
604#define PHY_REG_ADDR_MASK 0x1f
605
606/* ipg parameters */
607#define DEFAULT_IPG 0x60
608#define IFS1_DELTA 36
609#define IPG_CONVERGE_JIFFIES (HZ/2)
610#define IPG_STABLE_TIME 5
611#define MIN_IPG 96
612#define MAX_IPG 255
613#define IPG_STEP 16
614#define CSTATE 1
615#define SSTATE 2
616
617/* Assume the controller gets the data within 10 times the maximum processing time */
618#define REPEAT_CNT 10
619
620/* amd8111e descriptor flag definitions */
621typedef enum {
622
623 OWN_BIT = (1 << 15),
624 ADD_FCS_BIT = (1 << 13),
625 LTINT_BIT = (1 << 12),
626 STP_BIT = (1 << 9),
627 ENP_BIT = (1 << 8),
628 KILL_BIT = (1 << 6),
629 TCC_VLAN_INSERT = (1 << 1),
630 TCC_VLAN_REPLACE = (1 << 1) |( 1<< 0),
631
632}TX_FLAG_BITS;
633
634typedef enum {
635 ERR_BIT = (1 << 14),
636 FRAM_BIT = (1 << 13),
637 OFLO_BIT = (1 << 12),
638 CRC_BIT = (1 << 11),
639 PAM_BIT = (1 << 6),
640 LAFM_BIT = (1 << 5),
641 BAM_BIT = (1 << 4),
642	TT_VLAN_TAGGED	= (1 << 3) | (1 << 2),	/* 0x000C */
643 TT_PRTY_TAGGED = (1 << 3),/* 0x0008 */
644
645}RX_FLAG_BITS;
646
647#define RESET_RX_FLAGS 0x0000
648#define TT_MASK 0x000c
649#define TCC_MASK 0x0003
650
651/* driver ioctl parameters */
652#define AMD8111E_REG_DUMP_LEN 13*sizeof(u32)
653
654/* amd8111e descriptor format */
655
656struct amd8111e_tx_dr{
657
658 __le16 buff_count; /* Size of the buffer pointed by this descriptor */
659
660 __le16 tx_flags;
661
662 __le16 tag_ctrl_info;
663
664 __le16 tag_ctrl_cmd;
665
666 __le32 buff_phy_addr;
667
668 __le32 reserved;
669};
670
671struct amd8111e_rx_dr{
672
673 __le32 reserved;
674
675 __le16 msg_count; /* Received message len */
676
677 __le16 tag_ctrl_info;
678
679 __le16 buff_count; /* Len of the buffer pointed by descriptor. */
680
681 __le16 rx_flags;
682
683 __le32 buff_phy_addr;
684
685};
686struct amd8111e_link_config{
687
688#define SPEED_INVALID 0xffff
689#define DUPLEX_INVALID 0xff
690#define AUTONEG_INVALID 0xff
691
692 unsigned long orig_phy_option;
693 u16 speed;
694 u8 duplex;
695 u8 autoneg;
696 u8 reserved; /* 32bit alignment */
697};
698
699enum coal_type{
700
701 NO_COALESCE,
702 LOW_COALESCE,
703 MEDIUM_COALESCE,
704 HIGH_COALESCE,
705
706};
707
708enum coal_mode{
709 RX_INTR_COAL,
710 TX_INTR_COAL,
711 DISABLE_COAL,
712 ENABLE_COAL,
713
714};
715#define MAX_TIMEOUT 40
716#define MAX_EVENT_COUNT 31
717struct amd8111e_coalesce_conf{
718
719 unsigned int rx_timeout;
720 unsigned int rx_event_count;
721 unsigned long rx_packets;
722 unsigned long rx_prev_packets;
723 unsigned long rx_bytes;
724 unsigned long rx_prev_bytes;
725 unsigned int rx_coal_type;
726
727 unsigned int tx_timeout;
728 unsigned int tx_event_count;
729 unsigned long tx_packets;
730 unsigned long tx_prev_packets;
731 unsigned long tx_bytes;
732 unsigned long tx_prev_bytes;
733 unsigned int tx_coal_type;
734
735};
736struct ipg_info{
737
738 unsigned int ipg_state;
739 unsigned int ipg;
740 unsigned int current_ipg;
741 unsigned int col_cnt;
742 unsigned int diff_col_cnt;
743 unsigned int timer_tick;
744 unsigned int prev_ipg;
745 struct timer_list ipg_timer;
746};
747
748struct amd8111e_priv{
749
750 struct amd8111e_tx_dr* tx_ring;
751 struct amd8111e_rx_dr* rx_ring;
752 dma_addr_t tx_ring_dma_addr; /* tx descriptor ring base address */
753 dma_addr_t rx_ring_dma_addr; /* rx descriptor ring base address */
754 const char *name;
755 struct pci_dev *pci_dev; /* Ptr to the associated pci_dev */
756 struct net_device* amd8111e_net_dev; /* ptr to associated net_device */
757	/* Transmit and receive skbs */
758 struct sk_buff *tx_skbuff[NUM_TX_BUFFERS];
759 struct sk_buff *rx_skbuff[NUM_RX_BUFFERS];
760 /* Transmit and receive dma mapped addr */
761 dma_addr_t tx_dma_addr[NUM_TX_BUFFERS];
762 dma_addr_t rx_dma_addr[NUM_RX_BUFFERS];
763 /* Reg memory mapped address */
764 void __iomem *mmio;
765
766 struct napi_struct napi;
767
768 spinlock_t lock; /* Guard lock */
769 unsigned long rx_idx, tx_idx; /* The next free ring entry */
770 unsigned long tx_complete_idx;
771 unsigned long tx_ring_complete_idx;
772 unsigned long tx_ring_idx;
773 unsigned int rx_buff_len; /* Buffer length of rx buffers */
774 int options; /* Options enabled/disabled for the device */
775
776 unsigned long ext_phy_option;
777 int ext_phy_addr;
778 u32 ext_phy_id;
779
780 struct amd8111e_link_config link_config;
781 int pm_cap;
782
783 struct net_device *next;
784 int mii;
785 struct mii_if_info mii_if;
786 char opened;
787 unsigned int drv_rx_errors;
788 struct amd8111e_coalesce_conf coal_conf;
789
790 struct ipg_info ipg_data;
791
792};
793
794/* The kernel-provided writeq does not write 64 bits into the amd8111e device register; instead it writes only the upper 32 bits of the data into the lower 32 bits of the register.
795BUG? */
796#define amd8111e_writeq(_UlData, _memMap) do {			\
797	writel(*(u32 *)(&_UlData), (_memMap));			\
798	writel(*(u32 *)((u8 *)(&_UlData) + 4), (_memMap) + 4); } while (0)
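/* Minimal usage sketch (hedged): split a 64-bit value into two 32-bit
 * MMIO writes, e.g. for a 64-bit logical address filter. The register
 * offset LADRF is assumed from this header's register map, not shown
 * in this excerpt:
 *
 *	u64 mc_filter = 0;	// accept no multicast (illustrative)
 *	amd8111e_writeq(mc_filter, lp->mmio + LADRF);
 */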
799
800/* maps the external speed options to internal values */
801typedef enum {
802 SPEED_AUTONEG,
803 SPEED10_HALF,
804 SPEED10_FULL,
805 SPEED100_HALF,
806 SPEED100_FULL,
807} EXT_PHY_OPTION;
808
809static int card_idx;
810static int speed_duplex[MAX_UNITS] = { 0, };
811static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
812static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
813static unsigned int chip_version;
814
815#endif /* _AMD8111E_H */
816
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
new file mode 100644
index 000000000000..eb18e1fe65c8
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -0,0 +1,793 @@
1/*
2 * Amiga Linux/m68k Ariadne Ethernet Driver
3 *
4 * © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
5 * Peter De Schrijver (p2@mind.be)
6 *
7 * ---------------------------------------------------------------------------
8 *
9 * This program is based on
10 *
11 * lance.c: An AMD LANCE ethernet driver for linux.
12 * Written 1993-94 by Donald Becker.
13 *
14 * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
15 * Advanced Micro Devices
16 * Publication #16907, Rev. B, Amendment/0, May 1994
17 *
18 * MC68230: Parallel Interface/Timer (PI/T)
19 * Motorola Semiconductors, December, 1983
20 *
21 * ---------------------------------------------------------------------------
22 *
23 * This file is subject to the terms and conditions of the GNU General Public
24 * License. See the file COPYING in the main directory of the Linux
25 * distribution for more details.
26 *
27 * ---------------------------------------------------------------------------
28 *
29 * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
30 *
31 * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
32 * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
33 *
34 * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
35 */
36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38/*#define DEBUG*/
39
40#include <linux/module.h>
41#include <linux/stddef.h>
42#include <linux/kernel.h>
43#include <linux/string.h>
44#include <linux/errno.h>
45#include <linux/ioport.h>
46#include <linux/netdevice.h>
47#include <linux/etherdevice.h>
48#include <linux/interrupt.h>
49#include <linux/skbuff.h>
50#include <linux/init.h>
51#include <linux/zorro.h>
52#include <linux/bitops.h>
53
54#include <asm/amigaints.h>
55#include <asm/amigahw.h>
56#include <asm/irq.h>
57
58#include "ariadne.h"
59
60#ifdef ARIADNE_DEBUG
61int ariadne_debug = ARIADNE_DEBUG;
62#else
63int ariadne_debug = 1;
64#endif
65
66/* Macros to Fix Endianness problems */
67
68/* Swap the Bytes in a WORD */
69#define swapw(x) (((x >> 8) & 0x00ff) | ((x << 8) & 0xff00))
70/* Get the Low BYTE in a WORD */
71#define lowb(x) (x & 0xff)
72/* Get the Swapped High WORD in a LONG */
73#define swhighw(x) ((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
74/* Get the Swapped Low WORD in a LONG */
75#define swloww(x) ((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))
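/* Worked values for the helpers above (editorial, illustrative only):
 *	swapw(0x1234)        == 0x3412
 *	lowb(0x1234)         == 0x34
 *	swhighw(0x11223344)  == 0x2211
 *	swloww(0x11223344)   == 0x4433
 */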
76
77/* Transmit/Receive Ring Definitions */
78
79#define TX_RING_SIZE 5
80#define RX_RING_SIZE 16
81
82#define PKT_BUF_SIZE 1520
83
84/* Private Device Data */
85
86struct ariadne_private {
87 volatile struct TDRE *tx_ring[TX_RING_SIZE];
88 volatile struct RDRE *rx_ring[RX_RING_SIZE];
89 volatile u_short *tx_buff[TX_RING_SIZE];
90 volatile u_short *rx_buff[RX_RING_SIZE];
91 int cur_tx, cur_rx; /* The next free ring entry */
92 int dirty_tx; /* The ring entries to be free()ed */
93 char tx_full;
94};
95
96/* Structure Created in the Ariadne's RAM Buffer */
97
98struct lancedata {
99 struct TDRE tx_ring[TX_RING_SIZE];
100 struct RDRE rx_ring[RX_RING_SIZE];
101 u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
102 u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
103};
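/* Note (editorial): this struct is never allocated by the driver; it
 * describes the fixed layout of the board's own RAM at ARIADNE_RAM, so
 * chip-side addresses are derived with offsetof(), e.g.
 *
 *	ARIADNE_RAM + offsetof(struct lancedata, tx_ring)
 *
 * as ariadne_init_ring() and ariadne_open() do below.
 */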
104
105static void memcpyw(volatile u_short *dest, u_short *src, int len)
106{
107 while (len >= 2) {
108 *(dest++) = *(src++);
109 len -= 2;
110 }
111 if (len == 1)
112 *dest = (*(u_char *)src) << 8;
113}
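/* Usage sketch (hedged): the board RAM must always be accessed as
 * 16-bit words, so frames are copied with memcpyw() rather than
 * memcpy(); an odd trailing byte lands in the high half of the final
 * word:
 *
 *	memcpyw(priv->tx_buff[entry], (u_short *)skb->data, skb->len);
 */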
114
115static void ariadne_init_ring(struct net_device *dev)
116{
117 struct ariadne_private *priv = netdev_priv(dev);
118 volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
119 int i;
120
121 netif_stop_queue(dev);
122
123 priv->tx_full = 0;
124 priv->cur_rx = priv->cur_tx = 0;
125 priv->dirty_tx = 0;
126
127 /* Set up TX Ring */
128 for (i = 0; i < TX_RING_SIZE; i++) {
129 volatile struct TDRE *t = &lancedata->tx_ring[i];
130 t->TMD0 = swloww(ARIADNE_RAM +
131 offsetof(struct lancedata, tx_buff[i]));
132 t->TMD1 = swhighw(ARIADNE_RAM +
133 offsetof(struct lancedata, tx_buff[i])) |
134 TF_STP | TF_ENP;
135 t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
136 t->TMD3 = 0;
137 priv->tx_ring[i] = &lancedata->tx_ring[i];
138 priv->tx_buff[i] = lancedata->tx_buff[i];
139 netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
140 i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
141 }
142
143 /* Set up RX Ring */
144 for (i = 0; i < RX_RING_SIZE; i++) {
145 volatile struct RDRE *r = &lancedata->rx_ring[i];
146 r->RMD0 = swloww(ARIADNE_RAM +
147 offsetof(struct lancedata, rx_buff[i]));
148 r->RMD1 = swhighw(ARIADNE_RAM +
149 offsetof(struct lancedata, rx_buff[i])) |
150 RF_OWN;
151 r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
152 r->RMD3 = 0x0000;
153 priv->rx_ring[i] = &lancedata->rx_ring[i];
154 priv->rx_buff[i] = lancedata->rx_buff[i];
155 netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
156 i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
157 }
158}
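/* Worked example (editorial): the LANCE takes buffer lengths in two's
 * complement, so PKT_BUF_SIZE == 1520 == 0x05f0 is written as
 * -1520 == 0xfa10, which becomes 0x10fa after swapw() for this
 * byte-swapped register layout.
 */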
159
160static int ariadne_rx(struct net_device *dev)
161{
162 struct ariadne_private *priv = netdev_priv(dev);
163 int entry = priv->cur_rx % RX_RING_SIZE;
164 int i;
165
166 /* If we own the next entry, it's a new packet. Send it up */
167 while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
168 int status = lowb(priv->rx_ring[entry]->RMD1);
169
170 if (status != (RF_STP | RF_ENP)) { /* There was an error */
171 /* There is a tricky error noted by
172 * John Murphy <murf@perftech.com> to Russ Nelson:
173 * Even with full-sized buffers it's possible for a
174 * jabber packet to use two buffers, with only the
175 * last correctly noting the error
176 */
177 /* Only count a general error at the end of a packet */
178 if (status & RF_ENP)
179 dev->stats.rx_errors++;
180 if (status & RF_FRAM)
181 dev->stats.rx_frame_errors++;
182 if (status & RF_OFLO)
183 dev->stats.rx_over_errors++;
184 if (status & RF_CRC)
185 dev->stats.rx_crc_errors++;
186 if (status & RF_BUFF)
187 dev->stats.rx_fifo_errors++;
188 priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
189 } else {
190 /* Malloc up new buffer, compatible with net-3 */
191 short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
192 struct sk_buff *skb;
193
194 skb = dev_alloc_skb(pkt_len + 2);
195 if (skb == NULL) {
196 netdev_warn(dev, "Memory squeeze, deferring packet\n");
197 for (i = 0; i < RX_RING_SIZE; i++)
198 if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
199 break;
200
201 if (i > RX_RING_SIZE - 2) {
202 dev->stats.rx_dropped++;
203 priv->rx_ring[entry]->RMD1 |= RF_OWN;
204 priv->cur_rx++;
205 }
206 break;
207 }
208
209
210			skb_reserve(skb, 2);	/* 16 byte align the IP header */
211 skb_put(skb, pkt_len); /* Make room */
212 skb_copy_to_linear_data(skb,
213 (const void *)priv->rx_buff[entry],
214 pkt_len);
215 skb->protocol = eth_type_trans(skb, dev);
216 netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
217 ((u_short *)skb->data)[6],
218 skb->data + 6, skb->data,
219 (int)skb->data, (int)skb->len);
220
221 netif_rx(skb);
222 dev->stats.rx_packets++;
223 dev->stats.rx_bytes += pkt_len;
224 }
225
226 priv->rx_ring[entry]->RMD1 |= RF_OWN;
227 entry = (++priv->cur_rx) % RX_RING_SIZE;
228 }
229
230 priv->cur_rx = priv->cur_rx % RX_RING_SIZE;
231
232 /* We should check that at least two ring entries are free.
233 * If not, we should free one and mark stats->rx_dropped++
234 */
235
236 return 0;
237}
238
239static irqreturn_t ariadne_interrupt(int irq, void *data)
240{
241 struct net_device *dev = (struct net_device *)data;
242 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
243 struct ariadne_private *priv;
244 int csr0, boguscnt;
245 int handled = 0;
246
247 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
248
249 if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
250 return IRQ_NONE; /* generated by the board */
251
252 priv = netdev_priv(dev);
253
254 boguscnt = 10;
255 while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
256 /* Acknowledge all of the current interrupt sources ASAP */
257 lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
258
259#ifdef DEBUG
260 if (ariadne_debug > 5) {
261 netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
262 csr0, lance->RDP);
263 if (csr0 & INTR)
264 pr_cont(" INTR");
265 if (csr0 & INEA)
266 pr_cont(" INEA");
267 if (csr0 & RXON)
268 pr_cont(" RXON");
269 if (csr0 & TXON)
270 pr_cont(" TXON");
271 if (csr0 & TDMD)
272 pr_cont(" TDMD");
273 if (csr0 & STOP)
274 pr_cont(" STOP");
275 if (csr0 & STRT)
276 pr_cont(" STRT");
277 if (csr0 & INIT)
278 pr_cont(" INIT");
279 if (csr0 & ERR)
280 pr_cont(" ERR");
281 if (csr0 & BABL)
282 pr_cont(" BABL");
283 if (csr0 & CERR)
284 pr_cont(" CERR");
285 if (csr0 & MISS)
286 pr_cont(" MISS");
287 if (csr0 & MERR)
288 pr_cont(" MERR");
289 if (csr0 & RINT)
290 pr_cont(" RINT");
291 if (csr0 & TINT)
292 pr_cont(" TINT");
293 if (csr0 & IDON)
294 pr_cont(" IDON");
295 pr_cont(" ]\n");
296 }
297#endif
298
299 if (csr0 & RINT) { /* Rx interrupt */
300 handled = 1;
301 ariadne_rx(dev);
302 }
303
304 if (csr0 & TINT) { /* Tx-done interrupt */
305 int dirty_tx = priv->dirty_tx;
306
307 handled = 1;
308 while (dirty_tx < priv->cur_tx) {
309 int entry = dirty_tx % TX_RING_SIZE;
310 int status = lowb(priv->tx_ring[entry]->TMD1);
311
312 if (status & TF_OWN)
313 break; /* It still hasn't been Txed */
314
315 priv->tx_ring[entry]->TMD1 &= 0xff00;
316
317 if (status & TF_ERR) {
318					/* There was a major error, log it */
319 int err_status = priv->tx_ring[entry]->TMD3;
320 dev->stats.tx_errors++;
321 if (err_status & EF_RTRY)
322 dev->stats.tx_aborted_errors++;
323 if (err_status & EF_LCAR)
324 dev->stats.tx_carrier_errors++;
325 if (err_status & EF_LCOL)
326 dev->stats.tx_window_errors++;
327 if (err_status & EF_UFLO) {
328 /* Ackk! On FIFO errors the Tx unit is turned off! */
329 dev->stats.tx_fifo_errors++;
330 /* Remove this verbosity later! */
331 netdev_err(dev, "Tx FIFO error! Status %04x\n",
332 csr0);
333 /* Restart the chip */
334 lance->RDP = STRT;
335 }
336 } else {
337 if (status & (TF_MORE | TF_ONE))
338 dev->stats.collisions++;
339 dev->stats.tx_packets++;
340 }
341 dirty_tx++;
342 }
343
344#ifndef final_version
345 if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
346 netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
347 dirty_tx, priv->cur_tx,
348 priv->tx_full);
349 dirty_tx += TX_RING_SIZE;
350 }
351#endif
352
353 if (priv->tx_full && netif_queue_stopped(dev) &&
354 dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
355 /* The ring is no longer full */
356 priv->tx_full = 0;
357 netif_wake_queue(dev);
358 }
359
360 priv->dirty_tx = dirty_tx;
361 }
362
363 /* Log misc errors */
364 if (csr0 & BABL) {
365 handled = 1;
366 dev->stats.tx_errors++; /* Tx babble */
367 }
368 if (csr0 & MISS) {
369 handled = 1;
370 dev->stats.rx_errors++; /* Missed a Rx frame */
371 }
372 if (csr0 & MERR) {
373 handled = 1;
374 netdev_err(dev, "Bus master arbitration failure, status %04x\n",
375 csr0);
376 /* Restart the chip */
377 lance->RDP = STRT;
378 }
379 }
380
381 /* Clear any other interrupt, and set interrupt enable */
382 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
383 lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;
384
385 if (ariadne_debug > 4)
386 netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
387 lance->RAP, lance->RDP);
388
389 return IRQ_RETVAL(handled);
390}
391
392static int ariadne_open(struct net_device *dev)
393{
394 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
395 u_short in;
396 u_long version;
397 int i;
398
399 /* Reset the LANCE */
400 in = lance->Reset;
401
402 /* Stop the LANCE */
403 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
404 lance->RDP = STOP;
405
406 /* Check the LANCE version */
407 lance->RAP = CSR88; /* Chip ID */
408 version = swapw(lance->RDP);
409 lance->RAP = CSR89; /* Chip ID */
410 version |= swapw(lance->RDP) << 16;
411 if ((version & 0x00000fff) != 0x00000003) {
412 pr_warn("Couldn't find AMD Ethernet Chip\n");
413 return -EAGAIN;
414 }
415 if ((version & 0x0ffff000) != 0x00003000) {
416 pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
417 (version & 0x0ffff000) >> 12);
418 return -EAGAIN;
419 }
420
421 netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
422 (version & 0xf0000000) >> 28);
423
424 ariadne_init_ring(dev);
425
426 /* Miscellaneous Stuff */
427 lance->RAP = CSR3; /* Interrupt Masks and Deferral Control */
428 lance->RDP = 0x0000;
429 lance->RAP = CSR4; /* Test and Features Control */
430 lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;
431
432 /* Set the Multicast Table */
433 lance->RAP = CSR8; /* Logical Address Filter, LADRF[15:0] */
434 lance->RDP = 0x0000;
435 lance->RAP = CSR9; /* Logical Address Filter, LADRF[31:16] */
436 lance->RDP = 0x0000;
437 lance->RAP = CSR10; /* Logical Address Filter, LADRF[47:32] */
438 lance->RDP = 0x0000;
439 lance->RAP = CSR11; /* Logical Address Filter, LADRF[63:48] */
440 lance->RDP = 0x0000;
441
442 /* Set the Ethernet Hardware Address */
443 lance->RAP = CSR12; /* Physical Address Register, PADR[15:0] */
444 lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
445 lance->RAP = CSR13; /* Physical Address Register, PADR[31:16] */
446 lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
447 lance->RAP = CSR14; /* Physical Address Register, PADR[47:32] */
448 lance->RDP = ((u_short *)&dev->dev_addr[0])[2];
449
450 /* Set the Init Block Mode */
451 lance->RAP = CSR15; /* Mode Register */
452 lance->RDP = 0x0000;
453
454 /* Set the Transmit Descriptor Ring Pointer */
455 lance->RAP = CSR30; /* Base Address of Transmit Ring */
456 lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
457 lance->RAP = CSR31; /* Base Address of transmit Ring */
458 lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
459
460 /* Set the Receive Descriptor Ring Pointer */
461 lance->RAP = CSR24; /* Base Address of Receive Ring */
462 lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
463 lance->RAP = CSR25; /* Base Address of Receive Ring */
464 lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
465
466 /* Set the Number of RX and TX Ring Entries */
467 lance->RAP = CSR76; /* Receive Ring Length */
468 lance->RDP = swapw(((u_short)-RX_RING_SIZE));
469 lance->RAP = CSR78; /* Transmit Ring Length */
470 lance->RDP = swapw(((u_short)-TX_RING_SIZE));
471
472 /* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
473 lance->RAP = ISACSR2; /* Miscellaneous Configuration */
474 lance->IDP = ASEL;
475
476 /* LED Control */
477 lance->RAP = ISACSR5; /* LED1 Status */
478 lance->IDP = PSE|XMTE;
479 lance->RAP = ISACSR6; /* LED2 Status */
480 lance->IDP = PSE|COLE;
481 lance->RAP = ISACSR7; /* LED3 Status */
482 lance->IDP = PSE|RCVE;
483
484 netif_start_queue(dev);
485
486 i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
487 dev->name, dev);
488 if (i)
489 return i;
490
491 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
492 lance->RDP = INEA | STRT;
493
494 return 0;
495}
496
497static int ariadne_close(struct net_device *dev)
498{
499 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
500
501 netif_stop_queue(dev);
502
503 lance->RAP = CSR112; /* Missed Frame Count */
504 dev->stats.rx_missed_errors = swapw(lance->RDP);
505 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
506
507 if (ariadne_debug > 1) {
508 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
509 lance->RDP);
510 netdev_dbg(dev, "%lu packets missed\n",
511 dev->stats.rx_missed_errors);
512 }
513
514 /* We stop the LANCE here -- it occasionally polls memory if we don't */
515 lance->RDP = STOP;
516
517 free_irq(IRQ_AMIGA_PORTS, dev);
518
519 return 0;
520}
521
522static inline void ariadne_reset(struct net_device *dev)
523{
524 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
525
526 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
527 lance->RDP = STOP;
528 ariadne_init_ring(dev);
529 lance->RDP = INEA | STRT;
530 netif_start_queue(dev);
531}
532
533static void ariadne_tx_timeout(struct net_device *dev)
534{
535 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
536
537 netdev_err(dev, "transmit timed out, status %04x, resetting\n",
538 lance->RDP);
539 ariadne_reset(dev);
540 netif_wake_queue(dev);
541}
542
543static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
544 struct net_device *dev)
545{
546 struct ariadne_private *priv = netdev_priv(dev);
547 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
548 int entry;
549 unsigned long flags;
550 int len = skb->len;
551
552#if 0
553 if (ariadne_debug > 3) {
554 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
555 netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
556 lance->RDP = 0x0000;
557 }
558#endif
559
560	/* FIXME: is the 79C960 new enough to do its own padding right? */
561 if (skb->len < ETH_ZLEN) {
562 if (skb_padto(skb, ETH_ZLEN))
563 return NETDEV_TX_OK;
564 len = ETH_ZLEN;
565 }
566
567 /* Fill in a Tx ring entry */
568
569 netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data 0x%08x len %d\n",
570 ((u_short *)skb->data)[6],
571 skb->data + 6, skb->data,
572 (int)skb->data, (int)skb->len);
573
574 local_irq_save(flags);
575
576 entry = priv->cur_tx % TX_RING_SIZE;
577
578 /* Caution: the write order is important here, set the base address with
579 the "ownership" bits last */
580
581 priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
582 priv->tx_ring[entry]->TMD3 = 0x0000;
583 memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);
584
585#ifdef DEBUG
586 print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
587 (void *)priv->tx_buff[entry],
588 skb->len > 64 ? 64 : skb->len, true);
589#endif
590
591 priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00)
592 | TF_OWN | TF_STP | TF_ENP;
593
594 dev_kfree_skb(skb);
595
596 priv->cur_tx++;
597 if ((priv->cur_tx >= TX_RING_SIZE) &&
598 (priv->dirty_tx >= TX_RING_SIZE)) {
599
600 netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
601 priv->cur_tx, priv->dirty_tx);
602
603 priv->cur_tx -= TX_RING_SIZE;
604 priv->dirty_tx -= TX_RING_SIZE;
605 }
606 dev->stats.tx_bytes += len;
607
608 /* Trigger an immediate send poll */
609 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
610 lance->RDP = INEA | TDMD;
611
612 if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
613 netif_stop_queue(dev);
614 priv->tx_full = 1;
615 }
616 local_irq_restore(flags);
617
618 return NETDEV_TX_OK;
619}
620
621static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
622{
623 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
624 short saved_addr;
625 unsigned long flags;
626
627 local_irq_save(flags);
628 saved_addr = lance->RAP;
629 lance->RAP = CSR112; /* Missed Frame Count */
630 dev->stats.rx_missed_errors = swapw(lance->RDP);
631 lance->RAP = saved_addr;
632 local_irq_restore(flags);
633
634 return &dev->stats;
635}
636
637/* Set or clear the multicast filter for this adaptor.
638 * num_addrs == -1 Promiscuous mode, receive all packets
639 * num_addrs == 0 Normal mode, clear multicast list
640 * num_addrs > 0 Multicast mode, receive normal and MC packets,
641 * and do best-effort filtering.
642 */
643static void set_multicast_list(struct net_device *dev)
644{
645 volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
646
647 if (!netif_running(dev))
648 return;
649
650 netif_stop_queue(dev);
651
652 /* We take the simple way out and always enable promiscuous mode */
653 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
654 lance->RDP = STOP; /* Temporarily stop the lance */
655 ariadne_init_ring(dev);
656
657 if (dev->flags & IFF_PROMISC) {
658 lance->RAP = CSR15; /* Mode Register */
659 lance->RDP = PROM; /* Set promiscuous mode */
660 } else {
661 short multicast_table[4];
662 int num_addrs = netdev_mc_count(dev);
663 int i;
664 /* We don't use the multicast table,
665 * but rely on upper-layer filtering
666 */
667 memset(multicast_table, (num_addrs == 0) ? 0 : -1,
668 sizeof(multicast_table));
669 for (i = 0; i < 4; i++) {
670 lance->RAP = CSR8 + (i << 8);
671 /* Logical Address Filter */
672 lance->RDP = swapw(multicast_table[i]);
673 }
674 lance->RAP = CSR15; /* Mode Register */
675 lance->RDP = 0x0000; /* Unset promiscuous mode */
676 }
677
678 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
679 lance->RDP = INEA | STRT | IDON;/* Resume normal operation */
680
681 netif_wake_queue(dev);
682}
683
684
685static void __devexit ariadne_remove_one(struct zorro_dev *z)
686{
687 struct net_device *dev = zorro_get_drvdata(z);
688
689 unregister_netdev(dev);
690 release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
691 release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
692 free_netdev(dev);
693}
694
695static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
696 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
697 { 0 }
698};
699MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
700
701static const struct net_device_ops ariadne_netdev_ops = {
702 .ndo_open = ariadne_open,
703 .ndo_stop = ariadne_close,
704 .ndo_start_xmit = ariadne_start_xmit,
705 .ndo_tx_timeout = ariadne_tx_timeout,
706 .ndo_get_stats = ariadne_get_stats,
707 .ndo_set_rx_mode = set_multicast_list,
708 .ndo_validate_addr = eth_validate_addr,
709 .ndo_change_mtu = eth_change_mtu,
710 .ndo_set_mac_address = eth_mac_addr,
711};
712
713static int __devinit ariadne_init_one(struct zorro_dev *z,
714 const struct zorro_device_id *ent)
715{
716 unsigned long board = z->resource.start;
717 unsigned long base_addr = board + ARIADNE_LANCE;
718 unsigned long mem_start = board + ARIADNE_RAM;
719 struct resource *r1, *r2;
720 struct net_device *dev;
721 struct ariadne_private *priv;
722 int err;
723
724 r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
725 if (!r1)
726 return -EBUSY;
727 r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
728 if (!r2) {
729 release_mem_region(base_addr, sizeof(struct Am79C960));
730 return -EBUSY;
731 }
732
733 dev = alloc_etherdev(sizeof(struct ariadne_private));
734 if (dev == NULL) {
735 release_mem_region(base_addr, sizeof(struct Am79C960));
736 release_mem_region(mem_start, ARIADNE_RAM_SIZE);
737 return -ENOMEM;
738 }
739
740 priv = netdev_priv(dev);
741
742 r1->name = dev->name;
743 r2->name = dev->name;
744
745 dev->dev_addr[0] = 0x00;
746 dev->dev_addr[1] = 0x60;
747 dev->dev_addr[2] = 0x30;
748 dev->dev_addr[3] = (z->rom.er_SerialNumber >> 16) & 0xff;
749 dev->dev_addr[4] = (z->rom.er_SerialNumber >> 8) & 0xff;
750 dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
751 dev->base_addr = ZTWO_VADDR(base_addr);
752 dev->mem_start = ZTWO_VADDR(mem_start);
753 dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;
754
755 dev->netdev_ops = &ariadne_netdev_ops;
756 dev->watchdog_timeo = 5 * HZ;
757
758 err = register_netdev(dev);
759 if (err) {
760 release_mem_region(base_addr, sizeof(struct Am79C960));
761 release_mem_region(mem_start, ARIADNE_RAM_SIZE);
762 free_netdev(dev);
763 return err;
764 }
765 zorro_set_drvdata(z, dev);
766
767 netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
768 board, dev->dev_addr);
769
770 return 0;
771}
772
773static struct zorro_driver ariadne_driver = {
774 .name = "ariadne",
775 .id_table = ariadne_zorro_tbl,
776 .probe = ariadne_init_one,
777 .remove = __devexit_p(ariadne_remove_one),
778};
779
780static int __init ariadne_init_module(void)
781{
782 return zorro_register_driver(&ariadne_driver);
783}
784
785static void __exit ariadne_cleanup_module(void)
786{
787 zorro_unregister_driver(&ariadne_driver);
788}
789
790module_init(ariadne_init_module);
791module_exit(ariadne_cleanup_module);
792
793MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ariadne.h b/drivers/net/ethernet/amd/ariadne.h
new file mode 100644
index 000000000000..727be5cdd1ea
--- /dev/null
+++ b/drivers/net/ethernet/amd/ariadne.h
@@ -0,0 +1,415 @@
1/*
2 * Amiga Linux/m68k Ariadne Ethernet Driver
3 *
4 * © Copyright 1995 by Geert Uytterhoeven (geert@linux-m68k.org)
5 * Peter De Schrijver
6 * (Peter.DeSchrijver@linux.cc.kuleuven.ac.be)
7 *
8 * ----------------------------------------------------------------------------------
9 *
10 * This program is based on
11 *
12 * lance.c: An AMD LANCE ethernet driver for linux.
13 * Written 1993-94 by Donald Becker.
14 *
15 * Am79C960: PCnet(tm)-ISA Single-Chip Ethernet Controller
16 * Advanced Micro Devices
17 * Publication #16907, Rev. B, Amendment/0, May 1994
18 *
19 * MC68230: Parallel Interface/Timer (PI/T)
20 * Motorola Semiconductors, December, 1983
21 *
22 * ----------------------------------------------------------------------------------
23 *
24 * This file is subject to the terms and conditions of the GNU General Public
25 * License. See the file COPYING in the main directory of the Linux
26 * distribution for more details.
27 *
28 * ----------------------------------------------------------------------------------
29 *
30 * The Ariadne is a Zorro-II board made by Village Tronic. It contains:
31 *
32 * - an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
33 * 10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
34 *
35 * - an MC68230 Parallel Interface/Timer configured as 2 parallel ports
36 */
37
38
39 /*
40 * Am79C960 PCnet-ISA
41 */
42
43struct Am79C960 {
44 volatile u_short AddressPROM[8];
45 /* IEEE Address PROM (Unused in the Ariadne) */
46 volatile u_short RDP; /* Register Data Port */
47 volatile u_short RAP; /* Register Address Port */
48 volatile u_short Reset; /* Reset Chip on Read Access */
49 volatile u_short IDP; /* ISACSR Data Port */
50};
51
52
53 /*
54 * Am79C960 Control and Status Registers
55 *
56 * These values are already swap()ed!!
57 *
58 * Only registers marked with a `-' are intended for network software
59 * access
60 */
61
62#define CSR0 0x0000 /* - PCnet-ISA Controller Status */
63#define CSR1 0x0100 /* - IADR[15:0] */
64#define CSR2 0x0200 /* - IADR[23:16] */
65#define CSR3 0x0300 /* - Interrupt Masks and Deferral Control */
66#define CSR4 0x0400 /* - Test and Features Control */
67#define CSR6 0x0600 /* RCV/XMT Descriptor Table Length */
68#define CSR8 0x0800 /* - Logical Address Filter, LADRF[15:0] */
69#define CSR9 0x0900 /* - Logical Address Filter, LADRF[31:16] */
70#define CSR10 0x0a00 /* - Logical Address Filter, LADRF[47:32] */
71#define CSR11 0x0b00 /* - Logical Address Filter, LADRF[63:48] */
72#define CSR12 0x0c00 /* - Physical Address Register, PADR[15:0] */
73#define CSR13 0x0d00 /* - Physical Address Register, PADR[31:16] */
74#define CSR14 0x0e00 /* - Physical Address Register, PADR[47:32] */
75#define CSR15 0x0f00 /* - Mode Register */
76#define CSR16 0x1000 /* Initialization Block Address Lower */
77#define CSR17 0x1100 /* Initialization Block Address Upper */
78#define CSR18 0x1200 /* Current Receive Buffer Address */
79#define CSR19 0x1300 /* Current Receive Buffer Address */
80#define CSR20 0x1400 /* Current Transmit Buffer Address */
81#define CSR21 0x1500 /* Current Transmit Buffer Address */
82#define CSR22 0x1600 /* Next Receive Buffer Address */
83#define CSR23 0x1700 /* Next Receive Buffer Address */
84#define CSR24 0x1800 /* - Base Address of Receive Ring */
85#define CSR25 0x1900 /* - Base Address of Receive Ring */
86#define CSR26 0x1a00 /* Next Receive Descriptor Address */
87#define CSR27 0x1b00 /* Next Receive Descriptor Address */
88#define CSR28 0x1c00 /* Current Receive Descriptor Address */
89#define CSR29 0x1d00 /* Current Receive Descriptor Address */
90#define CSR30 0x1e00 /* - Base Address of Transmit Ring */
91#define CSR31 0x1f00 /* - Base Address of transmit Ring */
92#define CSR32 0x2000 /* Next Transmit Descriptor Address */
93#define CSR33 0x2100 /* Next Transmit Descriptor Address */
94#define CSR34 0x2200 /* Current Transmit Descriptor Address */
95#define CSR35 0x2300 /* Current Transmit Descriptor Address */
96#define CSR36 0x2400 /* Next Next Receive Descriptor Address */
97#define CSR37 0x2500 /* Next Next Receive Descriptor Address */
98#define CSR38 0x2600 /* Next Next Transmit Descriptor Address */
99#define CSR39 0x2700 /* Next Next Transmit Descriptor Address */
100#define CSR40 0x2800 /* Current Receive Status and Byte Count */
101#define CSR41 0x2900 /* Current Receive Status and Byte Count */
102#define CSR42 0x2a00 /* Current Transmit Status and Byte Count */
103#define CSR43 0x2b00 /* Current Transmit Status and Byte Count */
104#define CSR44 0x2c00 /* Next Receive Status and Byte Count */
105#define CSR45 0x2d00 /* Next Receive Status and Byte Count */
106#define CSR46 0x2e00 /* Poll Time Counter */
107#define CSR47 0x2f00 /* Polling Interval */
108#define CSR48 0x3000 /* Temporary Storage */
109#define CSR49 0x3100 /* Temporary Storage */
110#define CSR50 0x3200 /* Temporary Storage */
111#define CSR51 0x3300 /* Temporary Storage */
112#define CSR52 0x3400 /* Temporary Storage */
113#define CSR53 0x3500 /* Temporary Storage */
114#define CSR54 0x3600 /* Temporary Storage */
115#define CSR55 0x3700 /* Temporary Storage */
116#define CSR56 0x3800 /* Temporary Storage */
117#define CSR57 0x3900 /* Temporary Storage */
118#define CSR58 0x3a00 /* Temporary Storage */
119#define CSR59 0x3b00 /* Temporary Storage */
120#define CSR60 0x3c00 /* Previous Transmit Descriptor Address */
121#define CSR61 0x3d00 /* Previous Transmit Descriptor Address */
122#define CSR62 0x3e00 /* Previous Transmit Status and Byte Count */
123#define CSR63 0x3f00 /* Previous Transmit Status and Byte Count */
124#define CSR64 0x4000 /* Next Transmit Buffer Address */
125#define CSR65 0x4100 /* Next Transmit Buffer Address */
126#define CSR66 0x4200 /* Next Transmit Status and Byte Count */
127#define CSR67 0x4300 /* Next Transmit Status and Byte Count */
128#define CSR68 0x4400 /* Transmit Status Temporary Storage */
129#define CSR69 0x4500 /* Transmit Status Temporary Storage */
130#define CSR70 0x4600 /* Temporary Storage */
131#define CSR71 0x4700 /* Temporary Storage */
132#define CSR72 0x4800 /* Receive Ring Counter */
133#define CSR74 0x4a00 /* Transmit Ring Counter */
134#define CSR76 0x4c00 /* - Receive Ring Length */
135#define CSR78 0x4e00 /* - Transmit Ring Length */
136#define CSR80 0x5000 /* - Burst and FIFO Threshold Control */
137#define CSR82 0x5200 /* - Bus Activity Timer */
138#define CSR84 0x5400 /* DMA Address */
139#define CSR85 0x5500 /* DMA Address */
140#define CSR86 0x5600 /* Buffer Byte Counter */
141#define CSR88 0x5800 /* - Chip ID */
142#define CSR89 0x5900 /* - Chip ID */
143#define CSR92 0x5c00 /* Ring Length Conversion */
144#define CSR94 0x5e00 /* Transmit Time Domain Reflectometry Count */
145#define CSR96 0x6000 /* Bus Interface Scratch Register 0 */
146#define CSR97 0x6100 /* Bus Interface Scratch Register 0 */
147#define CSR98 0x6200 /* Bus Interface Scratch Register 1 */
148#define CSR99 0x6300 /* Bus Interface Scratch Register 1 */
149#define CSR104 0x6800 /* SWAP */
150#define CSR105 0x6900 /* SWAP */
151#define CSR108 0x6c00 /* Buffer Management Scratch */
152#define CSR109 0x6d00 /* Buffer Management Scratch */
153#define CSR112 0x7000 /* - Missed Frame Count */
154#define CSR114 0x7200 /* - Receive Collision Count */
155#define CSR124 0x7c00 /* - Buffer Management Unit Test */
156
157
158 /*
159 * Am79C960 ISA Control and Status Registers
160 *
161 * These values are already swap()ed!!
162 */
163
164#define ISACSR0 0x0000 /* Master Mode Read Active */
165#define ISACSR1 0x0100 /* Master Mode Write Active */
166#define ISACSR2 0x0200 /* Miscellaneous Configuration */
167#define ISACSR4 0x0400 /* LED0 Status (Link Integrity) */
168#define ISACSR5 0x0500 /* LED1 Status */
169#define ISACSR6 0x0600 /* LED2 Status */
170#define ISACSR7 0x0700 /* LED3 Status */
171
172
173 /*
174 * Bit definitions for CSR0 (PCnet-ISA Controller Status)
175 *
176 * These values are already swap()ed!!
177 */
178
179#define ERR 0x0080 /* Error */
180#define BABL 0x0040 /* Babble: Transmitted too many bits */
181#define CERR 0x0020 /* No Heartbeat (10BASE-T) */
182#define MISS 0x0010 /* Missed Frame */
183#define MERR 0x0008 /* Memory Error */
184#define RINT 0x0004 /* Receive Interrupt */
185#define TINT 0x0002 /* Transmit Interrupt */
186#define IDON 0x0001 /* Initialization Done */
187#define INTR 0x8000 /* Interrupt Flag */
188#define INEA 0x4000 /* Interrupt Enable */
189#define RXON 0x2000 /* Receive On */
190#define TXON 0x1000 /* Transmit On */
191#define TDMD 0x0800 /* Transmit Demand */
192#define STOP 0x0400 /* Stop */
193#define STRT 0x0200 /* Start */
194#define INIT 0x0100 /* Initialize */
195
196
197 /*
198 * Bit definitions for CSR3 (Interrupt Masks and Deferral Control)
199 *
200 * These values are already swap()ed!!
201 */
202
203#define BABLM 0x0040 /* Babble Mask */
204#define MISSM 0x0010 /* Missed Frame Mask */
205#define MERRM 0x0008 /* Memory Error Mask */
206#define RINTM 0x0004 /* Receive Interrupt Mask */
207#define TINTM 0x0002 /* Transmit Interrupt Mask */
208#define IDONM 0x0001 /* Initialization Done Mask */
209#define DXMT2PD 0x1000 /* Disable Transmit Two Part Deferral */
210#define EMBA 0x0800 /* Enable Modified Back-off Algorithm */
211
212
213 /*
214 * Bit definitions for CSR4 (Test and Features Control)
215 *
216 * These values are already swap()ed!!
217 */
218
219#define ENTST 0x0080 /* Enable Test Mode */
220#define DMAPLUS 0x0040 /* Disable Burst Transaction Counter */
221#define TIMER 0x0020 /* Timer Enable Register */
222#define DPOLL 0x0010 /* Disable Transmit Polling */
223#define APAD_XMT 0x0008 /* Auto Pad Transmit */
224#define ASTRP_RCV 0x0004 /* Auto Pad Stripping */
225#define MFCO 0x0002 /* Missed Frame Counter Overflow Interrupt */
226#define MFCOM 0x0001 /* Missed Frame Counter Overflow Mask */
227#define RCVCCO 0x2000 /* Receive Collision Counter Overflow Interrupt */
228#define RCVCCOM 0x1000 /* Receive Collision Counter Overflow Mask */
229#define TXSTRT 0x0800 /* Transmit Start Status */
230#define TXSTRTM 0x0400 /* Transmit Start Mask */
231#define JAB 0x0200 /* Jabber Error */
232#define JABM 0x0100 /* Jabber Error Mask */
233
234
235 /*
236 * Bit definitions for CSR15 (Mode Register)
237 *
238 * These values are already swap()ed!!
239 */
240
241#define PROM 0x0080 /* Promiscuous Mode */
242#define DRCVBC 0x0040 /* Disable Receive Broadcast */
243#define DRCVPA 0x0020 /* Disable Receive Physical Address */
244#define DLNKTST 0x0010 /* Disable Link Status */
245#define DAPC 0x0008 /* Disable Automatic Polarity Correction */
246#define MENDECL 0x0004 /* MENDEC Loopback Mode */
247#define LRTTSEL 0x0002 /* Low Receive Threshold/Transmit Mode Select */
248#define PORTSEL1 0x0001 /* Port Select Bits */
249#define PORTSEL2 0x8000 /* Port Select Bits */
250#define INTL 0x4000 /* Internal Loopback */
251#define DRTY 0x2000 /* Disable Retry */
252#define FCOLL 0x1000 /* Force Collision */
253#define DXMTFCS 0x0800 /* Disable Transmit CRC */
254#define LOOP 0x0400 /* Loopback Enable */
255#define DTX 0x0200 /* Disable Transmitter */
256#define DRX 0x0100 /* Disable Receiver */
257
258
259 /*
260 * Bit definitions for ISACSR2 (Miscellaneous Configuration)
261 *
262 * These values are already swap()ed!!
263 */
264
265#define ASEL 0x0200 /* Media Interface Port Auto Select */
266
267
268 /*
269 * Bit definitions for ISACSR5-7 (LED1-3 Status)
270 *
271 * These values are already swap()ed!!
272 */
273
274#define LEDOUT 0x0080 /* Current LED Status */
275#define PSE 0x8000 /* Pulse Stretcher Enable */
276#define XMTE 0x1000 /* Enable Transmit Status Signal */
277#define RVPOLE 0x0800 /* Enable Receive Polarity Signal */
278#define RCVE 0x0400 /* Enable Receive Status Signal */
279#define JABE 0x0200 /* Enable Jabber Signal */
280#define COLE 0x0100 /* Enable Collision Signal */
281
282
283 /*
284 * Receive Descriptor Ring Entry
285 */
286
287struct RDRE {
288 volatile u_short RMD0; /* LADR[15:0] */
289 volatile u_short RMD1; /* HADR[23:16] | Receive Flags */
290 volatile u_short RMD2; /* Buffer Byte Count (two's complement) */
291 volatile u_short RMD3; /* Message Byte Count */
292};
293
294
295 /*
296 * Transmit Descriptor Ring Entry
297 */
298
299struct TDRE {
300 volatile u_short TMD0; /* LADR[15:0] */
301 volatile u_short TMD1; /* HADR[23:16] | Transmit Flags */
302 volatile u_short TMD2; /* Buffer Byte Count (two's complement) */
303 volatile u_short TMD3; /* Error Flags */
304};
305
306
307 /*
308 * Receive Flags
309 */
310
311#define RF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
312#define RF_ERR 0x0040 /* Error */
313#define RF_FRAM 0x0020 /* Framing Error */
314#define RF_OFLO 0x0010 /* Overflow Error */
315#define RF_CRC 0x0008 /* CRC Error */
316#define RF_BUFF 0x0004 /* Buffer Error */
317#define RF_STP 0x0002 /* Start of Packet */
318#define RF_ENP 0x0001 /* End of Packet */
319
320
321 /*
322 * Transmit Flags
323 */
324
325#define TF_OWN 0x0080 /* PCnet-ISA controller owns the descriptor */
326#define TF_ERR 0x0040 /* Error */
327#define TF_ADD_FCS 0x0020 /* Controls FCS Generation */
328#define TF_MORE 0x0010 /* More than one retry needed */
329#define TF_ONE 0x0008 /* One retry needed */
330#define TF_DEF 0x0004 /* Deferred */
331#define TF_STP 0x0002 /* Start of Packet */
332#define TF_ENP 0x0001 /* End of Packet */
333
334
335 /*
336 * Error Flags
337 */
338
339#define EF_BUFF 0x0080 /* Buffer Error */
340#define EF_UFLO 0x0040 /* Underflow Error */
341#define EF_LCOL 0x0010 /* Late Collision */
342#define EF_LCAR 0x0008 /* Loss of Carrier */
343#define EF_RTRY 0x0004 /* Retry Error */
344#define EF_TDR 0xff03 /* Time Domain Reflectometry */
345
346
347
348 /*
349 * MC68230 Parallel Interface/Timer
350 */
351
352struct MC68230 {
353 volatile u_char PGCR; /* Port General Control Register */
354 u_char Pad1[1];
355 volatile u_char PSRR; /* Port Service Request Register */
356 u_char Pad2[1];
357 volatile u_char PADDR; /* Port A Data Direction Register */
358 u_char Pad3[1];
359 volatile u_char PBDDR; /* Port B Data Direction Register */
360 u_char Pad4[1];
361 volatile u_char PCDDR; /* Port C Data Direction Register */
362 u_char Pad5[1];
363 volatile u_char PIVR; /* Port Interrupt Vector Register */
364 u_char Pad6[1];
365 volatile u_char PACR; /* Port A Control Register */
366 u_char Pad7[1];
367 volatile u_char PBCR; /* Port B Control Register */
368 u_char Pad8[1];
369 volatile u_char PADR; /* Port A Data Register */
370 u_char Pad9[1];
371 volatile u_char PBDR; /* Port B Data Register */
372 u_char Pad10[1];
373 volatile u_char PAAR; /* Port A Alternate Register */
374 u_char Pad11[1];
375 volatile u_char PBAR; /* Port B Alternate Register */
376 u_char Pad12[1];
377 volatile u_char PCDR; /* Port C Data Register */
378 u_char Pad13[1];
379 volatile u_char PSR; /* Port Status Register */
380 u_char Pad14[5];
381 volatile u_char TCR; /* Timer Control Register */
382 u_char Pad15[1];
383 volatile u_char TIVR; /* Timer Interrupt Vector Register */
384 u_char Pad16[3];
385 volatile u_char CPRH; /* Counter Preload Register (High) */
386 u_char Pad17[1];
387 volatile u_char CPRM; /* Counter Preload Register (Mid) */
388 u_char Pad18[1];
389 volatile u_char CPRL; /* Counter Preload Register (Low) */
390 u_char Pad19[3];
391 volatile u_char CNTRH; /* Count Register (High) */
392 u_char Pad20[1];
393 volatile u_char CNTRM; /* Count Register (Mid) */
394 u_char Pad21[1];
395 volatile u_char CNTRL; /* Count Register (Low) */
396 u_char Pad22[1];
397 volatile u_char TSR; /* Timer Status Register */
398 u_char Pad23[11];
399};
400
401
402 /*
403 * Ariadne Expansion Board Structure
404 */
405
406#define ARIADNE_LANCE 0x360
407
408#define ARIADNE_PIT 0x1000
409
410#define ARIADNE_BOOTPROM 0x4000 /* I guess it's here :-) */
411#define ARIADNE_BOOTPROM_SIZE 0x4000
412
413#define ARIADNE_RAM 0x8000 /* Always access WORDs!! */
414#define ARIADNE_RAM_SIZE 0x8000
415
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
new file mode 100644
index 000000000000..15bfa28d6c53
--- /dev/null
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -0,0 +1,1176 @@
1/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
2/*
3 Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)
4
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
7
8	This driver was written with the following sources of reference:
9 - The driver for the Riebl Lance card by the TU Vienna.
10 - The modified TUW driver for PAM's VME cards
11 - The PC-Linux driver for Lance cards (but this is for bus master
12 cards, not the shared memory ones)
13 - The Amiga Ariadne driver
14
15 v1.0: (in 1.2.13pl4/0.9.13)
16 Initial version
17 v1.1: (in 1.2.13pl5)
18 more comments
19 deleted some debugging stuff
20 optimized register access (keep AREG pointing to CSR0)
21 following AMD, CSR0_STRT should be set only after IDON is detected
22 use memcpy() for data transfers, that also employs long word moves
23 better probe procedure for 24-bit systems
24 non-VME-RieblCards need extra delays in memcpy
25 must also do write test, since 0xfxe00000 may hit ROM
26 use 8/32 tx/rx buffers, which should give better NFS performance;
27 this is made possible by shifting the last packet buffer after the
28 RieblCard reserved area
29 v1.2: (in 1.2.13pl8)
30	 again fixed probing for the Falcon; 0xfe010000 hits phys. 0x00010000
31 and thus RAM, in case of no Lance found all memory contents have to
32 be restored!
33 Now possible to compile as module.
34 v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
35 Several little 1.3 adaptions
36 When the lance is stopped it jumps back into little-endian
37 mode. It is therefore necessary to put it back where it
38 belongs, in big endian mode, in order to make things work.
39 This might be the reason why multicast-mode didn't work
40 before, but I'm not able to test it as I only got an Amiga
41 (we had similar problems with the A2065 driver).
42
43*/
44
45static char version[] = "atarilance.c: v1.3 04/04/96 "
46 "Roman.Hodek@informatik.uni-erlangen.de\n";
47
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/module.h>
51#include <linux/stddef.h>
52#include <linux/kernel.h>
53#include <linux/string.h>
54#include <linux/errno.h>
55#include <linux/skbuff.h>
56#include <linux/interrupt.h>
57#include <linux/init.h>
58#include <linux/bitops.h>
59
60#include <asm/setup.h>
61#include <asm/irq.h>
62#include <asm/atarihw.h>
63#include <asm/atariints.h>
64#include <asm/io.h>
65
66/* Debug level:
67 * 0 = silent, print only serious errors
68 * 1 = normal, print error messages
69 * 2 = debug, print debug infos
70 * 3 = debug, print even more debug infos (packet data)
71 */
72
73#define LANCE_DEBUG 1
74
75#ifdef LANCE_DEBUG
76static int lance_debug = LANCE_DEBUG;
77#else
78static int lance_debug = 1;
79#endif
80module_param(lance_debug, int, 0);
81MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
82MODULE_LICENSE("GPL");
83
84/* Print debug messages on probing? */
85#undef LANCE_DEBUG_PROBE
86
87#define DPRINTK(n,a) \
88 do { \
89 if (lance_debug >= n) \
90 printk a; \
91 } while( 0 )
92
93#ifdef LANCE_DEBUG_PROBE
94# define PROBE_PRINT(a) printk a
95#else
96# define PROBE_PRINT(a)
97#endif
98
99/* These define the number of Rx and Tx buffers as log2. (Only powers
100 * of two are valid)
101 * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
102 * is more time critical than sending, and packets may have to remain in the
103 * board's memory when main memory is low.
104 */
105
106#define TX_LOG_RING_SIZE 3
107#define RX_LOG_RING_SIZE 5
108
109/* These are the derived values */
110
111#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
112#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
113#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
114
115#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
116#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
117#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
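/* Worked values (editorial): with the log2 sizes above this gives
 * 8 tx and 32 rx buffers; TX_RING_LEN_BITS == 0x60 and
 * RX_RING_LEN_BITS == 0xa0, i.e. the log2 ring length placed in the
 * upper three bits of the init block's ring descriptor length byte.
 */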
118
119#define TX_TIMEOUT (HZ/5)
120
121/* The LANCE Rx and Tx ring descriptors. */
122struct lance_rx_head {
123 unsigned short base; /* Low word of base addr */
124 volatile unsigned char flag;
125 unsigned char base_hi; /* High word of base addr (unused) */
126 short buf_length; /* This length is 2s complement! */
127 volatile short msg_length; /* This length is "normal". */
128};
129
130struct lance_tx_head {
131 unsigned short base; /* Low word of base addr */
132 volatile unsigned char flag;
133 unsigned char base_hi; /* High word of base addr (unused) */
134 short length; /* Length is 2s complement! */
135 volatile short misc;
136};
137
138struct ringdesc {
139 unsigned short adr_lo; /* Low 16 bits of address */
140 unsigned char len; /* Length bits */
141 unsigned char adr_hi; /* High 8 bits of address (unused) */
142};
143
144/* The LANCE initialization block, described in databook. */
145struct lance_init_block {
146 unsigned short mode; /* Pre-set mode */
147 unsigned char hwaddr[6]; /* Physical ethernet address */
148 unsigned filter[2]; /* Multicast filter (unused). */
149 /* Receive and transmit ring base, along with length bits. */
150 struct ringdesc rx_ring;
151 struct ringdesc tx_ring;
152};
153
154/* The whole layout of the Lance shared memory */
155struct lance_memory {
156 struct lance_init_block init;
157 struct lance_tx_head tx_head[TX_RING_SIZE];
158 struct lance_rx_head rx_head[RX_RING_SIZE];
159 char packet_area[0]; /* packet data follow after the
160 * init block and the ring
161 * descriptors and are located
162 * at runtime */
163};
164
165/* RieblCard specifics:
166 * The original TOS driver for these cards reserves the area from offset
167 * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
168 * Ethernet address there, and the magic for verifying the data's validity.
169 * The reserved area isn't touched by packet buffers. Furthermore, offset 0xfffe
170 * is reserved for the interrupt vector number.
171 */
172#define RIEBL_RSVD_START 0xee70
173#define RIEBL_RSVD_END 0xeec0
174#define RIEBL_MAGIC 0x09051990
175#define RIEBL_MAGIC_ADDR ((unsigned long *)(((char *)MEM) + 0xee8a))
176#define RIEBL_HWADDR_ADDR ((unsigned char *)(((char *)MEM) + 0xee8e))
177#define RIEBL_IVEC_ADDR ((unsigned short *)(((char *)MEM) + 0xfffe))
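/* Usage sketch (hedged): the stored hardware address is trusted only
 * when the magic checks out, as lance_probe1() does further down:
 *
 *	if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC)
 *		lp->cardtype = NEW_RIEBL;	// hwaddr at RIEBL_HWADDR_ADDR is valid
 *	else
 *		lp->cardtype = OLD_RIEBL;	// fall back to OldRieblDefHwaddr
 */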
178
179/* This is a default address for the old RieblCards without a battery
180 * that have no ethernet address at boot time. 00:00:36:04 is the
181 * prefix for Riebl cards, the 00:00 at the end is arbitrary.
182 */
183
184static unsigned char OldRieblDefHwaddr[6] = {
185 0x00, 0x00, 0x36, 0x04, 0x00, 0x00
186};
187
188
189/* I/O registers of the Lance chip */
190
191struct lance_ioreg {
192/* base+0x0 */ volatile unsigned short data;
193/* base+0x2 */ volatile unsigned short addr;
194 unsigned char _dummy1[3];
195/* base+0x7 */ volatile unsigned char ivec;
196 unsigned char _dummy2[5];
197/* base+0xd */ volatile unsigned char eeprom;
198 unsigned char _dummy3;
199/* base+0xf */ volatile unsigned char mem;
200};
201
202/* Types of boards this driver supports */
203
204enum lance_type {
205 OLD_RIEBL, /* old Riebl card without battery */
206 NEW_RIEBL, /* new Riebl card with battery */
207 PAM_CARD /* PAM card with EEPROM */
208};
209
210static char *lance_names[] = {
211 "Riebl-Card (without battery)",
212 "Riebl-Card (with battery)",
213 "PAM intern card"
214};
215
216/* The driver's private device structure */
217
218struct lance_private {
219 enum lance_type cardtype;
220 struct lance_ioreg *iobase;
221 struct lance_memory *mem;
222 int cur_rx, cur_tx; /* The next free ring entry */
223 int dirty_tx; /* Ring entries to be freed. */
224 /* copy function */
225 void *(*memcpy_f)( void *, const void *, size_t );
226/* This must be long for set_bit() */
227 long tx_full;
228 spinlock_t devlock;
229};
230
231/* I/O register access macros */
232
233#define MEM lp->mem
234#define DREG IO->data
235#define AREG IO->addr
236#define REGA(a) (*( AREG = (a), &DREG ))
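/* Usage sketch (editorial): REGA() latches the register number into the
 * address port and evaluates to the data port, so a CSR access is a
 * single expression that leaves AREG pointing at that CSR afterwards:
 *
 *	REGA(CSR0) = CSR0_STOP;		// select CSR0, then write STOP
 */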
237
238/* Definitions for packet buffer access: */
239#define PKT_BUF_SZ 1544
240/* Get the address of a packet buffer corresponding to a given buffer head */
241#define PKTBUF_ADDR(head) (((unsigned char *)(MEM)) + (head)->base)
242
243/* Possible memory/IO addresses for probing */
244
245static struct lance_addr {
246 unsigned long memaddr;
247 unsigned long ioaddr;
248 int slow_flag;
249} lance_addr_list[] = {
250 { 0xfe010000, 0xfe00fff0, 0 }, /* RieblCard VME in TT */
251 { 0xffc10000, 0xffc0fff0, 0 }, /* RieblCard VME in MegaSTE
252 (highest byte stripped) */
253 { 0xffe00000, 0xffff7000, 1 }, /* RieblCard in ST
254 (highest byte stripped) */
255 { 0xffd00000, 0xffff7000, 1 }, /* RieblCard in ST with hw modif. to
256 avoid conflict with ROM
257 (highest byte stripped) */
258 { 0xffcf0000, 0xffcffff0, 0 }, /* PAMCard VME in TT and MSTE
259 (highest byte stripped) */
260 { 0xfecf0000, 0xfecffff0, 0 }, /* Rhotron's PAMCard VME in TT and MSTE
261 (highest byte stripped) */
262};
263
264#define N_LANCE_ADDR ARRAY_SIZE(lance_addr_list)
265
266
267/* Definitions for the Lance */
268
269/* tx_head flags */
270#define TMD1_ENP 0x01 /* end of packet */
271#define TMD1_STP 0x02 /* start of packet */
272#define TMD1_DEF 0x04 /* deferred */
273#define TMD1_ONE 0x08 /* one retry needed */
274#define TMD1_MORE 0x10 /* more than one retry needed */
275#define TMD1_ERR 0x40 /* error summary */
276#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
277
278#define TMD1_OWN_CHIP TMD1_OWN
279#define TMD1_OWN_HOST 0
280
281/* tx_head misc field */
282#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
283#define TMD3_RTRY 0x0400 /* failed after 16 retries */
284#define TMD3_LCAR 0x0800 /* carrier lost */
285#define TMD3_LCOL 0x1000 /* late collision */
286#define TMD3_UFLO 0x4000 /* underflow (late memory) */
287#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
288
289/* rx_head flags */
290#define RMD1_ENP 0x01 /* end of packet */
291#define RMD1_STP 0x02 /* start of packet */
292#define RMD1_BUFF 0x04 /* buffer error */
293#define RMD1_CRC 0x08 /* CRC error */
294#define RMD1_OFLO 0x10 /* overflow */
295#define RMD1_FRAM 0x20 /* framing error */
296#define RMD1_ERR 0x40 /* error summary */
297#define RMD1_OWN	0x80	/* ownership (set: chip owns) */
298
299#define RMD1_OWN_CHIP RMD1_OWN
300#define RMD1_OWN_HOST 0
301
302/* register names */
303#define CSR0 0 /* mode/status */
304#define CSR1 1 /* init block addr (low) */
305#define CSR2 2 /* init block addr (high) */
306#define CSR3 3 /* misc */
307#define CSR8 8 /* address filter */
308#define CSR15 15 /* promiscuous mode */
309
310/* CSR0 */
311/* (R=readable, W=writeable, S=set on write, C=clear on write) */
312#define CSR0_INIT 0x0001 /* initialize (RS) */
313#define CSR0_STRT 0x0002 /* start (RS) */
314#define CSR0_STOP 0x0004 /* stop (RS) */
315#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
316#define CSR0_TXON 0x0010 /* transmitter on (R) */
317#define CSR0_RXON 0x0020 /* receiver on (R) */
318#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
319#define CSR0_INTR 0x0080 /* interrupt active (R) */
320#define CSR0_IDON 0x0100 /* initialization done (RC) */
321#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
322#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
323#define CSR0_MERR 0x0800 /* memory error (RC) */
324#define CSR0_MISS 0x1000 /* missed frame (RC) */
325#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
326#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
327#define CSR0_ERR 0x8000 /* error (RC) */
328
329/* CSR3 */
330#define CSR3_BCON 0x0001 /* byte control */
331#define CSR3_ACON 0x0002 /* ALE control */
332#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
333
334
335
336/***************************** Prototypes *****************************/
337
338static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
339 *init_rec );
340static int lance_open( struct net_device *dev );
341static void lance_init_ring( struct net_device *dev );
342static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
343static irqreturn_t lance_interrupt( int irq, void *dev_id );
344static int lance_rx( struct net_device *dev );
345static int lance_close( struct net_device *dev );
346static void set_multicast_list( struct net_device *dev );
347static int lance_set_mac_address( struct net_device *dev, void *addr );
348static void lance_tx_timeout (struct net_device *dev);
349
350/************************* End of Prototypes **************************/
351
352
353
354
355
356static void *slow_memcpy( void *dst, const void *src, size_t len )
357{
358	char *cto = dst;
359 const char *cfrom = src;
360
361 while( len-- ) {
362 *cto++ = *cfrom++;
363 MFPDELAY();
364 }
365 return dst;
366}
367
368
369struct net_device * __init atarilance_probe(int unit)
370{
371 int i;
372 static int found;
373 struct net_device *dev;
374 int err = -ENODEV;
375
376 if (!MACH_IS_ATARI || found)
377 /* Assume there's only one board possible... That seems true, since
378 * the Riebl/PAM board's address cannot be changed. */
379 return ERR_PTR(-ENODEV);
380
381 dev = alloc_etherdev(sizeof(struct lance_private));
382 if (!dev)
383 return ERR_PTR(-ENOMEM);
384 if (unit >= 0) {
385 sprintf(dev->name, "eth%d", unit);
386 netdev_boot_setup_check(dev);
387 }
388
389 for( i = 0; i < N_LANCE_ADDR; ++i ) {
390 if (lance_probe1( dev, &lance_addr_list[i] )) {
391 found = 1;
392 err = register_netdev(dev);
393 if (!err)
394 return dev;
395 free_irq(dev->irq, dev);
396 break;
397 }
398 }
399 free_netdev(dev);
400 return ERR_PTR(err);
401}
402
403
404/* Derived from hwreg_present() in atari/config.c: */
405
406static noinline int __init addr_accessible(volatile void *regp, int wordflag,
407 int writeflag)
408{
409 int ret;
410 unsigned long flags;
411 long *vbr, save_berr;
412
413 local_irq_save(flags);
414
415 __asm__ __volatile__ ( "movec %/vbr,%0" : "=r" (vbr) : );
416 save_berr = vbr[2];
417
418 __asm__ __volatile__
419 ( "movel %/sp,%/d1\n\t"
420 "movel #Lberr,%2@\n\t"
421 "moveq #0,%0\n\t"
422 "tstl %3\n\t"
423 "bne 1f\n\t"
424 "moveb %1@,%/d0\n\t"
425 "nop \n\t"
426 "bra 2f\n"
427"1: movew %1@,%/d0\n\t"
428 "nop \n"
429"2: tstl %4\n\t"
430 "beq 2f\n\t"
431 "tstl %3\n\t"
432 "bne 1f\n\t"
433 "clrb %1@\n\t"
434 "nop \n\t"
435 "moveb %/d0,%1@\n\t"
436 "nop \n\t"
437 "bra 2f\n"
438"1: clrw %1@\n\t"
439 "nop \n\t"
440 "movew %/d0,%1@\n\t"
441 "nop \n"
442"2: moveq #1,%0\n"
443"Lberr: movel %/d1,%/sp"
444 : "=&d" (ret)
445 : "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
446 : "d0", "d1", "memory"
447 );
448
449 vbr[2] = save_berr;
450 local_irq_restore(flags);
451
452 return ret;
453}
454
455static const struct net_device_ops lance_netdev_ops = {
456 .ndo_open = lance_open,
457 .ndo_stop = lance_close,
458 .ndo_start_xmit = lance_start_xmit,
459 .ndo_set_rx_mode = set_multicast_list,
460 .ndo_set_mac_address = lance_set_mac_address,
461 .ndo_tx_timeout = lance_tx_timeout,
462 .ndo_validate_addr = eth_validate_addr,
463 .ndo_change_mtu = eth_change_mtu,
464};
465
466static unsigned long __init lance_probe1( struct net_device *dev,
467 struct lance_addr *init_rec )
468{
469 volatile unsigned short *memaddr =
470 (volatile unsigned short *)init_rec->memaddr;
471 volatile unsigned short *ioaddr =
472 (volatile unsigned short *)init_rec->ioaddr;
473 struct lance_private *lp;
474 struct lance_ioreg *IO;
475 int i;
476 static int did_version;
477 unsigned short save1, save2;
478
479 PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
480 (long)memaddr, (long)ioaddr ));
481
482 /* Test whether memory readable and writable */
483 PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
484 if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;
485
486 /* Written values should come back... */
487 PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
488 save1 = *memaddr;
489 *memaddr = 0x0001;
490 if (*memaddr != 0x0001) goto probe_fail;
491 PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
492 *memaddr = 0x0000;
493 if (*memaddr != 0x0000) goto probe_fail;
494 *memaddr = save1;
495
496 /* First port should be readable and writable */
497 PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
498 if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;
499
500 /* and written values should be readable */
501	PROBE_PRINT(( "lance_probe1: testing ioport to be writable\n" ));
502 save2 = ioaddr[1];
503 ioaddr[1] = 0x0001;
504 if (ioaddr[1] != 0x0001) goto probe_fail;
505
506 /* The CSR0_INIT bit should not be readable */
507 PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
508 save1 = ioaddr[0];
509 ioaddr[1] = CSR0;
510 ioaddr[0] = CSR0_INIT | CSR0_STOP;
511 if (ioaddr[0] != CSR0_STOP) {
512 ioaddr[0] = save1;
513 ioaddr[1] = save2;
514 goto probe_fail;
515 }
516 PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
517 ioaddr[0] = CSR0_STOP;
518 if (ioaddr[0] != CSR0_STOP) {
519 ioaddr[0] = save1;
520 ioaddr[1] = save2;
521 goto probe_fail;
522 }
523
524 /* Now ok... */
525 PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
526 goto probe_ok;
527
528 probe_fail:
529 return 0;
530
531 probe_ok:
532 lp = netdev_priv(dev);
533 MEM = (struct lance_memory *)memaddr;
534 IO = lp->iobase = (struct lance_ioreg *)ioaddr;
535 dev->base_addr = (unsigned long)ioaddr; /* informational only */
536 lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;
537
538 REGA( CSR0 ) = CSR0_STOP;
539
540 /* Now test for type: If the eeprom I/O port is readable, it is a
541 * PAM card */
542 if (addr_accessible( &(IO->eeprom), 0, 0 )) {
543 /* Switch back to Ram */
544 i = IO->mem;
545 lp->cardtype = PAM_CARD;
546 }
547 else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
548 lp->cardtype = NEW_RIEBL;
549 }
550 else
551 lp->cardtype = OLD_RIEBL;
552
553 if (lp->cardtype == PAM_CARD ||
554 memaddr == (unsigned short *)0xffe00000) {
555		/* PAM's card and Riebl on ST use level 5 autovector */
556 if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
557 "PAM,Riebl-ST Ethernet", dev)) {
558 printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
559 return 0;
560 }
561 dev->irq = (unsigned short)IRQ_AUTO_5;
562 }
563 else {
564 /* For VME-RieblCards, request a free VME int;
565 * (This must be unsigned long, since dev->irq is short and the
566 * IRQ_MACHSPEC bit would be cut off...)
567 */
568 unsigned long irq = atari_register_vme_int();
569 if (!irq) {
570 printk( "Lance: request for VME interrupt failed\n" );
571 return 0;
572 }
573 if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
574 "Riebl-VME Ethernet", dev)) {
575 printk( "Lance: request for irq %ld failed\n", irq );
576 return 0;
577 }
578 dev->irq = irq;
579 }
580
581 printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
582 dev->name, lance_names[lp->cardtype],
583 (unsigned long)ioaddr,
584 (unsigned long)memaddr,
585 dev->irq,
586 init_rec->slow_flag ? " (slow memcpy)" : "" );
587
588 /* Get the ethernet address */
589 switch( lp->cardtype ) {
590 case OLD_RIEBL:
591 /* No ethernet address! (Set some default address) */
592 memcpy( dev->dev_addr, OldRieblDefHwaddr, 6 );
593 break;
594 case NEW_RIEBL:
595 lp->memcpy_f( dev->dev_addr, RIEBL_HWADDR_ADDR, 6 );
596 break;
597 case PAM_CARD:
598 i = IO->eeprom;
599 for( i = 0; i < 6; ++i )
600 dev->dev_addr[i] =
601 ((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
602 ((((unsigned short *)MEM)[i*2+1] & 0x0f));
603 i = IO->mem;
604 break;
605 }
606 printk("%pM\n", dev->dev_addr);
607 if (lp->cardtype == OLD_RIEBL) {
608 printk( "%s: Warning: This is a default ethernet address!\n",
609 dev->name );
610 printk( " Use \"ifconfig hw ether ...\" to set the address.\n" );
611 }
612
613 spin_lock_init(&lp->devlock);
614
615 MEM->init.mode = 0x0000; /* Disable Rx and Tx. */
616 for( i = 0; i < 6; i++ )
617 MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
618 MEM->init.filter[0] = 0x00000000;
619 MEM->init.filter[1] = 0x00000000;
620 MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
621 MEM->init.rx_ring.adr_hi = 0;
622 MEM->init.rx_ring.len = RX_RING_LEN_BITS;
623 MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
624 MEM->init.tx_ring.adr_hi = 0;
625 MEM->init.tx_ring.len = TX_RING_LEN_BITS;
626
627 if (lp->cardtype == PAM_CARD)
628 IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
629 else
630 *RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);
631
632 if (did_version++ == 0)
633 DPRINTK( 1, ( version ));
634
635 dev->netdev_ops = &lance_netdev_ops;
636
637 /* XXX MSch */
638 dev->watchdog_timeo = TX_TIMEOUT;
639
640 return 1;
641}
642
643
644static int lance_open( struct net_device *dev )
645{
646 struct lance_private *lp = netdev_priv(dev);
647 struct lance_ioreg *IO = lp->iobase;
648 int i;
649
650 DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
651
652 lance_init_ring(dev);
653 /* Re-initialize the LANCE, and start it when done. */
654
655 REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
656 REGA( CSR2 ) = 0;
657 REGA( CSR1 ) = 0;
658 REGA( CSR0 ) = CSR0_INIT;
659 /* From now on, AREG is kept to point to CSR0 */
660
661 i = 1000000;
662 while (--i > 0)
663 if (DREG & CSR0_IDON)
664 break;
665 if (i <= 0 || (DREG & CSR0_ERR)) {
666 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
667 dev->name, i, DREG ));
668 DREG = CSR0_STOP;
669 return -EIO;
670 }
671 DREG = CSR0_IDON;
672 DREG = CSR0_STRT;
673 DREG = CSR0_INEA;
674
675 netif_start_queue (dev);
676
677 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
678
679 return 0;
680}
681
682
683/* Initialize the LANCE Rx and Tx rings. */
684
685static void lance_init_ring( struct net_device *dev )
686{
687 struct lance_private *lp = netdev_priv(dev);
688 int i;
689 unsigned offset;
690
691 lp->tx_full = 0;
692 lp->cur_rx = lp->cur_tx = 0;
693 lp->dirty_tx = 0;
694
695 offset = offsetof( struct lance_memory, packet_area );
696
697/* If the packet buffer at offset 'o' would conflict with the reserved area
698 * of RieblCards, advance it */
699#define CHECK_OFFSET(o) \
700 do { \
701 if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) { \
702 if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
703 : (o) < RIEBL_RSVD_END) \
704 (o) = RIEBL_RSVD_END; \
705 } \
706 } while(0)
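	/* Worked example (boundary values symbolic, not real numbers): a buffer
	 * starting just below RIEBL_RSVD_START whose PKT_BUF_SZ bytes would
	 * reach into the reserved window trips the first test and is moved up
	 * to RIEBL_RSVD_END; a buffer starting inside the window trips the
	 * second test and is moved the same way; buffers entirely below or
	 * at/after the window pass through unchanged. */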
707
708 for( i = 0; i < TX_RING_SIZE; i++ ) {
709 CHECK_OFFSET(offset);
710 MEM->tx_head[i].base = offset;
711 MEM->tx_head[i].flag = TMD1_OWN_HOST;
712 MEM->tx_head[i].base_hi = 0;
713 MEM->tx_head[i].length = 0;
714 MEM->tx_head[i].misc = 0;
715 offset += PKT_BUF_SZ;
716 }
717
718 for( i = 0; i < RX_RING_SIZE; i++ ) {
719 CHECK_OFFSET(offset);
720 MEM->rx_head[i].base = offset;
721 MEM->rx_head[i].flag = TMD1_OWN_CHIP;
722 MEM->rx_head[i].base_hi = 0;
723 MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
724 MEM->rx_head[i].msg_length = 0;
725 offset += PKT_BUF_SZ;
726 }
727}
728
729
730/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
731
732
733static void lance_tx_timeout (struct net_device *dev)
734{
735 struct lance_private *lp = netdev_priv(dev);
736 struct lance_ioreg *IO = lp->iobase;
737
738 AREG = CSR0;
739 DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
740 dev->name, DREG ));
741 DREG = CSR0_STOP;
742 /*
743 * Always set BSWP after a STOP as STOP puts it back into
744 * little endian mode.
745 */
746 REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
747 dev->stats.tx_errors++;
748#ifndef final_version
749 { int i;
750 DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
751 lp->dirty_tx, lp->cur_tx,
752 lp->tx_full ? " (full)" : "",
753 lp->cur_rx ));
754 for( i = 0 ; i < RX_RING_SIZE; i++ )
755 DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
756 i, MEM->rx_head[i].base,
757 -MEM->rx_head[i].buf_length,
758 MEM->rx_head[i].msg_length ));
759 for( i = 0 ; i < TX_RING_SIZE; i++ )
760 DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
761 i, MEM->tx_head[i].base,
762 -MEM->tx_head[i].length,
763 MEM->tx_head[i].misc ));
764 }
765#endif
766 /* XXX MSch: maybe purge/reinit ring here */
767 /* lance_restart, essentially */
768 lance_init_ring(dev);
769 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
770 dev->trans_start = jiffies; /* prevent tx timeout */
771 netif_wake_queue(dev);
772}
773
774/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
775
776static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
777{
778 struct lance_private *lp = netdev_priv(dev);
779 struct lance_ioreg *IO = lp->iobase;
780 int entry, len;
781 struct lance_tx_head *head;
782 unsigned long flags;
783
784 DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
785 dev->name, DREG ));
786
787
788	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
789 len = skb->len;
790 if (len < ETH_ZLEN)
791 len = ETH_ZLEN;
792 /* PAM-Card has a bug: Can only send packets with even number of bytes! */
793 else if (lp->cardtype == PAM_CARD && (len & 1))
794 ++len;
795
796 if (len > skb->len) {
797 if (skb_padto(skb, len))
798 return NETDEV_TX_OK;
799 }
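	/* Example of the rules above (illustrative): a 30 byte frame is padded
	 * up to ETH_ZLEN (60 bytes); a 61 byte frame on a PAM card is rounded
	 * up to 62, since that hardware can only transmit an even number of
	 * bytes. */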
800
801 netif_stop_queue (dev);
802
803 /* Fill in a Tx ring entry */
804 if (lance_debug >= 3) {
805 printk( "%s: TX pkt type 0x%04x from %pM to %pM"
806 " data at 0x%08x len %d\n",
807 dev->name, ((u_short *)skb->data)[6],
808 &skb->data[6], skb->data,
809 (int)skb->data, (int)skb->len );
810 }
811
812	/* We're not prepared for the interrupt until the last flags are
813	 * set/reset, and the interrupt may already happen after setting OWN_CHIP... */
814 spin_lock_irqsave (&lp->devlock, flags);
815
816 /* Mask to ring buffer boundary. */
817 entry = lp->cur_tx & TX_RING_MOD_MASK;
818 head = &(MEM->tx_head[entry]);
819
820 /* Caution: the write order is important here, set the "ownership" bits
821 * last.
822 */
823
824
825 head->length = -len;
826 head->misc = 0;
827 lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
828 head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
829 dev->stats.tx_bytes += skb->len;
830 dev_kfree_skb( skb );
831 lp->cur_tx++;
832 while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
833 lp->cur_tx -= TX_RING_SIZE;
834 lp->dirty_tx -= TX_RING_SIZE;
835 }
836
837 /* Trigger an immediate send poll. */
838 DREG = CSR0_INEA | CSR0_TDMD;
839
840 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
841 TMD1_OWN_HOST)
842 netif_start_queue (dev);
843 else
844 lp->tx_full = 1;
845 spin_unlock_irqrestore (&lp->devlock, flags);
846
847 return NETDEV_TX_OK;
848}
849
850/* The LANCE interrupt handler. */
851
852static irqreturn_t lance_interrupt( int irq, void *dev_id )
853{
854 struct net_device *dev = dev_id;
855 struct lance_private *lp;
856 struct lance_ioreg *IO;
857 int csr0, boguscnt = 10;
858 int handled = 0;
859
860 if (dev == NULL) {
861 DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
862 return IRQ_NONE;
863 }
864
865 lp = netdev_priv(dev);
866 IO = lp->iobase;
867 spin_lock (&lp->devlock);
868
869 AREG = CSR0;
870
871 while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
872 --boguscnt >= 0) {
873 handled = 1;
874 /* Acknowledge all of the current interrupt sources ASAP. */
875 DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
876 CSR0_TDMD | CSR0_INEA);
877
878 DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
879 dev->name, csr0, DREG ));
880
881 if (csr0 & CSR0_RINT) /* Rx interrupt */
882 lance_rx( dev );
883
884 if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
885 int dirty_tx = lp->dirty_tx;
886
887 while( dirty_tx < lp->cur_tx) {
888 int entry = dirty_tx & TX_RING_MOD_MASK;
889 int status = MEM->tx_head[entry].flag;
890
891 if (status & TMD1_OWN_CHIP)
892 break; /* It still hasn't been Txed */
893
894 MEM->tx_head[entry].flag = 0;
895
896 if (status & TMD1_ERR) {
897					/* There was a major error, log it. */
898 int err_status = MEM->tx_head[entry].misc;
899 dev->stats.tx_errors++;
900 if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
901 if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
902 if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
903 if (err_status & TMD3_UFLO) {
904 /* Ackk! On FIFO errors the Tx unit is turned off! */
905 dev->stats.tx_fifo_errors++;
906 /* Remove this verbosity later! */
907 DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
908 dev->name, csr0 ));
909 /* Restart the chip. */
910 DREG = CSR0_STRT;
911 }
912 } else {
913 if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
914 dev->stats.collisions++;
915 dev->stats.tx_packets++;
916 }
917
918 /* XXX MSch: free skb?? */
919 dirty_tx++;
920 }
921
922#ifndef final_version
923 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
924 DPRINTK( 0, ( "out-of-sync dirty pointer,"
925 " %d vs. %d, full=%ld.\n",
926 dirty_tx, lp->cur_tx, lp->tx_full ));
927 dirty_tx += TX_RING_SIZE;
928 }
929#endif
930
931 if (lp->tx_full && (netif_queue_stopped(dev)) &&
932 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
933 /* The ring is no longer full, clear tbusy. */
934 lp->tx_full = 0;
935 netif_wake_queue (dev);
936 }
937
938 lp->dirty_tx = dirty_tx;
939 }
940
941 /* Log misc errors. */
942 if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
943 if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
944 if (csr0 & CSR0_MERR) {
945 DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
946 "status %04x.\n", dev->name, csr0 ));
947 /* Restart the chip. */
948 DREG = CSR0_STRT;
949 }
950 }
951
952 /* Clear any other interrupt, and set interrupt enable. */
953 DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
954 CSR0_IDON | CSR0_INEA;
955
956 DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
957 dev->name, DREG ));
958
959 spin_unlock (&lp->devlock);
960 return IRQ_RETVAL(handled);
961}
962
963
964static int lance_rx( struct net_device *dev )
965{
966 struct lance_private *lp = netdev_priv(dev);
967 int entry = lp->cur_rx & RX_RING_MOD_MASK;
968 int i;
969
970 DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
971 MEM->rx_head[entry].flag ));
972
973 /* If we own the next entry, it's a new packet. Send it up. */
974 while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
975 struct lance_rx_head *head = &(MEM->rx_head[entry]);
976 int status = head->flag;
977
978 if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
979 /* There is a tricky error noted by John Murphy,
980 <murf@perftech.com> to Russ Nelson: Even with full-sized
981 buffers it's possible for a jabber packet to use two
982 buffers, with only the last correctly noting the error. */
983 if (status & RMD1_ENP) /* Only count a general error at the */
984 dev->stats.rx_errors++; /* end of a packet.*/
985 if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
986 if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
987 if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
988 if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
989 head->flag &= (RMD1_ENP|RMD1_STP);
990 } else {
991 /* Malloc up new buffer, compatible with net-3. */
992 short pkt_len = head->msg_length & 0xfff;
993 struct sk_buff *skb;
994
995 if (pkt_len < 60) {
996 printk( "%s: Runt packet!\n", dev->name );
997 dev->stats.rx_errors++;
998 }
999 else {
1000 skb = dev_alloc_skb( pkt_len+2 );
1001 if (skb == NULL) {
1002 DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
1003 dev->name ));
1004 for( i = 0; i < RX_RING_SIZE; i++ )
1005 if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
1006 RMD1_OWN_CHIP)
1007 break;
1008
1009 if (i > RX_RING_SIZE - 2) {
1010 dev->stats.rx_dropped++;
1011 head->flag |= RMD1_OWN_CHIP;
1012 lp->cur_rx++;
1013 }
1014 break;
1015 }
1016
1017 if (lance_debug >= 3) {
1018 u_char *data = PKTBUF_ADDR(head);
1019
1020 printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
1021 "data %02x %02x %02x %02x %02x %02x %02x %02x "
1022 "len %d\n",
1023 dev->name, ((u_short *)data)[6],
1024 &data[6], data,
1025 data[15], data[16], data[17], data[18],
1026 data[19], data[20], data[21], data[22],
1027 pkt_len);
1028 }
1029
1030 skb_reserve( skb, 2 ); /* 16 byte align */
1031 skb_put( skb, pkt_len ); /* Make room */
1032 lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
1033 skb->protocol = eth_type_trans( skb, dev );
1034 netif_rx( skb );
1035 dev->stats.rx_packets++;
1036 dev->stats.rx_bytes += pkt_len;
1037 }
1038 }
1039
1040 head->flag |= RMD1_OWN_CHIP;
1041 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1042 }
1043 lp->cur_rx &= RX_RING_MOD_MASK;
1044
1045 /* From lance.c (Donald Becker): */
1046 /* We should check that at least two ring entries are free. If not,
1047 we should free one and mark stats->rx_dropped++. */
1048
1049 return 0;
1050}
1051
1052
1053static int lance_close( struct net_device *dev )
1054{
1055 struct lance_private *lp = netdev_priv(dev);
1056 struct lance_ioreg *IO = lp->iobase;
1057
1058 netif_stop_queue (dev);
1059
1060 AREG = CSR0;
1061
1062 DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
1063 dev->name, DREG ));
1064
1065 /* We stop the LANCE here -- it occasionally polls
1066 memory if we don't. */
1067 DREG = CSR0_STOP;
1068
1069 return 0;
1070}
1071
1072
1073/* Set or clear the multicast filter for this adaptor.
1074 num_addrs == -1 Promiscuous mode, receive all packets
1075 num_addrs == 0 Normal mode, clear multicast list
1076 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
1077 best-effort filtering.
1078 */
1079
1080static void set_multicast_list( struct net_device *dev )
1081{
1082 struct lance_private *lp = netdev_priv(dev);
1083 struct lance_ioreg *IO = lp->iobase;
1084
1085	if (!netif_running(dev))
1086 /* Only possible if board is already started */
1087 return;
1088
1089 /* We take the simple way out and always enable promiscuous mode. */
1090 DREG = CSR0_STOP; /* Temporarily stop the lance. */
1091
1092 if (dev->flags & IFF_PROMISC) {
1093 /* Log any net taps. */
1094 DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
1095 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
1096 } else {
1097 short multicast_table[4];
1098 int num_addrs = netdev_mc_count(dev);
1099 int i;
1100 /* We don't use the multicast table, but rely on upper-layer
1101 * filtering. */
1102 memset( multicast_table, (num_addrs == 0) ? 0 : -1,
1103 sizeof(multicast_table) );
1104 for( i = 0; i < 4; i++ )
1105 REGA( CSR8+i ) = multicast_table[i];
1106 REGA( CSR15 ) = 0; /* Unset promiscuous mode */
1107 }
1108
1109 /*
1110 * Always set BSWP after a STOP as STOP puts it back into
1111 * little endian mode.
1112 */
1113 REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
1114
1115 /* Resume normal operation and reset AREG to CSR0 */
1116 REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
1117}
1118
1119
1120/* This is needed for old RieblCards and possibly for new RieblCards */
1121
1122static int lance_set_mac_address( struct net_device *dev, void *addr )
1123{
1124 struct lance_private *lp = netdev_priv(dev);
1125 struct sockaddr *saddr = addr;
1126 int i;
1127
1128 if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
1129 return -EOPNOTSUPP;
1130
1131 if (netif_running(dev)) {
1132 /* Only possible while card isn't started */
1133 DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
1134 dev->name ));
1135 return -EIO;
1136 }
1137
1138 memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
1139 for( i = 0; i < 6; i++ )
1140 MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
1141 lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
1142 /* set also the magic for future sessions */
1143 *RIEBL_MAGIC_ADDR = RIEBL_MAGIC;
1144
1145 return 0;
1146}
1147
1148
1149#ifdef MODULE
1150static struct net_device *atarilance_dev;
1151
1152static int __init atarilance_module_init(void)
1153{
1154 atarilance_dev = atarilance_probe(-1);
1155 if (IS_ERR(atarilance_dev))
1156 return PTR_ERR(atarilance_dev);
1157 return 0;
1158}
1159
1160static void __exit atarilance_module_exit(void)
1161{
1162 unregister_netdev(atarilance_dev);
1163 free_irq(atarilance_dev->irq, atarilance_dev);
1164 free_netdev(atarilance_dev);
1165}
1166module_init(atarilance_module_init);
1167module_exit(atarilance_module_exit);
1168#endif /* MODULE */
1169
1170
1171/*
1172 * Local variables:
1173 * c-indent-level: 4
1174 * tab-width: 4
1175 * End:
1176 */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
new file mode 100644
index 000000000000..4865ff14bebf
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -0,0 +1,1356 @@
1/*
2 *
3 * Alchemy Au1x00 ethernet driver
4 *
5 * Copyright 2001-2003, 2006 MontaVista Software Inc.
6 * Copyright 2002 TimeSys Corp.
7 * Added ethtool/mii-tool support,
8 * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
9 * Update: 2004 Bjoern Riemer, riemer@fokus.fraunhofer.de
10 * or riemer@riemer-nt.de: fixed the link beat detection with
11 * ioctls (SIOCGMIIPHY)
12 * Copyright 2006 Herbert Valerio Riedel <hvr@gnu.org>
13 * converted to use linux-2.6.x's PHY framework
14 *
15 * Author: MontaVista Software, Inc.
16 * ppopov@mvista.com or source@mvista.com
17 *
18 * ########################################################################
19 *
20 * This program is free software; you can distribute it and/or modify it
21 * under the terms of the GNU General Public License (Version 2) as
22 * published by the Free Software Foundation.
23 *
24 * This program is distributed in the hope it will be useful, but WITHOUT
25 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
26 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
27 * for more details.
28 *
29 * You should have received a copy of the GNU General Public License along
30 * with this program; if not, write to the Free Software Foundation, Inc.,
31 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
32 *
33 * ########################################################################
34 *
35 *
36 */
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
39#include <linux/capability.h>
40#include <linux/dma-mapping.h>
41#include <linux/module.h>
42#include <linux/kernel.h>
43#include <linux/string.h>
44#include <linux/timer.h>
45#include <linux/errno.h>
46#include <linux/in.h>
47#include <linux/ioport.h>
48#include <linux/bitops.h>
49#include <linux/slab.h>
50#include <linux/interrupt.h>
51#include <linux/init.h>
52#include <linux/netdevice.h>
53#include <linux/etherdevice.h>
54#include <linux/ethtool.h>
55#include <linux/mii.h>
56#include <linux/skbuff.h>
57#include <linux/delay.h>
58#include <linux/crc32.h>
59#include <linux/phy.h>
60#include <linux/platform_device.h>
61#include <linux/cpu.h>
62#include <linux/io.h>
63
64#include <asm/mipsregs.h>
65#include <asm/irq.h>
66#include <asm/processor.h>
67
68#include <au1000.h>
69#include <au1xxx_eth.h>
70#include <prom.h>
71
72#include "au1000_eth.h"
73
74#ifdef AU1000_ETH_DEBUG
75static int au1000_debug = 5;
76#else
77static int au1000_debug = 3;
78#endif
79
80#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
81 NETIF_MSG_PROBE | \
82 NETIF_MSG_LINK)
83
84#define DRV_NAME "au1000_eth"
85#define DRV_VERSION "1.7"
86#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
87#define DRV_DESC "Au1xxx on-chip Ethernet driver"
88
89MODULE_AUTHOR(DRV_AUTHOR);
90MODULE_DESCRIPTION(DRV_DESC);
91MODULE_LICENSE("GPL");
92MODULE_VERSION(DRV_VERSION);
93
94/*
95 * Theory of operation
96 *
97 * The Au1000 MACs use a simple rx and tx descriptor ring scheme.
98 * There are four receive and four transmit descriptors. These
99 * descriptors are not in memory; rather, they are just a set of
100 * hardware registers.
101 *
102 * Since the Au1000 has a coherent data cache, the receive and
103 * transmit buffers are allocated from the KSEG0 segment. The
104 * hardware registers, however, are still mapped at KSEG1 to
105 * make sure there are no out-of-order writes, and that all writes
106 * complete immediately.
107 */
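/*
 * Illustrative sketch (not new behaviour): since NUM_RX_DMA and NUM_TX_DMA
 * are both 4, a power of two, the rx/tx code below walks the four hardware
 * descriptors with a mask instead of a modulo:
 *
 *	aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);	// 0,1,2,3,0,...
 *
 * The same pattern advances aup->tx_head and aup->tx_tail.
 */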
108
109/*
110 * board-specific configurations
111 *
112 * PHY detection algorithm
113 *
114 * If phy_static_config is undefined, the PHY setup is
115 * autodetected:
116 *
117 * mii_probe() first searches the current MAC's MII bus for a PHY,
118 * selecting the first (or last, if phy_search_highest_addr is
119 * defined) PHY address not already claimed by another netdev.
120 *
121 * If nothing was found that way when searching for the 2nd ethernet
122 * controller's PHY and phy1_search_mac0 is defined, then
123 * the first MII bus is searched as well for an unclaimed PHY; this is
124 * needed in case of a dual-PHY accessible only through the MAC0's MII
125 * bus.
126 *
127 * Finally, if no PHY is found, then the corresponding ethernet
128 * controller is not registered to the network subsystem.
129 */
130
131/* autodetection defaults: phy1_search_mac0 */
132
133/* static PHY setup
134 *
135 * most boards' PHY setup should be detected properly by the
136 * autodetection algorithm in mii_probe(), but in some cases (e.g. if
137 * you have a switch attached, or want to use the PHY's interrupt
138 * notification capabilities) you can provide a static PHY
139 * configuration here
140 *
141 * IRQs may only be set if a PHY address was configured; if a PHY
142 * address is given, a bus id must also be set
143 *
144 * PS: make sure the IRQs used are configured properly in the
145 * board-specific irq map
146 */
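/*
 * A minimal sketch of such a static setup in board code (illustrative
 * only -- the field names follow the platform_data handling in
 * au1000_probe() below, while the address and irq values are made up):
 *
 *	static struct au1000_eth_platform_data my_eth0_pdata = {
 *		.phy_static_config = 1,
 *		.phy_addr  = 1,		// PHY at MII address 1 ...
 *		.phy_busid = 0,		// ... on MAC0's own bus
 *		.phy_irq   = 0,		// 0 = fall back to polling
 *	};
 */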
147
148static void au1000_enable_mac(struct net_device *dev, int force_reset)
149{
150 unsigned long flags;
151 struct au1000_private *aup = netdev_priv(dev);
152
153 spin_lock_irqsave(&aup->lock, flags);
154
155 if (force_reset || (!aup->mac_enabled)) {
156 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
157 au_sync_delay(2);
158 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
159 | MAC_EN_CLOCK_ENABLE), aup->enable);
160 au_sync_delay(2);
161
162 aup->mac_enabled = 1;
163 }
164
165 spin_unlock_irqrestore(&aup->lock, flags);
166}
167
168/*
169 * MII operations
170 */
171static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
172{
173 struct au1000_private *aup = netdev_priv(dev);
174 u32 *const mii_control_reg = &aup->mac->mii_control;
175 u32 *const mii_data_reg = &aup->mac->mii_data;
176 u32 timedout = 20;
177 u32 mii_control;
178
179 while (readl(mii_control_reg) & MAC_MII_BUSY) {
180 mdelay(1);
181 if (--timedout == 0) {
182 netdev_err(dev, "read_MII busy timeout!!\n");
183 return -1;
184 }
185 }
186
187 mii_control = MAC_SET_MII_SELECT_REG(reg) |
188 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;
189
190 writel(mii_control, mii_control_reg);
191
192 timedout = 20;
193 while (readl(mii_control_reg) & MAC_MII_BUSY) {
194 mdelay(1);
195 if (--timedout == 0) {
196 netdev_err(dev, "mdio_read busy timeout!!\n");
197 return -1;
198 }
199 }
200 return readl(mii_data_reg);
201}
202
203static void au1000_mdio_write(struct net_device *dev, int phy_addr,
204 int reg, u16 value)
205{
206 struct au1000_private *aup = netdev_priv(dev);
207 u32 *const mii_control_reg = &aup->mac->mii_control;
208 u32 *const mii_data_reg = &aup->mac->mii_data;
209 u32 timedout = 20;
210 u32 mii_control;
211
212 while (readl(mii_control_reg) & MAC_MII_BUSY) {
213 mdelay(1);
214 if (--timedout == 0) {
215 netdev_err(dev, "mdio_write busy timeout!!\n");
216 return;
217 }
218 }
219
220 mii_control = MAC_SET_MII_SELECT_REG(reg) |
221 MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;
222
223 writel(value, mii_data_reg);
224 writel(mii_control, mii_control_reg);
225}
226
227static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
228{
229 /* WARNING: bus->phy_map[phy_addr].attached_dev == dev does
230 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus)
231 */
232 struct net_device *const dev = bus->priv;
233
234 /* make sure the MAC associated with this
235 * mii_bus is enabled
236 */
237 au1000_enable_mac(dev, 0);
238
239 return au1000_mdio_read(dev, phy_addr, regnum);
240}
241
242static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
243 u16 value)
244{
245 struct net_device *const dev = bus->priv;
246
247 /* make sure the MAC associated with this
248 * mii_bus is enabled
249 */
250 au1000_enable_mac(dev, 0);
251
252 au1000_mdio_write(dev, phy_addr, regnum, value);
253 return 0;
254}
255
256static int au1000_mdiobus_reset(struct mii_bus *bus)
257{
258 struct net_device *const dev = bus->priv;
259
260 /* make sure the MAC associated with this
261 * mii_bus is enabled
262 */
263 au1000_enable_mac(dev, 0);
264
265 return 0;
266}
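/*
 * These three callbacks are hooked into the MDIO framework in
 * au1000_probe() below, roughly (sketch of the existing wiring, not new
 * code):
 *
 *	aup->mii_bus->priv  = dev;
 *	aup->mii_bus->read  = au1000_mdiobus_read;
 *	aup->mii_bus->write = au1000_mdiobus_write;
 *	aup->mii_bus->reset = au1000_mdiobus_reset;
 *	err = mdiobus_register(aup->mii_bus);
 *
 * so the PHY layer always re-enables the owning MAC before touching the
 * MII registers.
 */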
267
268static void au1000_hard_stop(struct net_device *dev)
269{
270 struct au1000_private *aup = netdev_priv(dev);
271 u32 reg;
272
273 netif_dbg(aup, drv, dev, "hard stop\n");
274
275 reg = readl(&aup->mac->control);
276 reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
277 writel(reg, &aup->mac->control);
278 au_sync_delay(10);
279}
280
281static void au1000_enable_rx_tx(struct net_device *dev)
282{
283 struct au1000_private *aup = netdev_priv(dev);
284 u32 reg;
285
286 netif_dbg(aup, hw, dev, "enable_rx_tx\n");
287
288 reg = readl(&aup->mac->control);
289 reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
290 writel(reg, &aup->mac->control);
291 au_sync_delay(10);
292}
293
294static void
295au1000_adjust_link(struct net_device *dev)
296{
297 struct au1000_private *aup = netdev_priv(dev);
298 struct phy_device *phydev = aup->phy_dev;
299 unsigned long flags;
300 u32 reg;
301
302 int status_change = 0;
303
304 BUG_ON(!aup->phy_dev);
305
306 spin_lock_irqsave(&aup->lock, flags);
307
308 if (phydev->link && (aup->old_speed != phydev->speed)) {
309 /* speed changed */
310
311 switch (phydev->speed) {
312 case SPEED_10:
313 case SPEED_100:
314 break;
315 default:
316 netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
317 phydev->speed);
318 break;
319 }
320
321 aup->old_speed = phydev->speed;
322
323 status_change = 1;
324 }
325
326 if (phydev->link && (aup->old_duplex != phydev->duplex)) {
327 /* duplex mode changed */
328
329 /* switching duplex mode requires to disable rx and tx! */
330 au1000_hard_stop(dev);
331
332 reg = readl(&aup->mac->control);
333 if (DUPLEX_FULL == phydev->duplex) {
334 reg |= MAC_FULL_DUPLEX;
335 reg &= ~MAC_DISABLE_RX_OWN;
336 } else {
337 reg &= ~MAC_FULL_DUPLEX;
338 reg |= MAC_DISABLE_RX_OWN;
339 }
340 writel(reg, &aup->mac->control);
341 au_sync_delay(1);
342
343 au1000_enable_rx_tx(dev);
344 aup->old_duplex = phydev->duplex;
345
346 status_change = 1;
347 }
348
349 if (phydev->link != aup->old_link) {
350 /* link state changed */
351
352 if (!phydev->link) {
353 /* link went down */
354 aup->old_speed = 0;
355 aup->old_duplex = -1;
356 }
357
358 aup->old_link = phydev->link;
359 status_change = 1;
360 }
361
362 spin_unlock_irqrestore(&aup->lock, flags);
363
364 if (status_change) {
365 if (phydev->link)
366 netdev_info(dev, "link up (%d/%s)\n",
367 phydev->speed,
368 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
369 else
370 netdev_info(dev, "link down\n");
371 }
372}
373
374static int au1000_mii_probe(struct net_device *dev)
375{
376 struct au1000_private *const aup = netdev_priv(dev);
377 struct phy_device *phydev = NULL;
378 int phy_addr;
379
380 if (aup->phy_static_config) {
381 BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
382
383 if (aup->phy_addr)
384 phydev = aup->mii_bus->phy_map[aup->phy_addr];
385 else
386 netdev_info(dev, "using PHY-less setup\n");
387 return 0;
388 }
389
390 /* find the first (lowest address) PHY
391 * on the current MAC's MII bus
392 */
393 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
394 if (aup->mii_bus->phy_map[phy_addr]) {
395 phydev = aup->mii_bus->phy_map[phy_addr];
396 if (!aup->phy_search_highest_addr)
397 /* break out with first one found */
398 break;
399 }
400
401 if (aup->phy1_search_mac0) {
402 /* try harder to find a PHY */
403 if (!phydev && (aup->mac_id == 1)) {
404 /* no PHY found, maybe we have a dual PHY? */
405 dev_info(&dev->dev, ": no PHY found on MAC1, "
406 "let's see if it's attached to MAC0...\n");
407
408 /* find the first (lowest address) non-attached
409 * PHY on the MAC0 MII bus
410 */
411 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
412 struct phy_device *const tmp_phydev =
413 aup->mii_bus->phy_map[phy_addr];
414
415 if (aup->mac_id == 1)
416 break;
417
418 /* no PHY here... */
419 if (!tmp_phydev)
420 continue;
421
422 /* already claimed by MAC0 */
423 if (tmp_phydev->attached_dev)
424 continue;
425
426 phydev = tmp_phydev;
427 break; /* found it */
428 }
429 }
430 }
431
432 if (!phydev) {
433 netdev_err(dev, "no PHY found\n");
434 return -1;
435 }
436
437 /* now we are supposed to have a proper phydev, to attach to... */
438 BUG_ON(phydev->attached_dev);
439
440 phydev = phy_connect(dev, dev_name(&phydev->dev), &au1000_adjust_link,
441 0, PHY_INTERFACE_MODE_MII);
442
443 if (IS_ERR(phydev)) {
444 netdev_err(dev, "Could not attach to PHY\n");
445 return PTR_ERR(phydev);
446 }
447
448 /* mask with MAC supported features */
449 phydev->supported &= (SUPPORTED_10baseT_Half
450 | SUPPORTED_10baseT_Full
451 | SUPPORTED_100baseT_Half
452 | SUPPORTED_100baseT_Full
453 | SUPPORTED_Autoneg
454 /* | SUPPORTED_Pause | SUPPORTED_Asym_Pause */
455 | SUPPORTED_MII
456 | SUPPORTED_TP);
457
458 phydev->advertising = phydev->supported;
459
460 aup->old_link = 0;
461 aup->old_speed = 0;
462 aup->old_duplex = -1;
463 aup->phy_dev = phydev;
464
465 netdev_info(dev, "attached PHY driver [%s] "
466 "(mii_bus:phy_addr=%s, irq=%d)\n",
467 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
468
469 return 0;
470}
471
472
473/*
474 * Buffer allocation/deallocation routines. The buffer descriptor returned
475 * has the virtual and dma address of a buffer suitable for
476 * both receive and transmit operations.
477 */
478static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
479{
480 struct db_dest *pDB;
481 pDB = aup->pDBfree;
482
483 if (pDB)
484 aup->pDBfree = pDB->pnext;
485
486 return pDB;
487}
488
489void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
490{
491 struct db_dest *pDBfree = aup->pDBfree;
492 if (pDBfree)
493 pDBfree->pnext = pDB;
494 aup->pDBfree = pDB;
495}
496
497static void au1000_reset_mac_unlocked(struct net_device *dev)
498{
499 struct au1000_private *const aup = netdev_priv(dev);
500 int i;
501
502 au1000_hard_stop(dev);
503
504 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
505 au_sync_delay(2);
506 writel(0, aup->enable);
507 au_sync_delay(2);
508
509 aup->tx_full = 0;
510 for (i = 0; i < NUM_RX_DMA; i++) {
511 /* reset control bits */
512 aup->rx_dma_ring[i]->buff_stat &= ~0xf;
513 }
514 for (i = 0; i < NUM_TX_DMA; i++) {
515 /* reset control bits */
516 aup->tx_dma_ring[i]->buff_stat &= ~0xf;
517 }
518
519 aup->mac_enabled = 0;
520
521}
522
523static void au1000_reset_mac(struct net_device *dev)
524{
525 struct au1000_private *const aup = netdev_priv(dev);
526 unsigned long flags;
527
528 netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
529 (unsigned)aup);
530
531 spin_lock_irqsave(&aup->lock, flags);
532
533 au1000_reset_mac_unlocked(dev);
534
535 spin_unlock_irqrestore(&aup->lock, flags);
536}
537
538/*
539 * Setup the receive and transmit "rings". These pointers are the addresses
540 * of the rx and tx MAC DMA registers so they are fixed by the hardware --
541 * these are not descriptors sitting in memory.
542 */
543static void
544au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
545{
546 int i;
547
548 for (i = 0; i < NUM_RX_DMA; i++) {
549 aup->rx_dma_ring[i] = (struct rx_dma *)
550 (tx_base + 0x100 + sizeof(struct rx_dma) * i);
551 }
552 for (i = 0; i < NUM_TX_DMA; i++) {
553 aup->tx_dma_ring[i] = (struct tx_dma *)
554 (tx_base + sizeof(struct tx_dma) * i);
555 }
556}
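/*
 * Resulting layout (sketch, assuming the 4-word struct tx_dma and struct
 * rx_dma from au1000_eth.h, i.e. 16 bytes each):
 *
 *	tx descriptor i lives at tx_base + 0x000 + 16 * i	(i = 0..3)
 *	rx descriptor i lives at tx_base + 0x100 + 16 * i	(i = 0..3)
 */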
557
558/*
559 * ethtool operations
560 */
561
562static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
563{
564 struct au1000_private *aup = netdev_priv(dev);
565
566 if (aup->phy_dev)
567 return phy_ethtool_gset(aup->phy_dev, cmd);
568
569 return -EINVAL;
570}
571
572static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
573{
574 struct au1000_private *aup = netdev_priv(dev);
575
576 if (!capable(CAP_NET_ADMIN))
577 return -EPERM;
578
579 if (aup->phy_dev)
580 return phy_ethtool_sset(aup->phy_dev, cmd);
581
582 return -EINVAL;
583}
584
585static void
586au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
587{
588 struct au1000_private *aup = netdev_priv(dev);
589
590 strcpy(info->driver, DRV_NAME);
591 strcpy(info->version, DRV_VERSION);
592 info->fw_version[0] = '\0';
593 sprintf(info->bus_info, "%s %d", DRV_NAME, aup->mac_id);
594 info->regdump_len = 0;
595}
596
597static void au1000_set_msglevel(struct net_device *dev, u32 value)
598{
599 struct au1000_private *aup = netdev_priv(dev);
600 aup->msg_enable = value;
601}
602
603static u32 au1000_get_msglevel(struct net_device *dev)
604{
605 struct au1000_private *aup = netdev_priv(dev);
606 return aup->msg_enable;
607}
608
609static const struct ethtool_ops au1000_ethtool_ops = {
610 .get_settings = au1000_get_settings,
611 .set_settings = au1000_set_settings,
612 .get_drvinfo = au1000_get_drvinfo,
613 .get_link = ethtool_op_get_link,
614 .get_msglevel = au1000_get_msglevel,
615 .set_msglevel = au1000_set_msglevel,
616};
617
618
619/*
620 * Initialize the interface.
621 *
622 * When the device powers up, the clocks are disabled and the
623 * mac is in reset state. When the interface is closed, we
624 * do the same -- reset the device and disable the clocks to
625 * conserve power. Thus, whenever au1000_init() is called,
626 * the device should already be in reset state.
627 */
628static int au1000_init(struct net_device *dev)
629{
630 struct au1000_private *aup = netdev_priv(dev);
631 unsigned long flags;
632 int i;
633 u32 control;
634
635 netif_dbg(aup, hw, dev, "au1000_init\n");
636
637 /* bring the device out of reset */
638 au1000_enable_mac(dev, 1);
639
640 spin_lock_irqsave(&aup->lock, flags);
641
642 writel(0, &aup->mac->control);
643 aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
644 aup->tx_tail = aup->tx_head;
645 aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;
646
647 writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
648 &aup->mac->mac_addr_high);
649 writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
650 dev->dev_addr[1]<<8 | dev->dev_addr[0],
651 &aup->mac->mac_addr_low);
652
653
654 for (i = 0; i < NUM_RX_DMA; i++)
655 aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;
656
657 au_sync();
658
659 control = MAC_RX_ENABLE | MAC_TX_ENABLE;
660#ifndef CONFIG_CPU_LITTLE_ENDIAN
661 control |= MAC_BIG_ENDIAN;
662#endif
663 if (aup->phy_dev) {
664 if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
665 control |= MAC_FULL_DUPLEX;
666 else
667 control |= MAC_DISABLE_RX_OWN;
668 } else { /* PHY-less op, assume full-duplex */
669 control |= MAC_FULL_DUPLEX;
670 }
671
672 writel(control, &aup->mac->control);
673 writel(0x8100, &aup->mac->vlan1_tag); /* activate vlan support */
674 au_sync();
675
676 spin_unlock_irqrestore(&aup->lock, flags);
677 return 0;
678}
679
680static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
681{
682 struct net_device_stats *ps = &dev->stats;
683
684 ps->rx_packets++;
685 if (status & RX_MCAST_FRAME)
686 ps->multicast++;
687
688 if (status & RX_ERROR) {
689 ps->rx_errors++;
690 if (status & RX_MISSED_FRAME)
691 ps->rx_missed_errors++;
692 if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
693 ps->rx_length_errors++;
694 if (status & RX_CRC_ERROR)
695 ps->rx_crc_errors++;
696 if (status & RX_COLL)
697 ps->collisions++;
698 } else
699 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
700
701}
702
703/*
704 * Au1000 receive routine.
705 */
706static int au1000_rx(struct net_device *dev)
707{
708 struct au1000_private *aup = netdev_priv(dev);
709 struct sk_buff *skb;
710 struct rx_dma *prxd;
711 u32 buff_stat, status;
712 struct db_dest *pDB;
713 u32 frmlen;
714
715 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
716
717 prxd = aup->rx_dma_ring[aup->rx_head];
718 buff_stat = prxd->buff_stat;
719 while (buff_stat & RX_T_DONE) {
720 status = prxd->status;
721 pDB = aup->rx_db_inuse[aup->rx_head];
722 au1000_update_rx_stats(dev, status);
723 if (!(status & RX_ERROR)) {
724
725 /* good frame */
726 frmlen = (status & RX_FRAME_LEN_MASK);
727 frmlen -= 4; /* Remove FCS */
728 skb = dev_alloc_skb(frmlen + 2);
729 if (skb == NULL) {
730 netdev_err(dev, "Memory squeeze, dropping packet.\n");
731 dev->stats.rx_dropped++;
732 continue;
733 }
734 skb_reserve(skb, 2); /* 16 byte IP header align */
735 skb_copy_to_linear_data(skb,
736 (unsigned char *)pDB->vaddr, frmlen);
737 skb_put(skb, frmlen);
738 skb->protocol = eth_type_trans(skb, dev);
739 netif_rx(skb); /* pass the packet to upper layers */
740 } else {
741 if (au1000_debug > 4) {
742 pr_err("rx_error(s):");
743 if (status & RX_MISSED_FRAME)
744 pr_cont(" miss");
745 if (status & RX_WDOG_TIMER)
746 pr_cont(" wdog");
747 if (status & RX_RUNT)
748 pr_cont(" runt");
749 if (status & RX_OVERLEN)
750 pr_cont(" overlen");
751 if (status & RX_COLL)
752 pr_cont(" coll");
753 if (status & RX_MII_ERROR)
754 pr_cont(" mii error");
755 if (status & RX_CRC_ERROR)
756 pr_cont(" crc error");
757 if (status & RX_LEN_ERROR)
758 pr_cont(" len error");
759 if (status & RX_U_CNTRL_FRAME)
760 pr_cont(" u control frame");
761 pr_cont("\n");
762 }
763 }
764 prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
765 aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
766 au_sync();
767
768 /* next descriptor */
769 prxd = aup->rx_dma_ring[aup->rx_head];
770 buff_stat = prxd->buff_stat;
771 }
772 return 0;
773}
774
775static void au1000_update_tx_stats(struct net_device *dev, u32 status)
776{
777 struct au1000_private *aup = netdev_priv(dev);
778 struct net_device_stats *ps = &dev->stats;
779
780 if (status & TX_FRAME_ABORTED) {
781 if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
782 if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
783 /* any other tx errors are only valid
784 * in half duplex mode
785 */
786 ps->tx_errors++;
787 ps->tx_aborted_errors++;
788 }
789 } else {
790 ps->tx_errors++;
791 ps->tx_aborted_errors++;
792 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
793 ps->tx_carrier_errors++;
794 }
795 }
796}
797
798/*
799 * Called from the interrupt service routine to acknowledge
800 * the TX DONE bits. This is a must if the irq is setup as
801 * edge triggered.
802 */
803static void au1000_tx_ack(struct net_device *dev)
804{
805 struct au1000_private *aup = netdev_priv(dev);
806 struct tx_dma *ptxd;
807
808 ptxd = aup->tx_dma_ring[aup->tx_tail];
809
810 while (ptxd->buff_stat & TX_T_DONE) {
811 au1000_update_tx_stats(dev, ptxd->status);
812 ptxd->buff_stat &= ~TX_T_DONE;
813 ptxd->len = 0;
814 au_sync();
815
816 aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
817 ptxd = aup->tx_dma_ring[aup->tx_tail];
818
819 if (aup->tx_full) {
820 aup->tx_full = 0;
821 netif_wake_queue(dev);
822 }
823 }
824}
825
826/*
827 * Au1000 interrupt service routine.
828 */
829static irqreturn_t au1000_interrupt(int irq, void *dev_id)
830{
831 struct net_device *dev = dev_id;
832
833 /* Handle RX interrupts first to minimize chance of overrun */
834
835 au1000_rx(dev);
836 au1000_tx_ack(dev);
837 return IRQ_RETVAL(1);
838}
839
840static int au1000_open(struct net_device *dev)
841{
842 int retval;
843 struct au1000_private *aup = netdev_priv(dev);
844
845 netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
846
847 retval = request_irq(dev->irq, au1000_interrupt, 0,
848 dev->name, dev);
849 if (retval) {
850 netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
851 return retval;
852 }
853
854 retval = au1000_init(dev);
855 if (retval) {
856 netdev_err(dev, "error in au1000_init\n");
857 free_irq(dev->irq, dev);
858 return retval;
859 }
860
861 if (aup->phy_dev) {
862 /* cause the PHY state machine to schedule a link state check */
863 aup->phy_dev->state = PHY_CHANGELINK;
864 phy_start(aup->phy_dev);
865 }
866
867 netif_start_queue(dev);
868
869 netif_dbg(aup, drv, dev, "open: Initialization done.\n");
870
871 return 0;
872}
873
874static int au1000_close(struct net_device *dev)
875{
876 unsigned long flags;
877 struct au1000_private *const aup = netdev_priv(dev);
878
879 netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
880
881 if (aup->phy_dev)
882 phy_stop(aup->phy_dev);
883
884 spin_lock_irqsave(&aup->lock, flags);
885
886 au1000_reset_mac_unlocked(dev);
887
888 /* stop the device */
889 netif_stop_queue(dev);
890
891 /* disable the interrupt */
892 free_irq(dev->irq, dev);
893 spin_unlock_irqrestore(&aup->lock, flags);
894
895 return 0;
896}
897
898/*
899 * Au1000 transmit routine.
900 */
901static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
902{
903 struct au1000_private *aup = netdev_priv(dev);
904 struct net_device_stats *ps = &dev->stats;
905 struct tx_dma *ptxd;
906 u32 buff_stat;
907 struct db_dest *pDB;
908 int i;
909
910 netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
911 (unsigned)aup, skb->len,
912 skb->data, aup->tx_head);
913
914 ptxd = aup->tx_dma_ring[aup->tx_head];
915 buff_stat = ptxd->buff_stat;
916 if (buff_stat & TX_DMA_ENABLE) {
917 /* We've wrapped around and the transmitter is still busy */
918 netif_stop_queue(dev);
919 aup->tx_full = 1;
920 return NETDEV_TX_BUSY;
921 } else if (buff_stat & TX_T_DONE) {
922 au1000_update_tx_stats(dev, ptxd->status);
923 ptxd->len = 0;
924 }
925
926 if (aup->tx_full) {
927 aup->tx_full = 0;
928 netif_wake_queue(dev);
929 }
930
931 pDB = aup->tx_db_inuse[aup->tx_head];
932 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
933 if (skb->len < ETH_ZLEN) {
934 for (i = skb->len; i < ETH_ZLEN; i++)
935 ((char *)pDB->vaddr)[i] = 0;
936
937 ptxd->len = ETH_ZLEN;
938 } else
939 ptxd->len = skb->len;
940
941 ps->tx_packets++;
942 ps->tx_bytes += ptxd->len;
943
944 ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
945 au_sync();
946 dev_kfree_skb(skb);
947 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
948 return NETDEV_TX_OK;
949}
950
951/*
952 * The Tx ring has been full longer than the watchdog timeout
953 * value. The transmitter is probably hung.
954 */
955static void au1000_tx_timeout(struct net_device *dev)
956{
957 netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
958 au1000_reset_mac(dev);
959 au1000_init(dev);
960 dev->trans_start = jiffies; /* prevent tx timeout */
961 netif_wake_queue(dev);
962}
963
964static void au1000_multicast_list(struct net_device *dev)
965{
966 struct au1000_private *aup = netdev_priv(dev);
967 u32 reg;
968
969 netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
970 reg = readl(&aup->mac->control);
971 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
972 reg |= MAC_PROMISCUOUS;
973 } else if ((dev->flags & IFF_ALLMULTI) ||
974 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
975 reg |= MAC_PASS_ALL_MULTI;
976 reg &= ~MAC_PROMISCUOUS;
977 netdev_info(dev, "Pass all multicast\n");
978 } else {
979 struct netdev_hw_addr *ha;
980 u32 mc_filter[2]; /* Multicast hash filter */
981
982 mc_filter[1] = mc_filter[0] = 0;
983 netdev_for_each_mc_addr(ha, dev)
984 set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
985 (long *)mc_filter);
986 writel(mc_filter[1], &aup->mac->multi_hash_high);
987 writel(mc_filter[0], &aup->mac->multi_hash_low);
988 reg &= ~MAC_PROMISCUOUS;
989 reg |= MAC_HASH_MODE;
990 }
991 writel(reg, &aup->mac->control);
992}
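/*
 * Hash detail (illustrative): ether_crc(ETH_ALEN, addr) >> 26 keeps the top
 * six bits of the CRC-32 of the MAC address, i.e. a bucket number 0..63,
 * and set_bit() sets the matching bit in the 64-bit mc_filter[] written to
 * multi_hash_high/low above. E.g. a CRC of 0xDEADBEEF (made-up value)
 * selects bucket 0xDEADBEEF >> 26 == 55.
 */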
993
994static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
995{
996 struct au1000_private *aup = netdev_priv(dev);
997
998 if (!netif_running(dev))
999 return -EINVAL;
1000
1001 if (!aup->phy_dev)
1002 return -EINVAL; /* PHY not controllable */
1003
1004 return phy_mii_ioctl(aup->phy_dev, rq, cmd);
1005}
1006
1007static const struct net_device_ops au1000_netdev_ops = {
1008 .ndo_open = au1000_open,
1009 .ndo_stop = au1000_close,
1010 .ndo_start_xmit = au1000_tx,
1011 .ndo_set_rx_mode = au1000_multicast_list,
1012 .ndo_do_ioctl = au1000_ioctl,
1013 .ndo_tx_timeout = au1000_tx_timeout,
1014 .ndo_set_mac_address = eth_mac_addr,
1015 .ndo_validate_addr = eth_validate_addr,
1016 .ndo_change_mtu = eth_change_mtu,
1017};
1018
1019static int __devinit au1000_probe(struct platform_device *pdev)
1020{
1021 static unsigned version_printed;
1022 struct au1000_private *aup = NULL;
1023 struct au1000_eth_platform_data *pd;
1024 struct net_device *dev = NULL;
1025 struct db_dest *pDB, *pDBfree;
1026 int irq, i, err = 0;
1027 struct resource *base, *macen, *macdma;
1028
1029 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1030 if (!base) {
1031 dev_err(&pdev->dev, "failed to retrieve base register\n");
1032 err = -ENODEV;
1033 goto out;
1034 }
1035
1036 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1037 if (!macen) {
1038 dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1039 err = -ENODEV;
1040 goto out;
1041 }
1042
1043 irq = platform_get_irq(pdev, 0);
1044 if (irq < 0) {
1045 dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1046 err = -ENODEV;
1047 goto out;
1048 }
1049
1050 macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1051 if (!macdma) {
1052 dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
1053 err = -ENODEV;
1054 goto out;
1055 }
1056
1057 if (!request_mem_region(base->start, resource_size(base),
1058 pdev->name)) {
1059 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1060 err = -ENXIO;
1061 goto out;
1062 }
1063
1064 if (!request_mem_region(macen->start, resource_size(macen),
1065 pdev->name)) {
1066 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1067 err = -ENXIO;
1068 goto err_request;
1069 }
1070
1071 if (!request_mem_region(macdma->start, resource_size(macdma),
1072 pdev->name)) {
1073 dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
1074 err = -ENXIO;
1075 goto err_macdma;
1076 }
1077
1078 dev = alloc_etherdev(sizeof(struct au1000_private));
1079 if (!dev) {
1080 dev_err(&pdev->dev, "alloc_etherdev failed\n");
1081 err = -ENOMEM;
1082 goto err_alloc;
1083 }
1084
1085 SET_NETDEV_DEV(dev, &pdev->dev);
1086 platform_set_drvdata(pdev, dev);
1087 aup = netdev_priv(dev);
1088
1089 spin_lock_init(&aup->lock);
1090 aup->msg_enable = (au1000_debug < 4 ?
1091 AU1000_DEF_MSG_ENABLE : au1000_debug);
1092
1093 /* Allocate the data buffers
1094 * Snooping works fine with eth on all au1xxx
1095 */
1096 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
1097 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1098 &aup->dma_addr, 0);
1099 if (!aup->vaddr) {
1100 dev_err(&pdev->dev, "failed to allocate data buffers\n");
1101 err = -ENOMEM;
1102 goto err_vaddr;
1103 }
1104
1105 /* aup->mac is the base address of the MAC's registers */
1106 aup->mac = (struct mac_reg *)
1107 ioremap_nocache(base->start, resource_size(base));
1108 if (!aup->mac) {
1109 dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1110 err = -ENXIO;
1111 goto err_remap1;
1112 }
1113
1114 /* Setup some variables for quick register address access */
1115 aup->enable = (u32 *)ioremap_nocache(macen->start,
1116 resource_size(macen));
1117 if (!aup->enable) {
1118 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1119 err = -ENXIO;
1120 goto err_remap2;
1121 }
1122 aup->mac_id = pdev->id;
1123
1124 aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
1125 if (!aup->macdma) {
1126 dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
1127 err = -ENXIO;
1128 goto err_remap3;
1129 }
1130
1131 au1000_setup_hw_rings(aup, aup->macdma);
1132
1133 /* set a random MAC now in case platform_data doesn't provide one */
1134 random_ether_addr(dev->dev_addr);
1135
1136 writel(0, aup->enable);
1137 aup->mac_enabled = 0;
1138
1139 pd = pdev->dev.platform_data;
1140 if (!pd) {
1141 dev_info(&pdev->dev, "no platform_data passed,"
1142 " PHY search on MAC0\n");
1143 aup->phy1_search_mac0 = 1;
1144 } else {
1145 if (is_valid_ether_addr(pd->mac))
1146 memcpy(dev->dev_addr, pd->mac, 6);
1147
1148 aup->phy_static_config = pd->phy_static_config;
1149 aup->phy_search_highest_addr = pd->phy_search_highest_addr;
1150 aup->phy1_search_mac0 = pd->phy1_search_mac0;
1151 aup->phy_addr = pd->phy_addr;
1152 aup->phy_busid = pd->phy_busid;
1153 aup->phy_irq = pd->phy_irq;
1154 }
1155
1156 if (aup->phy_busid && aup->phy_busid > 0) {
1157		dev_err(&pdev->dev, "MAC0-associated PHY attached to the 2nd MAC's MII bus is not supported yet\n");
1158 err = -ENODEV;
1159 goto err_mdiobus_alloc;
1160 }
1161
1162 aup->mii_bus = mdiobus_alloc();
1163 if (aup->mii_bus == NULL) {
1164 dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1165 err = -ENOMEM;
1166 goto err_mdiobus_alloc;
1167 }
1168
1169 aup->mii_bus->priv = dev;
1170 aup->mii_bus->read = au1000_mdiobus_read;
1171 aup->mii_bus->write = au1000_mdiobus_write;
1172 aup->mii_bus->reset = au1000_mdiobus_reset;
1173 aup->mii_bus->name = "au1000_eth_mii";
1174 snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
1175 aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1176 if (aup->mii_bus->irq == NULL)
1177 goto err_out;
1178
1179 for (i = 0; i < PHY_MAX_ADDR; ++i)
1180 aup->mii_bus->irq[i] = PHY_POLL;
1181 /* if known, set corresponding PHY IRQs */
1182 if (aup->phy_static_config)
1183 if (aup->phy_irq && aup->phy_busid == aup->mac_id)
1184 aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
1185
1186 err = mdiobus_register(aup->mii_bus);
1187 if (err) {
1188 dev_err(&pdev->dev, "failed to register MDIO bus\n");
1189 goto err_mdiobus_reg;
1190 }
1191
1192 if (au1000_mii_probe(dev) != 0)
1193 goto err_out;
1194
1195 pDBfree = NULL;
1196 /* setup the data buffer descriptors and attach a buffer to each one */
1197 pDB = aup->db;
1198 for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
1199 pDB->pnext = pDBfree;
1200 pDBfree = pDB;
1201 pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
1202 pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
1203 pDB++;
1204 }
1205 aup->pDBfree = pDBfree;
1206
1207 for (i = 0; i < NUM_RX_DMA; i++) {
1208 pDB = au1000_GetFreeDB(aup);
1209 if (!pDB)
1210 goto err_out;
1211
1212 aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1213 aup->rx_db_inuse[i] = pDB;
1214 }
1215 for (i = 0; i < NUM_TX_DMA; i++) {
1216 pDB = au1000_GetFreeDB(aup);
1217 if (!pDB)
1218 goto err_out;
1219
1220 aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
1221 aup->tx_dma_ring[i]->len = 0;
1222 aup->tx_db_inuse[i] = pDB;
1223 }
1224
1225 dev->base_addr = base->start;
1226 dev->irq = irq;
1227 dev->netdev_ops = &au1000_netdev_ops;
1228 SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
1229 dev->watchdog_timeo = ETH_TX_TIMEOUT;
1230
1231 /*
1232 * The boot code uses the ethernet controller, so reset it to start
1233 * fresh. au1000_init() expects that the device is in reset state.
1234 */
1235 au1000_reset_mac(dev);
1236
1237 err = register_netdev(dev);
1238 if (err) {
1239 netdev_err(dev, "Cannot register net device, aborting.\n");
1240 goto err_out;
1241 }
1242
1243 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1244 (unsigned long)base->start, irq);
1245 if (version_printed++ == 0)
1246 pr_info("%s version %s %s\n",
1247 DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1248
1249 return 0;
1250
1251err_out:
1252 if (aup->mii_bus != NULL)
1253 mdiobus_unregister(aup->mii_bus);
1254
1255 /* here we should have a valid dev plus aup-> register addresses
1256 * so we can reset the mac properly.
1257 */
1258 au1000_reset_mac(dev);
1259
1260 for (i = 0; i < NUM_RX_DMA; i++) {
1261 if (aup->rx_db_inuse[i])
1262 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1263 }
1264 for (i = 0; i < NUM_TX_DMA; i++) {
1265 if (aup->tx_db_inuse[i])
1266 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1267 }
1268err_mdiobus_reg:
1269 mdiobus_free(aup->mii_bus);
1270err_mdiobus_alloc:
1271 iounmap(aup->macdma);
1272err_remap3:
1273 iounmap(aup->enable);
1274err_remap2:
1275 iounmap(aup->mac);
1276err_remap1:
1277 dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
1278 (void *)aup->vaddr, aup->dma_addr);
1279err_vaddr:
1280 free_netdev(dev);
1281err_alloc:
1282 release_mem_region(macdma->start, resource_size(macdma));
1283err_macdma:
1284 release_mem_region(macen->start, resource_size(macen));
1285err_request:
1286 release_mem_region(base->start, resource_size(base));
1287out:
1288 return err;
1289}
1290
1291static int __devexit au1000_remove(struct platform_device *pdev)
1292{
1293 struct net_device *dev = platform_get_drvdata(pdev);
1294 struct au1000_private *aup = netdev_priv(dev);
1295 int i;
1296 struct resource *base, *macen;
1297
1298 platform_set_drvdata(pdev, NULL);
1299
1300 unregister_netdev(dev);
1301 mdiobus_unregister(aup->mii_bus);
1302 mdiobus_free(aup->mii_bus);
1303
1304 for (i = 0; i < NUM_RX_DMA; i++)
1305 if (aup->rx_db_inuse[i])
1306 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1307
1308 for (i = 0; i < NUM_TX_DMA; i++)
1309 if (aup->tx_db_inuse[i])
1310 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1311
1312 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1313 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1314 (void *)aup->vaddr, aup->dma_addr);
1315
1316 iounmap(aup->macdma);
1317 iounmap(aup->mac);
1318 iounmap(aup->enable);
1319
1320 base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
1321 release_mem_region(base->start, resource_size(base));
1322
1323 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1324 release_mem_region(base->start, resource_size(base));
1325
1326 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1327 release_mem_region(macen->start, resource_size(macen));
1328
1329 free_netdev(dev);
1330
1331 return 0;
1332}
1333
1334static struct platform_driver au1000_eth_driver = {
1335 .probe = au1000_probe,
1336 .remove = __devexit_p(au1000_remove),
1337 .driver = {
1338 .name = "au1000-eth",
1339 .owner = THIS_MODULE,
1340 },
1341};
1342MODULE_ALIAS("platform:au1000-eth");
1343
1344
1345static int __init au1000_init_module(void)
1346{
1347 return platform_driver_register(&au1000_eth_driver);
1348}
1349
1350static void __exit au1000_exit_module(void)
1351{
1352 platform_driver_unregister(&au1000_eth_driver);
1353}
1354
1355module_init(au1000_init_module);
1356module_exit(au1000_exit_module);
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
new file mode 100644
index 000000000000..4b7f7ad62bb8
--- /dev/null
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -0,0 +1,134 @@
1/*
2 *
3 * Alchemy Au1x00 ethernet driver include file
4 *
5 * Author: Pete Popov <ppopov@mvista.com>
6 *
7 * Copyright 2001 MontaVista Software Inc.
8 *
9 * ########################################################################
10 *
11 * This program is free software; you can distribute it and/or modify it
12 * under the terms of the GNU General Public License (Version 2) as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
23 *
24 * ########################################################################
25 *
26 *
27 */
28
29
30#define MAC_IOSIZE 0x10000
31#define NUM_RX_DMA 4 /* Au1x00 has 4 rx hardware descriptors */
32#define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */
33
34#define NUM_RX_BUFFS 4
35#define NUM_TX_BUFFS 4
36#define MAX_BUF_SIZE 2048
37
38#define ETH_TX_TIMEOUT (HZ/4)
39#define MAC_MIN_PKT_SIZE 64
40
41#define MULTICAST_FILTER_LIMIT 64
42
43/*
44 * Data Buffer Descriptor. Data buffers must be aligned on a 32 byte
45 * boundary for both receive and transmit.
46 */
47struct db_dest {
48 struct db_dest *pnext;
49 u32 *vaddr;
50 dma_addr_t dma_addr;
51};
52
53/*
54 * The transmit and receive descriptors are memory
55 * mapped registers.
56 */
57struct tx_dma {
58 u32 status;
59 u32 buff_stat;
60 u32 len;
61 u32 pad;
62};
63
64struct rx_dma {
65 u32 status;
66 u32 buff_stat;
67 u32 pad[2];
68};
69
70
71/*
72 * MAC control registers, memory mapped.
73 */
74struct mac_reg {
75 u32 control;
76 u32 mac_addr_high;
77 u32 mac_addr_low;
78 u32 multi_hash_high;
79 u32 multi_hash_low;
80 u32 mii_control;
81 u32 mii_data;
82 u32 flow_control;
83 u32 vlan1_tag;
84 u32 vlan2_tag;
85};
86
87
88struct au1000_private {
89 struct db_dest *pDBfree;
90 struct db_dest db[NUM_RX_BUFFS+NUM_TX_BUFFS];
91 struct rx_dma *rx_dma_ring[NUM_RX_DMA];
92 struct tx_dma *tx_dma_ring[NUM_TX_DMA];
93 struct db_dest *rx_db_inuse[NUM_RX_DMA];
94 struct db_dest *tx_db_inuse[NUM_TX_DMA];
95 u32 rx_head;
96 u32 tx_head;
97 u32 tx_tail;
98 u32 tx_full;
99
100 int mac_id;
101
102 int mac_enabled; /* whether MAC is currently enabled and running
103 * (req. for mdio)
104 */
105
106 int old_link; /* used by au1000_adjust_link */
107 int old_speed;
108 int old_duplex;
109
110 struct phy_device *phy_dev;
111 struct mii_bus *mii_bus;
112
113 /* PHY configuration */
114 int phy_static_config;
115 int phy_search_highest_addr;
116 int phy1_search_mac0;
117
118 int phy_addr;
119 int phy_busid;
120 int phy_irq;
121
122 /* These variables are just for quick access
123	 * to certain register addresses.
124 */
125 struct mac_reg *mac; /* mac registers */
126 u32 *enable; /* address of MAC Enable Register */
127 void __iomem *macdma; /* base of MAC DMA port */
128 u32 vaddr; /* virtual address of rx/tx buffers */
129 dma_addr_t dma_addr; /* dma address of rx/tx buffers */
130
131 spinlock_t lock; /* Serialise access to device */
132
133 u32 msg_enable;
134};
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
new file mode 100644
index 000000000000..73f8d4fa682d
--- /dev/null
+++ b/drivers/net/ethernet/amd/declance.c
@@ -0,0 +1,1381 @@
1/*
2 * Lance ethernet driver for the MIPS processor based
3 * DECstation family
4 *
5 *
6 * adopted from sunlance.c by Richard van den Berg
7 *
8 * Copyright (C) 2002, 2003, 2005, 2006 Maciej W. Rozycki
9 *
10 * additional sources:
11 * - PMAD-AA TURBOchannel Ethernet Module Functional Specification,
12 * Revision 1.2
13 *
14 * History:
15 *
16 * v0.001: The kernel accepts the code and it shows the hardware address.
17 *
18 * v0.002: Removed most sparc stuff, left only some module and dma stuff.
19 *
20 * v0.003: Enhanced base address calculation from proposals by
21 * Harald Koerfgen and Thomas Riemer.
22 *
23 * v0.004: lance-regs is pointing at the right addresses, added prom
24 * check. First start of address mapping and DMA.
25 *
26 * v0.005: started to play around with LANCE-DMA. This driver will not
27 * work for non IOASIC lances. HK
28 *
29 * v0.006: added pointer arrays to lance_private and setup routine for
30 * them in dec_lance_init. HK
31 *
32 * v0.007: Big shit. The LANCE seems to use a different DMA mechanism to
33 * access the init block. This looks like one (short) word at a
34 * time, but the smallest amount the IOASIC can transfer is a
35 * (long) word. So we have a 2-2 padding here. Changed
36 * lance_init_block accordingly. The 16-16 padding for the buffers
37 * seems to be correct. HK
38 *
39 * v0.008: mods to make PMAX_LANCE work. 01/09/1999 triemer
40 *
41 * v0.009: Module support fixes, multiple interfaces support, various
42 * bits. macro
43 *
44 * v0.010: Fixes for the PMAD mapping of the LANCE buffer and for the
45 * PMAX requirement to only use halfword accesses to the
46 * buffer. macro
47 *
48 * v0.011: Converted the PMAD to the driver model. macro
49 */
50
51#include <linux/crc32.h>
52#include <linux/delay.h>
53#include <linux/errno.h>
54#include <linux/if_ether.h>
55#include <linux/init.h>
56#include <linux/kernel.h>
57#include <linux/module.h>
58#include <linux/netdevice.h>
59#include <linux/etherdevice.h>
60#include <linux/spinlock.h>
61#include <linux/stddef.h>
62#include <linux/string.h>
63#include <linux/tc.h>
64#include <linux/types.h>
65
66#include <asm/addrspace.h>
67#include <asm/system.h>
68
69#include <asm/dec/interrupts.h>
70#include <asm/dec/ioasic.h>
71#include <asm/dec/ioasic_addrs.h>
72#include <asm/dec/kn01.h>
73#include <asm/dec/machtype.h>
74#include <asm/dec/system.h>
75
76static char version[] __devinitdata =
77"declance.c: v0.011 by Linux MIPS DECstation task force\n";
78
79MODULE_AUTHOR("Linux MIPS DECstation task force");
80MODULE_DESCRIPTION("DEC LANCE (DECstation onboard, PMAD-xx) driver");
81MODULE_LICENSE("GPL");
82
83#define __unused __attribute__ ((unused))
84
85/*
86 * card types
87 */
88#define ASIC_LANCE 1
89#define PMAD_LANCE 2
90#define PMAX_LANCE 3
91
92
93#define LE_CSR0 0
94#define LE_CSR1 1
95#define LE_CSR2 2
96#define LE_CSR3 3
97
98#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
99
100#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
101#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
102#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
103#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
104#define LE_C0_MERR 0x0800 /* ME: Memory error */
105#define LE_C0_RINT 0x0400 /* Received interrupt */
106#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
107#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
108#define LE_C0_INTR 0x0080 /* Interrupt or error */
109#define LE_C0_INEA 0x0040 /* Interrupt enable */
110#define LE_C0_RXON 0x0020 /* Receiver on */
111#define LE_C0_TXON 0x0010 /* Transmitter on */
112#define LE_C0_TDMD 0x0008 /* Transmitter demand */
113#define LE_C0_STOP 0x0004 /* Stop the card */
114#define LE_C0_STRT 0x0002 /* Start the card */
115#define LE_C0_INIT 0x0001 /* Init the card */
116
117#define LE_C3_BSWP 0x4 /* SWAP */
118#define LE_C3_ACON 0x2 /* ALE Control */
119#define LE_C3_BCON 0x1 /* Byte control */
120
121/* Receive message descriptor 1 */
122#define LE_R1_OWN 0x8000 /* Who owns the entry */
123#define LE_R1_ERR 0x4000 /* Error: if FRA, OFL, CRC or BUF is set */
124#define LE_R1_FRA 0x2000 /* FRA: Frame error */
125#define LE_R1_OFL 0x1000 /* OFL: Frame overflow */
126#define LE_R1_CRC 0x0800 /* CRC error */
127#define LE_R1_BUF 0x0400 /* BUF: Buffer error */
128#define LE_R1_SOP 0x0200 /* Start of packet */
129#define LE_R1_EOP 0x0100 /* End of packet */
130#define LE_R1_POK 0x0300 /* Packet is complete: SOP + EOP */
131
132/* Transmit message descriptor 1 */
133#define LE_T1_OWN 0x8000 /* Lance owns the packet */
134#define LE_T1_ERR 0x4000 /* Error summary */
135#define LE_T1_EMORE 0x1000 /* Error: more than one retry needed */
136#define LE_T1_EONE 0x0800 /* Error: one retry needed */
137#define LE_T1_EDEF 0x0400 /* Error: deferred */
138#define LE_T1_SOP 0x0200 /* Start of packet */
139#define LE_T1_EOP 0x0100 /* End of packet */
140#define LE_T1_POK 0x0300 /* Packet is complete: SOP + EOP */
141
142#define LE_T3_BUF 0x8000 /* Buffer error */
143#define LE_T3_UFL 0x4000 /* Error underflow */
144#define LE_T3_LCOL 0x1000 /* Error late collision */
145#define LE_T3_CLOS 0x0800 /* Error carrier loss */
146#define LE_T3_RTY 0x0400 /* Error retry */
147#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
148
149/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
150
151#ifndef LANCE_LOG_TX_BUFFERS
152#define LANCE_LOG_TX_BUFFERS 4
153#define LANCE_LOG_RX_BUFFERS 4
154#endif
155
156#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
157#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
158
159#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
160#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
161
162#define PKT_BUF_SZ 1536
163#define RX_BUFF_SIZE PKT_BUF_SZ
164#define TX_BUFF_SIZE PKT_BUF_SZ
165
166#undef TEST_HITS
167#define ZERO 0
168
169/*
170 * The DS2100/3100 have a linear 64 kB buffer which supports halfword
171 * accesses only. Each halfword of the buffer is word-aligned in the
172 * CPU address space.
173 *
174 * The PMAD-AA has a 128 kB buffer on-board.
175 *
176 * The IOASIC LANCE devices use a shared memory region. This region
177 * as seen from the CPU is (max) 128 kB long and has to be on an 128 kB
178 * boundary. The LANCE sees this as a 64 kB long continuous memory
179 * region.
180 *
181 * The LANCE's DMA address is used as an index in this buffer and DMA
182 * takes place in bursts of eight 16-bit words which are packed into
183 * four 32-bit words by the IOASIC. This leads to a strange padding:
184 * 16 bytes of valid data followed by a 16 byte gap :-(.
185 */
186
187struct lance_rx_desc {
188 unsigned short rmd0; /* low address of packet */
189 unsigned short rmd1; /* high address of packet
190 and descriptor bits */
191 short length; /* 2s complement (negative!)
192 of buffer length */
193 unsigned short mblength; /* actual number of bytes received */
194};
195
196struct lance_tx_desc {
197 unsigned short tmd0; /* low address of packet */
198 unsigned short tmd1; /* high address of packet
199 and descriptor bits */
200 short length; /* 2s complement (negative!)
201 of buffer length */
202 unsigned short misc;
203};
204
205
206/* First part of the LANCE initialization block, described in databook. */
207struct lance_init_block {
208 unsigned short mode; /* pre-set mode (reg. 15) */
209
210 unsigned short phys_addr[3]; /* physical ethernet address */
211 unsigned short filter[4]; /* multicast filter */
212
213 /* Receive and transmit ring base, along with extra bits. */
214 unsigned short rx_ptr; /* receive descriptor addr */
215 unsigned short rx_len; /* receive len and high addr */
216 unsigned short tx_ptr; /* transmit descriptor addr */
217 unsigned short tx_len; /* transmit len and high addr */
218
219 short gap[4];
220
221 /* The buffer descriptors */
222 struct lance_rx_desc brx_ring[RX_RING_SIZE];
223 struct lance_tx_desc btx_ring[TX_RING_SIZE];
224};
225
226#define BUF_OFFSET_CPU sizeof(struct lance_init_block)
227#define BUF_OFFSET_LNC sizeof(struct lance_init_block)
228
229#define shift_off(off, type) \
230 (type == ASIC_LANCE || type == PMAX_LANCE ? off << 1 : off)
231
232#define lib_off(rt, type) \
233 shift_off(offsetof(struct lance_init_block, rt), type)
234
235#define lib_ptr(ib, rt, type) \
236 ((volatile u16 *)((u8 *)(ib) + lib_off(rt, type)))
237
238#define rds_off(rt, type) \
239 shift_off(offsetof(struct lance_rx_desc, rt), type)
240
241#define rds_ptr(rd, rt, type) \
242 ((volatile u16 *)((u8 *)(rd) + rds_off(rt, type)))
243
244#define tds_off(rt, type) \
245 shift_off(offsetof(struct lance_tx_desc, rt), type)
246
247#define tds_ptr(td, rt, type) \
248 ((volatile u16 *)((u8 *)(td) + tds_off(rt, type)))
249
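/*
 * Worked example (illustrative): on the IOASIC and PMAX variants every
 * 16-bit word of the init block occupies 32 bits of CPU address space,
 * so CPU offsets are the structure offsets doubled by shift_off().
 * With offsetof(struct lance_init_block, rx_ptr) == 16:
 *
 *	lib_off(rx_ptr, ASIC_LANCE) == 32	(gap-padded layout)
 *	lib_off(rx_ptr, PMAD_LANCE) == 16	(linear buffer)
 */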
250struct lance_private {
251 struct net_device *next;
252 int type;
253 int dma_irq;
254 volatile struct lance_regs *ll;
255
256 spinlock_t lock;
257
258 int rx_new, tx_new;
259 int rx_old, tx_old;
260
261 unsigned short busmaster_regval;
262
263 struct timer_list multicast_timer;
264
265 /* Pointers to the ring buffers as seen from the CPU */
266 char *rx_buf_ptr_cpu[RX_RING_SIZE];
267 char *tx_buf_ptr_cpu[TX_RING_SIZE];
268
269 /* Pointers to the ring buffers as seen from the LANCE */
270 uint rx_buf_ptr_lnc[RX_RING_SIZE];
271 uint tx_buf_ptr_lnc[TX_RING_SIZE];
272};
273
274#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
275 lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
276 lp->tx_old - lp->tx_new-1)
277
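/*
 * TX_BUFFS_AVAIL keeps one descriptor permanently unused so that a
 * full ring can be told apart from an empty one.  An equivalent
 * helper, as a sketch (illustrative only, not used by this driver):
 *
 *	static inline int tx_buffs_avail(const struct lance_private *lp)
 *	{
 *		if (lp->tx_old <= lp->tx_new)
 *			return lp->tx_old + TX_RING_MOD_MASK - lp->tx_new;
 *		return lp->tx_old - lp->tx_new - 1;
 *	}
 */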
278/* The lance control ports are at an absolute address, machine and tc-slot
279 * dependent.
280 * DECstations do only 32-bit access and the LANCE uses 16 bit addresses,
281 * so we have to give the structure an extra pad member to make rap
282 * point at the right address.
283 */
284struct lance_regs {
285 volatile unsigned short rdp; /* register data port */
286 unsigned short pad;
287 volatile unsigned short rap; /* register address port */
288};
289
290int dec_lance_debug = 2;
291
292static struct tc_driver dec_lance_tc_driver;
293static struct net_device *root_lance_dev;
294
295static inline void writereg(volatile unsigned short *regptr, short value)
296{
297 *regptr = value;
298 iob();
299}
300
301/* Load the CSR registers */
302static void load_csrs(struct lance_private *lp)
303{
304 volatile struct lance_regs *ll = lp->ll;
305 uint leptr;
306
307 /* The address space as seen from the LANCE
308 * begins at address 0. HK
309 */
310 leptr = 0;
311
312 writereg(&ll->rap, LE_CSR1);
313 writereg(&ll->rdp, (leptr & 0xFFFF));
314 writereg(&ll->rap, LE_CSR2);
315 writereg(&ll->rdp, leptr >> 16);
316 writereg(&ll->rap, LE_CSR3);
317 writereg(&ll->rdp, lp->busmaster_regval);
318
319 /* Point back to csr0 */
320 writereg(&ll->rap, LE_CSR0);
321}
322
323/*
324 * Our specialized copy routines: plain memcpy() works only for the
325 * PMAD's linear buffer; PMAX and IOASIC need the gap-aware copies below.
326 */
327static void cp_to_buf(const int type, void *to, const void *from, int len)
328{
329 unsigned short *tp;
330 const unsigned short *fp;
331 unsigned short clen;
332 unsigned char *rtp;
333 const unsigned char *rfp;
334
335 if (type == PMAD_LANCE) {
336 memcpy(to, from, len);
337 } else if (type == PMAX_LANCE) {
338 clen = len >> 1;
339 tp = to;
340 fp = from;
341
342 while (clen--) {
343 *tp++ = *fp++;
344 tp++;
345 }
346
347 clen = len & 1;
348		rtp = (unsigned char *)tp;
349		rfp = (const unsigned char *)fp;
350 while (clen--) {
351 *rtp++ = *rfp++;
352 }
353 } else {
354 /*
355 * copy 16 Byte chunks
356 */
357 clen = len >> 4;
358 tp = to;
359 fp = from;
360 while (clen--) {
361 *tp++ = *fp++;
362 *tp++ = *fp++;
363 *tp++ = *fp++;
364 *tp++ = *fp++;
365 *tp++ = *fp++;
366 *tp++ = *fp++;
367 *tp++ = *fp++;
368 *tp++ = *fp++;
369 tp += 8;
370 }
371
372 /*
373 * do the rest, if any.
374 */
375 clen = len & 15;
376 rtp = (unsigned char *) tp;
377		rfp = (const unsigned char *) fp;
378 while (clen--) {
379 *rtp++ = *rfp++;
380 }
381 }
382
383 iob();
384}
385
386static void cp_from_buf(const int type, void *to, const void *from, int len)
387{
388 unsigned short *tp;
389 const unsigned short *fp;
390 unsigned short clen;
391 unsigned char *rtp;
392 const unsigned char *rfp;
393
394 if (type == PMAD_LANCE) {
395 memcpy(to, from, len);
396 } else if (type == PMAX_LANCE) {
397 clen = len >> 1;
398 tp = to;
399 fp = from;
400 while (clen--) {
401 *tp++ = *fp++;
402 fp++;
403 }
404
405 clen = len & 1;
406
407		rtp = (unsigned char *)tp;
408		rfp = (const unsigned char *)fp;
409
410 while (clen--) {
411 *rtp++ = *rfp++;
412 }
413 } else {
414
415 /*
416 * copy 16 Byte chunks
417 */
418 clen = len >> 4;
419 tp = to;
420 fp = from;
421 while (clen--) {
422 *tp++ = *fp++;
423 *tp++ = *fp++;
424 *tp++ = *fp++;
425 *tp++ = *fp++;
426 *tp++ = *fp++;
427 *tp++ = *fp++;
428 *tp++ = *fp++;
429 *tp++ = *fp++;
430 fp += 8;
431 }
432
433 /*
434 * do the rest, if any.
435 */
436 clen = len & 15;
437 rtp = (unsigned char *) tp;
438		rfp = (const unsigned char *) fp;
439 while (clen--) {
440 *rtp++ = *rfp++;
441 }
442
443
444 }
445
446}
447
448/* Setup the Lance Rx and Tx rings */
449static void lance_init_ring(struct net_device *dev)
450{
451 struct lance_private *lp = netdev_priv(dev);
452 volatile u16 *ib = (volatile u16 *)dev->mem_start;
453 uint leptr;
454 int i;
455
456 /* Lock out other processes while setting up hardware */
457 netif_stop_queue(dev);
458 lp->rx_new = lp->tx_new = 0;
459 lp->rx_old = lp->tx_old = 0;
460
461 /* Copy the ethernet address to the lance init block.
462 * XXX bit 0 of the physical address registers has to be zero
463 */
464 *lib_ptr(ib, phys_addr[0], lp->type) = (dev->dev_addr[1] << 8) |
465 dev->dev_addr[0];
466 *lib_ptr(ib, phys_addr[1], lp->type) = (dev->dev_addr[3] << 8) |
467 dev->dev_addr[2];
468 *lib_ptr(ib, phys_addr[2], lp->type) = (dev->dev_addr[5] << 8) |
469 dev->dev_addr[4];
470 /* Setup the initialization block */
471
472 /* Setup rx descriptor pointer */
473 leptr = offsetof(struct lance_init_block, brx_ring);
474 *lib_ptr(ib, rx_len, lp->type) = (LANCE_LOG_RX_BUFFERS << 13) |
475 (leptr >> 16);
476 *lib_ptr(ib, rx_ptr, lp->type) = leptr;
477 if (ZERO)
478 printk("RX ptr: %8.8x(%8.8x)\n",
479 leptr, lib_off(brx_ring, lp->type));
480
481 /* Setup tx descriptor pointer */
482 leptr = offsetof(struct lance_init_block, btx_ring);
483 *lib_ptr(ib, tx_len, lp->type) = (LANCE_LOG_TX_BUFFERS << 13) |
484 (leptr >> 16);
485 *lib_ptr(ib, tx_ptr, lp->type) = leptr;
486 if (ZERO)
487 printk("TX ptr: %8.8x(%8.8x)\n",
488 leptr, lib_off(btx_ring, lp->type));
489
490 if (ZERO)
491 printk("TX rings:\n");
492
493 /* Setup the Tx ring entries */
494 for (i = 0; i < TX_RING_SIZE; i++) {
495 leptr = lp->tx_buf_ptr_lnc[i];
496 *lib_ptr(ib, btx_ring[i].tmd0, lp->type) = leptr;
497 *lib_ptr(ib, btx_ring[i].tmd1, lp->type) = (leptr >> 16) &
498 0xff;
499 *lib_ptr(ib, btx_ring[i].length, lp->type) = 0xf000;
500 /* The ones required by tmd2 */
501 *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
502 if (i < 3 && ZERO)
503 printk("%d: 0x%8.8x(0x%8.8x)\n",
504 i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
505 }
506
507 /* Setup the Rx ring entries */
508 if (ZERO)
509 printk("RX rings:\n");
510 for (i = 0; i < RX_RING_SIZE; i++) {
511 leptr = lp->rx_buf_ptr_lnc[i];
512 *lib_ptr(ib, brx_ring[i].rmd0, lp->type) = leptr;
513 *lib_ptr(ib, brx_ring[i].rmd1, lp->type) = ((leptr >> 16) &
514 0xff) |
515 LE_R1_OWN;
516 *lib_ptr(ib, brx_ring[i].length, lp->type) = -RX_BUFF_SIZE |
517 0xf000;
518 *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
519 if (i < 3 && ZERO)
520 printk("%d: 0x%8.8x(0x%8.8x)\n",
521 i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
522 }
523 iob();
524}
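/*
 * Worked example (illustrative): buffer lengths are stored as 16-bit
 * two's complement negatives, e.g. -RX_BUFF_SIZE == -1536 == 0xfa00.
 * OR-ing in 0xf000 sets the "must be one" high bits the databook asks
 * for; for buffers of 4 kB or less those bits are already set.
 */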
525
526static int init_restart_lance(struct lance_private *lp)
527{
528 volatile struct lance_regs *ll = lp->ll;
529 int i;
530
531 writereg(&ll->rap, LE_CSR0);
532 writereg(&ll->rdp, LE_C0_INIT);
533
534 /* Wait for the lance to complete initialization */
535 for (i = 0; (i < 100) && !(ll->rdp & LE_C0_IDON); i++) {
536 udelay(10);
537 }
538	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
539		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n",
540			i, ll->rdp);
541		return -1;
542	}
548 writereg(&ll->rdp, LE_C0_IDON);
549 writereg(&ll->rdp, LE_C0_STRT);
550 writereg(&ll->rdp, LE_C0_INEA);
551
552 return 0;
553}
554
555static int lance_rx(struct net_device *dev)
556{
557 struct lance_private *lp = netdev_priv(dev);
558 volatile u16 *ib = (volatile u16 *)dev->mem_start;
559 volatile u16 *rd;
560 unsigned short bits;
561 int entry, len;
562 struct sk_buff *skb;
563
564#ifdef TEST_HITS
565 {
566 int i;
567
568 printk("[");
569 for (i = 0; i < RX_RING_SIZE; i++) {
570 if (i == lp->rx_new)
571 printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
572 lp->type) &
573 LE_R1_OWN ? "_" : "X");
574 else
575 printk("%s", *lib_ptr(ib, brx_ring[i].rmd1,
576 lp->type) &
577 LE_R1_OWN ? "." : "1");
578 }
579 printk("]");
580 }
581#endif
582
583 for (rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type);
584 !((bits = *rds_ptr(rd, rmd1, lp->type)) & LE_R1_OWN);
585 rd = lib_ptr(ib, brx_ring[lp->rx_new], lp->type)) {
586 entry = lp->rx_new;
587
588 /* We got an incomplete frame? */
589 if ((bits & LE_R1_POK) != LE_R1_POK) {
590 dev->stats.rx_over_errors++;
591 dev->stats.rx_errors++;
592 } else if (bits & LE_R1_ERR) {
593 /* Count only the end frame as a rx error,
594 * not the beginning
595 */
596 if (bits & LE_R1_BUF)
597 dev->stats.rx_fifo_errors++;
598 if (bits & LE_R1_CRC)
599 dev->stats.rx_crc_errors++;
600 if (bits & LE_R1_OFL)
601 dev->stats.rx_over_errors++;
602 if (bits & LE_R1_FRA)
603 dev->stats.rx_frame_errors++;
604 if (bits & LE_R1_EOP)
605 dev->stats.rx_errors++;
606 } else {
607 len = (*rds_ptr(rd, mblength, lp->type) & 0xfff) - 4;
608 skb = dev_alloc_skb(len + 2);
609
610			if (!skb) {
611 printk("%s: Memory squeeze, deferring packet.\n",
612 dev->name);
613 dev->stats.rx_dropped++;
614 *rds_ptr(rd, mblength, lp->type) = 0;
615 *rds_ptr(rd, rmd1, lp->type) =
616 ((lp->rx_buf_ptr_lnc[entry] >> 16) &
617 0xff) | LE_R1_OWN;
618 lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
619 return 0;
620 }
621 dev->stats.rx_bytes += len;
622
623 skb_reserve(skb, 2); /* 16 byte align */
624 skb_put(skb, len); /* make room */
625
626 cp_from_buf(lp->type, skb->data,
627 (char *)lp->rx_buf_ptr_cpu[entry], len);
628
629 skb->protocol = eth_type_trans(skb, dev);
630 netif_rx(skb);
631 dev->stats.rx_packets++;
632 }
633
634 /* Return the packet to the pool */
635 *rds_ptr(rd, mblength, lp->type) = 0;
636 *rds_ptr(rd, length, lp->type) = -RX_BUFF_SIZE | 0xf000;
637 *rds_ptr(rd, rmd1, lp->type) =
638 ((lp->rx_buf_ptr_lnc[entry] >> 16) & 0xff) | LE_R1_OWN;
639 lp->rx_new = (entry + 1) & RX_RING_MOD_MASK;
640 }
641 return 0;
642}
643
644static void lance_tx(struct net_device *dev)
645{
646 struct lance_private *lp = netdev_priv(dev);
647 volatile u16 *ib = (volatile u16 *)dev->mem_start;
648 volatile struct lance_regs *ll = lp->ll;
649 volatile u16 *td;
650 int i, j;
651 int status;
652
653 j = lp->tx_old;
654
655 spin_lock(&lp->lock);
656
657 for (i = j; i != lp->tx_new; i = j) {
658 td = lib_ptr(ib, btx_ring[i], lp->type);
659 /* If we hit a packet not owned by us, stop */
660 if (*tds_ptr(td, tmd1, lp->type) & LE_T1_OWN)
661 break;
662
663 if (*tds_ptr(td, tmd1, lp->type) & LE_T1_ERR) {
664 status = *tds_ptr(td, misc, lp->type);
665
666 dev->stats.tx_errors++;
667 if (status & LE_T3_RTY)
668 dev->stats.tx_aborted_errors++;
669 if (status & LE_T3_LCOL)
670 dev->stats.tx_window_errors++;
671
672 if (status & LE_T3_CLOS) {
673 dev->stats.tx_carrier_errors++;
674 printk("%s: Carrier Lost\n", dev->name);
675 /* Stop the lance */
676 writereg(&ll->rap, LE_CSR0);
677 writereg(&ll->rdp, LE_C0_STOP);
678 lance_init_ring(dev);
679 load_csrs(lp);
680 init_restart_lance(lp);
681 goto out;
682 }
683 /* Buffer errors and underflows turn off the
684 * transmitter, restart the adapter.
685 */
686 if (status & (LE_T3_BUF | LE_T3_UFL)) {
687 dev->stats.tx_fifo_errors++;
688
689 printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
690 dev->name);
691 /* Stop the lance */
692 writereg(&ll->rap, LE_CSR0);
693 writereg(&ll->rdp, LE_C0_STOP);
694 lance_init_ring(dev);
695 load_csrs(lp);
696 init_restart_lance(lp);
697 goto out;
698 }
699 } else if ((*tds_ptr(td, tmd1, lp->type) & LE_T1_POK) ==
700 LE_T1_POK) {
701 /*
702 * So we don't count the packet more than once.
703 */
704 *tds_ptr(td, tmd1, lp->type) &= ~(LE_T1_POK);
705
706 /* One collision before packet was sent. */
707 if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EONE)
708 dev->stats.collisions++;
709
710 /* More than one collision, be optimistic. */
711 if (*tds_ptr(td, tmd1, lp->type) & LE_T1_EMORE)
712 dev->stats.collisions += 2;
713
714 dev->stats.tx_packets++;
715 }
716 j = (j + 1) & TX_RING_MOD_MASK;
717 }
718 lp->tx_old = j;
719out:
720 if (netif_queue_stopped(dev) &&
721 TX_BUFFS_AVAIL > 0)
722 netif_wake_queue(dev);
723
724 spin_unlock(&lp->lock);
725}
726
727static irqreturn_t lance_dma_merr_int(int irq, void *dev_id)
728{
729 struct net_device *dev = dev_id;
730
731 printk(KERN_ERR "%s: DMA error\n", dev->name);
732 return IRQ_HANDLED;
733}
734
735static irqreturn_t lance_interrupt(int irq, void *dev_id)
736{
737 struct net_device *dev = dev_id;
738 struct lance_private *lp = netdev_priv(dev);
739 volatile struct lance_regs *ll = lp->ll;
740 int csr0;
741
742 writereg(&ll->rap, LE_CSR0);
743 csr0 = ll->rdp;
744
745 /* Acknowledge all the interrupt sources ASAP */
746 writereg(&ll->rdp, csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT));
747
748 if ((csr0 & LE_C0_ERR)) {
749 /* Clear the error condition */
750 writereg(&ll->rdp, LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
751 LE_C0_CERR | LE_C0_MERR);
752 }
753 if (csr0 & LE_C0_RINT)
754 lance_rx(dev);
755
756 if (csr0 & LE_C0_TINT)
757 lance_tx(dev);
758
759 if (csr0 & LE_C0_BABL)
760 dev->stats.tx_errors++;
761
762 if (csr0 & LE_C0_MISS)
763 dev->stats.rx_errors++;
764
765 if (csr0 & LE_C0_MERR) {
766 printk("%s: Memory error, status %04x\n", dev->name, csr0);
767
768 writereg(&ll->rdp, LE_C0_STOP);
769
770 lance_init_ring(dev);
771 load_csrs(lp);
772 init_restart_lance(lp);
773 netif_wake_queue(dev);
774 }
775
776 writereg(&ll->rdp, LE_C0_INEA);
777 writereg(&ll->rdp, LE_C0_INEA);
778 return IRQ_HANDLED;
779}
780
781static int lance_open(struct net_device *dev)
782{
783 volatile u16 *ib = (volatile u16 *)dev->mem_start;
784 struct lance_private *lp = netdev_priv(dev);
785 volatile struct lance_regs *ll = lp->ll;
786 int status = 0;
787
788 /* Stop the Lance */
789 writereg(&ll->rap, LE_CSR0);
790 writereg(&ll->rdp, LE_C0_STOP);
791
792 /* Set mode and clear multicast filter only at device open,
793 * so that lance_init_ring() called at any error will not
794 * forget multicast filters.
795 *
796	 * BTW this is a common bug in all lance drivers! --ANK
797 */
798 *lib_ptr(ib, mode, lp->type) = 0;
799 *lib_ptr(ib, filter[0], lp->type) = 0;
800 *lib_ptr(ib, filter[1], lp->type) = 0;
801 *lib_ptr(ib, filter[2], lp->type) = 0;
802 *lib_ptr(ib, filter[3], lp->type) = 0;
803
804 lance_init_ring(dev);
805 load_csrs(lp);
806
807 netif_start_queue(dev);
808
809 /* Associate IRQ with lance_interrupt */
810 if (request_irq(dev->irq, lance_interrupt, 0, "lance", dev)) {
811 printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
812 return -EAGAIN;
813 }
814 if (lp->dma_irq >= 0) {
815 unsigned long flags;
816
817 if (request_irq(lp->dma_irq, lance_dma_merr_int, 0,
818 "lance error", dev)) {
819 free_irq(dev->irq, dev);
820 printk("%s: Can't get DMA IRQ %d\n", dev->name,
821 lp->dma_irq);
822 return -EAGAIN;
823 }
824
825 spin_lock_irqsave(&ioasic_ssr_lock, flags);
826
827 fast_mb();
828 /* Enable I/O ASIC LANCE DMA. */
829 ioasic_write(IO_REG_SSR,
830 ioasic_read(IO_REG_SSR) | IO_SSR_LANCE_DMA_EN);
831
832 fast_mb();
833 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
834 }
835
836 status = init_restart_lance(lp);
837 return status;
838}
839
840static int lance_close(struct net_device *dev)
841{
842 struct lance_private *lp = netdev_priv(dev);
843 volatile struct lance_regs *ll = lp->ll;
844
845 netif_stop_queue(dev);
846 del_timer_sync(&lp->multicast_timer);
847
848 /* Stop the card */
849 writereg(&ll->rap, LE_CSR0);
850 writereg(&ll->rdp, LE_C0_STOP);
851
852 if (lp->dma_irq >= 0) {
853 unsigned long flags;
854
855 spin_lock_irqsave(&ioasic_ssr_lock, flags);
856
857 fast_mb();
858 /* Disable I/O ASIC LANCE DMA. */
859 ioasic_write(IO_REG_SSR,
860 ioasic_read(IO_REG_SSR) & ~IO_SSR_LANCE_DMA_EN);
861
862 fast_iob();
863 spin_unlock_irqrestore(&ioasic_ssr_lock, flags);
864
865 free_irq(lp->dma_irq, dev);
866 }
867 free_irq(dev->irq, dev);
868 return 0;
869}
870
871static inline int lance_reset(struct net_device *dev)
872{
873 struct lance_private *lp = netdev_priv(dev);
874 volatile struct lance_regs *ll = lp->ll;
875 int status;
876
877 /* Stop the lance */
878 writereg(&ll->rap, LE_CSR0);
879 writereg(&ll->rdp, LE_C0_STOP);
880
881 lance_init_ring(dev);
882 load_csrs(lp);
883 dev->trans_start = jiffies; /* prevent tx timeout */
884 status = init_restart_lance(lp);
885 return status;
886}
887
888static void lance_tx_timeout(struct net_device *dev)
889{
890 struct lance_private *lp = netdev_priv(dev);
891 volatile struct lance_regs *ll = lp->ll;
892
893 printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
894 dev->name, ll->rdp);
895 lance_reset(dev);
896 netif_wake_queue(dev);
897}
898
899static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
900{
901 struct lance_private *lp = netdev_priv(dev);
902 volatile struct lance_regs *ll = lp->ll;
903 volatile u16 *ib = (volatile u16 *)dev->mem_start;
904 unsigned long flags;
905 int entry, len;
906
907 len = skb->len;
908
909 if (len < ETH_ZLEN) {
910 if (skb_padto(skb, ETH_ZLEN))
911 return NETDEV_TX_OK;
912 len = ETH_ZLEN;
913 }
914
915 dev->stats.tx_bytes += len;
916
917 spin_lock_irqsave(&lp->lock, flags);
918
919 entry = lp->tx_new;
920 *lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
921 *lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
922
923 cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
924
925 /* Now, give the packet to the lance */
926 *lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
927 ((lp->tx_buf_ptr_lnc[entry] >> 16) & 0xff) |
928 (LE_T1_POK | LE_T1_OWN);
929 lp->tx_new = (entry + 1) & TX_RING_MOD_MASK;
930
931 if (TX_BUFFS_AVAIL <= 0)
932 netif_stop_queue(dev);
933
934 /* Kick the lance: transmit now */
935 writereg(&ll->rdp, LE_C0_INEA | LE_C0_TDMD);
936
937 spin_unlock_irqrestore(&lp->lock, flags);
938
939 dev_kfree_skb(skb);
940
941 return NETDEV_TX_OK;
942}
943
944static void lance_load_multicast(struct net_device *dev)
945{
946 struct lance_private *lp = netdev_priv(dev);
947 volatile u16 *ib = (volatile u16 *)dev->mem_start;
948 struct netdev_hw_addr *ha;
949 u32 crc;
950
951 /* set all multicast bits */
952 if (dev->flags & IFF_ALLMULTI) {
953 *lib_ptr(ib, filter[0], lp->type) = 0xffff;
954 *lib_ptr(ib, filter[1], lp->type) = 0xffff;
955 *lib_ptr(ib, filter[2], lp->type) = 0xffff;
956 *lib_ptr(ib, filter[3], lp->type) = 0xffff;
957 return;
958 }
959 /* clear the multicast filter */
960 *lib_ptr(ib, filter[0], lp->type) = 0;
961 *lib_ptr(ib, filter[1], lp->type) = 0;
962 *lib_ptr(ib, filter[2], lp->type) = 0;
963 *lib_ptr(ib, filter[3], lp->type) = 0;
964
965 /* Add addresses */
966 netdev_for_each_mc_addr(ha, dev) {
967 crc = ether_crc_le(ETH_ALEN, ha->addr);
968 crc = crc >> 26;
969 *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
970 }
971}
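/*
 * Worked example (illustrative): ether_crc_le() yields a 32-bit CRC
 * and crc >> 26 keeps its top six bits, indexing the 64-bit logical
 * address filter.  Of those six bits, crc >> 4 picks one of the four
 * 16-bit filter words and 1 << (crc & 0xf) the bit within that word.
 */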
972
973static void lance_set_multicast(struct net_device *dev)
974{
975 struct lance_private *lp = netdev_priv(dev);
976 volatile u16 *ib = (volatile u16 *)dev->mem_start;
977 volatile struct lance_regs *ll = lp->ll;
978
979 if (!netif_running(dev))
980 return;
981
982 if (lp->tx_old != lp->tx_new) {
983 mod_timer(&lp->multicast_timer, jiffies + 4 * HZ/100);
984 netif_wake_queue(dev);
985 return;
986 }
987
988 netif_stop_queue(dev);
989
990 writereg(&ll->rap, LE_CSR0);
991 writereg(&ll->rdp, LE_C0_STOP);
992
993 lance_init_ring(dev);
994
995 if (dev->flags & IFF_PROMISC) {
996 *lib_ptr(ib, mode, lp->type) |= LE_MO_PROM;
997 } else {
998 *lib_ptr(ib, mode, lp->type) &= ~LE_MO_PROM;
999 lance_load_multicast(dev);
1000 }
1001 load_csrs(lp);
1002 init_restart_lance(lp);
1003 netif_wake_queue(dev);
1004}
1005
1006static void lance_set_multicast_retry(unsigned long _opaque)
1007{
1008 struct net_device *dev = (struct net_device *) _opaque;
1009
1010 lance_set_multicast(dev);
1011}
1012
1013static const struct net_device_ops lance_netdev_ops = {
1014 .ndo_open = lance_open,
1015 .ndo_stop = lance_close,
1016 .ndo_start_xmit = lance_start_xmit,
1017 .ndo_tx_timeout = lance_tx_timeout,
1018 .ndo_set_rx_mode = lance_set_multicast,
1019 .ndo_change_mtu = eth_change_mtu,
1020 .ndo_validate_addr = eth_validate_addr,
1021 .ndo_set_mac_address = eth_mac_addr,
1022};
1023
1024static int __devinit dec_lance_probe(struct device *bdev, const int type)
1025{
1026 static unsigned version_printed;
1027 static const char fmt[] = "declance%d";
1028 char name[10];
1029 struct net_device *dev;
1030 struct lance_private *lp;
1031 volatile struct lance_regs *ll;
1032 resource_size_t start = 0, len = 0;
1033 int i, ret;
1034 unsigned long esar_base;
1035 unsigned char *esar;
1036
1037 if (dec_lance_debug && version_printed++ == 0)
1038 printk(version);
1039
1040 if (bdev)
1041 snprintf(name, sizeof(name), "%s", dev_name(bdev));
1042 else {
1043 i = 0;
1044 dev = root_lance_dev;
1045 while (dev) {
1046 i++;
1047 lp = netdev_priv(dev);
1048 dev = lp->next;
1049 }
1050 snprintf(name, sizeof(name), fmt, i);
1051 }
1052
1053 dev = alloc_etherdev(sizeof(struct lance_private));
1054 if (!dev) {
1055 printk(KERN_ERR "%s: Unable to allocate etherdev, aborting.\n",
1056 name);
1057 ret = -ENOMEM;
1058 goto err_out;
1059 }
1060
1061 /*
1062 * alloc_etherdev ensures the data structures used by the LANCE
1063 * are aligned.
1064 */
1065 lp = netdev_priv(dev);
1066 spin_lock_init(&lp->lock);
1067
1068 lp->type = type;
1069 switch (type) {
1070 case ASIC_LANCE:
1071 dev->base_addr = CKSEG1ADDR(dec_kn_slot_base + IOASIC_LANCE);
1072
1073 /* buffer space for the on-board LANCE shared memory */
1074 /*
1075 * FIXME: ugly hack!
1076 */
1077 dev->mem_start = CKSEG1ADDR(0x00020000);
1078 dev->mem_end = dev->mem_start + 0x00020000;
1079 dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1080 esar_base = CKSEG1ADDR(dec_kn_slot_base + IOASIC_ESAR);
1081
1082 /* Workaround crash with booting KN04 2.1k from Disk */
1083 memset((void *)dev->mem_start, 0,
1084 dev->mem_end - dev->mem_start);
1085
1086 /*
1087 * setup the pointer arrays, this sucks [tm] :-(
1088 */
1089 for (i = 0; i < RX_RING_SIZE; i++) {
1090 lp->rx_buf_ptr_cpu[i] =
1091 (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1092 2 * i * RX_BUFF_SIZE);
1093 lp->rx_buf_ptr_lnc[i] =
1094 (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1095 }
1096 for (i = 0; i < TX_RING_SIZE; i++) {
1097 lp->tx_buf_ptr_cpu[i] =
1098 (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1099 2 * RX_RING_SIZE * RX_BUFF_SIZE +
1100 2 * i * TX_BUFF_SIZE);
1101 lp->tx_buf_ptr_lnc[i] =
1102 (BUF_OFFSET_LNC +
1103 RX_RING_SIZE * RX_BUFF_SIZE +
1104 i * TX_BUFF_SIZE);
1105 }
1106
1107 /* Setup I/O ASIC LANCE DMA. */
1108 lp->dma_irq = dec_interrupt[DEC_IRQ_LANCE_MERR];
1109 ioasic_write(IO_REG_LANCE_DMA_P,
1110 CPHYSADDR(dev->mem_start) << 3);
1111
1112 break;
1113#ifdef CONFIG_TC
1114 case PMAD_LANCE:
1115 dev_set_drvdata(bdev, dev);
1116
1117 start = to_tc_dev(bdev)->resource.start;
1118 len = to_tc_dev(bdev)->resource.end - start + 1;
1119 if (!request_mem_region(start, len, dev_name(bdev))) {
1120 printk(KERN_ERR
1121 "%s: Unable to reserve MMIO resource\n",
1122 dev_name(bdev));
1123 ret = -EBUSY;
1124 goto err_out_dev;
1125 }
1126
1127 dev->mem_start = CKSEG1ADDR(start);
1128 dev->mem_end = dev->mem_start + 0x100000;
1129 dev->base_addr = dev->mem_start + 0x100000;
1130 dev->irq = to_tc_dev(bdev)->interrupt;
1131 esar_base = dev->mem_start + 0x1c0002;
1132 lp->dma_irq = -1;
1133
1134 for (i = 0; i < RX_RING_SIZE; i++) {
1135 lp->rx_buf_ptr_cpu[i] =
1136 (char *)(dev->mem_start + BUF_OFFSET_CPU +
1137 i * RX_BUFF_SIZE);
1138 lp->rx_buf_ptr_lnc[i] =
1139 (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1140 }
1141 for (i = 0; i < TX_RING_SIZE; i++) {
1142 lp->tx_buf_ptr_cpu[i] =
1143 (char *)(dev->mem_start + BUF_OFFSET_CPU +
1144 RX_RING_SIZE * RX_BUFF_SIZE +
1145 i * TX_BUFF_SIZE);
1146 lp->tx_buf_ptr_lnc[i] =
1147 (BUF_OFFSET_LNC +
1148 RX_RING_SIZE * RX_BUFF_SIZE +
1149 i * TX_BUFF_SIZE);
1150 }
1151
1152 break;
1153#endif
1154 case PMAX_LANCE:
1155 dev->irq = dec_interrupt[DEC_IRQ_LANCE];
1156 dev->base_addr = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE);
1157 dev->mem_start = CKSEG1ADDR(KN01_SLOT_BASE + KN01_LANCE_MEM);
1158 dev->mem_end = dev->mem_start + KN01_SLOT_SIZE;
1159 esar_base = CKSEG1ADDR(KN01_SLOT_BASE + KN01_ESAR + 1);
1160 lp->dma_irq = -1;
1161
1162 /*
1163 * setup the pointer arrays, this sucks [tm] :-(
1164 */
1165 for (i = 0; i < RX_RING_SIZE; i++) {
1166 lp->rx_buf_ptr_cpu[i] =
1167 (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1168 2 * i * RX_BUFF_SIZE);
1169 lp->rx_buf_ptr_lnc[i] =
1170 (BUF_OFFSET_LNC + i * RX_BUFF_SIZE);
1171 }
1172 for (i = 0; i < TX_RING_SIZE; i++) {
1173 lp->tx_buf_ptr_cpu[i] =
1174 (char *)(dev->mem_start + 2 * BUF_OFFSET_CPU +
1175 2 * RX_RING_SIZE * RX_BUFF_SIZE +
1176 2 * i * TX_BUFF_SIZE);
1177 lp->tx_buf_ptr_lnc[i] =
1178 (BUF_OFFSET_LNC +
1179 RX_RING_SIZE * RX_BUFF_SIZE +
1180 i * TX_BUFF_SIZE);
1181 }
1182
1183 break;
1184
1185 default:
1186 printk(KERN_ERR "%s: declance_init called with unknown type\n",
1187 name);
1188 ret = -ENODEV;
1189 goto err_out_dev;
1190 }
1191
1192 ll = (struct lance_regs *) dev->base_addr;
1193 esar = (unsigned char *) esar_base;
1194
1195 /* prom checks */
1196 /* First, check for test pattern */
1197 if (esar[0x60] != 0xff && esar[0x64] != 0x00 &&
1198 esar[0x68] != 0x55 && esar[0x6c] != 0xaa) {
1199 printk(KERN_ERR
1200 "%s: Ethernet station address prom not found!\n",
1201 name);
1202 ret = -ENODEV;
1203 goto err_out_resource;
1204 }
1205 /* Check the prom contents */
1206 for (i = 0; i < 8; i++) {
1207 if (esar[i * 4] != esar[0x3c - i * 4] &&
1208 esar[i * 4] != esar[0x40 + i * 4] &&
1209 esar[0x3c - i * 4] != esar[0x40 + i * 4]) {
1210 printk(KERN_ERR "%s: Something is wrong with the "
1211 "ethernet station address prom!\n", name);
1212 ret = -ENODEV;
1213 goto err_out_resource;
1214 }
1215 }
1216
1217 /* Copy the ethernet address to the device structure, later to the
1218 * lance initialization block so the lance gets it every time it's
1219 * (re)initialized.
1220 */
1221 switch (type) {
1222 case ASIC_LANCE:
1223 printk("%s: IOASIC onboard LANCE", name);
1224 break;
1225 case PMAD_LANCE:
1226 printk("%s: PMAD-AA", name);
1227 break;
1228 case PMAX_LANCE:
1229 printk("%s: PMAX onboard LANCE", name);
1230 break;
1231 }
1232 for (i = 0; i < 6; i++)
1233 dev->dev_addr[i] = esar[i * 4];
1234
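	/* Each ESAR PROM byte occupies one 32-bit word in CPU address
	 * space, hence the stride of 4 above.
	 */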
1235 printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
1236
1237 dev->netdev_ops = &lance_netdev_ops;
1238 dev->watchdog_timeo = 5*HZ;
1239
1240 /* lp->ll is the location of the registers for lance card */
1241 lp->ll = ll;
1242
1243 /* busmaster_regval (CSR3) should be zero according to the PMAD-AA
1244 * specification.
1245 */
1246 lp->busmaster_regval = 0;
1247
1248 dev->dma = 0;
1249
1250 /* We cannot sleep if the chip is busy during a
1251 * multicast list update event, because such events
1252	 * can occur from interrupts (e.g. IPv6). So we
1253 * use a timer to try again later when necessary. -DaveM
1254 */
1255 init_timer(&lp->multicast_timer);
1256 lp->multicast_timer.data = (unsigned long) dev;
1257 lp->multicast_timer.function = lance_set_multicast_retry;
1258
1259 ret = register_netdev(dev);
1260 if (ret) {
1261 printk(KERN_ERR
1262 "%s: Unable to register netdev, aborting.\n", name);
1263 goto err_out_resource;
1264 }
1265
1266 if (!bdev) {
1267 lp->next = root_lance_dev;
1268 root_lance_dev = dev;
1269 }
1270
1271 printk("%s: registered as %s.\n", name, dev->name);
1272 return 0;
1273
1274err_out_resource:
1275 if (bdev)
1276 release_mem_region(start, len);
1277
1278err_out_dev:
1279 free_netdev(dev);
1280
1281err_out:
1282 return ret;
1283}
1284
1285static void __exit dec_lance_remove(struct device *bdev)
1286{
1287 struct net_device *dev = dev_get_drvdata(bdev);
1288 resource_size_t start, len;
1289
1290 unregister_netdev(dev);
1291 start = to_tc_dev(bdev)->resource.start;
1292 len = to_tc_dev(bdev)->resource.end - start + 1;
1293 release_mem_region(start, len);
1294 free_netdev(dev);
1295}
1296
1297/* Find all the lance cards on the system and initialize them */
1298static int __init dec_lance_platform_probe(void)
1299{
1300 int count = 0;
1301
1302 if (dec_interrupt[DEC_IRQ_LANCE] >= 0) {
1303 if (dec_interrupt[DEC_IRQ_LANCE_MERR] >= 0) {
1304 if (dec_lance_probe(NULL, ASIC_LANCE) >= 0)
1305 count++;
1306 } else if (!TURBOCHANNEL) {
1307 if (dec_lance_probe(NULL, PMAX_LANCE) >= 0)
1308 count++;
1309 }
1310 }
1311
1312 return (count > 0) ? 0 : -ENODEV;
1313}
1314
1315static void __exit dec_lance_platform_remove(void)
1316{
1317 while (root_lance_dev) {
1318 struct net_device *dev = root_lance_dev;
1319 struct lance_private *lp = netdev_priv(dev);
1320
1321 unregister_netdev(dev);
1322 root_lance_dev = lp->next;
1323 free_netdev(dev);
1324 }
1325}
1326
1327#ifdef CONFIG_TC
1328static int __devinit dec_lance_tc_probe(struct device *dev);
1329static int __exit dec_lance_tc_remove(struct device *dev);
1330
1331static const struct tc_device_id dec_lance_tc_table[] = {
1332 { "DEC ", "PMAD-AA " },
1333 { }
1334};
1335MODULE_DEVICE_TABLE(tc, dec_lance_tc_table);
1336
1337static struct tc_driver dec_lance_tc_driver = {
1338 .id_table = dec_lance_tc_table,
1339 .driver = {
1340 .name = "declance",
1341 .bus = &tc_bus_type,
1342 .probe = dec_lance_tc_probe,
1343 .remove = __exit_p(dec_lance_tc_remove),
1344 },
1345};
1346
1347static int __devinit dec_lance_tc_probe(struct device *dev)
1348{
1349 int status = dec_lance_probe(dev, PMAD_LANCE);
1350 if (!status)
1351 get_device(dev);
1352 return status;
1353}
1354
1355static int __exit dec_lance_tc_remove(struct device *dev)
1356{
1357 put_device(dev);
1358 dec_lance_remove(dev);
1359 return 0;
1360}
1361#endif
1362
1363static int __init dec_lance_init(void)
1364{
1365 int status;
1366
1367 status = tc_register_driver(&dec_lance_tc_driver);
1368 if (!status)
1369 dec_lance_platform_probe();
1370 return status;
1371}
1372
1373static void __exit dec_lance_exit(void)
1374{
1375 dec_lance_platform_remove();
1376 tc_unregister_driver(&dec_lance_tc_driver);
1377}
1378
1379
1380module_init(dec_lance_init);
1381module_exit(dec_lance_exit);
diff --git a/drivers/net/ethernet/amd/depca.c b/drivers/net/ethernet/amd/depca.c
new file mode 100644
index 000000000000..681970c07f22
--- /dev/null
+++ b/drivers/net/ethernet/amd/depca.c
@@ -0,0 +1,2111 @@
1/* depca.c: A DIGITAL DEPCA & EtherWORKS ethernet driver for linux.
2
3 Written 1994, 1995 by David C. Davies.
4
5
6 Copyright 1994 David C. Davies
7 and
8 United States Government
9 (as represented by the Director, National Security Agency).
10
11 Copyright 1995 Digital Equipment Corporation.
12
13
14 This software may be used and distributed according to the terms of
15 the GNU General Public License, incorporated herein by reference.
16
17 This driver is written for the Digital Equipment Corporation series
18 of DEPCA and EtherWORKS ethernet cards:
19
20 DEPCA (the original)
21 DE100
22 DE101
23 DE200 Turbo
24 DE201 Turbo
25 DE202 Turbo (TP BNC)
26 DE210
27 DE422 (EISA)
28
29 The driver has been tested on DE100, DE200 and DE202 cards in a
30 relatively busy network. The DE422 has been tested a little.
31
32 This driver will NOT work for the DE203, DE204 and DE205 series of
33 cards, since they have a new custom ASIC in place of the AMD LANCE
34 chip. See the 'ewrk3.c' driver in the Linux source tree for running
35 those cards.
36
37 I have benchmarked the driver with a DE100 at 595kB/s to (542kB/s from)
38 a DECstation 5000/200.
39
40 The author may be reached at davies@maniac.ultranet.com
41
42 =========================================================================
43
44 The driver was originally based on the 'lance.c' driver from Donald
45 Becker which is included with the standard driver distribution for
46 linux. V0.4 is a complete re-write with only the kernel interface
47 remaining from the original code.
48
49 1) Lance.c code in /linux/drivers/net/
50 2) "Ethernet/IEEE 802.3 Family. 1992 World Network Data Book/Handbook",
51 AMD, 1992 [(800) 222-9323].
52 3) "Am79C90 CMOS Local Area Network Controller for Ethernet (C-LANCE)",
53 AMD, Pub. #17881, May 1993.
54 4) "Am79C960 PCnet-ISA(tm), Single-Chip Ethernet Controller for ISA",
55 AMD, Pub. #16907, May 1992
56 5) "DEC EtherWORKS LC Ethernet Controller Owners Manual",
57 Digital Equipment corporation, 1990, Pub. #EK-DE100-OM.003
58 6) "DEC EtherWORKS Turbo Ethernet Controller Owners Manual",
59 Digital Equipment corporation, 1990, Pub. #EK-DE200-OM.003
60 7) "DEPCA Hardware Reference Manual", Pub. #EK-DEPCA-PR
61 Digital Equipment Corporation, 1989
62 8) "DEC EtherWORKS Turbo_(TP BNC) Ethernet Controller Owners Manual",
63 Digital Equipment corporation, 1991, Pub. #EK-DE202-OM.001
64
65
66 Peter Bauer's depca.c (V0.5) was referred to when debugging V0.1 of this
67 driver.
68
69 The original DEPCA card requires that the ethernet ROM address counter
70 be enabled to count and has an 8 bit NICSR. The ROM counter enabling is
71 only done when a 0x08 is read as the first address octet (to minimise
72 the chances of writing over some other hardware's I/O register). The
73 NICSR accesses have been changed to byte accesses for all the cards
74 supported by this driver, since there is only one useful bit in the MSB
75 (remote boot timeout) and it is not used. Also, there is a maximum of
76 only 48kB network RAM for this card. My thanks to Torbjorn Lindh for
77 help debugging all this (and holding my feet to the fire until I got it
78 right).
79
80 The DE200 series boards have on-board 64kB RAM for use as a shared
81 memory network buffer. Only the DE100 cards make use of a 2kB buffer
82 mode which has not been implemented in this driver (only the 32kB and
83 64kB modes are supported [16kB/48kB for the original DEPCA]).
84
85 At the most only 2 DEPCA cards can be supported on the ISA bus because
86 there is only provision for two I/O base addresses on each card (0x300
87 and 0x200). The I/O address is detected by searching for a byte sequence
88 in the Ethernet station address PROM at the expected I/O address for the
89 Ethernet PROM. The shared memory base address is 'autoprobed' by
90 looking for the self test PROM and detecting the card name. When a
91 second DEPCA is detected, information is placed in the base_addr
92 variable of the next device structure (which is created if necessary),
93 thus enabling ethif_probe initialization for the device. More than 2
94 EISA cards can be supported, but care will be needed assigning the
95 shared memory to ensure that each slot has the correct IRQ, I/O address
96 and shared memory address assigned.
97
98 ************************************************************************
99
100 NOTE: If you are using two ISA DEPCAs, it is important that you assign
101 the base memory addresses correctly. The driver autoprobes I/O 0x300
102 then 0x200. The base memory address for the first device must be less
103 than that of the second so that the auto probe will correctly assign the
104 I/O and memory addresses on the same card. I can't think of a way to do
105 this unambiguously at the moment, since there is nothing on the cards to
106 tie I/O and memory information together.
107
108 I am unable to test 2 cards together for now, so this code is
109 unchecked. All reports, good or bad, are welcome.
110
111 ************************************************************************
112
113 The board IRQ setting must be at an unused IRQ which is auto-probed
114 using Donald Becker's autoprobe routines. DEPCA and DE100 board IRQs are
115 {2,3,4,5,7}, whereas the DE200 is at {5,9,10,11,15}. Note that IRQ2 is
116 really IRQ9 in machines with 16 IRQ lines.
117
118 No 16MB memory limitation should exist with this driver as DMA is not
119 used and the common memory area is in low memory on the network card (my
120 current system has 20MB and I've not had problems yet).
121
122 The ability to load this driver as a loadable module has been added. To
123 utilise this ability, you have to do <8 things:
124
125 0) have a copy of the loadable modules code installed on your system.
126 1) copy depca.c from the /linux/drivers/net directory to your favourite
127 temporary directory.
128 2) if you wish, edit the source code near line 1530 to reflect the I/O
129 address and IRQ you're using (see also 5).
130 3) compile depca.c, but include -DMODULE in the command line to ensure
131 that the correct bits are compiled (see end of source code).
132 4) if you are wanting to add a new card, goto 5. Otherwise, recompile a
133 kernel with the depca configuration turned off and reboot.
134 5) insmod depca.o [irq=7] [io=0x200] [mem=0xd0000] [adapter_name=DE100]
135 [Alan Cox: Changed the code to allow command line irq/io assignments]
136 [Dave Davies: Changed the code to allow command line mem/name
137 assignments]
138 6) run the net startup bits for your eth?? interface manually
139 (usually /etc/rc.inet[12] at boot time).
140 7) enjoy!
141
142 Note that autoprobing is not allowed in loadable modules - the system is
143 already up and running and you're messing with interrupts.
144
145 To unload a module, turn off the associated interface
146 'ifconfig eth?? down' then 'rmmod depca'.
147
148 To assign a base memory address for the shared memory when running as a
149 loadable module, see 5 above. To include the adapter name (if you have
150 no PROM but know the card name) also see 5 above. Note that this last
151 option will not work with kernel built-in depca's.
152
153 The shared memory assignment for a loadable module makes sense to avoid
154 the 'memory autoprobe' picking the wrong shared memory (for the case of
155 2 depca's in a PC).
156
157 ************************************************************************
158 Support for MCA EtherWORKS cards added 11-3-98.
159 Verified to work with up to 2 DE212 cards in a system (although not
160 fully stress-tested).
161
162 Currently known bugs/limitations:
163
164 Note: with the MCA stuff as a module, it trusts the MCA configuration,
165 not the command line for IRQ and memory address. You can
166 specify them if you want, but it will throw your values out.
167 You still have to pass the IO address it was configured as
168 though.
169
170 ************************************************************************
171 TO DO:
172 ------
173
174
175 Revision History
176 ----------------
177
178 Version Date Description
179
180 0.1 25-jan-94 Initial writing.
181 0.2 27-jan-94 Added LANCE TX hardware buffer chaining.
182 0.3 1-feb-94 Added multiple DEPCA support.
183 0.31 4-feb-94 Added DE202 recognition.
184 0.32 19-feb-94 Tidy up. Improve multi-DEPCA support.
185 0.33 25-feb-94 Fix DEPCA ethernet ROM counter enable.
186 Add jabber packet fix from murf@perftech.com
187 and becker@super.org
188 0.34 7-mar-94 Fix DEPCA max network memory RAM & NICSR access.
189 0.35 8-mar-94 Added DE201 recognition. Tidied up.
190 0.351 30-apr-94 Added EISA support. Added DE422 recognition.
191 0.36 16-may-94 DE422 fix released.
192 0.37 22-jul-94 Added MODULE support
193 0.38 15-aug-94 Added DBR ROM switch in depca_close().
194 Multi DEPCA bug fix.
195 0.38axp 15-sep-94 Special version for Alpha AXP Linux V1.0.
196 0.381 12-dec-94 Added DE101 recognition, fix multicast bug.
197 0.382 9-feb-95 Fix recognition bug reported by <bkm@star.rl.ac.uk>.
198 0.383 22-feb-95 Fix for conflict with VESA SCSI reported by
199 <stromain@alf.dec.com>
200 0.384 17-mar-95 Fix a ring full bug reported by <bkm@star.rl.ac.uk>
201 0.385 3-apr-95 Fix a recognition bug reported by
202 <ryan.niemi@lastfrontier.com>
203 0.386 21-apr-95 Fix the last fix...sorry, must be galloping senility
204 0.40 25-May-95 Rewrite for portability & updated.
205 ALPHA support from <jestabro@amt.tay1.dec.com>
206 0.41 26-Jun-95 Added verify_area() calls in depca_ioctl() from
207 suggestion by <heiko@colossus.escape.de>
208 0.42 27-Dec-95 Add 'mem' shared memory assignment for loadable
209 modules.
210 Add 'adapter_name' for loadable modules when no PROM.
211 Both above from a suggestion by
212 <pchen@woodruffs121.residence.gatech.edu>.
213 Add new multicasting code.
214 0.421 22-Apr-96 Fix alloc_device() bug <jari@markkus2.fimr.fi>
215 0.422 29-Apr-96 Fix depca_hw_init() bug <jari@markkus2.fimr.fi>
216 0.423 7-Jun-96 Fix module load bug <kmg@barco.be>
217 0.43 16-Aug-96 Update alloc_device() to conform to de4x5.c
218 0.44 1-Sep-97 Fix *_probe() to test check_region() first - bug
219 reported by <mmogilvi@elbert.uccs.edu>
220 0.45 3-Nov-98 Added support for MCA EtherWORKS (DE210/DE212) cards
221 by <tymm@computer.org>
222 0.451 5-Nov-98 Fixed mca stuff cuz I'm a dummy. <tymm@computer.org>
223 0.5 14-Nov-98 Re-spin for 2.1.x kernels.
224 0.51 27-Jun-99 Correct received packet length for CRC from
225 report by <worm@dkik.dk>
226 0.52 16-Oct-00 Fixes for 2.3 io memory accesses
227 Fix show-stopper (ints left masked) in depca_interrupt
228 by <peterd@pnd-pc.demon.co.uk>
229 0.53 12-Jan-01 Release resources on failure, bss tidbits
230 by acme@conectiva.com.br
231 0.54 08-Nov-01 use library crc32 functions
232 by Matt_Domsch@dell.com
233 0.55 01-Mar-03 Use EISA/sysfs framework <maz@wild-wind.fr.eu.org>
234
235 =========================================================================
236*/
237
238#include <linux/module.h>
239#include <linux/kernel.h>
240#include <linux/sched.h>
241#include <linux/string.h>
242#include <linux/errno.h>
243#include <linux/ioport.h>
244#include <linux/slab.h>
245#include <linux/interrupt.h>
246#include <linux/delay.h>
247#include <linux/init.h>
248#include <linux/crc32.h>
249#include <linux/netdevice.h>
250#include <linux/etherdevice.h>
251#include <linux/skbuff.h>
252#include <linux/time.h>
253#include <linux/types.h>
254#include <linux/unistd.h>
255#include <linux/ctype.h>
256#include <linux/moduleparam.h>
257#include <linux/platform_device.h>
258#include <linux/bitops.h>
259
260#include <asm/uaccess.h>
261#include <asm/io.h>
262#include <asm/dma.h>
263
264#ifdef CONFIG_MCA
265#include <linux/mca.h>
266#endif
267
268#ifdef CONFIG_EISA
269#include <linux/eisa.h>
270#endif
271
272#include "depca.h"
273
274static char version[] __initdata = "depca.c:v0.53 2001/1/12 davies@maniac.ultranet.com\n";
275
276#ifdef DEPCA_DEBUG
277static int depca_debug = DEPCA_DEBUG;
278#else
279static int depca_debug = 1;
280#endif
281
282#define DEPCA_NDA 0xffe0 /* No Device Address */
283
284#define TX_TIMEOUT (1*HZ)
285
286/*
287** Ethernet PROM defines
288*/
289#define PROBE_LENGTH 32
290#define ETH_PROM_SIG 0xAA5500FFUL
291
292/*
293** Set the number of Tx and Rx buffers. Ensure that the memory requested
294** here is no more than the amount of shared memory set up by the board switches.
295** The number of descriptors MUST BE A POWER OF 2.
296**
297** total_memory = NUM_RX_DESC*(8+RX_BUFF_SZ) + NUM_TX_DESC*(8+TX_BUFF_SZ)
298*/
299#define NUM_RX_DESC 8 /* Number of RX descriptors */
300#define NUM_TX_DESC 8 /* Number of TX descriptors */
301#define RX_BUFF_SZ 1536 /* Buffer size for each Rx buffer */
302#define TX_BUFF_SZ 1536 /* Buffer size for each Tx buffer */
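/*
** Worked example (illustrative): with the defaults above,
** total_memory = 8*(8+1536) + 8*(8+1536) = 24704 bytes (~24 kB),
** which fits in the 32 kB shared memory mode.
*/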
303
304/*
305** EISA bus defines
306*/
307#define DEPCA_EISA_IO_PORTS 0x0c00 /* I/O port base address, slot 0 */
308
309/*
310** ISA Bus defines
311*/
312#define DEPCA_RAM_BASE_ADDRESSES {0xc0000,0xd0000,0xe0000,0x00000}
313#define DEPCA_TOTAL_SIZE 0x10
314
315static struct {
316 u_long iobase;
317 struct platform_device *device;
318} depca_io_ports[] = {
319 { 0x300, NULL },
320 { 0x200, NULL },
321 { 0 , NULL },
322};
323
324/*
325** Name <-> Adapter mapping
326*/
327#define DEPCA_SIGNATURE {"DEPCA",\
328 "DE100","DE101",\
329 "DE200","DE201","DE202",\
330 "DE210","DE212",\
331 "DE422",\
332 ""}
333
334static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
335
336enum depca_type {
337 DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
338};
339
340static char depca_string[] = "depca";
341
342static int depca_device_remove (struct device *device);
343
344#ifdef CONFIG_EISA
345static struct eisa_device_id depca_eisa_ids[] = {
346 { "DEC4220", de422 },
347 { "" }
348};
349MODULE_DEVICE_TABLE(eisa, depca_eisa_ids);
350
351static int depca_eisa_probe (struct device *device);
352
353static struct eisa_driver depca_eisa_driver = {
354 .id_table = depca_eisa_ids,
355 .driver = {
356 .name = depca_string,
357 .probe = depca_eisa_probe,
358 .remove = __devexit_p (depca_device_remove)
359 }
360};
361#endif
362
363#ifdef CONFIG_MCA
364/*
365** Adapter ID for the MCA EtherWORKS DE210/212 adapter
366*/
367#define DE210_ID 0x628d
368#define DE212_ID 0x6def
369
370static short depca_mca_adapter_ids[] = {
371 DE210_ID,
372 DE212_ID,
373 0x0000
374};
375
376static char *depca_mca_adapter_name[] = {
377 "DEC EtherWORKS MC Adapter (DE210)",
378 "DEC EtherWORKS MC Adapter (DE212)",
379 NULL
380};
381
382static enum depca_type depca_mca_adapter_type[] = {
383 de210,
384 de212,
385 0
386};
387
388static int depca_mca_probe (struct device *);
389
390static struct mca_driver depca_mca_driver = {
391 .id_table = depca_mca_adapter_ids,
392 .driver = {
393 .name = depca_string,
394 .bus = &mca_bus_type,
395 .probe = depca_mca_probe,
396 .remove = __devexit_p(depca_device_remove),
397 },
398};
399#endif
400
401static int depca_isa_probe (struct platform_device *);
402
403static int __devexit depca_isa_remove(struct platform_device *pdev)
404{
405 return depca_device_remove(&pdev->dev);
406}
407
408static struct platform_driver depca_isa_driver = {
409 .probe = depca_isa_probe,
410 .remove = __devexit_p(depca_isa_remove),
411 .driver = {
412 .name = depca_string,
413 },
414};
415
416/*
417** Miscellaneous info...
418*/
419#define DEPCA_STRLEN 16
420
421/*
422** Memory Alignment. Each descriptor is 4 longwords long. To force a
423** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
424** DESC_ALIGN. DEPCA_ALIGN aligns the start address of the private memory area
425** and hence the RX descriptor ring's first entry.
426*/
427#define DEPCA_ALIGN4 ((u_long)4 - 1) /* 1 longword align */
428#define DEPCA_ALIGN8 ((u_long)8 - 1) /* 2 longword (quadword) align */
429#define DEPCA_ALIGN DEPCA_ALIGN8 /* Keep the LANCE happy... */
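/*
** An offset is rounded up to this alignment with
**	offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
** e.g. 0x16 becomes (0x16 + 7) & ~7 = 0x18, the next quadword boundary.
*/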
430
431/*
432** The DEPCA Rx and Tx ring descriptors.
433*/
434struct depca_rx_desc {
435 volatile s32 base;
436 s16 buf_length; /* This length is negative 2's complement! */
437 s16 msg_length; /* This length is "normal". */
438};
439
440struct depca_tx_desc {
441 volatile s32 base;
442 s16 length; /* This length is negative 2's complement! */
443 s16 misc; /* Errors and TDR info */
444};
445
446#define LA_MASK 0x0000ffff /* LANCE address mask for mapping network RAM
447 to LANCE memory address space */
448
449/*
450** The Lance initialization block, described in databook, in common memory.
451*/
452struct depca_init {
453 u16 mode; /* Mode register */
454 u8 phys_addr[ETH_ALEN]; /* Physical ethernet address */
455 u8 mcast_table[8]; /* Multicast Hash Table. */
456 u32 rx_ring; /* Rx ring base pointer & ring length */
457 u32 tx_ring; /* Tx ring base pointer & ring length */
458};
459
460#define DEPCA_PKT_STAT_SZ 16
461#define DEPCA_PKT_BIN_SZ 128 /* Should be >=100 unless you
462 increase DEPCA_PKT_STAT_SZ */
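/* e.g. depca_rx() counts a 300-byte packet in bins[3], since
   3 * DEPCA_PKT_BIN_SZ = 384 is the first bin boundary above it. */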
463struct depca_private {
464 char adapter_name[DEPCA_STRLEN]; /* /proc/ioports string */
465 enum depca_type adapter; /* Adapter type */
466 enum {
467 DEPCA_BUS_MCA = 1,
468 DEPCA_BUS_ISA,
469 DEPCA_BUS_EISA,
470 } depca_bus; /* type of bus */
471 struct depca_init init_block; /* Shadow Initialization block */
472/* CPU address space fields */
473 struct depca_rx_desc __iomem *rx_ring; /* Pointer to start of RX descriptor ring */
474 struct depca_tx_desc __iomem *tx_ring; /* Pointer to start of TX descriptor ring */
475 void __iomem *rx_buff[NUM_RX_DESC]; /* CPU virt address of sh'd memory buffs */
476 void __iomem *tx_buff[NUM_TX_DESC]; /* CPU virt address of sh'd memory buffs */
477 void __iomem *sh_mem; /* CPU mapped virt address of device RAM */
478 u_long mem_start; /* Bus address of device RAM (before remap) */
479 u_long mem_len; /* device memory size */
480/* Device address space fields */
481 u_long device_ram_start; /* Start of RAM in device addr space */
482/* Offsets used in both address spaces */
483 u_long rx_ring_offset; /* Offset from start of RAM to rx_ring */
484 u_long tx_ring_offset; /* Offset from start of RAM to tx_ring */
485 u_long buffs_offset; /* LANCE Rx and Tx buffers start address. */
486/* Kernel-only (not device) fields */
487 int rx_new, tx_new; /* The next free ring entry */
488 int rx_old, tx_old; /* The ring entries to be free()ed. */
489 spinlock_t lock;
490 struct { /* Private stats counters */
491 u32 bins[DEPCA_PKT_STAT_SZ];
492 u32 unicast;
493 u32 multicast;
494 u32 broadcast;
495 u32 excessive_collisions;
496 u32 tx_underruns;
497 u32 excessive_underruns;
498 } pktStats;
499 int txRingMask; /* TX ring mask */
500 int rxRingMask; /* RX ring mask */
501 s32 rx_rlen; /* log2(rxRingMask+1) for the descriptors */
502 s32 tx_rlen; /* log2(txRingMask+1) for the descriptors */
503};
504
505/*
506** The transmit ring full condition is described by the tx_old and tx_new
507** pointers by:
508** tx_old = tx_new Empty ring
509** tx_old = tx_new+1 Full ring
510** tx_old+txRingMask = tx_new Full ring (wrapped condition)
511*/
512#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
513 lp->tx_old+lp->txRingMask-lp->tx_new:\
514 lp->tx_old -lp->tx_new-1)
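/*
** For example, with txRingMask = 7 (eight descriptors), tx_old = 2 and
** tx_new = 5 give 2 + 7 - 5 = 4 free slots; tx_old = 5 and tx_new = 4
** (wrapped) give 5 - 4 - 1 = 0, i.e. the ring is full.
*/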
515
516/*
517** Public Functions
518*/
519static int depca_open(struct net_device *dev);
520static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
521 struct net_device *dev);
522static irqreturn_t depca_interrupt(int irq, void *dev_id);
523static int depca_close(struct net_device *dev);
524static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
525static void depca_tx_timeout(struct net_device *dev);
526static void set_multicast_list(struct net_device *dev);
527
528/*
529** Private functions
530*/
531static void depca_init_ring(struct net_device *dev);
532static int depca_rx(struct net_device *dev);
533static int depca_tx(struct net_device *dev);
534
535static void LoadCSRs(struct net_device *dev);
536static int InitRestartDepca(struct net_device *dev);
537static int DepcaSignature(char *name, u_long paddr);
538static int DevicePresent(u_long ioaddr);
539static int get_hw_addr(struct net_device *dev);
540static void SetMulticastFilter(struct net_device *dev);
541static int load_packet(struct net_device *dev, struct sk_buff *skb);
542static void depca_dbg_open(struct net_device *dev);
543
544static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
545static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
546static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
547static u_char *depca_irq;
548
549static int irq;
550static int io;
551static char *adapter_name;
552static int mem; /* For loadable module assignment
553 use insmod mem=0x????? .... */
554module_param (irq, int, 0);
555module_param (io, int, 0);
556module_param (adapter_name, charp, 0);
557module_param (mem, int, 0);
558MODULE_PARM_DESC(irq, "DEPCA IRQ number");
559MODULE_PARM_DESC(io, "DEPCA I/O base address");
560MODULE_PARM_DESC(adapter_name, "DEPCA adapter name");
561MODULE_PARM_DESC(mem, "DEPCA shared memory address");
562MODULE_LICENSE("GPL");
563
564/*
565** Miscellaneous defines...
566*/
567#define STOP_DEPCA \
568	do { outw(CSR0, DEPCA_ADDR); \
569	     outw(STOP, DEPCA_DATA); } while (0)
570
571static const struct net_device_ops depca_netdev_ops = {
572 .ndo_open = depca_open,
573 .ndo_start_xmit = depca_start_xmit,
574 .ndo_stop = depca_close,
575 .ndo_set_rx_mode = set_multicast_list,
576 .ndo_do_ioctl = depca_ioctl,
577 .ndo_tx_timeout = depca_tx_timeout,
578 .ndo_change_mtu = eth_change_mtu,
579 .ndo_set_mac_address = eth_mac_addr,
580 .ndo_validate_addr = eth_validate_addr,
581};
582
583static int __init depca_hw_init (struct net_device *dev, struct device *device)
584{
585 struct depca_private *lp;
586 int i, j, offset, netRAM, mem_len, status = 0;
587 s16 nicsr;
588 u_long ioaddr;
589 u_long mem_start;
590
591 /*
592 * We are now supposed to enter this function with the
593 * following fields filled with proper values :
594 *
595 * dev->base_addr
596 * lp->mem_start
597 * lp->depca_bus
598 * lp->adapter
599 *
600 * dev->irq can be set if known from device configuration (on
601 * MCA or EISA) or module option. Otherwise, it will be auto
602 * detected.
603 */
604
605 ioaddr = dev->base_addr;
606
607 STOP_DEPCA;
608
609 nicsr = inb(DEPCA_NICSR);
610 nicsr = ((nicsr & ~SHE & ~RBE & ~IEN) | IM);
611 outb(nicsr, DEPCA_NICSR);
612
613 if (inw(DEPCA_DATA) != STOP) {
614 return -ENXIO;
615 }
616
617 lp = netdev_priv(dev);
618 mem_start = lp->mem_start;
619
620 if (!mem_start || lp->adapter < DEPCA || lp->adapter >=unknown)
621 return -ENXIO;
622
623 printk("%s: %s at 0x%04lx",
624 dev_name(device), depca_signature[lp->adapter], ioaddr);
625
626 switch (lp->depca_bus) {
627#ifdef CONFIG_MCA
628 case DEPCA_BUS_MCA:
629 printk(" (MCA slot %d)", to_mca_device(device)->slot + 1);
630 break;
631#endif
632
633#ifdef CONFIG_EISA
634 case DEPCA_BUS_EISA:
635 printk(" (EISA slot %d)", to_eisa_device(device)->slot);
636 break;
637#endif
638
639 case DEPCA_BUS_ISA:
640 break;
641
642 default:
643 printk("Unknown DEPCA bus %d\n", lp->depca_bus);
644 return -ENXIO;
645 }
646
647 printk(", h/w address ");
648 status = get_hw_addr(dev);
649 printk("%pM", dev->dev_addr);
650 if (status != 0) {
651 printk(" which has an Ethernet PROM CRC error.\n");
652 return -ENXIO;
653 }
654
655 /* Set up the maximum amount of network RAM(kB) */
656 netRAM = ((lp->adapter != DEPCA) ? 64 : 48);
657 if ((nicsr & _128KB) && (lp->adapter == de422))
658 netRAM = 128;
659
660 /* Shared Memory Base Address */
661 if (nicsr & BUF) {
662 nicsr &= ~BS; /* DEPCA RAM in top 32k */
663 netRAM -= 32;
664
665 /* Only EISA/ISA needs start address to be re-computed */
666 if (lp->depca_bus != DEPCA_BUS_MCA)
667 mem_start += 0x8000;
668 }
669
670 if ((mem_len = (NUM_RX_DESC * (sizeof(struct depca_rx_desc) + RX_BUFF_SZ) + NUM_TX_DESC * (sizeof(struct depca_tx_desc) + TX_BUFF_SZ) + sizeof(struct depca_init)))
671 > (netRAM << 10)) {
672 printk(",\n requests %dkB RAM: only %dkB is available!\n", (mem_len >> 10), netRAM);
673 return -ENXIO;
674 }
675
676 printk(",\n has %dkB RAM at 0x%.5lx", netRAM, mem_start);
677
678 /* Enable the shadow RAM. */
679 if (lp->adapter != DEPCA) {
680 nicsr |= SHE;
681 outb(nicsr, DEPCA_NICSR);
682 }
683
684 spin_lock_init(&lp->lock);
685 sprintf(lp->adapter_name, "%s (%s)",
686 depca_signature[lp->adapter], dev_name(device));
687 status = -EBUSY;
688
689 /* Initialisation Block */
690 if (!request_mem_region (mem_start, mem_len, lp->adapter_name)) {
691 printk(KERN_ERR "depca: cannot request ISA memory, aborting\n");
692 goto out_priv;
693 }
694
695 status = -EIO;
696 lp->sh_mem = ioremap(mem_start, mem_len);
697 if (lp->sh_mem == NULL) {
698 printk(KERN_ERR "depca: cannot remap ISA memory, aborting\n");
699 goto out1;
700 }
701
702 lp->mem_start = mem_start;
703 lp->mem_len = mem_len;
704 lp->device_ram_start = mem_start & LA_MASK;
705
706 offset = 0;
707 offset += sizeof(struct depca_init);
708
709 /* Tx & Rx descriptors (aligned to a quadword boundary) */
710 offset = (offset + DEPCA_ALIGN) & ~DEPCA_ALIGN;
711 lp->rx_ring = lp->sh_mem + offset;
712 lp->rx_ring_offset = offset;
713
714 offset += (sizeof(struct depca_rx_desc) * NUM_RX_DESC);
715 lp->tx_ring = lp->sh_mem + offset;
716 lp->tx_ring_offset = offset;
717
718 offset += (sizeof(struct depca_tx_desc) * NUM_TX_DESC);
719
720 lp->buffs_offset = offset;
721
722 /* Finish initialising the ring information. */
723 lp->rxRingMask = NUM_RX_DESC - 1;
724 lp->txRingMask = NUM_TX_DESC - 1;
725
726 /* Calculate Tx/Rx RLEN size for the descriptors. */
727 for (i = 0, j = lp->rxRingMask; j > 0; i++) {
728 j >>= 1;
729 }
730 lp->rx_rlen = (s32) (i << 29);
731 for (i = 0, j = lp->txRingMask; j > 0; i++) {
732 j >>= 1;
733 }
734 lp->tx_rlen = (s32) (i << 29);
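	/* With the default NUM_RX_DESC = NUM_TX_DESC = 8 the loops above
	 * leave i == 3 (log2(8)), so rx_rlen = tx_rlen = 3 << 29 =
	 * 0x60000000: the LANCE reads the ring length from the top three
	 * bits of the ring base words in the initialisation block. */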
735
736 /* Load the initialisation block */
737 depca_init_ring(dev);
738
739 /* Initialise the control and status registers */
740 LoadCSRs(dev);
741
742 /* Enable DEPCA board interrupts for autoprobing */
743 nicsr = ((nicsr & ~IM) | IEN);
744 outb(nicsr, DEPCA_NICSR);
745
746	/* To auto-IRQ we enable the initialization-done and DMA error
747	   interrupts. For now we will always get a DMA error. */
748 if (dev->irq < 2) {
749 unsigned char irqnum;
750 unsigned long irq_mask, delay;
751
752 irq_mask = probe_irq_on();
753
754 /* Assign the correct irq list */
755 switch (lp->adapter) {
756 case DEPCA:
757 case de100:
758 case de101:
759 depca_irq = de1xx_irq;
760 break;
761 case de200:
762 case de201:
763 case de202:
764 case de210:
765 case de212:
766 depca_irq = de2xx_irq;
767 break;
768 case de422:
769 depca_irq = de422_irq;
770 break;
771
772 default:
773 break; /* Not reached */
774 }
775
776 /* Trigger an initialization just for the interrupt. */
777 outw(INEA | INIT, DEPCA_DATA);
778
779 delay = jiffies + HZ/50;
780 while (time_before(jiffies, delay))
781 yield();
782
783 irqnum = probe_irq_off(irq_mask);
784
785 status = -ENXIO;
786 if (!irqnum) {
787 printk(" and failed to detect IRQ line.\n");
788 goto out2;
789 } else {
790 for (dev->irq = 0, i = 0; (depca_irq[i]) && (!dev->irq); i++)
791 if (irqnum == depca_irq[i]) {
792 dev->irq = irqnum;
793 printk(" and uses IRQ%d.\n", dev->irq);
794 }
795
796 if (!dev->irq) {
797 printk(" but incorrect IRQ line detected.\n");
798 goto out2;
799 }
800 }
801 } else {
802 printk(" and assigned IRQ%d.\n", dev->irq);
803 }
804
805 if (depca_debug > 1) {
806 printk(version);
807 }
808
809 /* The DEPCA-specific entries in the device structure. */
810 dev->netdev_ops = &depca_netdev_ops;
811 dev->watchdog_timeo = TX_TIMEOUT;
812
813 dev->mem_start = 0;
814
815 dev_set_drvdata(device, dev);
816 SET_NETDEV_DEV (dev, device);
817
818 status = register_netdev(dev);
819 if (status == 0)
820 return 0;
821out2:
822 iounmap(lp->sh_mem);
823out1:
824 release_mem_region (mem_start, mem_len);
825out_priv:
826 return status;
827}
828
829
830static int depca_open(struct net_device *dev)
831{
832 struct depca_private *lp = netdev_priv(dev);
833 u_long ioaddr = dev->base_addr;
834 s16 nicsr;
835 int status = 0;
836
837 STOP_DEPCA;
838 nicsr = inb(DEPCA_NICSR);
839
840 /* Make sure the shadow RAM is enabled */
841 if (lp->adapter != DEPCA) {
842 nicsr |= SHE;
843 outb(nicsr, DEPCA_NICSR);
844 }
845
846 /* Re-initialize the DEPCA... */
847 depca_init_ring(dev);
848 LoadCSRs(dev);
849
850 depca_dbg_open(dev);
851
852 if (request_irq(dev->irq, depca_interrupt, 0, lp->adapter_name, dev)) {
853 printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
854 status = -EAGAIN;
855 } else {
856
857 /* Enable DEPCA board interrupts and turn off LED */
858 nicsr = ((nicsr & ~IM & ~LED) | IEN);
859 outb(nicsr, DEPCA_NICSR);
860 outw(CSR0, DEPCA_ADDR);
861
862 netif_start_queue(dev);
863
864 status = InitRestartDepca(dev);
865
866 if (depca_debug > 1) {
867 printk("CSR0: 0x%4.4x\n", inw(DEPCA_DATA));
868 printk("nicsr: 0x%02x\n", inb(DEPCA_NICSR));
869 }
870 }
871 return status;
872}
873
874/* Initialize the lance Rx and Tx descriptor rings. */
875static void depca_init_ring(struct net_device *dev)
876{
877 struct depca_private *lp = netdev_priv(dev);
878 u_int i;
879 u_long offset;
880
881 /* Lock out other processes whilst setting up the hardware */
882 netif_stop_queue(dev);
883
884 lp->rx_new = lp->tx_new = 0;
885 lp->rx_old = lp->tx_old = 0;
886
887 /* Initialize the base address and length of each buffer in the ring */
888 for (i = 0; i <= lp->rxRingMask; i++) {
889 offset = lp->buffs_offset + i * RX_BUFF_SZ;
890 writel((lp->device_ram_start + offset) | R_OWN, &lp->rx_ring[i].base);
891 writew(-RX_BUFF_SZ, &lp->rx_ring[i].buf_length);
892 lp->rx_buff[i] = lp->sh_mem + offset;
893 }
894
895 for (i = 0; i <= lp->txRingMask; i++) {
896 offset = lp->buffs_offset + (i + lp->rxRingMask + 1) * TX_BUFF_SZ;
897 writel((lp->device_ram_start + offset) & 0x00ffffff, &lp->tx_ring[i].base);
898 lp->tx_buff[i] = lp->sh_mem + offset;
899 }
900
901 /* Set up the initialization block */
902 lp->init_block.rx_ring = (lp->device_ram_start + lp->rx_ring_offset) | lp->rx_rlen;
903 lp->init_block.tx_ring = (lp->device_ram_start + lp->tx_ring_offset) | lp->tx_rlen;
904
905 SetMulticastFilter(dev);
906
907 for (i = 0; i < ETH_ALEN; i++) {
908 lp->init_block.phys_addr[i] = dev->dev_addr[i];
909 }
910
911 lp->init_block.mode = 0x0000; /* Enable the Tx and Rx */
912}
913
914
915static void depca_tx_timeout(struct net_device *dev)
916{
917 u_long ioaddr = dev->base_addr;
918
919 printk("%s: transmit timed out, status %04x, resetting.\n", dev->name, inw(DEPCA_DATA));
920
921 STOP_DEPCA;
922 depca_init_ring(dev);
923 LoadCSRs(dev);
924 dev->trans_start = jiffies; /* prevent tx timeout */
925 netif_wake_queue(dev);
926 InitRestartDepca(dev);
927}
928
929
930/*
931** Writes a socket buffer to TX descriptor ring and starts transmission
932*/
933static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
934 struct net_device *dev)
935{
936 struct depca_private *lp = netdev_priv(dev);
937 u_long ioaddr = dev->base_addr;
938 int status = 0;
939
940 /* Transmitter timeout, serious problems. */
941 if (skb->len < 1)
942 goto out;
943
944 if (skb_padto(skb, ETH_ZLEN))
945 goto out;
946
947 netif_stop_queue(dev);
948
949 if (TX_BUFFS_AVAIL) { /* Fill in a Tx ring entry */
950 status = load_packet(dev, skb);
951
952 if (!status) {
953 /* Trigger an immediate send demand. */
954 outw(CSR0, DEPCA_ADDR);
955 outw(INEA | TDMD, DEPCA_DATA);
956
957 dev_kfree_skb(skb);
958 }
959 if (TX_BUFFS_AVAIL)
960 netif_start_queue(dev);
961 } else
962 status = NETDEV_TX_LOCKED;
963
964 out:
965 return status;
966}
967
968/*
969** The DEPCA interrupt handler.
970*/
971static irqreturn_t depca_interrupt(int irq, void *dev_id)
972{
973 struct net_device *dev = dev_id;
974 struct depca_private *lp;
975 s16 csr0, nicsr;
976 u_long ioaddr;
977
978 if (dev == NULL) {
979 printk("depca_interrupt(): irq %d for unknown device.\n", irq);
980 return IRQ_NONE;
981 }
982
983 lp = netdev_priv(dev);
984 ioaddr = dev->base_addr;
985
986 spin_lock(&lp->lock);
987
988 /* mask the DEPCA board interrupts and turn on the LED */
989 nicsr = inb(DEPCA_NICSR);
990 nicsr |= (IM | LED);
991 outb(nicsr, DEPCA_NICSR);
992
993 outw(CSR0, DEPCA_ADDR);
994 csr0 = inw(DEPCA_DATA);
995
996 /* Acknowledge all of the current interrupt sources ASAP. */
997 outw(csr0 & INTE, DEPCA_DATA);
998
999 if (csr0 & RINT) /* Rx interrupt (packet arrived) */
1000 depca_rx(dev);
1001
1002 if (csr0 & TINT) /* Tx interrupt (packet sent) */
1003 depca_tx(dev);
1004
1005 /* Any resources available? */
1006 if ((TX_BUFFS_AVAIL >= 0) && netif_queue_stopped(dev)) {
1007 netif_wake_queue(dev);
1008 }
1009
1010 /* Unmask the DEPCA board interrupts and turn off the LED */
1011 nicsr = (nicsr & ~IM & ~LED);
1012 outb(nicsr, DEPCA_NICSR);
1013
1014 spin_unlock(&lp->lock);
1015 return IRQ_HANDLED;
1016}
1017
1018/* Called with lp->lock held */
1019static int depca_rx(struct net_device *dev)
1020{
1021 struct depca_private *lp = netdev_priv(dev);
1022 int i, entry;
1023 s32 status;
1024
1025 for (entry = lp->rx_new; !(readl(&lp->rx_ring[entry].base) & R_OWN); entry = lp->rx_new) {
1026 status = readl(&lp->rx_ring[entry].base) >> 16;
1027 if (status & R_STP) { /* Remember start of frame */
1028 lp->rx_old = entry;
1029 }
1030 if (status & R_ENP) { /* Valid frame status */
1031 if (status & R_ERR) { /* There was an error. */
1032 dev->stats.rx_errors++; /* Update the error stats. */
1033 if (status & R_FRAM)
1034 dev->stats.rx_frame_errors++;
1035 if (status & R_OFLO)
1036 dev->stats.rx_over_errors++;
1037 if (status & R_CRC)
1038 dev->stats.rx_crc_errors++;
1039 if (status & R_BUFF)
1040 dev->stats.rx_fifo_errors++;
1041 } else {
1042 short len, pkt_len = readw(&lp->rx_ring[entry].msg_length) - 4;
1043 struct sk_buff *skb;
1044
1045 skb = dev_alloc_skb(pkt_len + 2);
1046 if (skb != NULL) {
1047 unsigned char *buf;
1048 skb_reserve(skb, 2); /* 16 byte align the IP header */
1049 buf = skb_put(skb, pkt_len);
1050 if (entry < lp->rx_old) { /* Wrapped buffer */
1051 len = (lp->rxRingMask - lp->rx_old + 1) * RX_BUFF_SZ;
1052 memcpy_fromio(buf, lp->rx_buff[lp->rx_old], len);
1053 memcpy_fromio(buf + len, lp->rx_buff[0], pkt_len - len);
1054 } else { /* Linear buffer */
1055 memcpy_fromio(buf, lp->rx_buff[lp->rx_old], pkt_len);
1056 }
1057
1058 /*
1059 ** Notify the upper protocol layers that there is another
1060 ** packet to handle
1061 */
1062 skb->protocol = eth_type_trans(skb, dev);
1063 netif_rx(skb);
1064
1065 /*
1066 ** Update stats
1067 */
1068 dev->stats.rx_packets++;
1069 dev->stats.rx_bytes += pkt_len;
1070 for (i = 1; i < DEPCA_PKT_STAT_SZ - 1; i++) {
1071 if (pkt_len < (i * DEPCA_PKT_BIN_SZ)) {
1072 lp->pktStats.bins[i]++;
1073 i = DEPCA_PKT_STAT_SZ;
1074 }
1075 }
1076 if (is_multicast_ether_addr(buf)) {
1077 if (is_broadcast_ether_addr(buf)) {
1078 lp->pktStats.broadcast++;
1079 } else {
1080 lp->pktStats.multicast++;
1081 }
1082 } else if (compare_ether_addr(buf, dev->dev_addr) == 0) {
1083 lp->pktStats.unicast++;
1084 }
1085
1086 lp->pktStats.bins[0]++; /* Duplicates stats.rx_packets */
1087 if (lp->pktStats.bins[0] == 0) { /* Reset counters */
1088 memset((char *) &lp->pktStats, 0, sizeof(lp->pktStats));
1089 }
1090 } else {
1091 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1092 dev->stats.rx_dropped++; /* Really, deferred. */
1093 break;
1094 }
1095 }
1096 /* Change buffer ownership for this last frame, back to the adapter */
1097 for (; lp->rx_old != entry; lp->rx_old = (lp->rx_old + 1) & lp->rxRingMask) {
1098 writel(readl(&lp->rx_ring[lp->rx_old].base) | R_OWN, &lp->rx_ring[lp->rx_old].base);
1099 }
1100 writel(readl(&lp->rx_ring[entry].base) | R_OWN, &lp->rx_ring[entry].base);
1101 }
1102
1103 /*
1104 ** Update entry information
1105 */
1106 lp->rx_new = (lp->rx_new + 1) & lp->rxRingMask;
1107 }
1108
1109 return 0;
1110}
1111
1112/*
1113** Buffer sent - check for buffer errors.
1114** Called with lp->lock held
1115*/
1116static int depca_tx(struct net_device *dev)
1117{
1118 struct depca_private *lp = netdev_priv(dev);
1119 int entry;
1120 s32 status;
1121 u_long ioaddr = dev->base_addr;
1122
1123 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1124 status = readl(&lp->tx_ring[entry].base) >> 16;
1125
1126 if (status < 0) { /* Packet not yet sent! */
1127 break;
1128 } else if (status & T_ERR) { /* An error occurred. */
1129 status = readl(&lp->tx_ring[entry].misc);
1130 dev->stats.tx_errors++;
1131 if (status & TMD3_RTRY)
1132 dev->stats.tx_aborted_errors++;
1133 if (status & TMD3_LCAR)
1134 dev->stats.tx_carrier_errors++;
1135 if (status & TMD3_LCOL)
1136 dev->stats.tx_window_errors++;
1137 if (status & TMD3_UFLO)
1138 dev->stats.tx_fifo_errors++;
1139 if (status & (TMD3_BUFF | TMD3_UFLO)) {
1140 /* Trigger an immediate send demand. */
1141 outw(CSR0, DEPCA_ADDR);
1142 outw(INEA | TDMD, DEPCA_DATA);
1143 }
1144 } else if (status & (T_MORE | T_ONE)) {
1145 dev->stats.collisions++;
1146 } else {
1147 dev->stats.tx_packets++;
1148 }
1149
1150 /* Update all the pointers */
1151 lp->tx_old = (lp->tx_old + 1) & lp->txRingMask;
1152 }
1153
1154 return 0;
1155}
1156
1157static int depca_close(struct net_device *dev)
1158{
1159 struct depca_private *lp = netdev_priv(dev);
1160 s16 nicsr;
1161 u_long ioaddr = dev->base_addr;
1162
1163 netif_stop_queue(dev);
1164
1165 outw(CSR0, DEPCA_ADDR);
1166
1167 if (depca_debug > 1) {
1168 printk("%s: Shutting down ethercard, status was %2.2x.\n", dev->name, inw(DEPCA_DATA));
1169 }
1170
1171 /*
1172 ** We stop the DEPCA here -- it occasionally polls
1173 ** memory if we don't.
1174 */
1175 outw(STOP, DEPCA_DATA);
1176
1177 /*
1178 ** Give back the ROM in case the user wants to go to DOS
1179 */
1180 if (lp->adapter != DEPCA) {
1181 nicsr = inb(DEPCA_NICSR);
1182 nicsr &= ~SHE;
1183 outb(nicsr, DEPCA_NICSR);
1184 }
1185
1186 /*
1187 ** Free the associated irq
1188 */
1189 free_irq(dev->irq, dev);
1190 return 0;
1191}
1192
1193static void LoadCSRs(struct net_device *dev)
1194{
1195 struct depca_private *lp = netdev_priv(dev);
1196 u_long ioaddr = dev->base_addr;
1197
1198 outw(CSR1, DEPCA_ADDR); /* initialisation block address LSW */
1199 outw((u16) lp->device_ram_start, DEPCA_DATA);
1200 outw(CSR2, DEPCA_ADDR); /* initialisation block address MSW */
1201 outw((u16) (lp->device_ram_start >> 16), DEPCA_DATA);
1202 outw(CSR3, DEPCA_ADDR); /* ALE control */
1203 outw(ACON, DEPCA_DATA);
1204
1205 outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
1206}
1207
1208static int InitRestartDepca(struct net_device *dev)
1209{
1210 struct depca_private *lp = netdev_priv(dev);
1211 u_long ioaddr = dev->base_addr;
1212 int i, status = 0;
1213
1214 /* Copy the shadow init_block to shared memory */
1215 memcpy_toio(lp->sh_mem, &lp->init_block, sizeof(struct depca_init));
1216
1217 outw(CSR0, DEPCA_ADDR); /* point back to CSR0 */
1218 outw(INIT, DEPCA_DATA); /* initialize DEPCA */
1219
1220 /* wait for lance to complete initialisation */
1221 for (i = 0; (i < 100) && !(inw(DEPCA_DATA) & IDON); i++);
1222
1223 if (i != 100) {
1224 /* clear IDON by writing a "1", enable interrupts and start lance */
1225 outw(IDON | INEA | STRT, DEPCA_DATA);
1226 if (depca_debug > 2) {
1227 printk("%s: DEPCA open after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
1228 }
1229 } else {
1230 printk("%s: DEPCA unopen after %d ticks, init block 0x%08lx csr0 %4.4x.\n", dev->name, i, lp->mem_start, inw(DEPCA_DATA));
1231 status = -1;
1232 }
1233
1234 return status;
1235}
1236
1237/*
1238** Set or clear the multicast filter for this adaptor.
1239*/
1240static void set_multicast_list(struct net_device *dev)
1241{
1242 struct depca_private *lp = netdev_priv(dev);
1243 u_long ioaddr = dev->base_addr;
1244
1245 netif_stop_queue(dev);
1246 while (lp->tx_old != lp->tx_new); /* Wait for the ring to empty */
1247
1248 STOP_DEPCA; /* Temporarily stop the depca. */
1249 depca_init_ring(dev); /* Initialize the descriptor rings */
1250
1251 if (dev->flags & IFF_PROMISC) { /* Set promiscuous mode */
1252 lp->init_block.mode |= PROM;
1253 } else {
1254 SetMulticastFilter(dev);
1255 lp->init_block.mode &= ~PROM; /* Unset promiscuous mode */
1256 }
1257
1258 LoadCSRs(dev); /* Reload CSR3 */
1259 InitRestartDepca(dev); /* Resume normal operation. */
1260 netif_start_queue(dev); /* Unlock the TX ring */
1261}
1262
1263/*
1264** Calculate the hash code and update the logical address filter
1265** from a list of ethernet multicast addresses.
1266** Big endian crc one liner is mine, all mine, ha ha ha ha!
1267** LANCE calculates its hash codes big endian.
1268*/
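/*
** For example, if ether_crc() returns a CRC whose six LSBs are 0b011010
** (0x1a), reversing them gives hashcode 0b010110 (0x16), so byte
** 0x16 >> 3 = 2 of the filter gets bit 1 << (0x16 & 7) = 0x40 set.
*/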
1269static void SetMulticastFilter(struct net_device *dev)
1270{
1271 struct depca_private *lp = netdev_priv(dev);
1272 struct netdev_hw_addr *ha;
1273 int i, j, bit, byte;
1274 u16 hashcode;
1275 u32 crc;
1276
1277 if (dev->flags & IFF_ALLMULTI) { /* Set all multicast bits */
1278 for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) {
1279 lp->init_block.mcast_table[i] = (char) 0xff;
1280 }
1281 } else {
1282 for (i = 0; i < (HASH_TABLE_LEN >> 3); i++) { /* Clear the multicast table */
1283 lp->init_block.mcast_table[i] = 0;
1284 }
1285 /* Add multicast addresses */
1286 netdev_for_each_mc_addr(ha, dev) {
1287 crc = ether_crc(ETH_ALEN, ha->addr);
1288 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
1289 for (j = 0; j < 5; j++) { /* ... in reverse order. */
1290 hashcode = (hashcode << 1) | ((crc >>= 1) & 1);
1291 }
1292
1293 byte = hashcode >> 3; /* bit[3-5] -> byte in filter */
1294 bit = 1 << (hashcode & 0x07); /* bit[0-2] -> bit in byte */
1295 lp->init_block.mcast_table[byte] |= bit;
1296 }
1297 }
1298}
1299
1300static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
1301{
1302 int status = 0;
1303
1304 if (!request_region (ioaddr, DEPCA_TOTAL_SIZE, depca_string)) {
1305 status = -EBUSY;
1306 goto out;
1307 }
1308
1309 if (DevicePresent(ioaddr)) {
1310 status = -ENODEV;
1311 goto out_release;
1312 }
1313
1314 if (!(*devp = alloc_etherdev (sizeof (struct depca_private)))) {
1315 status = -ENOMEM;
1316 goto out_release;
1317 }
1318
1319 return 0;
1320
1321 out_release:
1322 release_region (ioaddr, DEPCA_TOTAL_SIZE);
1323 out:
1324 return status;
1325}
1326
1327#ifdef CONFIG_MCA
1328/*
1329** Microchannel bus I/O device probe
1330*/
1331static int __init depca_mca_probe(struct device *device)
1332{
1333 unsigned char pos[2];
1334 unsigned char where;
1335 unsigned long iobase, mem_start;
1336 int irq, err;
1337 struct mca_device *mdev = to_mca_device (device);
1338 struct net_device *dev;
1339 struct depca_private *lp;
1340
1341 /*
1342 ** Search for the adapter. If an address has been given, search
1343 ** specifically for the card at that address. Otherwise find the
1344 ** first card in the system.
1345 */
1346
1347 pos[0] = mca_device_read_stored_pos(mdev, 2);
1348 pos[1] = mca_device_read_stored_pos(mdev, 3);
1349
1350 /*
1351 ** IO of card is handled by bits 1 and 2 of pos0.
1352 **
1353 ** bit2 bit1 IO
1354 ** 0 0 0x2c00
1355 ** 0 1 0x2c10
1356 ** 1 0 0x2c20
1357 ** 1 1 0x2c30
1358 */
1359 where = (pos[0] & 6) >> 1;
1360 iobase = 0x2c00 + (0x10 * where);
1361
1362 /*
1363 ** Found the adapter we were looking for. Now start setting it up.
1364 **
1365 ** First work on decoding the IRQ. It's stored in the lower 4 bits
1366 ** of pos1. Bits are as follows (from the ADF file):
1367 **
1368 ** Bits
1369 ** 3 2 1 0 IRQ
1370 ** --------------------
1371 ** 0 0 1 0 5
1372 ** 0 0 0 1 9
1373 ** 0 1 0 0 10
1374 ** 1 0 0 0 11
1375 */
1376 where = pos[1] & 0x0f;
1377 switch (where) {
1378 case 1:
1379 irq = 9;
1380 break;
1381 case 2:
1382 irq = 5;
1383 break;
1384 case 4:
1385 irq = 10;
1386 break;
1387 case 8:
1388 irq = 11;
1389 break;
1390 default:
1391 printk("%s: mca_probe IRQ error. You should never get here (%d).\n", mdev->name, where);
1392 return -EINVAL;
1393 }
1394
1395 /*
1396 ** Shared memory address of adapter is stored in bits 3-5 of pos0.
1397 ** They are mapped as follows:
1398 **
1399 ** Bit
1400 ** 5 4 3 Memory Addresses
1401 ** 0 0 0 C0000-CFFFF (64K)
1402 ** 1 0 0 C8000-CFFFF (32K)
1403 ** 0 0 1 D0000-DFFFF (64K)
1404 ** 1 0 1 D8000-DFFFF (32K)
1405 ** 0 1 0 E0000-EFFFF (64K)
1406 ** 1 1 0 E8000-EFFFF (32K)
1407 */
1408 where = (pos[0] & 0x18) >> 3;
1409 mem_start = 0xc0000 + (where * 0x10000);
1410 if (pos[0] & 0x20) {
1411 mem_start += 0x8000;
1412 }
1413
1414 /* claim the slot */
1415 strncpy(mdev->name, depca_mca_adapter_name[mdev->index],
1416 sizeof(mdev->name));
1417 mca_device_set_claim(mdev, 1);
1418
1419 /*
1420 ** Get everything allocated and initialized... (almost just
1421 ** like the ISA and EISA probes)
1422 */
1423 irq = mca_device_transform_irq(mdev, irq);
1424 iobase = mca_device_transform_ioport(mdev, iobase);
1425
1426 if ((err = depca_common_init (iobase, &dev)))
1427 goto out_unclaim;
1428
1429 dev->irq = irq;
1430 dev->base_addr = iobase;
1431 lp = netdev_priv(dev);
1432 lp->depca_bus = DEPCA_BUS_MCA;
1433 lp->adapter = depca_mca_adapter_type[mdev->index];
1434 lp->mem_start = mem_start;
1435
1436 if ((err = depca_hw_init(dev, device)))
1437 goto out_free;
1438
1439 return 0;
1440
1441 out_free:
1442 free_netdev (dev);
1443 release_region (iobase, DEPCA_TOTAL_SIZE);
1444 out_unclaim:
1445 mca_device_set_claim(mdev, 0);
1446
1447 return err;
1448}
1449#endif
1450
1451/*
1452** ISA bus I/O device probe
1453*/
1454
1455static void __init depca_platform_probe (void)
1456{
1457 int i;
1458 struct platform_device *pldev;
1459
1460 for (i = 0; depca_io_ports[i].iobase; i++) {
1461 depca_io_ports[i].device = NULL;
1462
1463 /* if an address has been specified on the command
1464 * line, use it (if valid) */
1465 if (io && io != depca_io_ports[i].iobase)
1466 continue;
1467
1468 pldev = platform_device_alloc(depca_string, i);
1469 if (!pldev)
1470 continue;
1471
1472 pldev->dev.platform_data = (void *) depca_io_ports[i].iobase;
1473 depca_io_ports[i].device = pldev;
1474
1475 if (platform_device_add(pldev)) {
1476 depca_io_ports[i].device = NULL;
1477 pldev->dev.platform_data = NULL;
1478 platform_device_put(pldev);
1479 continue;
1480 }
1481
1482 if (!pldev->dev.driver) {
1483 /* The driver was not bound to this device, there was
1484 * no hardware at this address. Unregister it, as the
1485 * release function will take care of freeing the
1486 * allocated structure */
1487
1488 depca_io_ports[i].device = NULL;
1489 pldev->dev.platform_data = NULL;
1490 platform_device_unregister (pldev);
1491 }
1492 }
1493}
1494
1495static enum depca_type __init depca_shmem_probe (ulong *mem_start)
1496{
1497 u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
1498 enum depca_type adapter = unknown;
1499 int i;
1500
1501 for (i = 0; mem_base[i]; i++) {
1502 *mem_start = mem ? mem : mem_base[i];
1503 adapter = DepcaSignature (adapter_name, *mem_start);
1504 if (adapter != unknown)
1505 break;
1506 }
1507
1508 return adapter;
1509}
1510
1511static int __devinit depca_isa_probe (struct platform_device *device)
1512{
1513 struct net_device *dev;
1514 struct depca_private *lp;
1515 u_long ioaddr, mem_start = 0;
1516 enum depca_type adapter = unknown;
1517 int status = 0;
1518
1519 ioaddr = (u_long) device->dev.platform_data;
1520
1521 if ((status = depca_common_init (ioaddr, &dev)))
1522 goto out;
1523
1524 adapter = depca_shmem_probe (&mem_start);
1525
1526 if (adapter == unknown) {
1527 status = -ENODEV;
1528 goto out_free;
1529 }
1530
1531 dev->base_addr = ioaddr;
1532 dev->irq = irq; /* Use whatever value the user gave
1533 * us, and 0 if he didn't. */
1534 lp = netdev_priv(dev);
1535 lp->depca_bus = DEPCA_BUS_ISA;
1536 lp->adapter = adapter;
1537 lp->mem_start = mem_start;
1538
1539 if ((status = depca_hw_init(dev, &device->dev)))
1540 goto out_free;
1541
1542 return 0;
1543
1544 out_free:
1545 free_netdev (dev);
1546 release_region (ioaddr, DEPCA_TOTAL_SIZE);
1547 out:
1548 return status;
1549}
1550
1551/*
1552** EISA callbacks from sysfs.
1553*/
1554
1555#ifdef CONFIG_EISA
1556static int __init depca_eisa_probe (struct device *device)
1557{
1558 enum depca_type adapter = unknown;
1559 struct eisa_device *edev;
1560 struct net_device *dev;
1561 struct depca_private *lp;
1562 u_long ioaddr, mem_start;
1563 int status = 0;
1564
1565 edev = to_eisa_device (device);
1566 ioaddr = edev->base_addr + DEPCA_EISA_IO_PORTS;
1567
1568 if ((status = depca_common_init (ioaddr, &dev)))
1569 goto out;
1570
1571 /* It would have been nice to get card configuration from the
1572 * card. Unfortunately, this register is write-only (shares
1573	 * its address with the ethernet prom)... As we don't parse
1574 * the EISA configuration structures (yet... :-), just rely on
1575 * the ISA probing to sort it out... */
1576
1577 adapter = depca_shmem_probe (&mem_start);
1578 if (adapter == unknown) {
1579 status = -ENODEV;
1580 goto out_free;
1581 }
1582
1583 dev->base_addr = ioaddr;
1584 dev->irq = irq;
1585 lp = netdev_priv(dev);
1586 lp->depca_bus = DEPCA_BUS_EISA;
1587 lp->adapter = edev->id.driver_data;
1588 lp->mem_start = mem_start;
1589
1590 if ((status = depca_hw_init(dev, device)))
1591 goto out_free;
1592
1593 return 0;
1594
1595 out_free:
1596 free_netdev (dev);
1597 release_region (ioaddr, DEPCA_TOTAL_SIZE);
1598 out:
1599 return status;
1600}
1601#endif
1602
1603static int __devexit depca_device_remove (struct device *device)
1604{
1605 struct net_device *dev;
1606 struct depca_private *lp;
1607 int bus;
1608
1609 dev = dev_get_drvdata(device);
1610 lp = netdev_priv(dev);
1611
1612 unregister_netdev (dev);
1613 iounmap (lp->sh_mem);
1614 release_mem_region (lp->mem_start, lp->mem_len);
1615 release_region (dev->base_addr, DEPCA_TOTAL_SIZE);
1616 bus = lp->depca_bus;
1617 free_netdev (dev);
1618
1619 return 0;
1620}
1621
1622/*
1623** Look for a particular board name in the on-board Remote Diagnostics
1624** and Boot (readb) ROM. This will also give us a clue to the network RAM
1625** base address.
1626*/
1627static int __init DepcaSignature(char *name, u_long base_addr)
1628{
1629 u_int i, j, k;
1630 void __iomem *ptr;
1631 char tmpstr[16];
1632 u_long prom_addr = base_addr + 0xc000;
1633 u_long mem_addr = base_addr + 0x8000; /* 32KB */
1634
1635 /* Can't reserve the prom region, it is already marked as
1636 * used, at least on x86. Instead, reserve a memory region a
1637 * board would certainly use. If it works, go ahead. If not,
1638 * run like hell... */
1639
1640 if (!request_mem_region (mem_addr, 16, depca_string))
1641 return unknown;
1642
1643 /* Copy the first 16 bytes of ROM */
1644
1645 ptr = ioremap(prom_addr, 16);
1646 if (ptr == NULL) {
1647 printk(KERN_ERR "depca: I/O remap failed at %lx\n", prom_addr);
		release_mem_region(mem_addr, 16); /* don't leak the region on failure */
1648		return unknown;
1649 }
1650 for (i = 0; i < 16; i++) {
1651 tmpstr[i] = readb(ptr + i);
1652 }
1653 iounmap(ptr);
1654
1655 release_mem_region (mem_addr, 16);
1656
1657 /* Check if PROM contains a valid string */
1658 for (i = 0; *depca_signature[i] != '\0'; i++) {
1659 for (j = 0, k = 0; j < 16 && k < strlen(depca_signature[i]); j++) {
1660 if (depca_signature[i][k] == tmpstr[j]) { /* track signature */
1661 k++;
1662 } else { /* lost signature; begin search again */
1663 k = 0;
1664 }
1665 }
1666 if (k == strlen(depca_signature[i]))
1667 break;
1668 }
1669
1670 /* Check if name string is valid, provided there's no PROM */
1671 if (name && *name && (i == unknown)) {
1672 for (i = 0; *depca_signature[i] != '\0'; i++) {
1673 if (strcmp(name, depca_signature[i]) == 0)
1674 break;
1675 }
1676 }
1677
1678 return i;
1679}
1680
1681/*
1682** Look for a special sequence in the Ethernet station address PROM that
1683** is common across all DEPCA products. Note that the original DEPCA needs
1684** its ROM address counter to be initialized and enabled. Only enable
1685** if the first address octet is a 0x08 - this minimises the chances of
1686** messing around with some other hardware, but it assumes that this DEPCA
1687** card initialized itself correctly.
1688**
1689** Search the Ethernet address ROM for the signature. Since the ROM address
1690** counter can start at an arbitrary point, the search must include the entire
1691** probe sequence length plus the (length_of_the_signature - 1).
1692** Stop the search IMMEDIATELY after the signature is found so that the
1693** PROM address counter is correctly positioned at the start of the
1694** ethernet address for later read out.
1695*/
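/*
** With ETH_PROM_SIG repeated twice the signature is 8 bytes long, so at
** most PROBE_LENGTH + 8 - 1 = 39 PROM reads are needed before giving up.
*/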
1696static int __init DevicePresent(u_long ioaddr)
1697{
1698 union {
1699 struct {
1700 u32 a;
1701 u32 b;
1702 } llsig;
1703 char Sig[sizeof(u32) << 1];
1704	} dev;
1706 short sigLength = 0;
1707 s8 data;
1708 s16 nicsr;
1709 int i, j, status = 0;
1710
1711 data = inb(DEPCA_PROM); /* clear counter on DEPCA */
1712 data = inb(DEPCA_PROM); /* read data */
1713
1714 if (data == 0x08) { /* Enable counter on DEPCA */
1715 nicsr = inb(DEPCA_NICSR);
1716 nicsr |= AAC;
1717 outb(nicsr, DEPCA_NICSR);
1718 }
1719
1720 dev.llsig.a = ETH_PROM_SIG;
1721 dev.llsig.b = ETH_PROM_SIG;
1722 sigLength = sizeof(u32) << 1;
1723
1724 for (i = 0, j = 0; j < sigLength && i < PROBE_LENGTH + sigLength - 1; i++) {
1725 data = inb(DEPCA_PROM);
1726 if (dev.Sig[j] == data) { /* track signature */
1727 j++;
1728 } else { /* lost signature; begin search again */
1729 if (data == dev.Sig[0]) { /* rare case.... */
1730 j = 1;
1731 } else {
1732 j = 0;
1733 }
1734 }
1735 }
1736
1737 if (j != sigLength) {
1738 status = -ENODEV; /* search failed */
1739 }
1740
1741 return status;
1742}
1743
1744/*
1745** The DE100 and DE101 PROM accesses were made non-standard for some bizarre
1746** reason: access the upper half of the PROM with x=0; access the lower half
1747** with x=1.
1748*/
1749static int __init get_hw_addr(struct net_device *dev)
1750{
1751 u_long ioaddr = dev->base_addr;
1752 struct depca_private *lp = netdev_priv(dev);
1753 int i, k, tmp, status = 0;
1754 u_short j, x, chksum;
1755
1756 x = (((lp->adapter == de100) || (lp->adapter == de101)) ? 1 : 0);
1757
1758 for (i = 0, k = 0, j = 0; j < 3; j++) {
1759 k <<= 1;
1760 if (k > 0xffff)
1761 k -= 0xffff;
1762
1763 k += (u_char) (tmp = inb(DEPCA_PROM + x));
1764 dev->dev_addr[i++] = (u_char) tmp;
1765 k += (u_short) ((tmp = inb(DEPCA_PROM + x)) << 8);
1766 dev->dev_addr[i++] = (u_char) tmp;
1767
1768 if (k > 0xffff)
1769 k -= 0xffff;
1770 }
1771 if (k == 0xffff)
1772 k = 0;
1773
1774 chksum = (u_char) inb(DEPCA_PROM + x);
1775 chksum |= (u_short) (inb(DEPCA_PROM + x) << 8);
1776 if (k != chksum)
1777 status = -1;
1778
1779 return status;
1780}
1781
1782/*
1783** Load a packet into the shared memory
1784*/
1785static int load_packet(struct net_device *dev, struct sk_buff *skb)
1786{
1787 struct depca_private *lp = netdev_priv(dev);
1788 int i, entry, end, len, status = NETDEV_TX_OK;
1789
1790 entry = lp->tx_new; /* Ring around buffer number. */
1791 end = (entry + (skb->len - 1) / TX_BUFF_SZ) & lp->txRingMask;
1792 if (!(readl(&lp->tx_ring[end].base) & T_OWN)) { /* Enough room? */
1793 /*
1794 ** Caution: the write order is important here... don't set up the
1795 ** ownership rights until all the other information is in place.
1796 */
1797 if (end < entry) { /* wrapped buffer */
1798 len = (lp->txRingMask - entry + 1) * TX_BUFF_SZ;
1799 memcpy_toio(lp->tx_buff[entry], skb->data, len);
1800 memcpy_toio(lp->tx_buff[0], skb->data + len, skb->len - len);
1801 } else { /* linear buffer */
1802 memcpy_toio(lp->tx_buff[entry], skb->data, skb->len);
1803 }
1804
1805 /* set up the buffer descriptors */
1806 len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
1807 for (i = entry; i != end; i = (i+1) & lp->txRingMask) {
1808 /* clean out flags */
1809 writel(readl(&lp->tx_ring[i].base) & ~T_FLAGS, &lp->tx_ring[i].base);
1810 writew(0x0000, &lp->tx_ring[i].misc); /* clears other error flags */
1811 writew(-TX_BUFF_SZ, &lp->tx_ring[i].length); /* packet length in buffer */
1812 len -= TX_BUFF_SZ;
1813 }
1814 /* clean out flags */
1815 writel(readl(&lp->tx_ring[end].base) & ~T_FLAGS, &lp->tx_ring[end].base);
1816 writew(0x0000, &lp->tx_ring[end].misc); /* clears other error flags */
1817 writew(-len, &lp->tx_ring[end].length); /* packet length in last buff */
1818
1819 /* start of packet */
1820 writel(readl(&lp->tx_ring[entry].base) | T_STP, &lp->tx_ring[entry].base);
1821 /* end of packet */
1822 writel(readl(&lp->tx_ring[end].base) | T_ENP, &lp->tx_ring[end].base);
1823
1824 for (i = end; i != entry; --i) {
1825 /* ownership of packet */
1826 writel(readl(&lp->tx_ring[i].base) | T_OWN, &lp->tx_ring[i].base);
1827 if (i == 0)
1828 i = lp->txRingMask + 1;
1829 }
1830 writel(readl(&lp->tx_ring[entry].base) | T_OWN, &lp->tx_ring[entry].base);
1831
1832 lp->tx_new = (++end) & lp->txRingMask; /* update current pointers */
1833 } else {
1834 status = NETDEV_TX_LOCKED;
1835 }
1836
1837 return status;
1838}
1839
1840static void depca_dbg_open(struct net_device *dev)
1841{
1842 struct depca_private *lp = netdev_priv(dev);
1843 u_long ioaddr = dev->base_addr;
1844 struct depca_init *p = &lp->init_block;
1845 int i;
1846
1847 if (depca_debug > 1) {
1848 /* Do not copy the shadow init block into shared memory */
1849 /* Debugging should not affect normal operation! */
1850 /* The shadow init block will get copied across during InitRestartDepca */
1851 printk("%s: depca open with irq %d\n", dev->name, dev->irq);
1852 printk("Descriptor head addresses (CPU):\n");
1853 printk(" 0x%lx 0x%lx\n", (u_long) lp->rx_ring, (u_long) lp->tx_ring);
1854 printk("Descriptor addresses (CPU):\nRX: ");
1855 for (i = 0; i < lp->rxRingMask; i++) {
1856 if (i < 3) {
1857 printk("%p ", &lp->rx_ring[i].base);
1858 }
1859 }
1860 printk("...%p\n", &lp->rx_ring[i].base);
1861 printk("TX: ");
1862 for (i = 0; i < lp->txRingMask; i++) {
1863 if (i < 3) {
1864 printk("%p ", &lp->tx_ring[i].base);
1865 }
1866 }
1867 printk("...%p\n", &lp->tx_ring[i].base);
1868 printk("\nDescriptor buffers (Device):\nRX: ");
1869 for (i = 0; i < lp->rxRingMask; i++) {
1870 if (i < 3) {
1871 printk("0x%8.8x ", readl(&lp->rx_ring[i].base));
1872 }
1873 }
1874 printk("...0x%8.8x\n", readl(&lp->rx_ring[i].base));
1875 printk("TX: ");
1876 for (i = 0; i < lp->txRingMask; i++) {
1877 if (i < 3) {
1878 printk("0x%8.8x ", readl(&lp->tx_ring[i].base));
1879 }
1880 }
1881 printk("...0x%8.8x\n", readl(&lp->tx_ring[i].base));
1882 printk("Initialisation block at 0x%8.8lx(Phys)\n", lp->mem_start);
1883 printk(" mode: 0x%4.4x\n", p->mode);
1884 printk(" physical address: %pM\n", p->phys_addr);
1885 printk(" multicast hash table: ");
1886 for (i = 0; i < (HASH_TABLE_LEN >> 3) - 1; i++) {
1887 printk("%2.2x:", p->mcast_table[i]);
1888 }
1889 printk("%2.2x\n", p->mcast_table[i]);
1890 printk(" rx_ring at: 0x%8.8x\n", p->rx_ring);
1891 printk(" tx_ring at: 0x%8.8x\n", p->tx_ring);
1892 printk("buffers (Phys): 0x%8.8lx\n", lp->mem_start + lp->buffs_offset);
1893 printk("Ring size:\nRX: %d Log2(rxRingMask): 0x%8.8x\n", (int) lp->rxRingMask + 1, lp->rx_rlen);
1894 printk("TX: %d Log2(txRingMask): 0x%8.8x\n", (int) lp->txRingMask + 1, lp->tx_rlen);
1895 outw(CSR2, DEPCA_ADDR);
1896 printk("CSR2&1: 0x%4.4x", inw(DEPCA_DATA));
1897 outw(CSR1, DEPCA_ADDR);
1898 printk("%4.4x\n", inw(DEPCA_DATA));
1899 outw(CSR3, DEPCA_ADDR);
1900 printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
1901 }
1902}
1903
1904/*
1905** Perform IOCTL call functions here. Some are privileged operations and
1906** CAP_NET_ADMIN is checked in those cases.
1907** The multicast IOCTLs do not work here; they are for testing purposes only.
1908*/
1909static int depca_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1910{
1911 struct depca_private *lp = netdev_priv(dev);
1912 struct depca_ioctl *ioc = (struct depca_ioctl *) &rq->ifr_ifru;
1913 int i, status = 0;
1914 u_long ioaddr = dev->base_addr;
1915 union {
1916 u8 addr[(HASH_TABLE_LEN * ETH_ALEN)];
1917 u16 sval[(HASH_TABLE_LEN * ETH_ALEN) >> 1];
1918 u32 lval[(HASH_TABLE_LEN * ETH_ALEN) >> 2];
1919 } tmp;
1920 unsigned long flags;
1921 void *buf;
1922
1923 switch (ioc->cmd) {
1924 case DEPCA_GET_HWADDR: /* Get the hardware address */
1925 for (i = 0; i < ETH_ALEN; i++) {
1926 tmp.addr[i] = dev->dev_addr[i];
1927 }
1928 ioc->len = ETH_ALEN;
1929 if (copy_to_user(ioc->data, tmp.addr, ioc->len))
1930 return -EFAULT;
1931 break;
1932
1933 case DEPCA_SET_HWADDR: /* Set the hardware address */
1934 if (!capable(CAP_NET_ADMIN))
1935 return -EPERM;
1936 if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN))
1937 return -EFAULT;
1938 for (i = 0; i < ETH_ALEN; i++) {
1939 dev->dev_addr[i] = tmp.addr[i];
1940 }
1941 netif_stop_queue(dev);
1942 while (lp->tx_old != lp->tx_new)
1943 cpu_relax(); /* Wait for the ring to empty */
1944
1945 STOP_DEPCA; /* Temporarily stop the depca. */
1946 depca_init_ring(dev); /* Initialize the descriptor rings */
1947 LoadCSRs(dev); /* Reload CSR3 */
1948 InitRestartDepca(dev); /* Resume normal operation. */
1949 netif_start_queue(dev); /* Unlock the TX ring */
1950 break;
1951
1952 case DEPCA_SET_PROM: /* Set Promiscuous Mode */
1953 if (!capable(CAP_NET_ADMIN))
1954 return -EPERM;
1955 netif_stop_queue(dev);
1956 while (lp->tx_old != lp->tx_new)
1957 cpu_relax(); /* Wait for the ring to empty */
1958
1959 STOP_DEPCA; /* Temporarily stop the depca. */
1960 depca_init_ring(dev); /* Initialize the descriptor rings */
1961 lp->init_block.mode |= PROM; /* Set promiscuous mode */
1962
1963 LoadCSRs(dev); /* Reload CSR3 */
1964 InitRestartDepca(dev); /* Resume normal operation. */
1965 netif_start_queue(dev); /* Unlock the TX ring */
1966 break;
1967
1968 case DEPCA_CLR_PROM: /* Clear Promiscuous Mode */
1969 if (!capable(CAP_NET_ADMIN))
1970 return -EPERM;
1971 netif_stop_queue(dev);
1972 while (lp->tx_old != lp->tx_new)
1973 cpu_relax(); /* Wait for the ring to empty */
1974
1975 STOP_DEPCA; /* Temporarily stop the depca. */
1976 depca_init_ring(dev); /* Initialize the descriptor rings */
1977 lp->init_block.mode &= ~PROM; /* Clear promiscuous mode */
1978
1979 LoadCSRs(dev); /* Reload CSR3 */
1980 InitRestartDepca(dev); /* Resume normal operation. */
1981 netif_start_queue(dev); /* Unlock the TX ring */
1982 break;
1983
1984 case DEPCA_SAY_BOO: /* Say "Boo!" to the kernel log file */
1985 if(!capable(CAP_NET_ADMIN))
1986 return -EPERM;
1987 printk("%s: Boo!\n", dev->name);
1988 break;
1989
1990 case DEPCA_GET_MCA: /* Get the multicast address table */
1991 ioc->len = (HASH_TABLE_LEN >> 3);
1992 if (copy_to_user(ioc->data, lp->init_block.mcast_table, ioc->len))
1993 return -EFAULT;
1994 break;
1995
1996 case DEPCA_SET_MCA: /* Set a multicast address */
1997 if (!capable(CAP_NET_ADMIN))
1998 return -EPERM;
1999 if (ioc->len >= HASH_TABLE_LEN)
2000 return -EINVAL;
2001 if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN * ioc->len))
2002 return -EFAULT;
2003 set_multicast_list(dev);
2004 break;
2005
2006 case DEPCA_CLR_MCA: /* Clear all multicast addresses */
2007 if (!capable(CAP_NET_ADMIN))
2008 return -EPERM;
2009 set_multicast_list(dev);
2010 break;
2011
2012 case DEPCA_MCA_EN: /* Enable pass all multicast addressing */
2013 if (!capable(CAP_NET_ADMIN))
2014 return -EPERM;
2015 set_multicast_list(dev);
2016 break;
2017
2018 case DEPCA_GET_STATS: /* Get the driver statistics */
2019 ioc->len = sizeof(lp->pktStats);
2020 buf = kmalloc(ioc->len, GFP_KERNEL);
2021 if(!buf)
2022 return -ENOMEM;
2023 spin_lock_irqsave(&lp->lock, flags);
2024 memcpy(buf, &lp->pktStats, ioc->len);
2025 spin_unlock_irqrestore(&lp->lock, flags);
2026 if (copy_to_user(ioc->data, buf, ioc->len))
2027 status = -EFAULT;
2028 kfree(buf);
2029 break;
2030
2031 case DEPCA_CLR_STATS: /* Zero out the driver statistics */
2032 if (!capable(CAP_NET_ADMIN))
2033 return -EPERM;
2034 spin_lock_irqsave(&lp->lock, flags);
2035 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
2036 spin_unlock_irqrestore(&lp->lock, flags);
2037 break;
2038
2039 case DEPCA_GET_REG: /* Get the DEPCA Registers */
2040 i = 0;
2041 tmp.sval[i++] = inw(DEPCA_NICSR);
2042 outw(CSR0, DEPCA_ADDR); /* status register */
2043 tmp.sval[i++] = inw(DEPCA_DATA);
2044 memcpy(&tmp.sval[i], &lp->init_block, sizeof(struct depca_init));
2045 ioc->len = i + sizeof(struct depca_init);
2046 if (copy_to_user(ioc->data, tmp.addr, ioc->len))
2047 return -EFAULT;
2048 break;
2049
2050 default:
2051 return -EOPNOTSUPP;
2052 }
2053
2054 return status;
2055}
2056
2057static int __init depca_module_init (void)
2058{
2059 int err = 0;
2060
2061#ifdef CONFIG_MCA
2062 err = mca_register_driver(&depca_mca_driver);
2063 if (err)
2064 goto err;
2065#endif
2066#ifdef CONFIG_EISA
2067 err = eisa_driver_register(&depca_eisa_driver);
2068 if (err)
2069 goto err_mca;
2070#endif
2071 err = platform_driver_register(&depca_isa_driver);
2072 if (err)
2073 goto err_eisa;
2074
2075 depca_platform_probe();
2076 return 0;
2077
2078err_eisa:
2079#ifdef CONFIG_EISA
2080 eisa_driver_unregister(&depca_eisa_driver);
2081err_mca:
2082#endif
2083#ifdef CONFIG_MCA
2084 mca_unregister_driver(&depca_mca_driver);
2085err:
2086#endif
2087 return err;
2088}
2089
2090static void __exit depca_module_exit (void)
2091{
2092 int i;
2093#ifdef CONFIG_MCA
2094 mca_unregister_driver (&depca_mca_driver);
2095#endif
2096#ifdef CONFIG_EISA
2097 eisa_driver_unregister (&depca_eisa_driver);
2098#endif
2099 platform_driver_unregister (&depca_isa_driver);
2100
2101 for (i = 0; depca_io_ports[i].iobase; i++) {
2102 if (depca_io_ports[i].device) {
2103 depca_io_ports[i].device->dev.platform_data = NULL;
2104 platform_device_unregister (depca_io_ports[i].device);
2105 depca_io_ports[i].device = NULL;
2106 }
2107 }
2108}
2109
2110module_init (depca_module_init);
2111module_exit (depca_module_exit);
diff --git a/drivers/net/ethernet/amd/depca.h b/drivers/net/ethernet/amd/depca.h
new file mode 100644
index 000000000000..cdcfe4252c16
--- /dev/null
+++ b/drivers/net/ethernet/amd/depca.h
@@ -0,0 +1,183 @@
1/*
2 Written 1994 by David C. Davies.
3
4 Copyright 1994 David C. Davies. This software may be used and distributed
5 according to the terms of the GNU General Public License, incorporated herein by
6 reference.
7*/
8
9/*
10** I/O addresses. Note that the 2k buffer option is not supported in
11** this driver.
12*/
13#define DEPCA_NICSR ioaddr+0x00 /* Network interface CSR */
14#define DEPCA_RBI ioaddr+0x02 /* RAM buffer index (2k buffer mode) */
15#define DEPCA_DATA ioaddr+0x04 /* LANCE registers' data port */
16#define DEPCA_ADDR ioaddr+0x06 /* LANCE registers' address port */
17#define DEPCA_HBASE ioaddr+0x08 /* EISA high memory base address reg. */
18#define DEPCA_PROM ioaddr+0x0c /* Ethernet address ROM data port */
19#define DEPCA_CNFG ioaddr+0x0c /* EISA Configuration port */
20#define DEPCA_RBSA ioaddr+0x0e /* RAM buffer starting address (2k buff.) */
21
22/*
23** These are LANCE registers addressable through DEPCA_ADDR
24*/
25#define CSR0 0
26#define CSR1 1
27#define CSR2 2
28#define CSR3 3
29
30/*
31** NETWORK INTERFACE CSR (NI_CSR) bit definitions
32*/
33
34#define TO 0x0100 /* Time Out for remote boot */
35#define SHE 0x0080 /* SHadow memory Enable */
36#define BS 0x0040 /* Bank Select */
37#define BUF 0x0020 /* BUFfer size (1->32k, 0->64k) */
38#define RBE 0x0010 /* Remote Boot Enable (1->net boot) */
39#define AAC 0x0008 /* Address ROM Address Counter (1->enable) */
40#define _128KB 0x0008 /* 128kB Network RAM (1->enable) */
41#define IM 0x0004 /* Interrupt Mask (1->mask) */
42#define IEN 0x0002 /* Interrupt tristate ENable (1->enable) */
43#define LED 0x0001 /* LED control */
44
45/*
46** Control and Status Register 0 (CSR0) bit definitions
47*/
48
49#define ERR 0x8000 /* Error summary */
50#define BABL 0x4000 /* Babble transmitter timeout error */
51#define CERR 0x2000 /* Collision Error */
52#define MISS 0x1000 /* Missed packet */
53#define MERR 0x0800 /* Memory Error */
54#define RINT 0x0400 /* Receiver Interrupt */
55#define TINT 0x0200 /* Transmit Interrupt */
56#define IDON 0x0100 /* Initialization Done */
57#define INTR 0x0080 /* Interrupt Flag */
58#define INEA 0x0040 /* Interrupt Enable */
59#define RXON 0x0020 /* Receiver on */
60#define TXON 0x0010 /* Transmitter on */
61#define TDMD 0x0008 /* Transmit Demand */
62#define STOP 0x0004 /* Stop */
63#define STRT 0x0002 /* Start */
64#define INIT 0x0001 /* Initialize */
65#define INTM 0xff00 /* Interrupt Mask */
66#define INTE 0xfff0 /* Interrupt Enable */
67
68/*
69** CONTROL AND STATUS REGISTER 3 (CSR3)
70*/
71
72#define BSWP 0x0004 /* Byte SWaP */
73#define ACON 0x0002 /* ALE control */
74#define BCON 0x0001 /* Byte CONtrol */
75
76/*
77** Initialization Block Mode Register
78*/
79
80#define PROM 0x8000 /* Promiscuous Mode */
81#define EMBA 0x0080 /* Enable Modified Back-off Algorithm */
82#define INTL 0x0040 /* Internal Loopback */
83#define DRTY 0x0020 /* Disable Retry */
84#define COLL 0x0010 /* Force Collision */
85#define DTCR 0x0008 /* Disable Transmit CRC */
86#define LOOP 0x0004 /* Loopback */
87#define DTX 0x0002 /* Disable the Transmitter */
88#define DRX 0x0001 /* Disable the Receiver */
89
90/*
91** Receive Message Descriptor 1 (RMD1) bit definitions.
92*/
93
94#define R_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
95#define R_ERR 0x4000 /* Error Summary */
96#define R_FRAM 0x2000 /* Framing Error */
97#define R_OFLO 0x1000 /* Overflow Error */
98#define R_CRC 0x0800 /* CRC Error */
99#define R_BUFF 0x0400 /* Buffer Error */
100#define R_STP 0x0200 /* Start of Packet */
101#define R_ENP 0x0100 /* End of Packet */
102
103/*
104** Transmit Message Descriptor 1 (TMD1) bit definitions.
105*/
106
107#define T_OWN 0x80000000 /* Owner bit 0 = host, 1 = lance */
108#define T_ERR 0x4000 /* Error Summary */
109#define T_ADD_FCS	0x2000	/* Add FCS to the outgoing frame */
110#define T_MORE 0x1000 /* >1 retry to transmit packet */
111#define T_ONE 0x0800 /* 1 try needed to transmit the packet */
112#define T_DEF 0x0400 /* Deferred */
113#define T_STP 0x02000000 /* Start of Packet */
114#define T_ENP 0x01000000 /* End of Packet */
115#define T_FLAGS 0xff000000 /* TX Flags Field */
116
117/*
118** Transmit Message Descriptor 3 (TMD3) bit definitions.
119*/
120
121#define TMD3_BUFF 0x8000 /* BUFFer error */
122#define TMD3_UFLO 0x4000 /* UnderFLOw error */
123#define TMD3_RES 0x2000 /* REServed */
124#define TMD3_LCOL 0x1000 /* Late COLlision */
125#define TMD3_LCAR 0x0800 /* Loss of CARrier */
126#define TMD3_RTRY 0x0400 /* ReTRY error */
127
128/*
129** EISA configuration Register (CNFG) bit definitions
130*/
131
132#define TIMEOUT 0x0100 /* 0:2.5 mins, 1: 30 secs */
133#define REMOTE 0x0080 /* Remote Boot Enable -> 1 */
134#define IRQ11 0x0040 /* Enable -> 1 */
135#define IRQ10 0x0020 /* Enable -> 1 */
136#define IRQ9 0x0010 /* Enable -> 1 */
137#define IRQ5 0x0008 /* Enable -> 1 */
138#define BUFF 0x0004 /* 0: 64kB or 128kB, 1: 32kB */
139#define PADR16 0x0002 /* RAM on 64kB boundary */
140#define PADR17 0x0001 /* RAM on 128kB boundary */
141
142/*
143** Miscellaneous
144*/
145#define HASH_TABLE_LEN 64 /* Bits */
146#define HASH_BITS 0x003f /* 6 LS bits */
147
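/*
** Illustrative note (an assumption, not from the original header): the
** hash index for the multicast filter is conventionally derived from the
** Ethernet CRC of the destination address, keeping HASH_BITS worth of
** bits to select one of the HASH_TABLE_LEN filter bits, e.g.:
**
**	index = ether_crc(ETH_ALEN, addr) & HASH_BITS;
**
** The exact bit selection is the driver's choice.
*/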
148#define MASK_INTERRUPTS 1
149#define UNMASK_INTERRUPTS 0
150
151#define EISA_EN 0x0001 /* Enable EISA bus buffers */
152#define EISA_ID iobase+0x0080 /* ID long word for EISA card */
153#define EISA_CTRL iobase+0x0084 /* Control word for EISA card */
154
155/*
156** Include the IOCTL stuff
157*/
158#include <linux/sockios.h>
159
160struct depca_ioctl {
161 unsigned short cmd; /* Command to run */
162 unsigned short len; /* Length of the data buffer */
163 unsigned char __user *data; /* Pointer to the data buffer */
164};
165
166/*
167** Recognised commands for the driver
168*/
169#define DEPCA_GET_HWADDR 0x01 /* Get the hardware address */
170#define DEPCA_SET_HWADDR 0x02 /* Set the hardware address */
171#define DEPCA_SET_PROM 0x03 /* Set Promiscuous Mode */
172#define DEPCA_CLR_PROM 0x04 /* Clear Promiscuous Mode */
173#define DEPCA_SAY_BOO 0x05 /* Say "Boo!" to the kernel log file */
174#define DEPCA_GET_MCA 0x06 /* Get a multicast address */
175#define DEPCA_SET_MCA 0x07 /* Set a multicast address */
176#define DEPCA_CLR_MCA 0x08 /* Clear a multicast address */
177#define DEPCA_MCA_EN 0x09 /* Enable a multicast address group */
178#define DEPCA_GET_STATS 0x0a /* Get the driver statistics */
179#define DEPCA_CLR_STATS 0x0b /* Zero out the driver statistics */
180#define DEPCA_GET_REG 0x0c /* Get the Register contents */
181#define DEPCA_SET_REG 0x0d /* Set the Register contents */
182#define DEPCA_DUMP 0x0f /* Dump the DEPCA Status */
183
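/*
** Illustrative sketch (added for clarity; the ioctl plumbing that carries
** this structure is driver-specific and assumed here): a user-space caller
** would fill a struct depca_ioctl and hand it to the driver, e.g.
**
**	struct depca_ioctl req;
**	unsigned char hwaddr[6];
**
**	req.cmd  = DEPCA_GET_HWADDR;
**	req.len  = sizeof(hwaddr);
**	req.data = hwaddr;
*/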
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
new file mode 100644
index 000000000000..86aa0d546a5b
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -0,0 +1,242 @@
1/* hplance.c : the Linux/hp300/lance ethernet driver
2 *
3 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
4 * Based on the Sun Lance driver and the NetBSD HP Lance driver
5 * Uses the generic 7990.c LANCE code.
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/interrupt.h>
12#include <linux/ioport.h>
13#include <linux/string.h>
14#include <linux/delay.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17/* Used for the temporary inet entries and routing */
18#include <linux/socket.h>
19#include <linux/route.h>
20#include <linux/dio.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24
25#include <asm/system.h>
26#include <asm/io.h>
27#include <asm/pgtable.h>
28
29#include "hplance.h"
30
31/* We have 16384 bytes of RAM for the init block and buffers. This places
32 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
33 * buffers and 2 Tx buffers.
34 */
35#define LANCE_LOG_TX_BUFFERS 1
36#define LANCE_LOG_RX_BUFFERS 3
37
38#include "7990.h" /* use generic LANCE code */
39
40/* Our private data structure */
41struct hplance_private {
42 struct lance_private lance;
43};
44
45/* function prototypes... This is easy because all the grot is in the
46 * generic LANCE support. All we have to support is probing for boards,
47 * plus board-specific init, open and close actions.
48 * Oh, and we need to tell the generic code how to read and write LANCE registers...
49 */
50static int __devinit hplance_init_one(struct dio_dev *d,
51 const struct dio_device_id *ent);
52static void __devinit hplance_init(struct net_device *dev,
53 struct dio_dev *d);
54static void __devexit hplance_remove_one(struct dio_dev *d);
55static void hplance_writerap(void *priv, unsigned short value);
56static void hplance_writerdp(void *priv, unsigned short value);
57static unsigned short hplance_readrdp(void *priv);
58static int hplance_open(struct net_device *dev);
59static int hplance_close(struct net_device *dev);
60
61static struct dio_device_id hplance_dio_tbl[] = {
62 { DIO_ID_LAN },
63 { 0 }
64};
65
66static struct dio_driver hplance_driver = {
67 .name = "hplance",
68 .id_table = hplance_dio_tbl,
69 .probe = hplance_init_one,
70 .remove = __devexit_p(hplance_remove_one),
71};
72
73static const struct net_device_ops hplance_netdev_ops = {
74 .ndo_open = hplance_open,
75 .ndo_stop = hplance_close,
76 .ndo_start_xmit = lance_start_xmit,
77 .ndo_set_rx_mode = lance_set_multicast,
78 .ndo_change_mtu = eth_change_mtu,
79 .ndo_validate_addr = eth_validate_addr,
80 .ndo_set_mac_address = eth_mac_addr,
81#ifdef CONFIG_NET_POLL_CONTROLLER
82 .ndo_poll_controller = lance_poll,
83#endif
84};
85
86/* Find all the HP Lance boards and initialise them... */
87static int __devinit hplance_init_one(struct dio_dev *d,
88 const struct dio_device_id *ent)
89{
90 struct net_device *dev;
91 int err = -ENOMEM;
92 int i;
93
94 dev = alloc_etherdev(sizeof(struct hplance_private));
95 if (!dev)
96 goto out;
97
98 err = -EBUSY;
99 if (!request_mem_region(dio_resource_start(d),
100 dio_resource_len(d), d->name))
101 goto out_free_netdev;
102
103 hplance_init(dev, d);
104 err = register_netdev(dev);
105 if (err)
106 goto out_release_mem_region;
107
108 dio_set_drvdata(d, dev);
109
110 printk(KERN_INFO "%s: %s; select code %d, addr %2.2x", dev->name, d->name, d->scode, dev->dev_addr[0]);
111
112 for (i=1; i<6; i++) {
113 printk(":%2.2x", dev->dev_addr[i]);
114 }
115
116 printk(", irq %d\n", d->ipl);
117
118 return 0;
119
120 out_release_mem_region:
121 release_mem_region(dio_resource_start(d), dio_resource_len(d));
122 out_free_netdev:
123 free_netdev(dev);
124 out:
125 return err;
126}
127
128static void __devexit hplance_remove_one(struct dio_dev *d)
129{
130 struct net_device *dev = dio_get_drvdata(d);
131
132 unregister_netdev(dev);
133 release_mem_region(dio_resource_start(d), dio_resource_len(d));
134 free_netdev(dev);
135}
136
137/* Initialise a single lance board at the given DIO device */
138static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
139{
140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
141 struct hplance_private *lp;
142 int i;
143
144 /* reset the board */
145 out_8(va+DIO_IDOFF, 0xff);
146 udelay(100); /* ariba! ariba! udelay! udelay! */
147
148 /* Fill the dev fields */
149 dev->base_addr = va;
150 dev->netdev_ops = &hplance_netdev_ops;
151 dev->dma = 0;
152
153 for (i=0; i<6; i++) {
154 /* The NVRAM holds our ethernet address, one nibble per byte,
155 * at bytes NVRAMOFF+1,3,5,7,9...
156 */
157 dev->dev_addr[i] = ((in_8(va + HPLANCE_NVRAMOFF + i*4 + 1) & 0xF) << 4)
158 | (in_8(va + HPLANCE_NVRAMOFF + i*4 + 3) & 0xF);
159 }
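	/* Worked example (descriptive, added for clarity): if the NVRAM bytes
	 * at offsets 1 and 3 are 0x04 and 0x08, the two low nibbles combine
	 * to give dev_addr[0] == 0x48.
	 */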
160
161 lp = netdev_priv(dev);
162 lp->lance.name = (char*)d->name; /* discards const, shut up gcc */
163 lp->lance.base = va;
164 lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
165 lp->lance.lance_init_block = NULL; /* LANCE addr of same RAM */
166 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
167 lp->lance.irq = d->ipl;
168 lp->lance.writerap = hplance_writerap;
169 lp->lance.writerdp = hplance_writerdp;
170 lp->lance.readrdp = hplance_readrdp;
171 lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
172 lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
173 lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
174 lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
175}
176
177/* This is disgusting. We have to check the DIO status register for ack every
178 * time we read or write the LANCE registers.
179 */
180static void hplance_writerap(void *priv, unsigned short value)
181{
182 struct lance_private *lp = (struct lance_private *)priv;
183 do {
184 out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
185 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
186}
187
188static void hplance_writerdp(void *priv, unsigned short value)
189{
190 struct lance_private *lp = (struct lance_private *)priv;
191 do {
192 out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
193 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
194}
195
196static unsigned short hplance_readrdp(void *priv)
197{
198 struct lance_private *lp = (struct lance_private *)priv;
199 __u16 value;
200 do {
201 value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
202 } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
203 return value;
204}
205
206static int hplance_open(struct net_device *dev)
207{
208 int status;
209 struct lance_private *lp = netdev_priv(dev);
210
211 status = lance_open(dev); /* call generic lance open code */
212 if (status)
213 return status;
214 /* enable interrupts at board level. */
215 out_8(lp->base + HPLANCE_STATUS, LE_IE);
216
217 return 0;
218}
219
220static int hplance_close(struct net_device *dev)
221{
222 struct lance_private *lp = netdev_priv(dev);
223
224 out_8(lp->base + HPLANCE_STATUS, 0); /* disable interrupts at boardlevel */
225 lance_close(dev);
226 return 0;
227}
228
229static int __init hplance_init_module(void)
230{
231 return dio_register_driver(&hplance_driver);
232}
233
234static void __exit hplance_cleanup_module(void)
235{
236 dio_unregister_driver(&hplance_driver);
237}
238
239module_init(hplance_init_module);
240module_exit(hplance_cleanup_module);
241
242MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/hplance.h b/drivers/net/ethernet/amd/hplance.h
new file mode 100644
index 000000000000..04aee9e0376a
--- /dev/null
+++ b/drivers/net/ethernet/amd/hplance.h
@@ -0,0 +1,26 @@
1/* Random defines and structures for the HP Lance driver.
2 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
3 * Based on the Sun Lance driver and the NetBSD HP Lance driver
4 */
5
6/* Registers */
7#define HPLANCE_ID 0x01 /* DIO register: ID byte */
8#define HPLANCE_STATUS 0x03 /* DIO register: interrupt enable/status */
9
10/* Control and status bits for the status register */
11#define LE_IE 0x80 /* interrupt enable */
12#define LE_IR 0x40 /* interrupt requested */
13#define LE_LOCK 0x08 /* lock status register */
14#define LE_ACK 0x04 /* ack of lock */
15#define LE_JAB 0x02 /* loss of tx clock (???) */
16/* We can also extract the IPL from the status register with the standard
17 * DIO_IPL(hplance) macro, or using dio_scodetoipl()
18 */
19
20/* These are the offsets for the DIO regs (hplance_reg), lance_ioreg,
21 * memory and NVRAM:
22 */
23#define HPLANCE_IDOFF 0 /* board baseaddr */
24#define HPLANCE_REGOFF 0x4000 /* lance registers */
25#define HPLANCE_MEMOFF 0x8000 /* struct lance_init_block */
26#define HPLANCE_NVRAMOFF 0xC008 /* etheraddress as one *nibble* per byte */
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
new file mode 100644
index 000000000000..a6e2e840884e
--- /dev/null
+++ b/drivers/net/ethernet/amd/lance.c
@@ -0,0 +1,1313 @@
1/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2/*
3 Written/copyright 1993-1998 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU General Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12
13 The author may be reached as becker@scyld.com, or C/O
14 Scyld Computing Corporation
15 410 Severn Ave., Suite 210
16 Annapolis MD 21403
17
18 Andrey V. Savochkin:
19 - alignment problem with 1.3.* kernel and some minor changes.
20 Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
21 - added support for Linux/Alpha, but removed most of it, because
22 it worked only for the PCI chip.
23 - added hook for the 32bit lance driver
24 - added PCnetPCI II (79C970A) to chip table
25 Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
26 - hopefully fix above so Linux/Alpha can use ISA cards too.
27 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
28 v1.12 10/27/97 Module support -djb
29 v1.14 2/3/98 Module support modified, made PCI support optional -djb
30 v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
31 before unregister_netdev() which caused NULL pointer
32 reference later in the chain (in rtnetlink_fill_ifinfo())
33 -- Mika Kuoppala <miku@iki.fi>
34
35 Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
36 the 2.1 version of the old driver - Alan Cox
37
38 Get rid of check_region, check kmalloc return in lance_probe1
39 Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
40
41 Reworked detection, added support for Racal InterLan EtherBlaster cards
42 Vesselin Kostadinov <vesok at yahoo dot com > - 22/4/2004
43*/
44
45static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46
47#include <linux/module.h>
48#include <linux/kernel.h>
49#include <linux/string.h>
50#include <linux/delay.h>
51#include <linux/errno.h>
52#include <linux/ioport.h>
53#include <linux/slab.h>
54#include <linux/interrupt.h>
55#include <linux/pci.h>
56#include <linux/init.h>
57#include <linux/netdevice.h>
58#include <linux/etherdevice.h>
59#include <linux/skbuff.h>
60#include <linux/mm.h>
61#include <linux/bitops.h>
62
63#include <asm/io.h>
64#include <asm/dma.h>
65
66static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
67static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
68static int __init do_lance_probe(struct net_device *dev);
69
70
71static struct card {
72 char id_offset14;
73 char id_offset15;
74} cards[] = {
75 { //"normal"
76 .id_offset14 = 0x57,
77 .id_offset15 = 0x57,
78 },
79 { //NI6510EB
80 .id_offset14 = 0x52,
81 .id_offset15 = 0x44,
82 },
83 { //Racal InterLan EtherBlaster
84 .id_offset14 = 0x52,
85 .id_offset15 = 0x49,
86 },
87};
88#define NUM_CARDS 3
89
90#ifdef LANCE_DEBUG
91static int lance_debug = LANCE_DEBUG;
92#else
93static int lance_debug = 1;
94#endif
95
96/*
97 Theory of Operation
98
99I. Board Compatibility
100
101This device driver is designed for the AMD 79C960, the "PCnet-ISA
102single-chip ethernet controller for ISA". This chip is used in a wide
103variety of boards from vendors such as Allied Telesis, HP, Kingston,
104and Boca. This driver is also intended to work with older AMD 7990
105designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
106I use the name LANCE to refer to all of the AMD chips, even though it properly
107refers only to the original 7990.
108
109II. Board-specific settings
110
111The driver is designed to work with boards that use the faster
112bus-master mode, rather than in shared memory mode. (Only older designs
113have on-board buffer memory needed to support the slower shared memory mode.)
114
115Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
116channel. This driver probes the likely base addresses:
117{0x300, 0x320, 0x340, 0x360}.
118After the board is found it generates a DMA-timeout interrupt and uses
119autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
120of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
121probed for by enabling each free DMA channel in turn and checking if
122initialization succeeds.
123
124The HP-J2405A board is an exception: with this board it is easy to read the
125EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
126_know_ the base address -- that field is for writing the EEPROM.)
127
128III. Driver operation
129
130IIIa. Ring buffers
131The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
132the base and length of the data buffer, along with status bits. The length
133of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
134the buffer length (rather than being directly the buffer length) for
135implementation ease. The default values are 4 (Tx) and 4 (Rx), which leads to
136ring sizes of 16 (Tx) and 16 (Rx). Increasing the number of ring entries
137needlessly uses extra space and reduces the chance that an upper layer will
138be able to reorder queued Tx packets based on priority. Decreasing the number
139of entries makes it more difficult to achieve back-to-back packet transmission
140and increases the chance that Rx ring will overflow. (Consider the worst case
141of receiving back-to-back minimum-sized packets.)
142
143The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
144statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
145avoid the administrative overhead. For the Rx side this avoids dynamically
146allocating full-sized buffers "just in case", at the expense of a
147memory-to-memory data copy for each packet received. For most systems this
148is a good tradeoff: the Rx buffer will always be in low memory, the copy
149is inexpensive, and it primes the cache for later packet processing. For Tx
150the buffers are only used when needed as low-memory bounce buffers.
151
152IIIB. 16M memory limitations.
153For the ISA bus master mode all structures used directly by the LANCE,
154the initialization block, Rx and Tx rings, and data buffers, must be
155accessible from the ISA bus, i.e. in the lower 16M of real memory.
156This is a problem for current Linux kernels on >16M machines. The network
157devices are initialized after memory initialization, and the kernel doles out
158memory from the top of memory downward. The current solution is to have a
159special network initialization routine that's called before memory
160initialization; this will eventually be generalized for all network devices.
161As mentioned before, low-memory "bounce-buffers" are used when needed.
162
163IIIC. Synchronization
164The driver runs as two independent, single-threaded flows of control. One
165is the send-packet routine, which enforces single-threaded use by the
166dev->tbusy flag. The other thread is the interrupt handler, which is single
167threaded by the hardware and other software.
168
169The send packet thread has partial control over the Tx ring and 'dev->tbusy'
170flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
171queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
172the 'lp->tx_full' flag.
173
174The interrupt handler has exclusive control over the Rx ring and records stats
175from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
176we can't avoid the interrupt overhead by having the Tx routine reap the Tx
177stats.) After reaping the stats, it marks the queue entry as empty by setting
178the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
179tx_full and tbusy flags.
180
181*/
182
183/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
184 Reasonable default values are 16 Tx buffers and 16 Rx buffers.
185 That translates to 4 and 4 (16 == 2^4).
186 This is a compile-time option for efficiency.
187 */
188#ifndef LANCE_LOG_TX_BUFFERS
189#define LANCE_LOG_TX_BUFFERS 4
190#define LANCE_LOG_RX_BUFFERS 4
191#endif
192
193#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
194#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
195#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
196
197#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
198#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
199#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
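/* Worked example (descriptive): with the default LANCE_LOG_RX_BUFFERS of 4,
   RX_RING_SIZE == 16, RX_RING_MOD_MASK == 15, and RX_RING_LEN_BITS ==
   (4 << 29) == 0x80000000; the ring length code sits in the top three bits
   of the 32-bit ring pointer stored in the init block. */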
200
201#define PKT_BUF_SZ 1544
202
203/* Offsets from base I/O address. */
204#define LANCE_DATA 0x10
205#define LANCE_ADDR 0x12
206#define LANCE_RESET 0x14
207#define LANCE_BUS_IF 0x16
208#define LANCE_TOTAL_SIZE 0x18
209
210#define TX_TIMEOUT (HZ/5)
211
212/* The LANCE Rx and Tx ring descriptors. */
213struct lance_rx_head {
214 s32 base;
215 s16 buf_length; /* This length is 2s complement (negative)! */
216 s16 msg_length; /* This length is "normal". */
217};
218
219struct lance_tx_head {
220 s32 base;
221 s16 length; /* Length is 2s complement (negative)! */
222 s16 misc;
223};
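/* Minimal sketch (not part of the original driver; names are local to this
   example): how ownership and the 2's-complement length encoding are read
   from a Tx descriptor. The driver itself open-codes these tests. */
static inline int example_host_owns(const struct lance_tx_head *t)
{
	return t->base >= 0;		/* OWN is bit 31, so sign == owner */
}

static inline int example_buf_len(const struct lance_tx_head *t)
{
	return -t->length;		/* stored negated, e.g. -1544 */
}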
224
225/* The LANCE initialization block, described in databook. */
226struct lance_init_block {
227 u16 mode; /* Pre-set mode (reg. 15) */
228 u8 phys_addr[6]; /* Physical ethernet address */
229 u32 filter[2]; /* Multicast filter (unused). */
230 /* Receive and transmit ring base, along with extra bits. */
231 u32 rx_ring; /* Tx and Rx ring base pointers */
232 u32 tx_ring;
233};
234
235struct lance_private {
236 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
237 struct lance_rx_head rx_ring[RX_RING_SIZE];
238 struct lance_tx_head tx_ring[TX_RING_SIZE];
239 struct lance_init_block init_block;
240 const char *name;
241 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
242 struct sk_buff* tx_skbuff[TX_RING_SIZE];
243 /* The addresses of receive-in-place skbuffs. */
244 struct sk_buff* rx_skbuff[RX_RING_SIZE];
245 unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
246 /* Tx low-memory "bounce buffer" address. */
247 char (*tx_bounce_buffs)[PKT_BUF_SZ];
248 int cur_rx, cur_tx; /* The next free ring entry */
249 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
250 int dma;
251 unsigned char chip_version; /* See lance_chip_type. */
252 spinlock_t devlock;
253};
254
255#define LANCE_MUST_PAD 0x00000001
256#define LANCE_ENABLE_AUTOSELECT 0x00000002
257#define LANCE_MUST_REINIT_RING 0x00000004
258#define LANCE_MUST_UNRESET 0x00000008
259#define LANCE_HAS_MISSED_FRAME 0x00000010
260
261/* A mapping from the chip ID number to the part number and features.
262 These are from the datasheets -- in real life the '970 version
263 reportedly has the same ID as the '965. */
264static struct lance_chip_type {
265 int id_number;
266 const char *name;
267 int flags;
268} chip_table[] = {
269 {0x0000, "LANCE 7990", /* Ancient lance chip. */
270 LANCE_MUST_PAD + LANCE_MUST_UNRESET},
271 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
272 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
273 LANCE_HAS_MISSED_FRAME},
274 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
275 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
276 LANCE_HAS_MISSED_FRAME},
277 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
278 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
279 LANCE_HAS_MISSED_FRAME},
280 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
281 it the PCnet32. */
282 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
283 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
284 LANCE_HAS_MISSED_FRAME},
285 {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCnet/PCI II. */
286 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
287 LANCE_HAS_MISSED_FRAME},
288 {0x0, "PCnet (unknown)",
289 LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
290 LANCE_HAS_MISSED_FRAME},
291};
292
293enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
294
295
296/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
297 Assume yes until we know the memory size. */
298static unsigned char lance_need_isa_bounce_buffers = 1;
299
300static int lance_open(struct net_device *dev);
301static void lance_init_ring(struct net_device *dev, gfp_t mode);
302static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
303 struct net_device *dev);
304static int lance_rx(struct net_device *dev);
305static irqreturn_t lance_interrupt(int irq, void *dev_id);
306static int lance_close(struct net_device *dev);
307static struct net_device_stats *lance_get_stats(struct net_device *dev);
308static void set_multicast_list(struct net_device *dev);
309static void lance_tx_timeout (struct net_device *dev);
310
311
312
313#ifdef MODULE
314#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
315
316static struct net_device *dev_lance[MAX_CARDS];
317static int io[MAX_CARDS];
318static int dma[MAX_CARDS];
319static int irq[MAX_CARDS];
320
321module_param_array(io, int, NULL, 0);
322module_param_array(dma, int, NULL, 0);
323module_param_array(irq, int, NULL, 0);
324module_param(lance_debug, int, 0);
325MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
326MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
327MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
328MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
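/* Example (illustrative): a card at I/O base 0x300 using IRQ 5 and DMA
 * channel 5 could be loaded with:
 *
 *	modprobe lance io=0x300 irq=5 dma=5
 */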
329
330int __init init_module(void)
331{
332 struct net_device *dev;
333 int this_dev, found = 0;
334
335 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
336 if (io[this_dev] == 0) {
337 if (this_dev != 0) /* only complain once */
338 break;
339 printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
340 return -EPERM;
341 }
342 dev = alloc_etherdev(0);
343 if (!dev)
344 break;
345 dev->irq = irq[this_dev];
346 dev->base_addr = io[this_dev];
347 dev->dma = dma[this_dev];
348 if (do_lance_probe(dev) == 0) {
349 dev_lance[found++] = dev;
350 continue;
351 }
352 free_netdev(dev);
353 break;
354 }
355 if (found != 0)
356 return 0;
357 return -ENXIO;
358}
359
360static void cleanup_card(struct net_device *dev)
361{
362 struct lance_private *lp = dev->ml_priv;
363 if (dev->dma != 4)
364 free_dma(dev->dma);
365 release_region(dev->base_addr, LANCE_TOTAL_SIZE);
366 kfree(lp->tx_bounce_buffs);
367 kfree((void*)lp->rx_buffs);
368 kfree(lp);
369}
370
371void __exit cleanup_module(void)
372{
373 int this_dev;
374
375 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
376 struct net_device *dev = dev_lance[this_dev];
377 if (dev) {
378 unregister_netdev(dev);
379 cleanup_card(dev);
380 free_netdev(dev);
381 }
382 }
383}
384#endif /* MODULE */
385MODULE_LICENSE("GPL");
386
387
388/* Starting in v2.1.*, the LANCE/PCnet probe is similar to the other
389 board probes, now that kmalloc() can allocate ISA DMA-able regions.
390 This also allows the LANCE driver to be used as a module.
391 */
392static int __init do_lance_probe(struct net_device *dev)
393{
394 unsigned int *port;
395 int result;
396
397 if (high_memory <= phys_to_virt(16*1024*1024))
398 lance_need_isa_bounce_buffers = 0;
399
400 for (port = lance_portlist; *port; port++) {
401 int ioaddr = *port;
402 struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
403 "lance-probe");
404
405 if (r) {
406 /* Detect the card with minimal I/O reads */
407 char offset14 = inb(ioaddr + 14);
408 int card;
409 for (card = 0; card < NUM_CARDS; ++card)
410 if (cards[card].id_offset14 == offset14)
411 break;
412 if (card < NUM_CARDS) {/*yes, the first byte matches*/
413 char offset15 = inb(ioaddr + 15);
414 for (card = 0; card < NUM_CARDS; ++card)
415 if ((cards[card].id_offset14 == offset14) &&
416 (cards[card].id_offset15 == offset15))
417 break;
418 }
419 if (card < NUM_CARDS) { /*Signature OK*/
420 result = lance_probe1(dev, ioaddr, 0, 0);
421 if (!result) {
422 struct lance_private *lp = dev->ml_priv;
423 int ver = lp->chip_version;
424
425 r->name = chip_table[ver].name;
426 return 0;
427 }
428 }
429 release_region(ioaddr, LANCE_TOTAL_SIZE);
430 }
431 }
432 return -ENODEV;
433}
434
435#ifndef MODULE
436struct net_device * __init lance_probe(int unit)
437{
438 struct net_device *dev = alloc_etherdev(0);
439 int err;
440
441 if (!dev)
442 return ERR_PTR(-ENODEV);
443
444 sprintf(dev->name, "eth%d", unit);
445 netdev_boot_setup_check(dev);
446
447 err = do_lance_probe(dev);
448 if (err)
449 goto out;
450 return dev;
451out:
452 free_netdev(dev);
453 return ERR_PTR(err);
454}
455#endif
456
457static const struct net_device_ops lance_netdev_ops = {
458 .ndo_open = lance_open,
459 .ndo_start_xmit = lance_start_xmit,
460 .ndo_stop = lance_close,
461 .ndo_get_stats = lance_get_stats,
462 .ndo_set_rx_mode = set_multicast_list,
463 .ndo_tx_timeout = lance_tx_timeout,
464 .ndo_change_mtu = eth_change_mtu,
465 .ndo_set_mac_address = eth_mac_addr,
466 .ndo_validate_addr = eth_validate_addr,
467};
468
469static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
470{
471 struct lance_private *lp;
472 unsigned long dma_channels; /* Mark spuriously-busy DMA channels */
473 int i, reset_val, lance_version;
474 const char *chipname;
475 /* Flags for specific chips or boards. */
476 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
477 int hp_builtin = 0; /* HP on-board ethernet. */
478 static int did_version; /* Already printed version info. */
479 unsigned long flags;
480 int err = -ENOMEM;
481 void __iomem *bios;
482
483 /* First we look for special cases.
484 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
485 There are two HP versions, check the BIOS for the configuration port.
486 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
487 */
488 bios = ioremap(0xf00f0, 0x14);
489 if (!bios)
490 return -ENOMEM;
491 if (readw(bios + 0x12) == 0x5048) {
492 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
493 int hp_port = (readl(bios + 1) & 1) ? 0x499 : 0x99;
494 /* We can have boards other than the built-in! Verify this is on-board. */
495 if ((inb(hp_port) & 0xc0) == 0x80 &&
496 ioaddr_table[inb(hp_port) & 3] == ioaddr)
497 hp_builtin = hp_port;
498 }
499 iounmap(bios);
500 /* We also recognize the HP Vectra on-board here, but check below. */
501 hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
502 inb(ioaddr+2) == 0x09);
503
504 /* Reset the LANCE. */
505 reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */
506
507 /* The Un-Reset is needed only for the real NE2100, and will
508 confuse the HP board. */
509 if (!hpJ2405A)
510 outw(reset_val, ioaddr+LANCE_RESET);
511
512 outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
513 if (inw(ioaddr+LANCE_DATA) != 0x0004)
514 return -ENODEV;
515
516 /* Get the version of the chip. */
517 outw(88, ioaddr+LANCE_ADDR);
518 if (inw(ioaddr+LANCE_ADDR) != 88) {
519 lance_version = 0;
520 } else { /* Good, it's a newer chip. */
521 int chip_version = inw(ioaddr+LANCE_DATA);
522 outw(89, ioaddr+LANCE_ADDR);
523 chip_version |= inw(ioaddr+LANCE_DATA) << 16;
524 if (lance_debug > 2)
525 printk(" LANCE chip version is %#x.\n", chip_version);
526 if ((chip_version & 0xfff) != 0x003)
527 return -ENODEV;
528 chip_version = (chip_version >> 12) & 0xffff;
529 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
530 if (chip_table[lance_version].id_number == chip_version)
531 break;
532 }
533 }
534
535 /* We can't allocate private data from alloc_etherdev() because it must
536 be in an ISA DMA-able region. */
537 chipname = chip_table[lance_version].name;
538 printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);
539
540 /* There is a 16 byte station address PROM at the base address.
541 The first six bytes are the station address. */
542 for (i = 0; i < 6; i++)
543 dev->dev_addr[i] = inb(ioaddr + i);
544 printk("%pM", dev->dev_addr);
545
546 dev->base_addr = ioaddr;
547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
548
549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
550 if(lp==NULL)
551 return -ENODEV;
552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
553 dev->ml_priv = lp;
554 lp->name = chipname;
555 lp->rx_buffs = (unsigned long)kmalloc(PKT_BUF_SZ*RX_RING_SIZE,
556 GFP_DMA | GFP_KERNEL);
557 if (!lp->rx_buffs)
558 goto out_lp;
559 if (lance_need_isa_bounce_buffers) {
560 lp->tx_bounce_buffs = kmalloc(PKT_BUF_SZ*TX_RING_SIZE,
561 GFP_DMA | GFP_KERNEL);
562 if (!lp->tx_bounce_buffs)
563 goto out_rx;
564 } else
565 lp->tx_bounce_buffs = NULL;
566
567 lp->chip_version = lance_version;
568 spin_lock_init(&lp->devlock);
569
570 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
571 for (i = 0; i < 6; i++)
572 lp->init_block.phys_addr[i] = dev->dev_addr[i];
573 lp->init_block.filter[0] = 0x00000000;
574 lp->init_block.filter[1] = 0x00000000;
575 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
576 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
577
578 outw(0x0001, ioaddr+LANCE_ADDR);
579 inw(ioaddr+LANCE_ADDR);
580 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
581 outw(0x0002, ioaddr+LANCE_ADDR);
582 inw(ioaddr+LANCE_ADDR);
583 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
584 outw(0x0000, ioaddr+LANCE_ADDR);
585 inw(ioaddr+LANCE_ADDR);
586
587 if (irq) { /* Set iff PCI card. */
588 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
589 dev->irq = irq;
590 } else if (hp_builtin) {
591 static const char dma_tbl[4] = {3, 5, 6, 0};
592 static const char irq_tbl[4] = {3, 4, 5, 9};
593 unsigned char port_val = inb(hp_builtin);
594 dev->dma = dma_tbl[(port_val >> 4) & 3];
595 dev->irq = irq_tbl[(port_val >> 2) & 3];
596 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
597 } else if (hpJ2405A) {
598 static const char dma_tbl[4] = {3, 5, 6, 7};
599 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
600 short reset_val = inw(ioaddr+LANCE_RESET);
601 dev->dma = dma_tbl[(reset_val >> 2) & 3];
602 dev->irq = irq_tbl[(reset_val >> 4) & 7];
603 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
604 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
605 short bus_info;
606 outw(8, ioaddr+LANCE_ADDR);
607 bus_info = inw(ioaddr+LANCE_BUS_IF);
608 dev->dma = bus_info & 0x07;
609 dev->irq = (bus_info >> 4) & 0x0F;
610 } else {
611 /* The DMA channel may be passed in PARAM1. */
612 if (dev->mem_start & 0x07)
613 dev->dma = dev->mem_start & 0x07;
614 }
615
616 if (dev->dma == 0) {
617 /* Read the DMA channel status register, so that we can avoid
618 stuck DMA channels in the DMA detection below. */
619 dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
620 (inb(DMA2_STAT_REG) & 0xf0);
621 }
622 err = -ENODEV;
623 if (dev->irq >= 2)
624 printk(" assigned IRQ %d", dev->irq);
625 else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
626 unsigned long irq_mask;
627
628 /* To auto-IRQ we enable the initialization-done and DMA error
629 interrupts. For ISA boards we get a DMA error, but VLB and PCI
630 boards will work. */
631 irq_mask = probe_irq_on();
632
633 /* Trigger an initialization just for the interrupt. */
634 outw(0x0041, ioaddr+LANCE_DATA);
635
636 mdelay(20);
637 dev->irq = probe_irq_off(irq_mask);
638 if (dev->irq)
639 printk(", probed IRQ %d", dev->irq);
640 else {
641 printk(", failed to detect IRQ line.\n");
642 goto out_tx;
643 }
644
645 /* Check for the initialization done bit, 0x0100, which means
646 that we don't need a DMA channel. */
647 if (inw(ioaddr+LANCE_DATA) & 0x0100)
648 dev->dma = 4;
649 }
650
651 if (dev->dma == 4) {
652 printk(", no DMA needed.\n");
653 } else if (dev->dma) {
654 if (request_dma(dev->dma, chipname)) {
655 printk("DMA %d allocation failed.\n", dev->dma);
656 goto out_tx;
657 } else
658 printk(", assigned DMA %d.\n", dev->dma);
659 } else { /* OK, we have to auto-DMA. */
660 for (i = 0; i < 4; i++) {
661 static const char dmas[] = { 5, 6, 7, 3 };
662 int dma = dmas[i];
663 int boguscnt;
664
665 /* Don't enable a permanently busy DMA channel, or the machine
666 will hang. */
667 if (test_bit(dma, &dma_channels))
668 continue;
669 outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
670 if (request_dma(dma, chipname))
671 continue;
672
673 flags=claim_dma_lock();
674 set_dma_mode(dma, DMA_MODE_CASCADE);
675 enable_dma(dma);
676 release_dma_lock(flags);
677
678 /* Trigger an initialization. */
679 outw(0x0001, ioaddr+LANCE_DATA);
680 for (boguscnt = 100; boguscnt > 0; --boguscnt)
681 if (inw(ioaddr+LANCE_DATA) & 0x0900)
682 break;
683 if (inw(ioaddr+LANCE_DATA) & 0x0100) {
684 dev->dma = dma;
685 printk(", DMA %d.\n", dev->dma);
686 break;
687 } else {
688 flags=claim_dma_lock();
689 disable_dma(dma);
690 release_dma_lock(flags);
691 free_dma(dma);
692 }
693 }
694 if (i == 4) { /* Failure: bail. */
695 printk("DMA detection failed.\n");
696 goto out_tx;
697 }
698 }
699
700 if (lance_version == 0 && dev->irq == 0) {
701 /* We may auto-IRQ now that we have a DMA channel. */
702 /* Trigger an initialization just for the interrupt. */
703 unsigned long irq_mask;
704
705 irq_mask = probe_irq_on();
706 outw(0x0041, ioaddr+LANCE_DATA);
707
708 mdelay(40);
709 dev->irq = probe_irq_off(irq_mask);
710 if (dev->irq == 0) {
711 printk(" Failed to detect the 7990 IRQ line.\n");
712 goto out_dma;
713 }
714 printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
715 }
716
717 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
718 /* Turn on auto-select of media (10baseT or BNC) so that the user
719 can watch the LEDs even if the board isn't opened. */
720 outw(0x0002, ioaddr+LANCE_ADDR);
721 /* Don't touch 10base2 power bit. */
722 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
723 }
724
725 if (lance_debug > 0 && did_version++ == 0)
726 printk(version);
727
728 /* The LANCE-specific entries in the device structure. */
729 dev->netdev_ops = &lance_netdev_ops;
730 dev->watchdog_timeo = TX_TIMEOUT;
731
732 err = register_netdev(dev);
733 if (err)
734 goto out_dma;
735 return 0;
736out_dma:
737 if (dev->dma != 4)
738 free_dma(dev->dma);
739out_tx:
740 kfree(lp->tx_bounce_buffs);
741out_rx:
742 kfree((void*)lp->rx_buffs);
743out_lp:
744 kfree(lp);
745 return err;
746}
747
748
749static int
750lance_open(struct net_device *dev)
751{
752 struct lance_private *lp = dev->ml_priv;
753 int ioaddr = dev->base_addr;
754 int i;
755
756 if (dev->irq == 0 ||
757 request_irq(dev->irq, lance_interrupt, 0, lp->name, dev)) {
758 return -EAGAIN;
759 }
760
761 /* We used to allocate DMA here, but that was silly.
762 DMA lines can't be shared! We now permanently allocate them. */
763
764 /* Reset the LANCE */
765 inw(ioaddr+LANCE_RESET);
766
767 /* The DMA controller is used as a no-operation slave, "cascade mode". */
768 if (dev->dma != 4) {
769 unsigned long flags=claim_dma_lock();
770 enable_dma(dev->dma);
771 set_dma_mode(dev->dma, DMA_MODE_CASCADE);
772 release_dma_lock(flags);
773 }
774
775 /* Un-Reset the LANCE, needed only for the NE2100. */
776 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
777 outw(0, ioaddr+LANCE_RESET);
778
779 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
780 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
781 outw(0x0002, ioaddr+LANCE_ADDR);
782 /* Only touch autoselect bit. */
783 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
784 }
785
786 if (lance_debug > 1)
787 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
788 dev->name, dev->irq, dev->dma,
789 (u32) isa_virt_to_bus(lp->tx_ring),
790 (u32) isa_virt_to_bus(lp->rx_ring),
791 (u32) isa_virt_to_bus(&lp->init_block));
792
793 lance_init_ring(dev, GFP_KERNEL);
794 /* Re-initialize the LANCE, and start it when done. */
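	/* Descriptive note: CSR1 and CSR2 take the low and high parts of the
	 * init block's 24-bit bus address, and writing INIT (0x0001) to CSR0
	 * below makes the chip fetch and act on that block.
	 */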
795 outw(0x0001, ioaddr+LANCE_ADDR);
796 outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
797 outw(0x0002, ioaddr+LANCE_ADDR);
798 outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
799
800 outw(0x0004, ioaddr+LANCE_ADDR);
801 outw(0x0915, ioaddr+LANCE_DATA);
802
803 outw(0x0000, ioaddr+LANCE_ADDR);
804 outw(0x0001, ioaddr+LANCE_DATA);
805
806 netif_start_queue (dev);
807
808 i = 0;
809 while (i++ < 100)
810 if (inw(ioaddr+LANCE_DATA) & 0x0100)
811 break;
812 /*
813 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
814 * reports that doing so triggers a bug in the '974.
815 */
816 outw(0x0042, ioaddr+LANCE_DATA);
817
818 if (lance_debug > 2)
819 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
820 dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
821
822 return 0; /* Always succeed */
823}
824
825/* The LANCE has been halted for one reason or another (busmaster memory
826 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
827 etc.). Modern LANCE variants always reload their ring-buffer
828 configuration when restarted, so we must reinitialize our ring
829 context before restarting. As part of this reinitialization,
830 find all packets still on the Tx ring and pretend that they had been
831 sent (in effect, drop the packets on the floor) - the higher-level
832 protocols will time out and retransmit. It'd be better to shuffle
833 these skbs to a temp list and then actually re-Tx them after
834 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
835*/
836
837static void
838lance_purge_ring(struct net_device *dev)
839{
840 struct lance_private *lp = dev->ml_priv;
841 int i;
842
843 /* Free all the skbuffs in the Rx and Tx queues. */
844 for (i = 0; i < RX_RING_SIZE; i++) {
845 struct sk_buff *skb = lp->rx_skbuff[i];
846 lp->rx_skbuff[i] = NULL;
847 lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
848 if (skb)
849 dev_kfree_skb_any(skb);
850 }
851 for (i = 0; i < TX_RING_SIZE; i++) {
852 if (lp->tx_skbuff[i]) {
853 dev_kfree_skb_any(lp->tx_skbuff[i]);
854 lp->tx_skbuff[i] = NULL;
855 }
856 }
857}
858
859
860/* Initialize the LANCE Rx and Tx rings. */
861static void
862lance_init_ring(struct net_device *dev, gfp_t gfp)
863{
864 struct lance_private *lp = dev->ml_priv;
865 int i;
866
867 lp->cur_rx = lp->cur_tx = 0;
868 lp->dirty_rx = lp->dirty_tx = 0;
869
870 for (i = 0; i < RX_RING_SIZE; i++) {
871 struct sk_buff *skb;
872 void *rx_buff;
873
874 skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
875 lp->rx_skbuff[i] = skb;
876 if (skb) {
877 skb->dev = dev;
878 rx_buff = skb->data;
879 } else
880 rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
881 if (rx_buff == NULL)
882 lp->rx_ring[i].base = 0;
883 else
884 lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
885 lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
886 }
887 /* The Tx buffer address is filled in as needed, but we do need to clear
888 the upper ownership bit. */
889 for (i = 0; i < TX_RING_SIZE; i++) {
890 lp->tx_skbuff[i] = NULL;
891 lp->tx_ring[i].base = 0;
892 }
893
894 lp->init_block.mode = 0x0000;
895 for (i = 0; i < 6; i++)
896 lp->init_block.phys_addr[i] = dev->dev_addr[i];
897 lp->init_block.filter[0] = 0x00000000;
898 lp->init_block.filter[1] = 0x00000000;
899 lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
900 lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
901}
902
903static void
904lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
905{
906 struct lance_private *lp = dev->ml_priv;
907
908 if (must_reinit ||
909 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
910 lance_purge_ring(dev);
911 lance_init_ring(dev, GFP_ATOMIC);
912 }
913 outw(0x0000, dev->base_addr + LANCE_ADDR);
914 outw(csr0_bits, dev->base_addr + LANCE_DATA);
915}
916
917
918static void lance_tx_timeout (struct net_device *dev)
919{
920 struct lance_private *lp = (struct lance_private *) dev->ml_priv;
921 int ioaddr = dev->base_addr;
922
923 outw (0, ioaddr + LANCE_ADDR);
924 printk ("%s: transmit timed out, status %4.4x, resetting.\n",
925 dev->name, inw (ioaddr + LANCE_DATA));
926 outw (0x0004, ioaddr + LANCE_DATA);
927 dev->stats.tx_errors++;
928#ifndef final_version
929 if (lance_debug > 3) {
930 int i;
931 printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
932 lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
933 lp->cur_rx);
934 for (i = 0; i < RX_RING_SIZE; i++)
935 printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
936 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
937 lp->rx_ring[i].msg_length);
938 for (i = 0; i < TX_RING_SIZE; i++)
939 printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
940 lp->tx_ring[i].base, -lp->tx_ring[i].length,
941 lp->tx_ring[i].misc);
942 printk ("\n");
943 }
944#endif
945 lance_restart (dev, 0x0043, 1);
946
947 dev->trans_start = jiffies; /* prevent tx timeout */
948 netif_wake_queue (dev);
949}
950
951
952static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
953 struct net_device *dev)
954{
955 struct lance_private *lp = dev->ml_priv;
956 int ioaddr = dev->base_addr;
957 int entry;
958 unsigned long flags;
959
960 spin_lock_irqsave(&lp->devlock, flags);
961
962 if (lance_debug > 3) {
963 outw(0x0000, ioaddr+LANCE_ADDR);
964 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
965 inw(ioaddr+LANCE_DATA));
966 outw(0x0000, ioaddr+LANCE_DATA);
967 }
968
969 /* Fill in a Tx ring entry */
970
971 /* Mask to ring buffer boundary. */
972 entry = lp->cur_tx & TX_RING_MOD_MASK;
973
974 /* Caution: the write order is important here, set the base address
975 with the "ownership" bits last. */
976
977 /* The old LANCE chips don't automatically pad buffers to the minimum size. */
978 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
979 if (skb->len < ETH_ZLEN) {
980 if (skb_padto(skb, ETH_ZLEN))
981 goto out;
982 lp->tx_ring[entry].length = -ETH_ZLEN;
983 }
984 else
985 lp->tx_ring[entry].length = -skb->len;
986 } else
987 lp->tx_ring[entry].length = -skb->len;
988
989 lp->tx_ring[entry].misc = 0x0000;
990
991 dev->stats.tx_bytes += skb->len;
992
993 /* If any part of this buffer is >16M we must copy it to a low-memory
994 buffer. */
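	/* Descriptive note: the 0x83000000 written below sets OWN (bit 31) to
	 * hand the descriptor to the LANCE, plus STP and ENP (bits 25 and 24)
	 * to mark a single-buffer packet.
	 */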
995 if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
996 if (lance_debug > 5)
997 printk("%s: bouncing a high-memory packet (%#x).\n",
998 dev->name, (u32)isa_virt_to_bus(skb->data));
999 skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
1000 lp->tx_ring[entry].base =
1001 ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
1002 dev_kfree_skb(skb);
1003 } else {
1004 lp->tx_skbuff[entry] = skb;
1005 lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
1006 }
1007 lp->cur_tx++;
1008
1009 /* Trigger an immediate send poll. */
1010 outw(0x0000, ioaddr+LANCE_ADDR);
1011 outw(0x0048, ioaddr+LANCE_DATA);
1012
1013 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1014 netif_stop_queue(dev);
1015
1016out:
1017 spin_unlock_irqrestore(&lp->devlock, flags);
1018 return NETDEV_TX_OK;
1019}
1020
1021/* The LANCE interrupt handler. */
1022static irqreturn_t lance_interrupt(int irq, void *dev_id)
1023{
1024 struct net_device *dev = dev_id;
1025 struct lance_private *lp;
1026 int csr0, ioaddr, boguscnt=10;
1027 int must_restart;
1028
1029 ioaddr = dev->base_addr;
1030 lp = dev->ml_priv;
1031
1032 spin_lock (&lp->devlock);
1033
1034 outw(0x00, dev->base_addr + LANCE_ADDR);
1035 while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
1036 --boguscnt >= 0) {
1037 /* Acknowledge all of the current interrupt sources ASAP. */
1038 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);
1039
1040 must_restart = 0;
1041
1042 if (lance_debug > 5)
1043 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
1044 dev->name, csr0, inw(dev->base_addr + LANCE_DATA));
1045
1046 if (csr0 & 0x0400) /* Rx interrupt */
1047 lance_rx(dev);
1048
1049 if (csr0 & 0x0200) { /* Tx-done interrupt */
1050 int dirty_tx = lp->dirty_tx;
1051
1052 while (dirty_tx < lp->cur_tx) {
1053 int entry = dirty_tx & TX_RING_MOD_MASK;
1054 int status = lp->tx_ring[entry].base;
1055
1056 if (status < 0)
1057 break; /* It still hasn't been Txed */
1058
1059 lp->tx_ring[entry].base = 0;
1060
1061 if (status & 0x40000000) {
1062 /* There was a major error, log it. */
1063 int err_status = lp->tx_ring[entry].misc;
1064 dev->stats.tx_errors++;
1065 if (err_status & 0x0400)
1066 dev->stats.tx_aborted_errors++;
1067 if (err_status & 0x0800)
1068 dev->stats.tx_carrier_errors++;
1069 if (err_status & 0x1000)
1070 dev->stats.tx_window_errors++;
1071 if (err_status & 0x4000) {
1072 /* Ackk! On FIFO errors the Tx unit is turned off! */
1073 dev->stats.tx_fifo_errors++;
1074 /* Remove this verbosity later! */
1075 printk("%s: Tx FIFO error! Status %4.4x.\n",
1076 dev->name, csr0);
1077 /* Restart the chip. */
1078 must_restart = 1;
1079 }
1080 } else {
1081 if (status & 0x18000000)
1082 dev->stats.collisions++;
1083 dev->stats.tx_packets++;
1084 }
1085
1086 /* We must free the original skb if it's not a data-only copy
1087 in the bounce buffer. */
1088 if (lp->tx_skbuff[entry]) {
1089 dev_kfree_skb_irq(lp->tx_skbuff[entry]);
1090 lp->tx_skbuff[entry] = NULL;
1091 }
1092 dirty_tx++;
1093 }
1094
1095#ifndef final_version
1096 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
1097 printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
1098 dirty_tx, lp->cur_tx,
1099 netif_queue_stopped(dev) ? "yes" : "no");
1100 dirty_tx += TX_RING_SIZE;
1101 }
1102#endif
1103
1104 /* if the ring is no longer full, accept more packets */
1105 if (netif_queue_stopped(dev) &&
1106 dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
1107 netif_wake_queue (dev);
1108
1109 lp->dirty_tx = dirty_tx;
1110 }
1111
1112 /* Log misc errors. */
1113 if (csr0 & 0x4000)
1114 dev->stats.tx_errors++; /* Tx babble. */
1115 if (csr0 & 0x1000)
1116 dev->stats.rx_errors++; /* Missed a Rx frame. */
1117 if (csr0 & 0x0800) {
1118 printk("%s: Bus master arbitration failure, status %4.4x.\n",
1119 dev->name, csr0);
1120 /* Restart the chip. */
1121 must_restart = 1;
1122 }
1123
1124 if (must_restart) {
1125 /* stop the chip to clear the error condition, then restart */
1126 outw(0x0000, dev->base_addr + LANCE_ADDR);
1127 outw(0x0004, dev->base_addr + LANCE_DATA);
1128 lance_restart(dev, 0x0002, 0);
1129 }
1130 }
1131
1132 /* Clear any other interrupt, and set interrupt enable. */
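	/* Descriptive note: 0x7940 is BABL|CERR|MISS|MERR|IDON|INEA in CSR0
	 * terms: acknowledge the remaining latched conditions and leave
	 * interrupt generation enabled.
	 */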
1133 outw(0x0000, dev->base_addr + LANCE_ADDR);
1134 outw(0x7940, dev->base_addr + LANCE_DATA);
1135
1136 if (lance_debug > 4)
1137 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1138 dev->name, inw(ioaddr + LANCE_ADDR),
1139 inw(dev->base_addr + LANCE_DATA));
1140
1141 spin_unlock (&lp->devlock);
1142 return IRQ_HANDLED;
1143}
1144
1145static int
1146lance_rx(struct net_device *dev)
1147{
1148 struct lance_private *lp = dev->ml_priv;
1149 int entry = lp->cur_rx & RX_RING_MOD_MASK;
1150 int i;
1151
1152 /* If we own the next entry, it's a new packet. Send it up. */
1153 while (lp->rx_ring[entry].base >= 0) {
1154 int status = lp->rx_ring[entry].base >> 24;
1155
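		/* Descriptive note: 'status' is the top byte of RMD1; 0x03 is
		 * STP|ENP with OWN and all the error bits clear, i.e. a
		 * complete, error-free packet in a single buffer.
		 */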
1156 if (status != 0x03) { /* There was an error. */
1157 /* There is a tricky error noted by John Murphy,
1158 <murf@perftech.com> to Russ Nelson: Even with full-sized
1159 buffers it's possible for a jabber packet to use two
1160 buffers, with only the last correctly noting the error. */
1161 if (status & 0x01) /* Only count a general error at the */
1162 dev->stats.rx_errors++; /* end of a packet.*/
1163 if (status & 0x20)
1164 dev->stats.rx_frame_errors++;
1165 if (status & 0x10)
1166 dev->stats.rx_over_errors++;
1167 if (status & 0x08)
1168 dev->stats.rx_crc_errors++;
1169 if (status & 0x04)
1170 dev->stats.rx_fifo_errors++;
1171 lp->rx_ring[entry].base &= 0x03ffffff;
1172 }
1173 else
1174 {
1175 /* Malloc up new buffer, compatible with net3. */
1176 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1177 struct sk_buff *skb;
1178
1179 if(pkt_len<60)
1180 {
1181 printk("%s: Runt packet!\n",dev->name);
1182 dev->stats.rx_errors++;
1183 }
1184 else
1185 {
1186 skb = dev_alloc_skb(pkt_len+2);
1187 if (skb == NULL)
1188 {
1189 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1190 for (i=0; i < RX_RING_SIZE; i++)
1191 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
1192 break;
1193
1194 if (i > RX_RING_SIZE -2)
1195 {
1196 dev->stats.rx_dropped++;
1197 lp->rx_ring[entry].base |= 0x80000000;
1198 lp->cur_rx++;
1199 }
1200 break;
1201 }
1202 skb_reserve(skb,2); /* 16 byte align */
1203 skb_put(skb,pkt_len); /* Make room */
1204 skb_copy_to_linear_data(skb,
1205 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
1206 pkt_len);
1207 skb->protocol=eth_type_trans(skb,dev);
1208 netif_rx(skb);
1209 dev->stats.rx_packets++;
1210 dev->stats.rx_bytes += pkt_len;
1211 }
1212 }
1213 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1214 of QNX reports that some revs of the 79C965 clear it. */
1215 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
1216 lp->rx_ring[entry].base |= 0x80000000;
1217 entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
1218 }
1219
1220 /* We should check that at least two ring entries are free. If not,
1221 we should free one and mark stats->rx_dropped++. */
1222
1223 return 0;
1224}
1225
1226static int
1227lance_close(struct net_device *dev)
1228{
1229 int ioaddr = dev->base_addr;
1230 struct lance_private *lp = dev->ml_priv;
1231
1232 netif_stop_queue (dev);
1233
1234 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1235 outw(112, ioaddr+LANCE_ADDR);
1236 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1237 }
1238 outw(0, ioaddr+LANCE_ADDR);
1239
1240 if (lance_debug > 1)
1241 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1242 dev->name, inw(ioaddr+LANCE_DATA));
1243
1244 /* We stop the LANCE here -- it occasionally polls
1245 memory if we don't. */
1246 outw(0x0004, ioaddr+LANCE_DATA);
1247
1248 if (dev->dma != 4)
1249 {
1250 unsigned long flags=claim_dma_lock();
1251 disable_dma(dev->dma);
1252 release_dma_lock(flags);
1253 }
1254 free_irq(dev->irq, dev);
1255
1256 lance_purge_ring(dev);
1257
1258 return 0;
1259}
1260
1261static struct net_device_stats *lance_get_stats(struct net_device *dev)
1262{
1263 struct lance_private *lp = dev->ml_priv;
1264
1265 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
1266 short ioaddr = dev->base_addr;
1267 short saved_addr;
1268 unsigned long flags;
1269
1270 spin_lock_irqsave(&lp->devlock, flags);
1271 saved_addr = inw(ioaddr+LANCE_ADDR);
1272 outw(112, ioaddr+LANCE_ADDR);
1273 dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
1274 outw(saved_addr, ioaddr+LANCE_ADDR);
1275 spin_unlock_irqrestore(&lp->devlock, flags);
1276 }
1277
1278 return &dev->stats;
1279}
1280
1281/* Set or clear the multicast filter for this adaptor.
1282 */
1283
1284static void set_multicast_list(struct net_device *dev)
1285{
1286 short ioaddr = dev->base_addr;
1287
1288 outw(0, ioaddr+LANCE_ADDR);
1289 outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */
1290
1291 if (dev->flags&IFF_PROMISC) {
1292 outw(15, ioaddr+LANCE_ADDR);
1293 outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
1294 } else {
1295 short multicast_table[4];
1296 int i;
1297 int num_addrs=netdev_mc_count(dev);
1298 if(dev->flags&IFF_ALLMULTI)
1299 num_addrs=1;
1300 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1301 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
1302 for (i = 0; i < 4; i++) {
1303 outw(8 + i, ioaddr+LANCE_ADDR);
1304 outw(multicast_table[i], ioaddr+LANCE_DATA);
1305 }
1306 outw(15, ioaddr+LANCE_ADDR);
1307 outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
1308 }
1309
1310 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1311
1312}
1313
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
new file mode 100644
index 000000000000..56bc47a94186
--- /dev/null
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -0,0 +1,205 @@
1/* mvme147.c : the Linux/mvme147/lance ethernet driver
2 *
3 * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
4 * Based on the Sun Lance driver and the NetBSD HP Lance driver
5 * Uses the generic 7990.c LANCE code.
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/types.h>
11#include <linux/interrupt.h>
12#include <linux/ioport.h>
13#include <linux/string.h>
14#include <linux/delay.h>
15#include <linux/init.h>
16#include <linux/errno.h>
17#include <linux/gfp.h>
18/* Used for the temporary inet entries and routing */
19#include <linux/socket.h>
20#include <linux/route.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24
25#include <asm/system.h>
26#include <asm/io.h>
27#include <asm/pgtable.h>
28#include <asm/mvme147hw.h>
29
30/* We have 16384 bytes of RAM for the init block and buffers. This places
31 * an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
32 * buffers and 2 Tx buffers.
33 */
34#define LANCE_LOG_TX_BUFFERS 1
35#define LANCE_LOG_RX_BUFFERS 3
36
37#include "7990.h" /* use generic LANCE code */
38
39/* Our private data structure */
40struct m147lance_private {
41 struct lance_private lance;
42 unsigned long ram;
43};
44
45/* function prototypes... This is easy because all the grot is in the
46 * generic LANCE support. All we have to support is probing for boards,
47 * plus board-specific init, open and close actions.
48 * Oh, and we need to tell the generic code how to read and write LANCE registers...
49 */
50static int m147lance_open(struct net_device *dev);
51static int m147lance_close(struct net_device *dev);
52static void m147lance_writerap(struct lance_private *lp, unsigned short value);
53static void m147lance_writerdp(struct lance_private *lp, unsigned short value);
54static unsigned short m147lance_readrdp(struct lance_private *lp);
55
56typedef void (*writerap_t)(void *, unsigned short);
57typedef void (*writerdp_t)(void *, unsigned short);
58typedef unsigned short (*readrdp_t)(void *);
59
60static const struct net_device_ops lance_netdev_ops = {
61 .ndo_open = m147lance_open,
62 .ndo_stop = m147lance_close,
63 .ndo_start_xmit = lance_start_xmit,
64 .ndo_set_rx_mode = lance_set_multicast,
65 .ndo_tx_timeout = lance_tx_timeout,
66 .ndo_change_mtu = eth_change_mtu,
67 .ndo_validate_addr = eth_validate_addr,
68 .ndo_set_mac_address = eth_mac_addr,
69};
70
71/* Initialise the one and only on-board 7990 */
72struct net_device * __init mvme147lance_probe(int unit)
73{
74 struct net_device *dev;
75 static int called;
76 static const char name[] = "MVME147 LANCE";
77 struct m147lance_private *lp;
78 u_long *addr;
79 u_long address;
80 int err;
81
82 if (!MACH_IS_MVME147 || called)
83 return ERR_PTR(-ENODEV);
84 called++;
85
86 dev = alloc_etherdev(sizeof(struct m147lance_private));
87 if (!dev)
88 return ERR_PTR(-ENOMEM);
89
90 if (unit >= 0)
91 sprintf(dev->name, "eth%d", unit);
92
93 /* Fill the dev fields */
94 dev->base_addr = (unsigned long)MVME147_LANCE_BASE;
95 dev->netdev_ops = &lance_netdev_ops;
96 dev->dma = 0;
97
98 addr=(u_long *)ETHERNET_ADDRESS;
99 address = *addr;
100 dev->dev_addr[0]=0x08;
101 dev->dev_addr[1]=0x00;
102 dev->dev_addr[2]=0x3e;
103 address=address>>8;
104 dev->dev_addr[5]=address&0xff;
105 address=address>>8;
106 dev->dev_addr[4]=address&0xff;
107 address=address>>8;
108 dev->dev_addr[3]=address&0xff;
109
110 printk("%s: MVME147 at 0x%08lx, irq %d, "
111 "Hardware Address %pM\n",
112 dev->name, dev->base_addr, MVME147_LANCE_IRQ,
113 dev->dev_addr);
114
115 lp = netdev_priv(dev);
116 lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
117 if (!lp->ram)
118 {
119 printk("%s: No memory for LANCE buffers\n", dev->name);
120 free_netdev(dev);
121 return ERR_PTR(-ENOMEM);
122 }
123
124 lp->lance.name = (char*)name; /* discards const, shut up gcc */
125 lp->lance.base = dev->base_addr;
126 lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
127 lp->lance.lance_init_block = (struct lance_init_block *)(lp->ram); /* LANCE addr of same RAM */
128 lp->lance.busmaster_regval = LE_C3_BSWP; /* we're bigendian */
129 lp->lance.irq = MVME147_LANCE_IRQ;
130 lp->lance.writerap = (writerap_t)m147lance_writerap;
131 lp->lance.writerdp = (writerdp_t)m147lance_writerdp;
132 lp->lance.readrdp = (readrdp_t)m147lance_readrdp;
133 lp->lance.lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
134 lp->lance.lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
135 lp->lance.rx_ring_mod_mask = RX_RING_MOD_MASK;
136 lp->lance.tx_ring_mod_mask = TX_RING_MOD_MASK;
137
138 err = register_netdev(dev);
139 if (err) {
140 free_pages(lp->ram, 3);
141 free_netdev(dev);
142 return ERR_PTR(err);
143 }
144
145 return dev;
146}
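/* The shift-and-mask sequence above unpacks the board serial word into
 * the low three address bytes beneath the fixed Motorola OUI 08:00:3e.
 * The same logic in a more explicit, purely illustrative form:
 */
static void example_unpack_addr(u8 dev_addr[6], u32 serial)
{
	dev_addr[0] = 0x08;			/* Motorola OUI */
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x3e;
	dev_addr[3] = (serial >> 24) & 0xff;	/* extracted last above */
	dev_addr[4] = (serial >> 16) & 0xff;
	dev_addr[5] = (serial >> 8) & 0xff;	/* lowest byte is unused */
}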
147
148static void m147lance_writerap(struct lance_private *lp, unsigned short value)
149{
150 out_be16(lp->base + LANCE_RAP, value);
151}
152
153static void m147lance_writerdp(struct lance_private *lp, unsigned short value)
154{
155 out_be16(lp->base + LANCE_RDP, value);
156}
157
158static unsigned short m147lance_readrdp(struct lance_private *lp)
159{
160 return in_be16(lp->base + LANCE_RDP);
161}
162
163static int m147lance_open(struct net_device *dev)
164{
165 int status;
166
167 status = lance_open(dev); /* call generic lance open code */
168 if (status)
169 return status;
170 /* enable interrupts at board level. */
171 m147_pcc->lan_cntrl=0; /* clear the interrupts (if any) */
172 m147_pcc->lan_cntrl=0x08 | 0x04; /* Enable irq 4 */
173
174 return 0;
175}
176
177static int m147lance_close(struct net_device *dev)
178{
179	/* disable interrupts at board level */
180 m147_pcc->lan_cntrl=0x0; /* disable interrupts */
181 lance_close(dev);
182 return 0;
183}
184
185#ifdef MODULE
186MODULE_LICENSE("GPL");
187
188static struct net_device *dev_mvme147_lance;
189int __init init_module(void)
190{
191 dev_mvme147_lance = mvme147lance_probe(-1);
192 if (IS_ERR(dev_mvme147_lance))
193 return PTR_ERR(dev_mvme147_lance);
194 return 0;
195}
196
197void __exit cleanup_module(void)
198{
199 struct m147lance_private *lp = netdev_priv(dev_mvme147_lance);
200 unregister_netdev(dev_mvme147_lance);
201 free_pages(lp->ram, 3);
202 free_netdev(dev_mvme147_lance);
203}
204
205#endif /* MODULE */
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
new file mode 100644
index 000000000000..6e6aa7213aab
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -0,0 +1,1254 @@
1/*
2 * ni6510 (am7990 'lance' chip) driver for Linux-net-3
3 * BETAcode v0.71 (96/09/29) for 2.0.0 (or later)
4 * copyrights (c) 1994,1995,1996 by M.Hipp
5 *
6 * This driver can handle the old ni6510 board and the newer ni6510
7 * EtherBlaster. (It probably also works with any fully NE2100
8 * compatible card.)
9 *
10 * driver probes: io: 0x360,0x300,0x320,0x340 / dma: 3,5,6,7
11 *
12 * This is an extension to the Linux operating system, and is covered by the
13 * same GNU General Public License that covers the Linux-kernel.
14 *
15 * comments/bugs/suggestions can be sent to:
16 * Michael Hipp
17 * email: hippm@informatik.uni-tuebingen.de
18 *
19 * sources:
20 * some things are from the 'ni6510-packet-driver for dos by Russ Nelson'
21 * and from the original drivers by D.Becker
22 *
23 * known problems:
24 * - on some PCI boards (including my own) the card/board/ISA-bridge has
25 *    problems with bus master DMA. This results in lots of overruns.
26 *    It may help to '#define RCV_PARANOIA_CHECK' or try to #undef
27 *    the XMT_VIA_SKB and RCV_VIA_SKB options .. this reduces driver performance.
28 *    Or just play with your BIOS options to optimize ISA-DMA access.
29 *    Maybe you also want to play with the LOW_PERFORMANCE and MID_PERFORMANCE
30 *    defines -> please report your experience to me
31 * - Harald reported for ASUS SP3G mainboards, that you should use
32 * the 'optimal settings' from the user's manual on page 3-12!
33 *
34 * credits:
35 * thanx to Jason Sullivan for sending me a ni6510 card!
36 * lot of debug runs with ASUS SP3G Boards (Intel Saturn) by Harald Koenig
37 *
38 * simple performance test: (486DX-33/Ni6510-EB receives from 486DX4-100/Ni6510-EB)
39 * average: FTP -> 8384421 bytes received in 8.5 seconds
40 * (no RCV_VIA_SKB,no XMT_VIA_SKB,PARANOIA_CHECK,4 XMIT BUFS, 8 RCV_BUFFS)
41 * peak: FTP -> 8384421 bytes received in 7.5 seconds
42 * (RCV_VIA_SKB,XMT_VIA_SKB,no PARANOIA_CHECK,1(!) XMIT BUF, 16 RCV BUFFS)
43 */
44
45/*
46 * 99.Jun.8: added support for /proc/net/dev byte count for xosview (HK)
47 * 96.Sept.29: virt_to_bus stuff added for new memory model
48 * 96.April.29: Added Harald Koenig's Patches (MH)
49 * 96.April.13: enhanced error handling .. more tests (MH)
50 * 96.April.5/6: a lot of performance tests. Got it stable now (hopefully) (MH)
51 * 96.April.1: (no joke ;) .. added EtherBlaster and Module support (MH)
52 * 96.Feb.19: fixed a few bugs .. cleanups .. tested for 1.3.66 (MH)
53 * hopefully no more 16MB limit
54 *
55 * 95.Nov.18: multicast tweaked (AC).
56 *
57 * 94.Aug.22: changes in xmit_intr (ack more than one xmitted-packet), ni65_send_packet (p->lock) (MH)
58 *
59 * 94.July.16: fixed bugs in recv_skb and skb-alloc stuff (MH)
60 */
61
62#include <linux/kernel.h>
63#include <linux/string.h>
64#include <linux/errno.h>
65#include <linux/ioport.h>
66#include <linux/slab.h>
67#include <linux/interrupt.h>
68#include <linux/delay.h>
69#include <linux/init.h>
70#include <linux/netdevice.h>
71#include <linux/etherdevice.h>
72#include <linux/skbuff.h>
73#include <linux/module.h>
74#include <linux/bitops.h>
75
76#include <asm/io.h>
77#include <asm/dma.h>
78
79#include "ni65.h"
80
81/*
82 * the current settings give acceptable performance.
83 * for 'RCV_PARANOIA_CHECK' read the 'known problems' part in
84 * the header of this file.
85 * 'invert' the defines for maximum performance. This may cause DMA problems
86 * on some boards (e.g. on my ASUS SP3G)
87 */
88#undef XMT_VIA_SKB
89#undef RCV_VIA_SKB
90#define RCV_PARANOIA_CHECK
91
92#define MID_PERFORMANCE
93
94#if defined( LOW_PERFORMANCE )
95 static int isa0=7,isa1=7,csr80=0x0c10;
96#elif defined( MID_PERFORMANCE )
97 static int isa0=5,isa1=5,csr80=0x2810;
98#else /* high performance */
99 static int isa0=4,isa1=4,csr80=0x0017;
100#endif
101
102/*
103 * a few card/vendor specific defines
104 */
105#define NI65_ID0 0x00
106#define NI65_ID1 0x55
107#define NI65_EB_ID0 0x52
108#define NI65_EB_ID1 0x44
109#define NE2100_ID0 0x57
110#define NE2100_ID1 0x57
111
112#define PORT p->cmdr_addr
113
114/*
115 * buffer configuration
116 */
117#if 1
118#define RMDNUM 16
119#define RMDNUMMASK 0x80000000
120#else
121#define RMDNUM 8
122#define RMDNUMMASK 0x60000000 /* log2(RMDNUM)<<29 */
123#endif
124
125#if 0
126#define TMDNUM 1
127#define TMDNUMMASK 0x00000000
128#else
129#define TMDNUM 4
130#define TMDNUMMASK 0x40000000 /* log2(TMDNUM)<<29 */
131#endif
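/* Both masks follow the LANCE init-block convention noted above: bits
 * 29..31 of a ring pointer hold log2 of the ring length. A sketch of
 * the general form (ilog2() is from <linux/log2.h>; the macro name is
 * made up for illustration):
 */
#define EXAMPLE_RING_LEN_BITS(n)	((u32)ilog2(n) << 29)
/* EXAMPLE_RING_LEN_BITS(16) == 0x80000000 == RMDNUMMASK,
 * EXAMPLE_RING_LEN_BITS(4)  == 0x40000000 == TMDNUMMASK. */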
132
133/* slightly oversized */
134#define R_BUF_SIZE 1544
135#define T_BUF_SIZE 1544
136
137/*
138 * lance register defines
139 */
140#define L_DATAREG 0x00
141#define L_ADDRREG 0x02
142#define L_RESET 0x04
143#define L_CONFIG 0x05
144#define L_BUSIF 0x06
145
146/*
147 * to access the lance/am7990-regs, you have to write
148 * reg-number into L_ADDRREG, then you can access it using L_DATAREG
149 */
150#define CSR0 0x00
151#define CSR1 0x01
152#define CSR2 0x02
153#define CSR3 0x03
154
155#define INIT_RING_BEFORE_START 0x1
156#define FULL_RESET_ON_ERROR 0x2
157
158#if 0
159#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);inw(PORT+L_ADDRREG); \
160 outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
161#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_ADDRREG),\
162 inw(PORT+L_DATAREG))
163#if 0
164#define writedatareg(val) {outw(val,PORT+L_DATAREG);inw(PORT+L_DATAREG);}
165#else
166#define writedatareg(val) { writereg(val,CSR0); }
167#endif
168#else
169#define writereg(val,reg) {outw(reg,PORT+L_ADDRREG);outw(val,PORT+L_DATAREG);}
170#define readreg(reg) (outw(reg,PORT+L_ADDRREG),inw(PORT+L_DATAREG))
171#define writedatareg(val) { writereg(val,CSR0); }
172#endif
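/* To make the two-step RAP/RDP protocol described above concrete, a
 * single register read with the macros expanded looks like this (a
 * sketch; 'port' stands for p->cmdr_addr):
 */
static unsigned short example_read_csr0(int port)
{
	outw(CSR0, port + L_ADDRREG);	/* step 1: select CSR0 via RAP */
	return inw(port + L_DATAREG);	/* step 2: read it through RDP */
}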
173
174static unsigned char ni_vendor[] = { 0x02,0x07,0x01 };
175
176static struct card {
177 unsigned char id0,id1;
178 short id_offset;
179 short total_size;
180 short cmd_offset;
181 short addr_offset;
182 unsigned char *vendor_id;
183 char *cardname;
184 unsigned long config;
185} cards[] = {
186 {
187 .id0 = NI65_ID0,
188 .id1 = NI65_ID1,
189 .id_offset = 0x0e,
190 .total_size = 0x10,
191 .cmd_offset = 0x0,
192 .addr_offset = 0x8,
193 .vendor_id = ni_vendor,
194 .cardname = "ni6510",
195 .config = 0x1,
196 },
197 {
198 .id0 = NI65_EB_ID0,
199 .id1 = NI65_EB_ID1,
200 .id_offset = 0x0e,
201 .total_size = 0x18,
202 .cmd_offset = 0x10,
203 .addr_offset = 0x0,
204 .vendor_id = ni_vendor,
205 .cardname = "ni6510 EtherBlaster",
206 .config = 0x2,
207 },
208 {
209 .id0 = NE2100_ID0,
210 .id1 = NE2100_ID1,
211 .id_offset = 0x0e,
212 .total_size = 0x18,
213 .cmd_offset = 0x10,
214 .addr_offset = 0x0,
215 .vendor_id = NULL,
216 .cardname = "generic NE2100",
217 .config = 0x0,
218 },
219};
220#define NUM_CARDS 3
221
222struct priv
223{
224 struct rmd rmdhead[RMDNUM];
225 struct tmd tmdhead[TMDNUM];
226 struct init_block ib;
227 int rmdnum;
228 int tmdnum,tmdlast;
229#ifdef RCV_VIA_SKB
230 struct sk_buff *recv_skb[RMDNUM];
231#else
232 void *recvbounce[RMDNUM];
233#endif
234#ifdef XMT_VIA_SKB
235 struct sk_buff *tmd_skb[TMDNUM];
236#endif
237 void *tmdbounce[TMDNUM];
238 int tmdbouncenum;
239 int lock,xmit_queued;
240
241 void *self;
242 int cmdr_addr;
243 int cardno;
244 int features;
245 spinlock_t ring_lock;
246};
247
248static int ni65_probe1(struct net_device *dev,int);
249static irqreturn_t ni65_interrupt(int irq, void * dev_id);
250static void ni65_recv_intr(struct net_device *dev,int);
251static void ni65_xmit_intr(struct net_device *dev,int);
252static int ni65_open(struct net_device *dev);
253static int ni65_lance_reinit(struct net_device *dev);
254static void ni65_init_lance(struct priv *p,unsigned char*,int,int);
255static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
256 struct net_device *dev);
257static void ni65_timeout(struct net_device *dev);
258static int ni65_close(struct net_device *dev);
259static int ni65_alloc_buffer(struct net_device *dev);
260static void ni65_free_buffer(struct priv *p);
261static void set_multicast_list(struct net_device *dev);
262
263static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
264static int dmatab[] __initdata = { 0,3,5,6,7 }; /* dma config-translate and autodetect */
265
266static int debuglevel = 1;
267
268/*
269 * set 'performance' registers .. we must STOP lance for that
270 */
271static void ni65_set_performance(struct priv *p)
272{
273 writereg(CSR0_STOP | CSR0_CLRALL,CSR0); /* STOP */
274
275 if( !(cards[p->cardno].config & 0x02) )
276 return;
277
278 outw(80,PORT+L_ADDRREG);
279 if(inw(PORT+L_ADDRREG) != 80)
280 return;
281
282 writereg( (csr80 & 0x3fff) ,80); /* FIFO watermarks */
283 outw(0,PORT+L_ADDRREG);
284 outw((short)isa0,PORT+L_BUSIF); /* write ISA 0: DMA_R : isa0 * 50ns */
285 outw(1,PORT+L_ADDRREG);
286 outw((short)isa1,PORT+L_BUSIF); /* write ISA 1: DMA_W : isa1 * 50ns */
287
288 outw(CSR0,PORT+L_ADDRREG); /* switch back to CSR0 */
289}
290
291/*
292 * open interface (up)
293 */
294static int ni65_open(struct net_device *dev)
295{
296 struct priv *p = dev->ml_priv;
297 int irqval = request_irq(dev->irq, ni65_interrupt,0,
298 cards[p->cardno].cardname,dev);
299 if (irqval) {
300 printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
301 dev->name,dev->irq, irqval);
302 return -EAGAIN;
303 }
304
305 if(ni65_lance_reinit(dev))
306 {
307 netif_start_queue(dev);
308 return 0;
309 }
310 else
311 {
312 free_irq(dev->irq,dev);
313 return -EAGAIN;
314 }
315}
316
317/*
318 * close interface (down)
319 */
320static int ni65_close(struct net_device *dev)
321{
322 struct priv *p = dev->ml_priv;
323
324 netif_stop_queue(dev);
325
326 outw(inw(PORT+L_RESET),PORT+L_RESET); /* that's the hard way */
327
328#ifdef XMT_VIA_SKB
329 {
330 int i;
331 for(i=0;i<TMDNUM;i++)
332 {
333 if(p->tmd_skb[i]) {
334 dev_kfree_skb(p->tmd_skb[i]);
335 p->tmd_skb[i] = NULL;
336 }
337 }
338 }
339#endif
340 free_irq(dev->irq,dev);
341 return 0;
342}
343
344static void cleanup_card(struct net_device *dev)
345{
346 struct priv *p = dev->ml_priv;
347 disable_dma(dev->dma);
348 free_dma(dev->dma);
349 release_region(dev->base_addr, cards[p->cardno].total_size);
350 ni65_free_buffer(p);
351}
352
353/* set: io,irq,dma or set it when calling insmod */
354static int irq;
355static int io;
356static int dma;
357
358/*
359 * Probe The Card (not the lance-chip)
360 */
361struct net_device * __init ni65_probe(int unit)
362{
363 struct net_device *dev = alloc_etherdev(0);
364 static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
365 const int *port;
366 int err = 0;
367
368 if (!dev)
369 return ERR_PTR(-ENOMEM);
370
371 if (unit >= 0) {
372 sprintf(dev->name, "eth%d", unit);
373 netdev_boot_setup_check(dev);
374 irq = dev->irq;
375 dma = dev->dma;
376 } else {
377 dev->base_addr = io;
378 }
379
380 if (dev->base_addr > 0x1ff) { /* Check a single specified location. */
381 err = ni65_probe1(dev, dev->base_addr);
382 } else if (dev->base_addr > 0) { /* Don't probe at all. */
383 err = -ENXIO;
384 } else {
385 for (port = ports; *port && ni65_probe1(dev, *port); port++)
386 ;
387 if (!*port)
388 err = -ENODEV;
389 }
390 if (err)
391 goto out;
392
393 err = register_netdev(dev);
394 if (err)
395 goto out1;
396 return dev;
397out1:
398 cleanup_card(dev);
399out:
400 free_netdev(dev);
401 return ERR_PTR(err);
402}
403
404static const struct net_device_ops ni65_netdev_ops = {
405 .ndo_open = ni65_open,
406 .ndo_stop = ni65_close,
407 .ndo_start_xmit = ni65_send_packet,
408 .ndo_tx_timeout = ni65_timeout,
409 .ndo_set_rx_mode = set_multicast_list,
410 .ndo_change_mtu = eth_change_mtu,
411 .ndo_set_mac_address = eth_mac_addr,
412 .ndo_validate_addr = eth_validate_addr,
413};
414
415/*
416 * this is the real card probe ..
417 */
418static int __init ni65_probe1(struct net_device *dev,int ioaddr)
419{
420 int i,j;
421 struct priv *p;
422 unsigned long flags;
423
424 dev->irq = irq;
425 dev->dma = dma;
426
427 for(i=0;i<NUM_CARDS;i++) {
428 if(!request_region(ioaddr, cards[i].total_size, cards[i].cardname))
429 continue;
430 if(cards[i].id_offset >= 0) {
431 if(inb(ioaddr+cards[i].id_offset+0) != cards[i].id0 ||
432 inb(ioaddr+cards[i].id_offset+1) != cards[i].id1) {
433 release_region(ioaddr, cards[i].total_size);
434 continue;
435 }
436 }
437		for(j=0; cards[i].vendor_id && j<3; j++)
438			if(inb(ioaddr+cards[i].addr_offset+j) != cards[i].vendor_id[j])
439				break;
440		if(cards[i].vendor_id && j < 3) {	/* vendor ID mismatch */
441			release_region(ioaddr, cards[i].total_size);
442			continue;
443		}
444 break;
445 }
446 if(i == NUM_CARDS)
447 return -ENODEV;
448
449 for(j=0;j<6;j++)
450 dev->dev_addr[j] = inb(ioaddr+cards[i].addr_offset+j);
451
452 if( (j=ni65_alloc_buffer(dev)) < 0) {
453 release_region(ioaddr, cards[i].total_size);
454 return j;
455 }
456 p = dev->ml_priv;
457 p->cmdr_addr = ioaddr + cards[i].cmd_offset;
458 p->cardno = i;
459 spin_lock_init(&p->ring_lock);
460
461 printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cards[p->cardno].cardname , ioaddr);
462
463 outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
464 if( (j=readreg(CSR0)) != 0x4) {
465 printk("failed.\n");
466 printk(KERN_ERR "%s: Can't RESET card: %04x\n", dev->name, j);
467 ni65_free_buffer(p);
468 release_region(ioaddr, cards[p->cardno].total_size);
469 return -EAGAIN;
470 }
471
472 outw(88,PORT+L_ADDRREG);
473 if(inw(PORT+L_ADDRREG) == 88) {
474 unsigned long v;
475 v = inw(PORT+L_DATAREG);
476 v <<= 16;
477 outw(89,PORT+L_ADDRREG);
478 v |= inw(PORT+L_DATAREG);
479 printk("Version %#08lx, ",v);
480 p->features = INIT_RING_BEFORE_START;
481 }
482 else {
483 printk("ancient LANCE, ");
484 p->features = 0x0;
485 }
486
487 if(test_bit(0,&cards[i].config)) {
488 dev->irq = irqtab[(inw(ioaddr+L_CONFIG)>>2)&3];
489 dev->dma = dmatab[inw(ioaddr+L_CONFIG)&3];
490 printk("IRQ %d (from card), DMA %d (from card).\n",dev->irq,dev->dma);
491 }
492 else {
493 if(dev->dma == 0) {
494 /* 'stuck test' from lance.c */
495 unsigned long dma_channels =
496 ((inb(DMA1_STAT_REG) >> 4) & 0x0f)
497 | (inb(DMA2_STAT_REG) & 0xf0);
498 for(i=1;i<5;i++) {
499 int dma = dmatab[i];
500 if(test_bit(dma,&dma_channels) || request_dma(dma,"ni6510"))
501 continue;
502
503 flags=claim_dma_lock();
504 disable_dma(dma);
505 set_dma_mode(dma,DMA_MODE_CASCADE);
506 enable_dma(dma);
507 release_dma_lock(flags);
508
509 ni65_init_lance(p,dev->dev_addr,0,0); /* trigger memory access */
510
511 flags=claim_dma_lock();
512 disable_dma(dma);
513 free_dma(dma);
514 release_dma_lock(flags);
515
516 if(readreg(CSR0) & CSR0_IDON)
517 break;
518 }
519 if(i == 5) {
520 printk("failed.\n");
521 printk(KERN_ERR "%s: Can't detect DMA channel!\n", dev->name);
522 ni65_free_buffer(p);
523 release_region(ioaddr, cards[p->cardno].total_size);
524 return -EAGAIN;
525 }
526 dev->dma = dmatab[i];
527 printk("DMA %d (autodetected), ",dev->dma);
528 }
529 else
530 printk("DMA %d (assigned), ",dev->dma);
531
532 if(dev->irq < 2)
533 {
534 unsigned long irq_mask;
535
536 ni65_init_lance(p,dev->dev_addr,0,0);
537 irq_mask = probe_irq_on();
538 writereg(CSR0_INIT|CSR0_INEA,CSR0); /* trigger interrupt */
539 msleep(20);
540 dev->irq = probe_irq_off(irq_mask);
541 if(!dev->irq)
542 {
543 printk("Failed to detect IRQ line!\n");
544 ni65_free_buffer(p);
545 release_region(ioaddr, cards[p->cardno].total_size);
546 return -EAGAIN;
547 }
548 printk("IRQ %d (autodetected).\n",dev->irq);
549 }
550 else
551 printk("IRQ %d (assigned).\n",dev->irq);
552 }
553
554 if(request_dma(dev->dma, cards[p->cardno].cardname ) != 0)
555 {
556 printk(KERN_ERR "%s: Can't request dma-channel %d\n",dev->name,(int) dev->dma);
557 ni65_free_buffer(p);
558 release_region(ioaddr, cards[p->cardno].total_size);
559 return -EAGAIN;
560 }
561
562 dev->base_addr = ioaddr;
563 dev->netdev_ops = &ni65_netdev_ops;
564 dev->watchdog_timeo = HZ/2;
565
566 return 0; /* everything is OK */
567}
568
569/*
570 * set lance register and trigger init
571 */
572static void ni65_init_lance(struct priv *p,unsigned char *daddr,int filter,int mode)
573{
574 int i;
575 u32 pib;
576
577 writereg(CSR0_CLRALL|CSR0_STOP,CSR0);
578
579 for(i=0;i<6;i++)
580 p->ib.eaddr[i] = daddr[i];
581
582 for(i=0;i<8;i++)
583 p->ib.filter[i] = filter;
584 p->ib.mode = mode;
585
586 p->ib.trp = (u32) isa_virt_to_bus(p->tmdhead) | TMDNUMMASK;
587 p->ib.rrp = (u32) isa_virt_to_bus(p->rmdhead) | RMDNUMMASK;
588 writereg(0,CSR3); /* busmaster/no word-swap */
589 pib = (u32) isa_virt_to_bus(&p->ib);
590 writereg(pib & 0xffff,CSR1);
591 writereg(pib >> 16,CSR2);
592
593 writereg(CSR0_INIT,CSR0); /* this changes L_ADDRREG to CSR0 */
594
595 for(i=0;i<32;i++)
596 {
597 mdelay(4);
598 if(inw(PORT+L_DATAREG) & (CSR0_IDON | CSR0_MERR) )
599 break; /* init ok ? */
600 }
601}
602
603/*
604 * allocate a memory area and check the 16MB ISA DMA boundary
605 */
606static void *ni65_alloc_mem(struct net_device *dev,char *what,int size,int type)
607{
608 struct sk_buff *skb=NULL;
609 unsigned char *ptr;
610 void *ret;
611
612 if(type) {
613 ret = skb = alloc_skb(2+16+size,GFP_KERNEL|GFP_DMA);
614 if(!skb) {
615 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
616 return NULL;
617 }
618 skb_reserve(skb,2+16);
619 skb_put(skb,R_BUF_SIZE); /* grab the whole space .. (not necessary) */
620 ptr = skb->data;
621 }
622 else {
623 ret = ptr = kmalloc(T_BUF_SIZE,GFP_KERNEL | GFP_DMA);
624 if(!ret) {
625 printk(KERN_WARNING "%s: unable to allocate %s memory.\n",dev->name,what);
626 return NULL;
627 }
628 }
629 if( (u32) virt_to_phys(ptr+size) > 0x1000000) {
630 printk(KERN_WARNING "%s: unable to allocate %s memory in lower 16MB!\n",dev->name,what);
631 if(type)
632 kfree_skb(skb);
633 else
634 kfree(ptr);
635 return NULL;
636 }
637 return ret;
638}
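/* The boundary test above exists because ISA bus-master DMA drives only
 * 24 address lines. The same test, isolated into an illustrative helper:
 */
static int example_isa_dma_ok(void *buf, int size)
{
	/* the buffer must end at or below the 16MB mark */
	return (u32)virt_to_phys((char *)buf + size) <= 0x1000000;
}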
639
640/*
641 * allocate all memory structures .. send/recv buffers etc ...
642 */
643static int ni65_alloc_buffer(struct net_device *dev)
644{
645 unsigned char *ptr;
646 struct priv *p;
647 int i;
648
649 /*
650	 * we need 8-byte aligned memory ..
651 */
652 ptr = ni65_alloc_mem(dev,"BUFFER",sizeof(struct priv)+8,0);
653 if(!ptr)
654 return -ENOMEM;
655
656 p = dev->ml_priv = (struct priv *) (((unsigned long) ptr + 7) & ~0x7);
657 memset((char *)p, 0, sizeof(struct priv));
658 p->self = ptr;
659
660 for(i=0;i<TMDNUM;i++)
661 {
662#ifdef XMT_VIA_SKB
663 p->tmd_skb[i] = NULL;
664#endif
665 p->tmdbounce[i] = ni65_alloc_mem(dev,"XMIT",T_BUF_SIZE,0);
666 if(!p->tmdbounce[i]) {
667 ni65_free_buffer(p);
668 return -ENOMEM;
669 }
670 }
671
672 for(i=0;i<RMDNUM;i++)
673 {
674#ifdef RCV_VIA_SKB
675 p->recv_skb[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,1);
676 if(!p->recv_skb[i]) {
677 ni65_free_buffer(p);
678 return -ENOMEM;
679 }
680#else
681 p->recvbounce[i] = ni65_alloc_mem(dev,"RECV",R_BUF_SIZE,0);
682 if(!p->recvbounce[i]) {
683 ni65_free_buffer(p);
684 return -ENOMEM;
685 }
686#endif
687 }
688
689 return 0; /* everything is OK */
690}
691
692/*
693 * free buffers and private struct
694 */
695static void ni65_free_buffer(struct priv *p)
696{
697 int i;
698
699 if(!p)
700 return;
701
702 for(i=0;i<TMDNUM;i++) {
703 kfree(p->tmdbounce[i]);
704#ifdef XMT_VIA_SKB
705 if(p->tmd_skb[i])
706 dev_kfree_skb(p->tmd_skb[i]);
707#endif
708 }
709
710 for(i=0;i<RMDNUM;i++)
711 {
712#ifdef RCV_VIA_SKB
713 if(p->recv_skb[i])
714 dev_kfree_skb(p->recv_skb[i]);
715#else
716 kfree(p->recvbounce[i]);
717#endif
718 }
719 kfree(p->self);
720}
721
722
723/*
724 * stop and (re)start lance .. e.g. after an error
725 */
726static void ni65_stop_start(struct net_device *dev,struct priv *p)
727{
728 int csr0 = CSR0_INEA;
729
730 writedatareg(CSR0_STOP);
731
732 if(debuglevel > 1)
733 printk(KERN_DEBUG "ni65_stop_start\n");
734
735 if(p->features & INIT_RING_BEFORE_START) {
736 int i;
737#ifdef XMT_VIA_SKB
738 struct sk_buff *skb_save[TMDNUM];
739#endif
740 unsigned long buffer[TMDNUM];
741 short blen[TMDNUM];
742
743 if(p->xmit_queued) {
744 while(1) {
745 if((p->tmdhead[p->tmdlast].u.s.status & XMIT_OWN))
746 break;
747 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
748 if(p->tmdlast == p->tmdnum)
749 break;
750 }
751 }
752
753 for(i=0;i<TMDNUM;i++) {
754 struct tmd *tmdp = p->tmdhead + i;
755#ifdef XMT_VIA_SKB
756 skb_save[i] = p->tmd_skb[i];
757#endif
758 buffer[i] = (u32) isa_bus_to_virt(tmdp->u.buffer);
759 blen[i] = tmdp->blen;
760 tmdp->u.s.status = 0x0;
761 }
762
763 for(i=0;i<RMDNUM;i++) {
764 struct rmd *rmdp = p->rmdhead + i;
765 rmdp->u.s.status = RCV_OWN;
766 }
767 p->tmdnum = p->xmit_queued = 0;
768 writedatareg(CSR0_STRT | csr0);
769
770 for(i=0;i<TMDNUM;i++) {
771 int num = (i + p->tmdlast) & (TMDNUM-1);
772 p->tmdhead[i].u.buffer = (u32) isa_virt_to_bus((char *)buffer[num]); /* status is part of buffer field */
773 p->tmdhead[i].blen = blen[num];
774 if(p->tmdhead[i].u.s.status & XMIT_OWN) {
775 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
776 p->xmit_queued = 1;
777 writedatareg(CSR0_TDMD | CSR0_INEA | csr0);
778 }
779#ifdef XMT_VIA_SKB
780 p->tmd_skb[i] = skb_save[num];
781#endif
782 }
783 p->rmdnum = p->tmdlast = 0;
784 if(!p->lock)
785 if (p->tmdnum || !p->xmit_queued)
786 netif_wake_queue(dev);
787 dev->trans_start = jiffies; /* prevent tx timeout */
788 }
789 else
790 writedatareg(CSR0_STRT | csr0);
791}
792
793/*
794 * init lance (write init-values .. init-buffers) (open-helper)
795 */
796static int ni65_lance_reinit(struct net_device *dev)
797{
798 int i;
799 struct priv *p = dev->ml_priv;
800 unsigned long flags;
801
802 p->lock = 0;
803 p->xmit_queued = 0;
804
805 flags=claim_dma_lock();
806 disable_dma(dev->dma); /* I've never worked with dma, but we do it like the packetdriver */
807 set_dma_mode(dev->dma,DMA_MODE_CASCADE);
808 enable_dma(dev->dma);
809 release_dma_lock(flags);
810
811 outw(inw(PORT+L_RESET),PORT+L_RESET); /* first: reset the card */
812 if( (i=readreg(CSR0) ) != 0x4)
813 {
814 printk(KERN_ERR "%s: can't RESET %s card: %04x\n",dev->name,
815 cards[p->cardno].cardname,(int) i);
816 flags=claim_dma_lock();
817 disable_dma(dev->dma);
818 release_dma_lock(flags);
819 return 0;
820 }
821
822 p->rmdnum = p->tmdnum = p->tmdlast = p->tmdbouncenum = 0;
823 for(i=0;i<TMDNUM;i++)
824 {
825 struct tmd *tmdp = p->tmdhead + i;
826#ifdef XMT_VIA_SKB
827 if(p->tmd_skb[i]) {
828 dev_kfree_skb(p->tmd_skb[i]);
829 p->tmd_skb[i] = NULL;
830 }
831#endif
832 tmdp->u.buffer = 0x0;
833 tmdp->u.s.status = XMIT_START | XMIT_END;
834 tmdp->blen = tmdp->status2 = 0;
835 }
836
837 for(i=0;i<RMDNUM;i++)
838 {
839 struct rmd *rmdp = p->rmdhead + i;
840#ifdef RCV_VIA_SKB
841 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recv_skb[i]->data);
842#else
843 rmdp->u.buffer = (u32) isa_virt_to_bus(p->recvbounce[i]);
844#endif
845 rmdp->blen = -(R_BUF_SIZE-8);
846 rmdp->mlen = 0;
847 rmdp->u.s.status = RCV_OWN;
848 }
849
850 if(dev->flags & IFF_PROMISC)
851 ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
852 else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
853 ni65_init_lance(p,dev->dev_addr,0xff,0x0);
854 else
855 ni65_init_lance(p,dev->dev_addr,0x00,0x00);
856
857 /*
858	 * ni65_init_lance() leaves L_ADDRREG set to CSR0
859 * NOW, WE WILL NEVER CHANGE THE L_ADDRREG, CSR0 IS ALWAYS SELECTED
860 */
861
862 if(inw(PORT+L_DATAREG) & CSR0_IDON) {
863 ni65_set_performance(p);
864 /* init OK: start lance , enable interrupts */
865 writedatareg(CSR0_CLRALL | CSR0_INEA | CSR0_STRT);
866 return 1; /* ->OK */
867 }
868 printk(KERN_ERR "%s: can't init lance, status: %04x\n",dev->name,(int) inw(PORT+L_DATAREG));
869 flags=claim_dma_lock();
870 disable_dma(dev->dma);
871 release_dma_lock(flags);
872 return 0; /* ->Error */
873}
874
875/*
876 * interrupt handler
877 */
878static irqreturn_t ni65_interrupt(int irq, void * dev_id)
879{
880 int csr0 = 0;
881 struct net_device *dev = dev_id;
882 struct priv *p;
883 int bcnt = 32;
884
885 p = dev->ml_priv;
886
887 spin_lock(&p->ring_lock);
888
889 while(--bcnt) {
890 csr0 = inw(PORT+L_DATAREG);
891
892#if 0
893 writedatareg( (csr0 & CSR0_CLRALL) ); /* ack interrupts, disable int. */
894#else
895 writedatareg( (csr0 & CSR0_CLRALL) | CSR0_INEA ); /* ack interrupts, interrupts enabled */
896#endif
897
898 if(!(csr0 & (CSR0_ERR | CSR0_RINT | CSR0_TINT)))
899 break;
900
901 if(csr0 & CSR0_RINT) /* RECV-int? */
902 ni65_recv_intr(dev,csr0);
903 if(csr0 & CSR0_TINT) /* XMIT-int? */
904 ni65_xmit_intr(dev,csr0);
905
906 if(csr0 & CSR0_ERR)
907 {
908 if(debuglevel > 1)
909 printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
910 if(csr0 & CSR0_BABL)
911 dev->stats.tx_errors++;
912 if(csr0 & CSR0_MISS) {
913 int i;
914 for(i=0;i<RMDNUM;i++)
915 printk("%02x ",p->rmdhead[i].u.s.status);
916 printk("\n");
917 dev->stats.rx_errors++;
918 }
919 if(csr0 & CSR0_MERR) {
920 if(debuglevel > 1)
921 printk(KERN_ERR "%s: Ooops .. memory error: %04x.\n",dev->name,csr0);
922 ni65_stop_start(dev,p);
923 }
924 }
925 }
926
927#ifdef RCV_PARANOIA_CHECK
928{
929 int j;
930 for(j=0;j<RMDNUM;j++)
931 {
932 int i, num2;
933 for(i=RMDNUM-1;i>0;i--) {
934 num2 = (p->rmdnum + i) & (RMDNUM-1);
935 if(!(p->rmdhead[num2].u.s.status & RCV_OWN))
936 break;
937 }
938
939 if(i) {
940 int k, num1;
941 for(k=0;k<RMDNUM;k++) {
942 num1 = (p->rmdnum + k) & (RMDNUM-1);
943 if(!(p->rmdhead[num1].u.s.status & RCV_OWN))
944 break;
945 }
946 if(!k)
947 break;
948
949 if(debuglevel > 0)
950 {
951 char buf[256],*buf1;
952 buf1 = buf;
953 for(k=0;k<RMDNUM;k++) {
954 sprintf(buf1,"%02x ",(p->rmdhead[k].u.s.status)); /* & RCV_OWN) ); */
955 buf1 += 3;
956 }
957 *buf1 = 0;
958 printk(KERN_ERR "%s: Ooops, receive ring corrupted %2d %2d | %s\n",dev->name,p->rmdnum,i,buf);
959 }
960
961 p->rmdnum = num1;
962 ni65_recv_intr(dev,csr0);
963 if((p->rmdhead[num2].u.s.status & RCV_OWN))
964 break; /* ok, we are 'in sync' again */
965 }
966 else
967 break;
968 }
969}
970#endif
971
972 if( (csr0 & (CSR0_RXON | CSR0_TXON)) != (CSR0_RXON | CSR0_TXON) ) {
973 printk(KERN_DEBUG "%s: RX or TX was offline -> restart\n",dev->name);
974 ni65_stop_start(dev,p);
975 }
976 else
977 writedatareg(CSR0_INEA);
978
979 spin_unlock(&p->ring_lock);
980 return IRQ_HANDLED;
981}
982
983/*
984 * We have received an Xmit-Interrupt ..
985 * send a new packet if necessary
986 */
987static void ni65_xmit_intr(struct net_device *dev,int csr0)
988{
989 struct priv *p = dev->ml_priv;
990
991 while(p->xmit_queued)
992 {
993 struct tmd *tmdp = p->tmdhead + p->tmdlast;
994 int tmdstat = tmdp->u.s.status;
995
996 if(tmdstat & XMIT_OWN)
997 break;
998
999 if(tmdstat & XMIT_ERR)
1000 {
1001#if 0
1002 if(tmdp->status2 & XMIT_TDRMASK && debuglevel > 3)
1003 printk(KERN_ERR "%s: tdr-problems (e.g. no resistor)\n",dev->name);
1004#endif
1005 /* checking some errors */
1006 if(tmdp->status2 & XMIT_RTRY)
1007 dev->stats.tx_aborted_errors++;
1008 if(tmdp->status2 & XMIT_LCAR)
1009 dev->stats.tx_carrier_errors++;
1010 if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
1011 /* this stops the xmitter */
1012 dev->stats.tx_fifo_errors++;
1013 if(debuglevel > 0)
1014 printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
1015 if(p->features & INIT_RING_BEFORE_START) {
1016 tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END; /* test: resend this frame */
1017 ni65_stop_start(dev,p);
1018 break; /* no more Xmit processing .. */
1019 }
1020 else
1021 ni65_stop_start(dev,p);
1022 }
1023 if(debuglevel > 2)
1024 printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
1025 if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
1026 dev->stats.tx_errors++;
1027 tmdp->status2 = 0;
1028 }
1029 else {
1030 dev->stats.tx_bytes -= (short)(tmdp->blen);
1031 dev->stats.tx_packets++;
1032 }
1033
1034#ifdef XMT_VIA_SKB
1035 if(p->tmd_skb[p->tmdlast]) {
1036 dev_kfree_skb_irq(p->tmd_skb[p->tmdlast]);
1037 p->tmd_skb[p->tmdlast] = NULL;
1038 }
1039#endif
1040
1041 p->tmdlast = (p->tmdlast + 1) & (TMDNUM-1);
1042 if(p->tmdlast == p->tmdnum)
1043 p->xmit_queued = 0;
1044 }
1045 netif_wake_queue(dev);
1046}
1047
1048/*
1049 * We have received a packet
1050 */
1051static void ni65_recv_intr(struct net_device *dev,int csr0)
1052{
1053 struct rmd *rmdp;
1054 int rmdstat,len;
1055 int cnt=0;
1056 struct priv *p = dev->ml_priv;
1057
1058 rmdp = p->rmdhead + p->rmdnum;
1059 while(!( (rmdstat = rmdp->u.s.status) & RCV_OWN))
1060 {
1061 cnt++;
1062 if( (rmdstat & (RCV_START | RCV_END | RCV_ERR)) != (RCV_START | RCV_END) ) /* error or oversized? */
1063 {
1064 if(!(rmdstat & RCV_ERR)) {
1065 if(rmdstat & RCV_START)
1066 {
1067 dev->stats.rx_length_errors++;
1068 printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
1069 }
1070 }
1071 else {
1072 if(debuglevel > 2)
1073 printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
1074 dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
1075 if(rmdstat & RCV_FRAM)
1076 dev->stats.rx_frame_errors++;
1077 if(rmdstat & RCV_OFLO)
1078 dev->stats.rx_over_errors++;
1079 if(rmdstat & RCV_CRC)
1080 dev->stats.rx_crc_errors++;
1081 if(rmdstat & RCV_BUF_ERR)
1082 dev->stats.rx_fifo_errors++;
1083 }
1084 if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
1085 dev->stats.rx_errors++;
1086 }
1087 else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
1088 {
1089#ifdef RCV_VIA_SKB
1090 struct sk_buff *skb = alloc_skb(R_BUF_SIZE+2+16,GFP_ATOMIC);
1091 if (skb)
1092 skb_reserve(skb,16);
1093#else
1094 struct sk_buff *skb = dev_alloc_skb(len+2);
1095#endif
1096 if(skb)
1097 {
1098 skb_reserve(skb,2);
1099#ifdef RCV_VIA_SKB
1100 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) {
1101 skb_put(skb,len);
1102 skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len);
1103 }
1104 else {
1105 struct sk_buff *skb1 = p->recv_skb[p->rmdnum];
1106 skb_put(skb,R_BUF_SIZE);
1107 p->recv_skb[p->rmdnum] = skb;
1108 rmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
1109 skb = skb1;
1110 skb_trim(skb,len);
1111 }
1112#else
1113 skb_put(skb,len);
1114 skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
1115#endif
1116 dev->stats.rx_packets++;
1117 dev->stats.rx_bytes += len;
1118 skb->protocol=eth_type_trans(skb,dev);
1119 netif_rx(skb);
1120 }
1121 else
1122 {
1123 printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
1124 dev->stats.rx_dropped++;
1125 }
1126 }
1127 else {
1128 printk(KERN_INFO "%s: received runt packet\n",dev->name);
1129 dev->stats.rx_errors++;
1130 }
1131 rmdp->blen = -(R_BUF_SIZE-8);
1132 rmdp->mlen = 0;
1133 rmdp->u.s.status = RCV_OWN; /* change owner */
1134 p->rmdnum = (p->rmdnum + 1) & (RMDNUM-1);
1135 rmdp = p->rmdhead + p->rmdnum;
1136 }
1137}
1138
1139/*
1140 * kick xmitter ..
1141 */
1142
1143static void ni65_timeout(struct net_device *dev)
1144{
1145 int i;
1146 struct priv *p = dev->ml_priv;
1147
1148 printk(KERN_ERR "%s: xmitter timed out, try to restart!\n",dev->name);
1149 for(i=0;i<TMDNUM;i++)
1150 printk("%02x ",p->tmdhead[i].u.s.status);
1151 printk("\n");
1152 ni65_lance_reinit(dev);
1153 dev->trans_start = jiffies; /* prevent tx timeout */
1154 netif_wake_queue(dev);
1155}
1156
1157/*
1158 * Send a packet
1159 */
1160
1161static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
1162 struct net_device *dev)
1163{
1164 struct priv *p = dev->ml_priv;
1165
1166 netif_stop_queue(dev);
1167
1168 if (test_and_set_bit(0, (void*)&p->lock)) {
1169 printk(KERN_ERR "%s: Queue was locked.\n", dev->name);
1170 return NETDEV_TX_BUSY;
1171 }
1172
1173 {
1174 short len = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
1175 struct tmd *tmdp;
1176 unsigned long flags;
1177
1178#ifdef XMT_VIA_SKB
1179 if( (unsigned long) (skb->data + skb->len) > 0x1000000) {
1180#endif
1181
1182 skb_copy_from_linear_data(skb, p->tmdbounce[p->tmdbouncenum],
1183 skb->len > T_BUF_SIZE ? T_BUF_SIZE :
1184 skb->len);
1185 if (len > skb->len)
1186 memset((char *)p->tmdbounce[p->tmdbouncenum]+skb->len, 0, len-skb->len);
1187 dev_kfree_skb (skb);
1188
1189 spin_lock_irqsave(&p->ring_lock, flags);
1190 tmdp = p->tmdhead + p->tmdnum;
1191 tmdp->u.buffer = (u32) isa_virt_to_bus(p->tmdbounce[p->tmdbouncenum]);
1192 p->tmdbouncenum = (p->tmdbouncenum + 1) & (TMDNUM - 1);
1193
1194#ifdef XMT_VIA_SKB
1195 }
1196 else {
1197 spin_lock_irqsave(&p->ring_lock, flags);
1198
1199 tmdp = p->tmdhead + p->tmdnum;
1200 tmdp->u.buffer = (u32) isa_virt_to_bus(skb->data);
1201 p->tmd_skb[p->tmdnum] = skb;
1202 }
1203#endif
1204 tmdp->blen = -len;
1205
1206 tmdp->u.s.status = XMIT_OWN | XMIT_START | XMIT_END;
1207 writedatareg(CSR0_TDMD | CSR0_INEA); /* enable xmit & interrupt */
1208
1209 p->xmit_queued = 1;
1210 p->tmdnum = (p->tmdnum + 1) & (TMDNUM-1);
1211
1212 if(p->tmdnum != p->tmdlast)
1213 netif_wake_queue(dev);
1214
1215 p->lock = 0;
1216
1217 spin_unlock_irqrestore(&p->ring_lock, flags);
1218 }
1219
1220 return NETDEV_TX_OK;
1221}
1222
1223static void set_multicast_list(struct net_device *dev)
1224{
1225 if(!ni65_lance_reinit(dev))
1226 printk(KERN_ERR "%s: Can't switch card into MC mode!\n",dev->name);
1227 netif_wake_queue(dev);
1228}
1229
1230#ifdef MODULE
1231static struct net_device *dev_ni65;
1232
1233module_param(irq, int, 0);
1234module_param(io, int, 0);
1235module_param(dma, int, 0);
1236MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
1237MODULE_PARM_DESC(io, "ni6510 I/O base address");
1238MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
1239
1240int __init init_module(void)
1241{
1242 dev_ni65 = ni65_probe(-1);
1243 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
1244}
1245
1246void __exit cleanup_module(void)
1247{
1248 unregister_netdev(dev_ni65);
1249 cleanup_card(dev_ni65);
1250 free_netdev(dev_ni65);
1251}
1252#endif /* MODULE */
1253
1254MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/ni65.h b/drivers/net/ethernet/amd/ni65.h
new file mode 100644
index 000000000000..e6217e35edf0
--- /dev/null
+++ b/drivers/net/ethernet/amd/ni65.h
@@ -0,0 +1,121 @@
1/* am7990 (lance) definitions
2 *
3 * This is an extension to the Linux operating system, and is covered by
4 * the same GNU General Public License that covers that work.
5 *
6 * Michael Hipp
7 * email: mhipp@student.uni-tuebingen.de
8 *
9 * sources: (mail me or ask archie if you need them)
10 * crynwr-packet-driver
11 */
12
13/*
14 * Control and Status Register 0 (CSR0) bit definitions
15 * (R=Readable) (W=Writeable) (S=Set on write) (C=Clear on write)
16 *
17 */
18
19#define CSR0_ERR 0x8000 /* Error summary (R) */
20#define CSR0_BABL 0x4000 /* Babble transmitter timeout error (RC) */
21#define CSR0_CERR 0x2000 /* Collision Error (RC) */
22#define CSR0_MISS 0x1000 /* Missed packet (RC) */
23#define CSR0_MERR 0x0800 /* Memory Error (RC) */
24#define CSR0_RINT 0x0400 /* Receiver Interrupt (RC) */
25#define CSR0_TINT 0x0200 /* Transmit Interrupt (RC) */
26#define CSR0_IDON 0x0100 /* Initialization Done (RC) */
27#define CSR0_INTR 0x0080 /* Interrupt Flag (R) */
28#define CSR0_INEA 0x0040 /* Interrupt Enable (RW) */
29#define CSR0_RXON 0x0020 /* Receiver on (R) */
30#define CSR0_TXON 0x0010 /* Transmitter on (R) */
31#define CSR0_TDMD 0x0008 /* Transmit Demand (RS) */
32#define CSR0_STOP 0x0004 /* Stop (RS) */
33#define CSR0_STRT 0x0002 /* Start (RS) */
34#define CSR0_INIT 0x0001 /* Initialize (RS) */
35
36#define CSR0_CLRALL 0x7f00 /* mask for all clearable bits */
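/* An illustrative use of the CSR0 bits above, e.g. for debug output
 * (the helper is a sketch, not part of the driver):
 */
static void example_show_csr0(unsigned short csr0)
{
	printk(KERN_DEBUG "CSR0=%04x%s%s%s%s\n", csr0,
	       (csr0 & CSR0_ERR)  ? " ERR"  : "",
	       (csr0 & CSR0_RINT) ? " RINT" : "",
	       (csr0 & CSR0_TINT) ? " TINT" : "",
	       (csr0 & CSR0_IDON) ? " IDON" : "");
}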
37/*
38 * Initialization Block Mode operation Bit Definitions.
39 */
40
41#define M_PROM 0x8000 /* Promiscuous Mode */
42#define M_INTL 0x0040 /* Internal Loopback */
43#define M_DRTY 0x0020 /* Disable Retry */
44#define M_COLL 0x0010 /* Force Collision */
45#define M_DTCR 0x0008 /* Disable Transmit CRC */
46#define M_LOOP 0x0004 /* Loopback */
47#define M_DTX 0x0002 /* Disable the Transmitter */
48#define M_DRX 0x0001 /* Disable the Receiver */
49
50
51/*
52 * Receive message descriptor bit definitions.
53 */
54
55#define RCV_OWN 0x80 /* owner bit 0 = host, 1 = lance */
56#define RCV_ERR 0x40 /* Error Summary */
57#define RCV_FRAM 0x20 /* Framing Error */
58#define RCV_OFLO 0x10 /* Overflow Error */
59#define RCV_CRC 0x08 /* CRC Error */
60#define RCV_BUF_ERR 0x04 /* Buffer Error */
61#define RCV_START 0x02 /* Start of Packet */
62#define RCV_END 0x01 /* End of Packet */
63
64
65/*
66 * Transmit message descriptor bit definitions.
67 */
68
69#define XMIT_OWN 0x80 /* owner bit 0 = host, 1 = lance */
70#define XMIT_ERR 0x40 /* Error Summary */
71#define XMIT_RETRY 0x10 /* more than 1 retry needed to Xmit */
72#define XMIT_1_RETRY 0x08 /* one retry needed to Xmit */
73#define XMIT_DEF 0x04 /* Deferred */
74#define XMIT_START 0x02 /* Start of Packet */
75#define XMIT_END 0x01 /* End of Packet */
76
77/*
78 * transmit status (2) (valid if XMIT_ERR == 1)
79 */
80
81#define XMIT_TDRMASK 0x03ff /* time-domain-reflectometer-value */
82#define XMIT_RTRY 0x0400 /* Failed after 16 retransmissions */
83#define XMIT_LCAR 0x0800 /* Loss of Carrier */
84#define XMIT_LCOL 0x1000 /* Late collision */
85#define XMIT_RESERV 0x2000 /* Reserved */
86#define XMIT_UFLO 0x4000 /* Underflow (late memory) */
87#define XMIT_BUFF 0x8000 /* Buffering error (no ENP) */
88
89struct init_block {
90 unsigned short mode;
91 unsigned char eaddr[6];
92 unsigned char filter[8];
93 /* bit 29-31: number of rmd's (power of 2) */
94 u32 rrp; /* receive ring pointer (align 8) */
95 /* bit 29-31: number of tmd's (power of 2) */
96 u32 trp; /* transmit ring pointer (align 8) */
97};
98
99struct rmd { /* Receive Message Descriptor */
100 union {
101 volatile u32 buffer;
102 struct {
103 volatile unsigned char dummy[3];
104 volatile unsigned char status;
105 } s;
106 } u;
107 volatile short blen;
108 volatile unsigned short mlen;
109};
110
111struct tmd {
112 union {
113 volatile u32 buffer;
114 struct {
115 volatile unsigned char dummy[3];
116 volatile unsigned char status;
117 } s;
118 } u;
119 volatile unsigned short blen;
120 volatile unsigned short status2;
121};
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
new file mode 100644
index 000000000000..3accd5d21b08
--- /dev/null
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -0,0 +1,1525 @@
1/* ----------------------------------------------------------------------------
2Linux PCMCIA ethernet adapter driver for the New Media Ethernet LAN.
3 nmclan_cs.c,v 0.16 1995/07/01 06:42:17 rpao Exp rpao
4
5 The Ethernet LAN uses the Advanced Micro Devices (AMD) Am79C940 Media
6 Access Controller for Ethernet (MACE). It is essentially the Am2150
7 PCMCIA Ethernet card contained in the Am2150 Demo Kit.
8
9Written by Roger C. Pao <rpao@paonet.org>
10 Copyright 1995 Roger C. Pao
11 Linux 2.5 cleanups Copyright Red Hat 2003
12
13 This software may be used and distributed according to the terms of
14 the GNU General Public License.
15
16Ported to Linux 1.3.* network driver environment by
17 Matti Aarnio <mea@utu.fi>
18
19References
20
21 Am2150 Technical Reference Manual, Revision 1.0, August 17, 1993
22 Am79C940 (MACE) Data Sheet, 1994
23 Am79C90 (C-LANCE) Data Sheet, 1994
24 Linux PCMCIA Programmer's Guide v1.17
25 /usr/src/linux/net/inet/dev.c, Linux kernel 1.2.8
26
27 Eric Mears, New Media Corporation
28 Tom Pollard, New Media Corporation
29 Dean Siasoyco, New Media Corporation
30 Ken Lesniak, Silicon Graphics, Inc. <lesniak@boston.sgi.com>
31 Donald Becker <becker@scyld.com>
32 David Hinds <dahinds@users.sourceforge.net>
33
34 The Linux client driver is based on the 3c589_cs.c client driver by
35 David Hinds.
36
37 The Linux network driver outline is based on the 3c589_cs.c driver,
38 the 8390.c driver, and the example skeleton.c kernel code, which are
39 by Donald Becker.
40
41 The Am2150 network driver hardware interface code is based on the
42 OS/9000 driver for the New Media Ethernet LAN by Eric Mears.
43
44 Special thanks for testing and help in debugging this driver goes
45 to Ken Lesniak.
46
47-------------------------------------------------------------------------------
48Driver Notes and Issues
49-------------------------------------------------------------------------------
50
511. Developed on a Dell 320SLi
52 PCMCIA Card Services 2.6.2
53 Linux dell 1.2.10 #1 Thu Jun 29 20:23:41 PDT 1995 i386
54
552. rc.pcmcia may require loading pcmcia_core with io_speed=300:
56 'insmod pcmcia_core.o io_speed=300'.
57   This avoids a problem on fast systems that causes rx_framecnt
58 to return random values.
59
603. If hot extraction does not work for you, use 'ifconfig eth0 down'
61 before extraction.
62
634. There is a bad slow-down problem in this driver.
64
655. Future: Multicast processing. In the meantime, do _not_ compile your
66 kernel with multicast ip enabled.
67
68-------------------------------------------------------------------------------
69History
70-------------------------------------------------------------------------------
71Log: nmclan_cs.c,v
72 * 2.5.75-ac1 2003/07/11 Alan Cox <alan@lxorguk.ukuu.org.uk>
73 * Fixed hang on card eject as we probe it
74 * Cleaned up to use new style locking.
75 *
76 * Revision 0.16 1995/07/01 06:42:17 rpao
77 * Bug fix: nmclan_reset() called CardServices incorrectly.
78 *
79 * Revision 0.15 1995/05/24 08:09:47 rpao
80 * Re-implement MULTI_TX dev->tbusy handling.
81 *
82 * Revision 0.14 1995/05/23 03:19:30 rpao
83 * Added, in nmclan_config(), "tuple.Attributes = 0;".
84 * Modified MACE ID check to ignore chip revision level.
85 * Avoid tx_free_frames race condition between _start_xmit and _interrupt.
86 *
87 * Revision 0.13 1995/05/18 05:56:34 rpao
88 * Statistics changes.
89 * Bug fix: nmclan_reset did not enable TX and RX: call restore_multicast_list.
90 * Bug fix: mace_interrupt checks ~MACE_IMR_DEFAULT. Fixes driver lockup.
91 *
92 * Revision 0.12 1995/05/14 00:12:23 rpao
93 * Statistics overhaul.
94 *
95
9695/05/13 rpao V0.10a
97 Bug fix: MACE statistics counters used wrong I/O ports.
98 Bug fix: mace_interrupt() needed to allow statistics to be
99 processed without RX or TX interrupts pending.
10095/05/11 rpao V0.10
101 Multiple transmit request processing.
102 Modified statistics to use MACE counters where possible.
10395/05/10 rpao V0.09 Bug fix: Must use IO_DATA_PATH_WIDTH_AUTO.
104 *Released
10595/05/10 rpao V0.08
106 Bug fix: Make all non-exported functions private by using
107 static keyword.
108 Bug fix: Test IntrCnt _before_ reading MACE_IR.
10995/05/10 rpao V0.07 Statistics.
11095/05/09 rpao V0.06 Fix rx_framecnt problem by addition of PCIC wait states.
111
112---------------------------------------------------------------------------- */
113
114#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
115
116#define DRV_NAME "nmclan_cs"
117#define DRV_VERSION "0.16"
118
119
120/* ----------------------------------------------------------------------------
121Conditional Compilation Options
122---------------------------------------------------------------------------- */
123
124#define MULTI_TX 0
125#define RESET_ON_TIMEOUT 1
126#define TX_INTERRUPTABLE 1
127#define RESET_XILINX 0
128
129/* ----------------------------------------------------------------------------
130Include Files
131---------------------------------------------------------------------------- */
132
133#include <linux/module.h>
134#include <linux/kernel.h>
135#include <linux/init.h>
136#include <linux/ptrace.h>
137#include <linux/slab.h>
138#include <linux/string.h>
139#include <linux/timer.h>
140#include <linux/interrupt.h>
141#include <linux/in.h>
142#include <linux/delay.h>
143#include <linux/ethtool.h>
144#include <linux/netdevice.h>
145#include <linux/etherdevice.h>
146#include <linux/skbuff.h>
147#include <linux/if_arp.h>
148#include <linux/ioport.h>
149#include <linux/bitops.h>
150
151#include <pcmcia/cisreg.h>
152#include <pcmcia/cistpl.h>
153#include <pcmcia/ds.h>
154
155#include <asm/uaccess.h>
156#include <asm/io.h>
157#include <asm/system.h>
158
159/* ----------------------------------------------------------------------------
160Defines
161---------------------------------------------------------------------------- */
162
163#define ETHER_ADDR_LEN ETH_ALEN
164 /* 6 bytes in an Ethernet Address */
165#define MACE_LADRF_LEN 8
166 /* 8 bytes in Logical Address Filter */
167
168/* Loop Control Defines */
169#define MACE_MAX_IR_ITERATIONS 10
170#define MACE_MAX_RX_ITERATIONS 12
171 /*
172 TBD: Dean brought this up, and I assumed the hardware would
173 handle it:
174
175 If MACE_MAX_RX_ITERATIONS is > 1, rx_framecnt may still be
176 non-zero when the isr exits. We may not get another interrupt
177 to process the remaining packets for some time.
178 */
179
180/*
181The Am2150 has a Xilinx XC3042 field programmable gate array (FPGA)
182which manages the interface between the MACE and the PCMCIA bus. It
183also includes buffer management for the 32K x 8 SRAM to control up to
184four transmit and 12 receive frames at a time.
185*/
186#define AM2150_MAX_TX_FRAMES 4
187#define AM2150_MAX_RX_FRAMES 12
188
189/* Am2150 Ethernet Card I/O Mapping */
190#define AM2150_RCV 0x00
191#define AM2150_XMT 0x04
192#define AM2150_XMT_SKIP 0x09
193#define AM2150_RCV_NEXT 0x0A
194#define AM2150_RCV_FRAME_COUNT 0x0B
195#define AM2150_MACE_BANK 0x0C
196#define AM2150_MACE_BASE 0x10
197
198/* MACE Registers */
199#define MACE_RCVFIFO 0
200#define MACE_XMTFIFO 1
201#define MACE_XMTFC 2
202#define MACE_XMTFS 3
203#define MACE_XMTRC 4
204#define MACE_RCVFC 5
205#define MACE_RCVFS 6
206#define MACE_FIFOFC 7
207#define MACE_IR 8
208#define MACE_IMR 9
209#define MACE_PR 10
210#define MACE_BIUCC 11
211#define MACE_FIFOCC 12
212#define MACE_MACCC 13
213#define MACE_PLSCC 14
214#define MACE_PHYCC 15
215#define MACE_CHIPIDL 16
216#define MACE_CHIPIDH 17
217#define MACE_IAC 18
218/* Reserved */
219#define MACE_LADRF 20
220#define MACE_PADR 21
221/* Reserved */
222/* Reserved */
223#define MACE_MPC 24
224/* Reserved */
225#define MACE_RNTPC 26
226#define MACE_RCVCC 27
227/* Reserved */
228#define MACE_UTR 29
229#define MACE_RTR1 30
230#define MACE_RTR2 31
231
232/* MACE Bit Masks */
233#define MACE_XMTRC_EXDEF 0x80
234#define MACE_XMTRC_XMTRC 0x0F
235
236#define MACE_XMTFS_XMTSV 0x80
237#define MACE_XMTFS_UFLO 0x40
238#define MACE_XMTFS_LCOL 0x20
239#define MACE_XMTFS_MORE 0x10
240#define MACE_XMTFS_ONE 0x08
241#define MACE_XMTFS_DEFER 0x04
242#define MACE_XMTFS_LCAR 0x02
243#define MACE_XMTFS_RTRY 0x01
244
245#define MACE_RCVFS_RCVSTS 0xF000
246#define MACE_RCVFS_OFLO 0x8000
247#define MACE_RCVFS_CLSN 0x4000
248#define MACE_RCVFS_FRAM 0x2000
249#define MACE_RCVFS_FCS 0x1000
250
251#define MACE_FIFOFC_RCVFC 0xF0
252#define MACE_FIFOFC_XMTFC 0x0F
253
254#define MACE_IR_JAB 0x80
255#define MACE_IR_BABL 0x40
256#define MACE_IR_CERR 0x20
257#define MACE_IR_RCVCCO 0x10
258#define MACE_IR_RNTPCO 0x08
259#define MACE_IR_MPCO 0x04
260#define MACE_IR_RCVINT 0x02
261#define MACE_IR_XMTINT 0x01
262
263#define MACE_MACCC_PROM 0x80
264#define MACE_MACCC_DXMT2PD 0x40
265#define MACE_MACCC_EMBA 0x20
266#define MACE_MACCC_RESERVED 0x10
267#define MACE_MACCC_DRCVPA 0x08
268#define MACE_MACCC_DRCVBC 0x04
269#define MACE_MACCC_ENXMT 0x02
270#define MACE_MACCC_ENRCV 0x01
271
272#define MACE_PHYCC_LNKFL 0x80
273#define MACE_PHYCC_DLNKTST 0x40
274#define MACE_PHYCC_REVPOL 0x20
275#define MACE_PHYCC_DAPC 0x10
276#define MACE_PHYCC_LRT 0x08
277#define MACE_PHYCC_ASEL 0x04
278#define MACE_PHYCC_RWAKE 0x02
279#define MACE_PHYCC_AWAKE 0x01
280
281#define MACE_IAC_ADDRCHG 0x80
282#define MACE_IAC_PHYADDR 0x04
283#define MACE_IAC_LOGADDR 0x02
284
285#define MACE_UTR_RTRE 0x80
286#define MACE_UTR_RTRD 0x40
287#define MACE_UTR_RPA 0x20
288#define MACE_UTR_FCOLL 0x10
289#define MACE_UTR_RCVFCSE 0x08
290#define MACE_UTR_LOOP_INCL_MENDEC 0x06
291#define MACE_UTR_LOOP_NO_MENDEC 0x04
292#define MACE_UTR_LOOP_EXTERNAL 0x02
293#define MACE_UTR_LOOP_NONE 0x00
294#define MACE_UTR_RESERVED 0x01
295
296/* Switch MACE register bank (only 0 and 1 are valid) */
297#define MACEBANK(win_num) outb((win_num), ioaddr + AM2150_MACE_BANK)
298
299#define MACE_IMR_DEFAULT \
300 (0xFF - \
301 ( \
302 MACE_IR_CERR | \
303 MACE_IR_RCVCCO | \
304 MACE_IR_RNTPCO | \
305 MACE_IR_MPCO | \
306 MACE_IR_RCVINT | \
307 MACE_IR_XMTINT \
308 ) \
309 )
310#undef MACE_IMR_DEFAULT
311#define MACE_IMR_DEFAULT 0x00 /* New statistics handling: grab everything */
312
313#define TX_TIMEOUT ((400*HZ)/1000)
314
315/* ----------------------------------------------------------------------------
316Type Definitions
317---------------------------------------------------------------------------- */
318
319typedef struct _mace_statistics {
320 /* MACE_XMTFS */
321 int xmtsv;
322 int uflo;
323 int lcol;
324 int more;
325 int one;
326 int defer;
327 int lcar;
328 int rtry;
329
330 /* MACE_XMTRC */
331 int exdef;
332 int xmtrc;
333
334 /* RFS1--Receive Status (RCVSTS) */
335 int oflo;
336 int clsn;
337 int fram;
338 int fcs;
339
340 /* RFS2--Runt Packet Count (RNTPC) */
341 int rfs_rntpc;
342
343 /* RFS3--Receive Collision Count (RCVCC) */
344 int rfs_rcvcc;
345
346 /* MACE_IR */
347 int jab;
348 int babl;
349 int cerr;
350 int rcvcco;
351 int rntpco;
352 int mpco;
353
354 /* MACE_MPC */
355 int mpc;
356
357 /* MACE_RNTPC */
358 int rntpc;
359
360 /* MACE_RCVCC */
361 int rcvcc;
362} mace_statistics;
363
364typedef struct _mace_private {
365 struct pcmcia_device *p_dev;
366 struct net_device_stats linux_stats; /* Linux statistics counters */
367 mace_statistics mace_stats; /* MACE chip statistics counters */
368
369 /* restore_multicast_list() state variables */
370 int multicast_ladrf[MACE_LADRF_LEN]; /* Logical address filter */
371 int multicast_num_addrs;
372
373 char tx_free_frames; /* Number of free transmit frame buffers */
374 char tx_irq_disabled; /* MACE TX interrupt disabled */
375
376 spinlock_t bank_lock; /* Must be held if you step off bank 0 */
377} mace_private;
378
379/* ----------------------------------------------------------------------------
380Private Global Variables
381---------------------------------------------------------------------------- */
382
383static const char *if_names[]={
384 "Auto", "10baseT", "BNC",
385};
386
387/* ----------------------------------------------------------------------------
388Parameters
389 These are the parameters that can be set during loading with
390 'insmod'.
391---------------------------------------------------------------------------- */
392
393MODULE_DESCRIPTION("New Media PCMCIA ethernet driver");
394MODULE_LICENSE("GPL");
395
396#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
397
398/* 0=auto, 1=10baseT, 2 = 10base2, default=auto */
399INT_MODULE_PARM(if_port, 0);
400
401
402/* ----------------------------------------------------------------------------
403Function Prototypes
404---------------------------------------------------------------------------- */
405
406static int nmclan_config(struct pcmcia_device *link);
407static void nmclan_release(struct pcmcia_device *link);
408
409static void nmclan_reset(struct net_device *dev);
410static int mace_config(struct net_device *dev, struct ifmap *map);
411static int mace_open(struct net_device *dev);
412static int mace_close(struct net_device *dev);
413static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
414 struct net_device *dev);
415static void mace_tx_timeout(struct net_device *dev);
416static irqreturn_t mace_interrupt(int irq, void *dev_id);
417static struct net_device_stats *mace_get_stats(struct net_device *dev);
418static int mace_rx(struct net_device *dev, unsigned char RxCnt);
419static void restore_multicast_list(struct net_device *dev);
420static void set_multicast_list(struct net_device *dev);
421static const struct ethtool_ops netdev_ethtool_ops;
422
423
424static void nmclan_detach(struct pcmcia_device *p_dev);
425
426static const struct net_device_ops mace_netdev_ops = {
427 .ndo_open = mace_open,
428 .ndo_stop = mace_close,
429 .ndo_start_xmit = mace_start_xmit,
430 .ndo_tx_timeout = mace_tx_timeout,
431 .ndo_set_config = mace_config,
432 .ndo_get_stats = mace_get_stats,
433 .ndo_set_rx_mode = set_multicast_list,
434 .ndo_change_mtu = eth_change_mtu,
435 .ndo_set_mac_address = eth_mac_addr,
436 .ndo_validate_addr = eth_validate_addr,
437};
438
439static int nmclan_probe(struct pcmcia_device *link)
440{
441 mace_private *lp;
442 struct net_device *dev;
443
444 dev_dbg(&link->dev, "nmclan_attach()\n");
445
446 /* Create new ethernet device */
447 dev = alloc_etherdev(sizeof(mace_private));
448 if (!dev)
449 return -ENOMEM;
450 lp = netdev_priv(dev);
451 lp->p_dev = link;
452 link->priv = dev;
453
454 spin_lock_init(&lp->bank_lock);
455 link->resource[0]->end = 32;
456 link->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
457 link->config_flags |= CONF_ENABLE_IRQ;
458 link->config_index = 1;
459 link->config_regs = PRESENT_OPTION;
460
461  lp->tx_free_frames = AM2150_MAX_TX_FRAMES;
462
463 dev->netdev_ops = &mace_netdev_ops;
464 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
465 dev->watchdog_timeo = TX_TIMEOUT;
466
467 return nmclan_config(link);
468} /* nmclan_probe */
469
470static void nmclan_detach(struct pcmcia_device *link)
471{
472 struct net_device *dev = link->priv;
473
474 dev_dbg(&link->dev, "nmclan_detach\n");
475
476 unregister_netdev(dev);
477
478 nmclan_release(link);
479
480 free_netdev(dev);
481} /* nmclan_detach */
482
483/* ----------------------------------------------------------------------------
484mace_read
485 Reads a MACE register. This is bank independent; however, the
486 caller must ensure that this call is not interrupted. We are
487 assuming that during normal operation, the MACE is always in
488 bank 0.
489---------------------------------------------------------------------------- */
490static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
491{
492 int data = 0xFF;
493 unsigned long flags;
494
495 switch (reg >> 4) {
496 case 0: /* register 0-15 */
497 data = inb(ioaddr + AM2150_MACE_BASE + reg);
498 break;
499 case 1: /* register 16-31 */
500 spin_lock_irqsave(&lp->bank_lock, flags);
501 MACEBANK(1);
502 data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
503 MACEBANK(0);
504 spin_unlock_irqrestore(&lp->bank_lock, flags);
505 break;
506 }
507 return data & 0xFF;
508} /* mace_read */
509
510/* ----------------------------------------------------------------------------
511mace_write
512 Writes to a MACE register. This is bank independent; however,
513 the caller must ensure that this call is not interrupted. We
514 are assuming that during normal operation, the MACE is always in
515 bank 0.
516---------------------------------------------------------------------------- */
517static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
518 int data)
519{
520 unsigned long flags;
521
522 switch (reg >> 4) {
523 case 0: /* register 0-15 */
524 outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
525 break;
526 case 1: /* register 16-31 */
527 spin_lock_irqsave(&lp->bank_lock, flags);
528 MACEBANK(1);
529 outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
530 MACEBANK(0);
531 spin_unlock_irqrestore(&lp->bank_lock, flags);
532 break;
533 }
534} /* mace_write */
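Both helpers leave the chip in bank 0 on return, which is why fast paths
elsewhere in the driver (such as the raw outb() of MACE_IMR in
mace_close()) can skip the banking dance entirely.  Callers just name
the register; for upper-bank registers the helpers switch banks under
lp->bank_lock and switch back, as in this illustrative fragment:

	/* Illustrative fragment: both registers happen to sit in the
	   upper bank (16-31), but the caller does not need to know. */
	int id_low = mace_read(lp, ioaddr, MACE_CHIPIDL);
	mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);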
535
536/* ----------------------------------------------------------------------------
537mace_init
538 Resets the MACE chip.
539---------------------------------------------------------------------------- */
540static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
541{
542 int i;
543 int ct = 0;
544
545 /* MACE Software reset */
546 mace_write(lp, ioaddr, MACE_BIUCC, 1);
547 while (mace_read(lp, ioaddr, MACE_BIUCC) & 0x01) {
548    /* Wait for reset bit to be cleared automatically after <= 200ns */
549    if (++ct > 500)
550 {
551 pr_err("reset failed, card removed?\n");
552 return -1;
553 }
554 udelay(1);
555 }
556 mace_write(lp, ioaddr, MACE_BIUCC, 0);
557
558 /* The Am2150 requires that the MACE FIFOs operate in burst mode. */
559 mace_write(lp, ioaddr, MACE_FIFOCC, 0x0F);
560
561  mace_write(lp, ioaddr, MACE_RCVFC, 0); /* Disable Auto Strip Receive */
562 mace_write(lp, ioaddr, MACE_IMR, 0xFF); /* Disable all interrupts until _open */
563
564 /*
565 * Bit 2-1 PORTSEL[1-0] Port Select.
566 * 00 AUI/10Base-2
567 * 01 10Base-T
568 * 10 DAI Port (reserved in Am2150)
569 * 11 GPSI
570 * For this card, only the first two are valid.
571 * So, PLSCC should be set to
572 * 0x00 for 10Base-2
573 * 0x02 for 10Base-T
574 * Or just set ASEL in PHYCC below!
575 */
576 switch (if_port) {
577 case 1:
578 mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
579 break;
580 case 2:
581 mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
582 break;
583 default:
584 mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
585 /* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
586 and the MACE device will automatically select the operating media
587 interface port. */
588 break;
589 }
590
591 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_PHYADDR);
592 /* Poll ADDRCHG bit */
593 ct = 0;
594 while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
595 {
596      if (++ct > 500)
597 {
598 pr_err("ADDRCHG timeout, card removed?\n");
599 return -1;
600 }
601 }
602 /* Set PADR register */
603 for (i = 0; i < ETHER_ADDR_LEN; i++)
604 mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
605
606 /* MAC Configuration Control Register should be written last */
607 /* Let set_multicast_list set this. */
608 /* mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV); */
609 mace_write(lp, ioaddr, MACE_MACCC, 0x00);
610 return 0;
611} /* mace_init */
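The two bounded polls in mace_init() follow the same wait-until-clear
pattern.  A shared helper -- a sketch only, not part of the driver --
could factor it out:

	/* Sketch: spin until (reg & mask) clears, or give up after
	   roughly 500 iterations, mirroring the polls above. */
	static int mace_poll_clear(mace_private *lp, unsigned int ioaddr,
				   int reg, int mask)
	{
		int ct;

		for (ct = 0; ct < 500; ct++) {
			if (!(mace_read(lp, ioaddr, reg) & mask))
				return 0;
			udelay(1);
		}
		return -1;	/* timed out -- card removed? */
	}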
612
613static int nmclan_config(struct pcmcia_device *link)
614{
615 struct net_device *dev = link->priv;
616 mace_private *lp = netdev_priv(dev);
617 u8 *buf;
618 size_t len;
619 int i, ret;
620 unsigned int ioaddr;
621
622 dev_dbg(&link->dev, "nmclan_config\n");
623
624 link->io_lines = 5;
625 ret = pcmcia_request_io(link);
626 if (ret)
627 goto failed;
628 ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
629 if (ret)
630 goto failed;
631 ret = pcmcia_enable_device(link);
632 if (ret)
633 goto failed;
634
635 dev->irq = link->irq;
636 dev->base_addr = link->resource[0]->start;
637
638 ioaddr = dev->base_addr;
639
640 /* Read the ethernet address from the CIS. */
641 len = pcmcia_get_tuple(link, 0x80, &buf);
642 if (!buf || len < ETHER_ADDR_LEN) {
643 kfree(buf);
644 goto failed;
645 }
646 memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
647 kfree(buf);
648
649 /* Verify configuration by reading the MACE ID. */
650 {
651 char sig[2];
652
653 sig[0] = mace_read(lp, ioaddr, MACE_CHIPIDL);
654 sig[1] = mace_read(lp, ioaddr, MACE_CHIPIDH);
655 if ((sig[0] == 0x40) && ((sig[1] & 0x0F) == 0x09)) {
656 dev_dbg(&link->dev, "nmclan_cs configured: mace id=%x %x\n",
657 sig[0], sig[1]);
658 } else {
659 pr_notice("mace id not found: %x %x should be 0x40 0x?9\n",
660 sig[0], sig[1]);
661 return -ENODEV;
662 }
663 }
664
665  if (mace_init(lp, ioaddr, dev->dev_addr) == -1)
666 goto failed;
667
668 /* The if_port symbol can be set when the module is loaded */
669 if (if_port <= 2)
670 dev->if_port = if_port;
671 else
672 pr_notice("invalid if_port requested\n");
673
674 SET_NETDEV_DEV(dev, &link->dev);
675
676 i = register_netdev(dev);
677 if (i != 0) {
678 pr_notice("register_netdev() failed\n");
679 goto failed;
680 }
681
682 netdev_info(dev, "nmclan: port %#3lx, irq %d, %s port, hw_addr %pM\n",
683 dev->base_addr, dev->irq, if_names[dev->if_port], dev->dev_addr);
684 return 0;
685
686failed:
687 nmclan_release(link);
688 return -ENODEV;
689} /* nmclan_config */
690
691static void nmclan_release(struct pcmcia_device *link)
692{
693 dev_dbg(&link->dev, "nmclan_release\n");
694 pcmcia_disable_device(link);
695}
696
697static int nmclan_suspend(struct pcmcia_device *link)
698{
699 struct net_device *dev = link->priv;
700
701 if (link->open)
702 netif_device_detach(dev);
703
704 return 0;
705}
706
707static int nmclan_resume(struct pcmcia_device *link)
708{
709 struct net_device *dev = link->priv;
710
711 if (link->open) {
712 nmclan_reset(dev);
713 netif_device_attach(dev);
714 }
715
716 return 0;
717}
718
719
720/* ----------------------------------------------------------------------------
721nmclan_reset
722 Reset and restore all of the Xilinx and MACE registers.
723---------------------------------------------------------------------------- */
724static void nmclan_reset(struct net_device *dev)
725{
726 mace_private *lp = netdev_priv(dev);
727
728#if RESET_XILINX
729  struct pcmcia_device *link = lp->p_dev;
730 u8 OrigCorValue;
731
732 /* Save original COR value */
733 pcmcia_read_config_byte(link, CISREG_COR, &OrigCorValue);
734
735 /* Reset Xilinx */
736 dev_dbg(&link->dev, "nmclan_reset: OrigCorValue=0x%x, resetting...\n",
737 OrigCorValue);
738 pcmcia_write_config_byte(link, CISREG_COR, COR_SOFT_RESET);
739 /* Need to wait for 20 ms for PCMCIA to finish reset. */
740
741 /* Restore original COR configuration index */
742 pcmcia_write_config_byte(link, CISREG_COR,
743 (COR_LEVEL_REQ | (OrigCorValue & COR_CONFIG_MASK)));
746
747#endif /* #if RESET_XILINX */
748
749 /* Xilinx is now completely reset along with the MACE chip. */
750  lp->tx_free_frames = AM2150_MAX_TX_FRAMES;
751
752 /* Reinitialize the MACE chip for operation. */
753 mace_init(lp, dev->base_addr, dev->dev_addr);
754 mace_write(lp, dev->base_addr, MACE_IMR, MACE_IMR_DEFAULT);
755
756 /* Restore the multicast list and enable TX and RX. */
757 restore_multicast_list(dev);
758} /* nmclan_reset */
759
760/* ----------------------------------------------------------------------------
761mace_config
762 [Someone tell me what this is supposed to do? Is if_port a defined
763 standard? If so, there should be defines to indicate 1=10Base-T,
764 2=10Base-2, etc. including limited automatic detection.]
765---------------------------------------------------------------------------- */
766static int mace_config(struct net_device *dev, struct ifmap *map)
767{
768 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
769 if (map->port <= 2) {
770 dev->if_port = map->port;
771 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
772 } else
773 return -EINVAL;
774 }
775 return 0;
776} /* mace_config */
777
778/* ----------------------------------------------------------------------------
779mace_open
780 Open device driver.
781---------------------------------------------------------------------------- */
782static int mace_open(struct net_device *dev)
783{
784 unsigned int ioaddr = dev->base_addr;
785 mace_private *lp = netdev_priv(dev);
786 struct pcmcia_device *link = lp->p_dev;
787
788 if (!pcmcia_dev_present(link))
789 return -ENODEV;
790
791 link->open++;
792
793 MACEBANK(0);
794
795 netif_start_queue(dev);
796 nmclan_reset(dev);
797
798 return 0; /* Always succeed */
799} /* mace_open */
800
801/* ----------------------------------------------------------------------------
802mace_close
803 Closes device driver.
804---------------------------------------------------------------------------- */
805static int mace_close(struct net_device *dev)
806{
807 unsigned int ioaddr = dev->base_addr;
808 mace_private *lp = netdev_priv(dev);
809 struct pcmcia_device *link = lp->p_dev;
810
811 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
812
813 /* Mask off all interrupts from the MACE chip. */
814 outb(0xFF, ioaddr + AM2150_MACE_BASE + MACE_IMR);
815
816 link->open--;
817 netif_stop_queue(dev);
818
819 return 0;
820} /* mace_close */
821
822static void netdev_get_drvinfo(struct net_device *dev,
823 struct ethtool_drvinfo *info)
824{
825 strcpy(info->driver, DRV_NAME);
826 strcpy(info->version, DRV_VERSION);
827 sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
828}
829
830static const struct ethtool_ops netdev_ethtool_ops = {
831 .get_drvinfo = netdev_get_drvinfo,
832};
833
834/* ----------------------------------------------------------------------------
835mace_start_xmit
836 This routine begins the packet transmit function. When completed,
837 it will generate a transmit interrupt.
838
839 In current kernels the return type is netdev_tx_t: NETDEV_TX_OK
840 means the packet is now solely the responsibility of the driver,
841 which must eventually free it; NETDEV_TX_BUSY asks the core to
842 requeue the skb and retry the transmission later.
843---------------------------------------------------------------------------- */
844
845static void mace_tx_timeout(struct net_device *dev)
846{
847 mace_private *lp = netdev_priv(dev);
848 struct pcmcia_device *link = lp->p_dev;
849
850 netdev_notice(dev, "transmit timed out -- ");
851#if RESET_ON_TIMEOUT
852 pr_cont("resetting card\n");
853 pcmcia_reset_card(link->socket);
854#else /* #if RESET_ON_TIMEOUT */
855 pr_cont("NOT resetting card\n");
856#endif /* #if RESET_ON_TIMEOUT */
857 dev->trans_start = jiffies; /* prevent tx timeout */
858 netif_wake_queue(dev);
859}
860
861static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
862 struct net_device *dev)
863{
864 mace_private *lp = netdev_priv(dev);
865 unsigned int ioaddr = dev->base_addr;
866
867 netif_stop_queue(dev);
868
869 pr_debug("%s: mace_start_xmit(length = %ld) called.\n",
870 dev->name, (long)skb->len);
871
872#if (!TX_INTERRUPTABLE)
873 /* Disable MACE TX interrupts. */
874 outb(MACE_IMR_DEFAULT | MACE_IR_XMTINT,
875 ioaddr + AM2150_MACE_BASE + MACE_IMR);
876  lp->tx_irq_disabled = 1;
877#endif /* #if (!TX_INTERRUPTABLE) */
878
879 {
880 /* This block must not be interrupted by another transmit request!
881 mace_tx_timeout will take care of timer-based retransmissions from
882 the upper layers. The interrupt handler is guaranteed never to
883 service a transmit interrupt while we are in here.
884 */
885
886 lp->linux_stats.tx_bytes += skb->len;
887 lp->tx_free_frames--;
888
889    /* WARNING: write the _exact_ number of bytes announced in the header! */
890 /* Put out the word header [must be an outw()] . . . */
891 outw(skb->len, ioaddr + AM2150_XMT);
892 /* . . . and the packet [may be any combination of outw() and outb()] */
893 outsw(ioaddr + AM2150_XMT, skb->data, skb->len >> 1);
894 if (skb->len & 1) {
895 /* Odd byte transfer */
896 outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
897 }
898
899#if MULTI_TX
900 if (lp->tx_free_frames > 0)
901 netif_start_queue(dev);
902#endif /* #if MULTI_TX */
903 }
904
905#if (!TX_INTERRUPTABLE)
906 /* Re-enable MACE TX interrupts. */
907  lp->tx_irq_disabled = 0;
908 outb(MACE_IMR_DEFAULT, ioaddr + AM2150_MACE_BASE + MACE_IMR);
909#endif /* #if (!TX_INTERRUPTABLE) */
910
911 dev_kfree_skb(skb);
912
913 return NETDEV_TX_OK;
914} /* mace_start_xmit */
915
916/* ----------------------------------------------------------------------------
917mace_interrupt
918 The interrupt handler.
919---------------------------------------------------------------------------- */
920static irqreturn_t mace_interrupt(int irq, void *dev_id)
921{
922 struct net_device *dev = (struct net_device *) dev_id;
923 mace_private *lp = netdev_priv(dev);
924 unsigned int ioaddr;
925 int status;
926 int IntrCnt = MACE_MAX_IR_ITERATIONS;
927
928 if (dev == NULL) {
929 pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
930 irq);
931 return IRQ_NONE;
932 }
933
934 ioaddr = dev->base_addr;
935
936  if (lp->tx_irq_disabled) {
937    netdev_notice(dev, "Interrupt with tx_irq_disabled [isr=%02X, imr=%02X]\n",
938                  inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
939                  inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
946 /* WARNING: MACE_IR has been read! */
947 return IRQ_NONE;
948 }
949
950 if (!netif_device_present(dev)) {
951 netdev_dbg(dev, "interrupt from dead card\n");
952 return IRQ_NONE;
953 }
954
955 do {
956 /* WARNING: MACE_IR is a READ/CLEAR port! */
957 status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);
958
959 pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);
960
961 if (status & MACE_IR_RCVINT) {
962 mace_rx(dev, MACE_MAX_RX_ITERATIONS);
963 }
964
965 if (status & MACE_IR_XMTINT) {
966 unsigned char fifofc;
967 unsigned char xmtrc;
968 unsigned char xmtfs;
969
970 fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
971      if ((fifofc & MACE_FIFOFC_XMTFC) == 0) {
972 lp->linux_stats.tx_errors++;
973 outb(0xFF, ioaddr + AM2150_XMT_SKIP);
974 }
975
976 /* Transmit Retry Count (XMTRC, reg 4) */
977 xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
978 if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
979 lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);
980
981      xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS);
982      if (xmtfs & MACE_XMTFS_XMTSV) { /* Transmit Status Valid */
985 lp->mace_stats.xmtsv++;
986
987 if (xmtfs & ~MACE_XMTFS_XMTSV) {
988 if (xmtfs & MACE_XMTFS_UFLO) {
989 /* Underflow. Indicates that the Transmit FIFO emptied before
990 the end of frame was reached. */
991 lp->mace_stats.uflo++;
992 }
993 if (xmtfs & MACE_XMTFS_LCOL) {
994 /* Late Collision */
995 lp->mace_stats.lcol++;
996 }
997 if (xmtfs & MACE_XMTFS_MORE) {
998 /* MORE than one retry was needed */
999 lp->mace_stats.more++;
1000 }
1001 if (xmtfs & MACE_XMTFS_ONE) {
1002 /* Exactly ONE retry occurred */
1003 lp->mace_stats.one++;
1004 }
1005 if (xmtfs & MACE_XMTFS_DEFER) {
1006            /* Transmission was deferred */
1007 lp->mace_stats.defer++;
1008 }
1009 if (xmtfs & MACE_XMTFS_LCAR) {
1010 /* Loss of carrier */
1011 lp->mace_stats.lcar++;
1012 }
1013 if (xmtfs & MACE_XMTFS_RTRY) {
1014 /* Retry error: transmit aborted after 16 attempts */
1015 lp->mace_stats.rtry++;
1016 }
1017 } /* if (xmtfs & ~MACE_XMTFS_XMTSV) */
1018
1019 } /* if (xmtfs & MACE_XMTFS_XMTSV) */
1020
1021 lp->linux_stats.tx_packets++;
1022 lp->tx_free_frames++;
1023 netif_wake_queue(dev);
1024 } /* if (status & MACE_IR_XMTINT) */
1025
1026 if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
1027 if (status & MACE_IR_JAB) {
1028 /* Jabber Error. Excessive transmit duration (20-150ms). */
1029 lp->mace_stats.jab++;
1030 }
1031 if (status & MACE_IR_BABL) {
1032 /* Babble Error. >1518 bytes transmitted. */
1033 lp->mace_stats.babl++;
1034 }
1035 if (status & MACE_IR_CERR) {
1036 /* Collision Error. CERR indicates the absence of the
1037 Signal Quality Error Test message after a packet
1038 transmission. */
1039 lp->mace_stats.cerr++;
1040 }
1041 if (status & MACE_IR_RCVCCO) {
1042 /* Receive Collision Count Overflow; */
1043 lp->mace_stats.rcvcco++;
1044 }
1045 if (status & MACE_IR_RNTPCO) {
1046 /* Runt Packet Count Overflow */
1047 lp->mace_stats.rntpco++;
1048 }
1049 if (status & MACE_IR_MPCO) {
1050 /* Missed Packet Count Overflow */
1051 lp->mace_stats.mpco++;
1052 }
1053 } /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */
1054
1055 } while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));
1056
1057 return IRQ_HANDLED;
1058} /* mace_interrupt */
1059
1060/* ----------------------------------------------------------------------------
1061mace_rx
1062 Receives packets.
1063---------------------------------------------------------------------------- */
1064static int mace_rx(struct net_device *dev, unsigned char RxCnt)
1065{
1066 mace_private *lp = netdev_priv(dev);
1067 unsigned int ioaddr = dev->base_addr;
1068 unsigned char rx_framecnt;
1069 unsigned short rx_status;
1070
1071 while (
1072 ((rx_framecnt = inb(ioaddr + AM2150_RCV_FRAME_COUNT)) > 0) &&
1073 (rx_framecnt <= 12) && /* rx_framecnt==0xFF if card is extracted. */
1074 (RxCnt--)
1075 ) {
1076 rx_status = inw(ioaddr + AM2150_RCV);
1077
1078    pr_debug("%s: in mace_rx(), framecnt 0x%X, rx_status 0x%X.\n",
1079             dev->name, rx_framecnt, rx_status);
1080
1081 if (rx_status & MACE_RCVFS_RCVSTS) { /* Error, update stats. */
1082 lp->linux_stats.rx_errors++;
1083 if (rx_status & MACE_RCVFS_OFLO) {
1084 lp->mace_stats.oflo++;
1085 }
1086 if (rx_status & MACE_RCVFS_CLSN) {
1087 lp->mace_stats.clsn++;
1088 }
1089 if (rx_status & MACE_RCVFS_FRAM) {
1090 lp->mace_stats.fram++;
1091 }
1092 if (rx_status & MACE_RCVFS_FCS) {
1093 lp->mace_stats.fcs++;
1094 }
1095 } else {
1096 short pkt_len = (rx_status & ~MACE_RCVFS_RCVSTS) - 4;
1097 /* Auto Strip is off, always subtract 4 */
1098 struct sk_buff *skb;
1099
1100 lp->mace_stats.rfs_rntpc += inb(ioaddr + AM2150_RCV);
1101 /* runt packet count */
1102 lp->mace_stats.rfs_rcvcc += inb(ioaddr + AM2150_RCV);
1103 /* rcv collision count */
1104
1105      pr_debug("    receiving packet size 0x%X rx_status 0x%X.\n",
1106               pkt_len, rx_status);
1107
1108 skb = dev_alloc_skb(pkt_len+2);
1109
1110 if (skb != NULL) {
1111 skb_reserve(skb, 2);
1112 insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
1113 if (pkt_len & 1)
1114 *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV);
1115 skb->protocol = eth_type_trans(skb, dev);
1116
1117 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
1118
1119 lp->linux_stats.rx_packets++;
1120 lp->linux_stats.rx_bytes += pkt_len;
1121 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1122 continue;
1123 } else {
1124        pr_debug("%s: couldn't allocate a sk_buff of size %d.\n",
1125                 dev->name, pkt_len);
1126 lp->linux_stats.rx_dropped++;
1127 }
1128 }
1129 outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
1130 } /* while */
1131
1132 return 0;
1133} /* mace_rx */
1134
1135/* ----------------------------------------------------------------------------
1136pr_linux_stats
1137---------------------------------------------------------------------------- */
1138static void pr_linux_stats(struct net_device_stats *pstats)
1139{
1140 pr_debug("pr_linux_stats\n");
1141 pr_debug(" rx_packets=%-7ld tx_packets=%ld\n",
1142 (long)pstats->rx_packets, (long)pstats->tx_packets);
1143 pr_debug(" rx_errors=%-7ld tx_errors=%ld\n",
1144 (long)pstats->rx_errors, (long)pstats->tx_errors);
1145 pr_debug(" rx_dropped=%-7ld tx_dropped=%ld\n",
1146 (long)pstats->rx_dropped, (long)pstats->tx_dropped);
1147 pr_debug(" multicast=%-7ld collisions=%ld\n",
1148 (long)pstats->multicast, (long)pstats->collisions);
1149
1150 pr_debug(" rx_length_errors=%-7ld rx_over_errors=%ld\n",
1151 (long)pstats->rx_length_errors, (long)pstats->rx_over_errors);
1152 pr_debug(" rx_crc_errors=%-7ld rx_frame_errors=%ld\n",
1153 (long)pstats->rx_crc_errors, (long)pstats->rx_frame_errors);
1154 pr_debug(" rx_fifo_errors=%-7ld rx_missed_errors=%ld\n",
1155 (long)pstats->rx_fifo_errors, (long)pstats->rx_missed_errors);
1156
1157 pr_debug(" tx_aborted_errors=%-7ld tx_carrier_errors=%ld\n",
1158 (long)pstats->tx_aborted_errors, (long)pstats->tx_carrier_errors);
1159 pr_debug(" tx_fifo_errors=%-7ld tx_heartbeat_errors=%ld\n",
1160 (long)pstats->tx_fifo_errors, (long)pstats->tx_heartbeat_errors);
1161 pr_debug(" tx_window_errors=%ld\n",
1162 (long)pstats->tx_window_errors);
1163} /* pr_linux_stats */
1164
1165/* ----------------------------------------------------------------------------
1166pr_mace_stats
1167---------------------------------------------------------------------------- */
1168static void pr_mace_stats(mace_statistics *pstats)
1169{
1170 pr_debug("pr_mace_stats\n");
1171
1172 pr_debug(" xmtsv=%-7d uflo=%d\n",
1173 pstats->xmtsv, pstats->uflo);
1174 pr_debug(" lcol=%-7d more=%d\n",
1175 pstats->lcol, pstats->more);
1176 pr_debug(" one=%-7d defer=%d\n",
1177 pstats->one, pstats->defer);
1178 pr_debug(" lcar=%-7d rtry=%d\n",
1179 pstats->lcar, pstats->rtry);
1180
1181 /* MACE_XMTRC */
1182 pr_debug(" exdef=%-7d xmtrc=%d\n",
1183 pstats->exdef, pstats->xmtrc);
1184
1185 /* RFS1--Receive Status (RCVSTS) */
1186 pr_debug(" oflo=%-7d clsn=%d\n",
1187 pstats->oflo, pstats->clsn);
1188 pr_debug(" fram=%-7d fcs=%d\n",
1189 pstats->fram, pstats->fcs);
1190
1191 /* RFS2--Runt Packet Count (RNTPC) */
1192 /* RFS3--Receive Collision Count (RCVCC) */
1193 pr_debug(" rfs_rntpc=%-7d rfs_rcvcc=%d\n",
1194 pstats->rfs_rntpc, pstats->rfs_rcvcc);
1195
1196 /* MACE_IR */
1197 pr_debug(" jab=%-7d babl=%d\n",
1198 pstats->jab, pstats->babl);
1199 pr_debug(" cerr=%-7d rcvcco=%d\n",
1200 pstats->cerr, pstats->rcvcco);
1201 pr_debug(" rntpco=%-7d mpco=%d\n",
1202 pstats->rntpco, pstats->mpco);
1203
1204 /* MACE_MPC */
1205 pr_debug(" mpc=%d\n", pstats->mpc);
1206
1207 /* MACE_RNTPC */
1208 pr_debug(" rntpc=%d\n", pstats->rntpc);
1209
1210 /* MACE_RCVCC */
1211 pr_debug(" rcvcc=%d\n", pstats->rcvcc);
1212
1213} /* pr_mace_stats */
1214
1215/* ----------------------------------------------------------------------------
1216update_stats
1217 Update statistics.  We switch to register bank 1, so this
1218 should be run single-threaded if the device is active. This is
1219 expected to be a rare operation, and it's simpler for the rest
1220 of the driver to assume that bank 0 is always valid rather
1221 than use a special bank-state variable.
1222
1223 oflo & uflo should _never_ occur since it would mean the Xilinx
1224 was not able to transfer data between the MACE FIFO and the
1225 card's SRAM fast enough. If this happens, something is
1226 seriously wrong with the hardware.
1227---------------------------------------------------------------------------- */
1228static void update_stats(unsigned int ioaddr, struct net_device *dev)
1229{
1230 mace_private *lp = netdev_priv(dev);
1231
1232 lp->mace_stats.rcvcc += mace_read(lp, ioaddr, MACE_RCVCC);
1233 lp->mace_stats.rntpc += mace_read(lp, ioaddr, MACE_RNTPC);
1234 lp->mace_stats.mpc += mace_read(lp, ioaddr, MACE_MPC);
1235 /* At this point, mace_stats is fully updated for this call.
1236 We may now update the linux_stats. */
1237
1238  /* The MACE has no equivalents for the linux_stats fields that are
1239     commented out. */
1240
1241 /* lp->linux_stats.multicast; */
1242 lp->linux_stats.collisions =
1243 lp->mace_stats.rcvcco * 256 + lp->mace_stats.rcvcc;
1244 /* Collision: The MACE may retry sending a packet 15 times
1245 before giving up. The retry count is in XMTRC.
1246 Does each retry constitute a collision?
1247 If so, why doesn't the RCVCC record these collisions? */
1248
1249 /* detailed rx_errors: */
1250 lp->linux_stats.rx_length_errors =
1251 lp->mace_stats.rntpco * 256 + lp->mace_stats.rntpc;
1252 /* lp->linux_stats.rx_over_errors */
1253 lp->linux_stats.rx_crc_errors = lp->mace_stats.fcs;
1254 lp->linux_stats.rx_frame_errors = lp->mace_stats.fram;
1255 lp->linux_stats.rx_fifo_errors = lp->mace_stats.oflo;
1256 lp->linux_stats.rx_missed_errors =
1257 lp->mace_stats.mpco * 256 + lp->mace_stats.mpc;
1258
1259 /* detailed tx_errors */
1260 lp->linux_stats.tx_aborted_errors = lp->mace_stats.rtry;
1261 lp->linux_stats.tx_carrier_errors = lp->mace_stats.lcar;
1262 /* LCAR usually results from bad cabling. */
1263 lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
1264 lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
1265 /* lp->linux_stats.tx_window_errors; */
1266} /* update_stats */
1267
1268/* ----------------------------------------------------------------------------
1269mace_get_stats
1270 Gathers ethernet statistics from the MACE chip.
1271---------------------------------------------------------------------------- */
1272static struct net_device_stats *mace_get_stats(struct net_device *dev)
1273{
1274 mace_private *lp = netdev_priv(dev);
1275
1276 update_stats(dev->base_addr, dev);
1277
1278 pr_debug("%s: updating the statistics.\n", dev->name);
1279 pr_linux_stats(&lp->linux_stats);
1280 pr_mace_stats(&lp->mace_stats);
1281
1282 return &lp->linux_stats;
1283} /* mace_get_stats */
1284
1285/* ----------------------------------------------------------------------------
1286updateCRC
1287 Modified from Am79C90 data sheet.
1288---------------------------------------------------------------------------- */
1289
1290#ifdef BROKEN_MULTICAST
1291
1292static void updateCRC(int *CRC, int bit)
1293{
1294  static const int poly[] = {
1295 1,1,1,0, 1,1,0,1,
1296 1,0,1,1, 1,0,0,0,
1297 1,0,0,0, 0,0,1,1,
1298 0,0,1,0, 0,0,0,0
1299 }; /* CRC polynomial. poly[n] = coefficient of the x**n term of the
1300 CRC generator polynomial. */
1301
1302 int j;
1303
1304 /* shift CRC and control bit (CRC[32]) */
1305 for (j = 32; j > 0; j--)
1306 CRC[j] = CRC[j-1];
1307 CRC[0] = 0;
1308
1309 /* If bit XOR(control bit) = 1, set CRC = CRC XOR polynomial. */
1310 if (bit ^ CRC[32])
1311 for (j = 0; j < 32; j++)
1312 CRC[j] ^= poly[j];
1313} /* updateCRC */
1314
1315/* ----------------------------------------------------------------------------
1316BuildLAF
1317 Build logical address filter.
1318 Modified from Am79C90 data sheet.
1319
1320Input
1321 ladrf: logical address filter (contents initialized to 0)
1322 adr: ethernet address
1323---------------------------------------------------------------------------- */
1324static void BuildLAF(int *ladrf, int *adr)
1325{
1326  int CRC[33] = { 1 }; /* CRC register, 1 word/bit + extra control bit */
1327
1328 int i, byte; /* temporary array indices */
1329 int hashcode; /* the output object */
1330
1331  CRC[32] = 0;
1332
1333 for (byte = 0; byte < 6; byte++)
1334 for (i = 0; i < 8; i++)
1335 updateCRC(CRC, (adr[byte] >> i) & 1);
1336
1337 hashcode = 0;
1338 for (i = 0; i < 6; i++)
1339 hashcode = (hashcode << 1) + CRC[i];
1340
1341 byte = hashcode >> 3;
1342 ladrf[byte] |= (1 << (hashcode & 7));
1343
1344#ifdef PCMCIA_DEBUG
1347 printk(KERN_DEBUG " hashcode = %d(decimal), ladrf[0:63] =", hashcode);
1348 for (i = 0; i < 8; i++)
1349 pr_cont(" %02X", ladrf[i]);
1350 pr_cont("\n");
1351#endif
1352} /* BuildLAF */
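The hash can be exercised outside the kernel as well; this standalone
program mirrors updateCRC()/BuildLAF() above and prints the resulting
filter for one example multicast address (01:00:5e:00:00:01, the IPv4
all-hosts group, chosen arbitrarily for illustration):

	#include <stdio.h>

	static void update_crc(int *crc, int bit)
	{
		static const int poly[32] = {
			1,1,1,0, 1,1,0,1, 1,0,1,1, 1,0,0,0,
			1,0,0,0, 0,0,1,1, 0,0,1,0, 0,0,0,0
		};
		int j;

		for (j = 32; j > 0; j--)	/* shift CRC and control bit */
			crc[j] = crc[j - 1];
		crc[0] = 0;
		if (bit ^ crc[32])	/* conditionally XOR in the polynomial */
			for (j = 0; j < 32; j++)
				crc[j] ^= poly[j];
	}

	int main(void)
	{
		const unsigned char adr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		unsigned char ladrf[8] = { 0 };
		int crc[33] = { 1 };	/* CRC register plus control bit */
		int i, byte, hashcode = 0;

		for (byte = 0; byte < 6; byte++)
			for (i = 0; i < 8; i++)	/* feed address bits LSB first */
				update_crc(crc, (adr[byte] >> i) & 1);

		for (i = 0; i < 6; i++)	/* six CRC bits select the filter bit */
			hashcode = (hashcode << 1) + crc[i];

		ladrf[hashcode >> 3] |= 1 << (hashcode & 7);

		for (i = 0; i < 8; i++)
			printf("%02X ", ladrf[i]);
		printf(" (bit %d set)\n", hashcode);
		return 0;
	}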
1353
1354/* ----------------------------------------------------------------------------
1355restore_multicast_list
1356 Restores the multicast filter for MACE chip to the last
1357 set_multicast_list() call.
1358
1359Input
1360 multicast_num_addrs
1361 multicast_ladrf[]
1362---------------------------------------------------------------------------- */
1363static void restore_multicast_list(struct net_device *dev)
1364{
1365 mace_private *lp = netdev_priv(dev);
1366 int num_addrs = lp->multicast_num_addrs;
1367 int *ladrf = lp->multicast_ladrf;
1368 unsigned int ioaddr = dev->base_addr;
1369 int i;
1370
1371 pr_debug("%s: restoring Rx mode to %d addresses.\n",
1372 dev->name, num_addrs);
1373
1374 if (num_addrs > 0) {
1375
1376 pr_debug("Attempt to restore multicast list detected.\n");
1377
1378 mace_write(lp, ioaddr, MACE_IAC, MACE_IAC_ADDRCHG | MACE_IAC_LOGADDR);
1379 /* Poll ADDRCHG bit */
1380 while (mace_read(lp, ioaddr, MACE_IAC) & MACE_IAC_ADDRCHG)
1381 ;
1382 /* Set LADRF register */
1383 for (i = 0; i < MACE_LADRF_LEN; i++)
1384 mace_write(lp, ioaddr, MACE_LADRF, ladrf[i]);
1385
1386 mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_RCVFCSE | MACE_UTR_LOOP_EXTERNAL);
1387 mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
1388
1389 } else if (num_addrs < 0) {
1390
1391 /* Promiscuous mode: receive all packets */
1392 mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
1393 mace_write(lp, ioaddr, MACE_MACCC,
1394 MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
1395 );
1396
1397 } else {
1398
1399 /* Normal mode */
1400 mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
1401 mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
1402
1403 }
1404} /* restore_multicast_list */
1405
1406/* ----------------------------------------------------------------------------
1407set_multicast_list
1408 Set or clear the multicast filter for this adaptor.
1409
1410Input
1411 num_addrs == -1 Promiscuous mode, receive all packets
1412 num_addrs == 0 Normal mode, clear multicast list
1413 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
1414 best-effort filtering.
1415Output
1416 multicast_num_addrs
1417 multicast_ladrf[]
1418---------------------------------------------------------------------------- */
1419
1420static void set_multicast_list(struct net_device *dev)
1421{
1422 mace_private *lp = netdev_priv(dev);
1423 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
1424 struct netdev_hw_addr *ha;
1425
1426#ifdef PCMCIA_DEBUG
1427 {
1428 static int old;
1429 if (netdev_mc_count(dev) != old) {
1430 old = netdev_mc_count(dev);
1431 pr_debug("%s: setting Rx mode to %d addresses.\n",
1432 dev->name, old);
1433 }
1434 }
1435#endif
1436
1437 /* Set multicast_num_addrs. */
1438 lp->multicast_num_addrs = netdev_mc_count(dev);
1439
1440 /* Set multicast_ladrf. */
1441  if (lp->multicast_num_addrs > 0) {
1442    /* Calculate multicast logical address filter */
1443    memset(lp->multicast_ladrf, 0, sizeof(lp->multicast_ladrf));
1444 netdev_for_each_mc_addr(ha, dev) {
1445 memcpy(adr, ha->addr, ETHER_ADDR_LEN);
1446 BuildLAF(lp->multicast_ladrf, adr);
1447 }
1448 }
1449
1450 restore_multicast_list(dev);
1451
1452} /* set_multicast_list */
1453
1454#endif /* BROKEN_MULTICAST */
1455
1456static void restore_multicast_list(struct net_device *dev)
1457{
1458 unsigned int ioaddr = dev->base_addr;
1459 mace_private *lp = netdev_priv(dev);
1460
1461 pr_debug("%s: restoring Rx mode to %d addresses.\n", dev->name,
1462 lp->multicast_num_addrs);
1463
1464 if (dev->flags & IFF_PROMISC) {
1465 /* Promiscuous mode: receive all packets */
1466    mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
1467 mace_write(lp, ioaddr, MACE_MACCC,
1468 MACE_MACCC_PROM | MACE_MACCC_ENXMT | MACE_MACCC_ENRCV
1469 );
1470 } else {
1471 /* Normal mode */
1472 mace_write(lp, ioaddr, MACE_UTR, MACE_UTR_LOOP_EXTERNAL);
1473 mace_write(lp, ioaddr, MACE_MACCC, MACE_MACCC_ENXMT | MACE_MACCC_ENRCV);
1474 }
1475} /* restore_multicast_list */
1476
1477static void set_multicast_list(struct net_device *dev)
1478{
1479 mace_private *lp = netdev_priv(dev);
1480
1481#ifdef PCMCIA_DEBUG
1482 {
1483 static int old;
1484 if (netdev_mc_count(dev) != old) {
1485 old = netdev_mc_count(dev);
1486 pr_debug("%s: setting Rx mode to %d addresses.\n",
1487 dev->name, old);
1488 }
1489 }
1490#endif
1491
1492 lp->multicast_num_addrs = netdev_mc_count(dev);
1493 restore_multicast_list(dev);
1494
1495} /* set_multicast_list */
1496
1497static const struct pcmcia_device_id nmclan_ids[] = {
1498 PCMCIA_DEVICE_PROD_ID12("New Media Corporation", "Ethernet", 0x085a850b, 0x00b2e941),
1499 PCMCIA_DEVICE_PROD_ID12("Portable Add-ons", "Ethernet+", 0xebf1d60, 0xad673aaf),
1500 PCMCIA_DEVICE_NULL,
1501};
1502MODULE_DEVICE_TABLE(pcmcia, nmclan_ids);
1503
1504static struct pcmcia_driver nmclan_cs_driver = {
1505 .owner = THIS_MODULE,
1506 .name = "nmclan_cs",
1507 .probe = nmclan_probe,
1508 .remove = nmclan_detach,
1509 .id_table = nmclan_ids,
1510 .suspend = nmclan_suspend,
1511 .resume = nmclan_resume,
1512};
1513
1514static int __init init_nmclan_cs(void)
1515{
1516 return pcmcia_register_driver(&nmclan_cs_driver);
1517}
1518
1519static void __exit exit_nmclan_cs(void)
1520{
1521 pcmcia_unregister_driver(&nmclan_cs_driver);
1522}
1523
1524module_init(init_nmclan_cs);
1525module_exit(exit_nmclan_cs);
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
new file mode 100644
index 000000000000..f92bc6e34828
--- /dev/null
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -0,0 +1,2937 @@
1/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
2/*
3 * Copyright 1996-1999 Thomas Bogendoerfer
4 *
5 * Derived from the lance driver written 1993,1994,1995 by Donald Becker.
6 *
7 * Copyright 1993 United States Government as represented by the
8 * Director, National Security Agency.
9 *
10 * This software may be used and distributed according to the terms
11 * of the GNU General Public License, incorporated herein by reference.
12 *
13 * This driver is for PCnet32 and PCnetPCI based ethercards
14 */
15/**************************************************************************
16 * 23 Oct, 2000.
17 * Fixed a few bugs, related to running the controller in 32bit mode.
18 *
19 * Carsten Langgaard, carstenl@mips.com
20 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
21 *
22 *************************************************************************/
23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
26#define DRV_NAME "pcnet32"
27#define DRV_VERSION "1.35"
28#define DRV_RELDATE "21.Apr.2008"
29#define PFX DRV_NAME ": "
30
31static const char *const version =
32 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n";
33
34#include <linux/module.h>
35#include <linux/kernel.h>
36#include <linux/sched.h>
37#include <linux/string.h>
38#include <linux/errno.h>
39#include <linux/ioport.h>
40#include <linux/slab.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/delay.h>
44#include <linux/init.h>
45#include <linux/ethtool.h>
46#include <linux/mii.h>
47#include <linux/crc32.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/if_ether.h>
51#include <linux/skbuff.h>
52#include <linux/spinlock.h>
53#include <linux/moduleparam.h>
54#include <linux/bitops.h>
55#include <linux/io.h>
56#include <linux/uaccess.h>
57
58#include <asm/dma.h>
59#include <asm/irq.h>
60
61/*
62 * PCI device identifiers for "new style" Linux PCI Device Drivers
63 */
64static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
65 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
66 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
67
68 /*
69 * Adapters that were sold with IBM's RS/6000 or pSeries hardware have
70 * the incorrect vendor id.
71 */
72 { PCI_DEVICE(PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE),
73 .class = (PCI_CLASS_NETWORK_ETHERNET << 8), .class_mask = 0xffff00, },
74
75 { } /* terminate list */
76};
77
78MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
79
80static int cards_found;
81
82/*
83 * VLB I/O addresses
84 */
85static unsigned int pcnet32_portlist[] =
86 { 0x300, 0x320, 0x340, 0x360, 0 };
87
88static int pcnet32_debug;
89static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
90static int pcnet32vlb; /* check for VLB cards ? */
91
92static struct net_device *pcnet32_dev;
93
94static int max_interrupt_work = 2;
95static int rx_copybreak = 200;
96
97#define PCNET32_PORT_AUI 0x00
98#define PCNET32_PORT_10BT 0x01
99#define PCNET32_PORT_GPSI 0x02
100#define PCNET32_PORT_MII 0x03
101
102#define PCNET32_PORT_PORTSEL 0x03
103#define PCNET32_PORT_ASEL 0x04
104#define PCNET32_PORT_100 0x40
105#define PCNET32_PORT_FD 0x80
106
107#define PCNET32_DMA_MASK 0xffffffff
108
109#define PCNET32_WATCHDOG_TIMEOUT (jiffies + (2 * HZ))
110#define PCNET32_BLINK_TIMEOUT (jiffies + (HZ/4))
111
112/*
113 * table to translate option values from tulip
114 * to internal options
115 */
116static const unsigned char options_mapping[] = {
117 PCNET32_PORT_ASEL, /* 0 Auto-select */
118 PCNET32_PORT_AUI, /* 1 BNC/AUI */
119 PCNET32_PORT_AUI, /* 2 AUI/BNC */
120 PCNET32_PORT_ASEL, /* 3 not supported */
121 PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */
122 PCNET32_PORT_ASEL, /* 5 not supported */
123 PCNET32_PORT_ASEL, /* 6 not supported */
124 PCNET32_PORT_ASEL, /* 7 not supported */
125 PCNET32_PORT_ASEL, /* 8 not supported */
126 PCNET32_PORT_MII, /* 9 MII 10baseT */
127 PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */
128 PCNET32_PORT_MII, /* 11 MII (autosel) */
129 PCNET32_PORT_10BT, /* 12 10BaseT */
130 PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */
131 /* 14 MII 100BaseTx-FD */
132 PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD,
133 PCNET32_PORT_ASEL /* 15 not supported */
134};
135
136static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = {
137 "Loopback test (offline)"
138};
139
140#define PCNET32_TEST_LEN ARRAY_SIZE(pcnet32_gstrings_test)
141
142#define PCNET32_NUM_REGS 136
143
144#define MAX_UNITS 8 /* More are supported, limit only on options */
145static int options[MAX_UNITS];
146static int full_duplex[MAX_UNITS];
147static int homepna[MAX_UNITS];
148
149/*
150 * Theory of Operation
151 *
152 * This driver uses the same software structure as the normal lance
153 * driver. So look for a verbose description in lance.c. The differences
154 * to the normal lance driver is the use of the 32bit mode of PCnet32
155 * and PCnetPCI chips. Because these chips are 32bit chips, there is no
156 * 16MB limitation and we don't need bounce buffers.
157 */
158
159/*
160 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
161 * The defaults below are 16 Tx buffers and 32 Rx buffers, i.e.
162 * log2 values 4 (16 == 2^4) and 5 (32 == 2^5).
163 */
164#ifndef PCNET32_LOG_TX_BUFFERS
165#define PCNET32_LOG_TX_BUFFERS 4
166#define PCNET32_LOG_RX_BUFFERS 5
167#define PCNET32_LOG_MAX_TX_BUFFERS 9 /* 2^9 == 512 */
168#define PCNET32_LOG_MAX_RX_BUFFERS 9
169#endif
170
171#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
172#define TX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_TX_BUFFERS))
173
174#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
175#define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS))
176
177#define PKT_BUF_SKB 1544
178/* actual buffer length after being aligned */
179#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN)
180/* chip wants two's complement of the (aligned) buffer length */
181#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB)
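The chip interprets buf_length as a negative 16-bit (two's complement)
byte count, which is why NEG_BUF_SIZE is written as a negated
difference.  A quick standalone check of the encoding, assuming
NET_IP_ALIGN == 2 (its usual value):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		const int pkt_buf_skb = 1544, net_ip_align = 2;
		uint16_t bcnt = (uint16_t)(net_ip_align - pkt_buf_skb);

		/* prints: PKT_BUF_SIZE=1542  NEG_BUF_SIZE=0xf9fa */
		printf("PKT_BUF_SIZE=%d  NEG_BUF_SIZE=0x%04x\n",
		       pkt_buf_skb - net_ip_align, bcnt);
		return 0;
	}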
182
183/* Offsets from base I/O address. */
184#define PCNET32_WIO_RDP 0x10
185#define PCNET32_WIO_RAP 0x12
186#define PCNET32_WIO_RESET 0x14
187#define PCNET32_WIO_BDP 0x16
188
189#define PCNET32_DWIO_RDP 0x10
190#define PCNET32_DWIO_RAP 0x14
191#define PCNET32_DWIO_RESET 0x18
192#define PCNET32_DWIO_BDP 0x1C
193
194#define PCNET32_TOTAL_SIZE 0x20
195
196#define CSR0 0
197#define CSR0_INIT 0x1
198#define CSR0_START 0x2
199#define CSR0_STOP 0x4
200#define CSR0_TXPOLL 0x8
201#define CSR0_INTEN 0x40
202#define CSR0_IDON 0x0100
203#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
204#define PCNET32_INIT_LOW 1
205#define PCNET32_INIT_HIGH 2
206#define CSR3 3
207#define CSR4 4
208#define CSR5 5
209#define CSR5_SUSPEND 0x0001
210#define CSR15 15
211#define PCNET32_MC_FILTER 8
212
213#define PCNET32_79C970A 0x2621
214
215/* The PCNET32 Rx and Tx ring descriptors. */
216struct pcnet32_rx_head {
217 __le32 base;
218 __le16 buf_length; /* two's complement of length */
219 __le16 status;
220 __le32 msg_length;
221 __le32 reserved;
222};
223
224struct pcnet32_tx_head {
225 __le32 base;
226 __le16 length; /* two's complement of length */
227 __le16 status;
228 __le32 misc;
229 __le32 reserved;
230};
231
232/* The PCNET32 32-Bit initialization block, described in databook. */
233struct pcnet32_init_block {
234 __le16 mode;
235 __le16 tlen_rlen;
236 u8 phys_addr[6];
237 __le16 reserved;
238 __le32 filter[2];
239 /* Receive and transmit ring base, along with extra bits. */
240 __le32 rx_ring;
241 __le32 tx_ring;
242};
243
244/* PCnet32 access functions */
245struct pcnet32_access {
246 u16 (*read_csr) (unsigned long, int);
247 void (*write_csr) (unsigned long, int, u16);
248 u16 (*read_bcr) (unsigned long, int);
249 void (*write_bcr) (unsigned long, int, u16);
250 u16 (*read_rap) (unsigned long);
251 void (*write_rap) (unsigned long, u16);
252 void (*reset) (unsigned long);
253};
254
255/*
256 * The init block, reached through the first field of pcnet32_private, is
257 * read by the device over DMA, so it must live in consistent DMA memory.
258 */
259struct pcnet32_private {
260 struct pcnet32_init_block *init_block;
261 /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
262 struct pcnet32_rx_head *rx_ring;
263 struct pcnet32_tx_head *tx_ring;
264 dma_addr_t init_dma_addr;/* DMA address of beginning of the init block,
265 returned by pci_alloc_consistent */
266 struct pci_dev *pci_dev;
267 const char *name;
268 /* The saved address of a sent-in-place packet/buffer, for dev_kfree_skb(). */
269 struct sk_buff **tx_skbuff;
270 struct sk_buff **rx_skbuff;
271 dma_addr_t *tx_dma_addr;
272 dma_addr_t *rx_dma_addr;
273 const struct pcnet32_access *a;
274 spinlock_t lock; /* Guard lock */
275 unsigned int cur_rx, cur_tx; /* The next free ring entry */
276 unsigned int rx_ring_size; /* current rx ring size */
277 unsigned int tx_ring_size; /* current tx ring size */
278 unsigned int rx_mod_mask; /* rx ring modular mask */
279 unsigned int tx_mod_mask; /* tx ring modular mask */
280 unsigned short rx_len_bits;
281 unsigned short tx_len_bits;
282 dma_addr_t rx_ring_dma_addr;
283 dma_addr_t tx_ring_dma_addr;
284 unsigned int dirty_rx, /* ring entries to be freed. */
285 dirty_tx;
286
287 struct net_device *dev;
288 struct napi_struct napi;
289 char tx_full;
290 char phycount; /* number of phys found */
291 int options;
292 unsigned int shared_irq:1, /* shared irq possible */
293 dxsuflo:1, /* disable transmit stop on uflo */
294 mii:1; /* mii port available */
295 struct net_device *next;
296 struct mii_if_info mii_if;
297 struct timer_list watchdog_timer;
298 u32 msg_enable; /* debug message level */
299
300 /* each bit indicates an available PHY */
301 u32 phymask;
302 unsigned short chip_version; /* which variant this is */
303
304 /* saved registers during ethtool blink */
305 u16 save_regs[4];
306};
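Because the device fetches the init block by DMA (see the comment above
this struct), the probe path obtains it from consistent DMA memory
rather than plain kmalloc().  A sketch of that allocation pattern,
using the field names defined above:

	/* Sketch: carve the init block out of coherent DMA memory so the
	   CPU and the chip agree on its contents without explicit syncs. */
	lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
					      &lp->init_dma_addr);
	if (!lp->init_block)
		return -ENOMEM;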
307
308static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
309static int pcnet32_probe1(unsigned long, int, struct pci_dev *);
310static int pcnet32_open(struct net_device *);
311static int pcnet32_init_ring(struct net_device *);
312static netdev_tx_t pcnet32_start_xmit(struct sk_buff *,
313 struct net_device *);
314static void pcnet32_tx_timeout(struct net_device *dev);
315static irqreturn_t pcnet32_interrupt(int, void *);
316static int pcnet32_close(struct net_device *);
317static struct net_device_stats *pcnet32_get_stats(struct net_device *);
318static void pcnet32_load_multicast(struct net_device *dev);
319static void pcnet32_set_multicast_list(struct net_device *);
320static int pcnet32_ioctl(struct net_device *, struct ifreq *, int);
321static void pcnet32_watchdog(struct net_device *);
322static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
323static void mdio_write(struct net_device *dev, int phy_id, int reg_num,
324 int val);
325static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
326static void pcnet32_ethtool_test(struct net_device *dev,
327 struct ethtool_test *eth_test, u64 * data);
328static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
329static int pcnet32_get_regs_len(struct net_device *dev);
330static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
331 void *ptr);
332static void pcnet32_purge_tx_ring(struct net_device *dev);
333static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
334static void pcnet32_free_ring(struct net_device *dev);
335static void pcnet32_check_media(struct net_device *dev, int verbose);
336
337static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
338{
339 outw(index, addr + PCNET32_WIO_RAP);
340 return inw(addr + PCNET32_WIO_RDP);
341}
342
343static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
344{
345 outw(index, addr + PCNET32_WIO_RAP);
346 outw(val, addr + PCNET32_WIO_RDP);
347}
348
349static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
350{
351 outw(index, addr + PCNET32_WIO_RAP);
352 return inw(addr + PCNET32_WIO_BDP);
353}
354
355static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
356{
357 outw(index, addr + PCNET32_WIO_RAP);
358 outw(val, addr + PCNET32_WIO_BDP);
359}
360
361static u16 pcnet32_wio_read_rap(unsigned long addr)
362{
363 return inw(addr + PCNET32_WIO_RAP);
364}
365
366static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
367{
368 outw(val, addr + PCNET32_WIO_RAP);
369}
370
371static void pcnet32_wio_reset(unsigned long addr)
372{
373 inw(addr + PCNET32_WIO_RESET);
374}
375
376static int pcnet32_wio_check(unsigned long addr)
377{
378 outw(88, addr + PCNET32_WIO_RAP);
379 return inw(addr + PCNET32_WIO_RAP) == 88;
380}
381
382static const struct pcnet32_access pcnet32_wio = {
383 .read_csr = pcnet32_wio_read_csr,
384 .write_csr = pcnet32_wio_write_csr,
385 .read_bcr = pcnet32_wio_read_bcr,
386 .write_bcr = pcnet32_wio_write_bcr,
387 .read_rap = pcnet32_wio_read_rap,
388 .write_rap = pcnet32_wio_write_rap,
389 .reset = pcnet32_wio_reset
390};
391
392static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
393{
394 outl(index, addr + PCNET32_DWIO_RAP);
395 return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
396}
397
398static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
399{
400 outl(index, addr + PCNET32_DWIO_RAP);
401 outl(val, addr + PCNET32_DWIO_RDP);
402}
403
404static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
405{
406 outl(index, addr + PCNET32_DWIO_RAP);
407 return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
408}
409
410static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
411{
412 outl(index, addr + PCNET32_DWIO_RAP);
413 outl(val, addr + PCNET32_DWIO_BDP);
414}
415
416static u16 pcnet32_dwio_read_rap(unsigned long addr)
417{
418 return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
419}
420
421static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
422{
423 outl(val, addr + PCNET32_DWIO_RAP);
424}
425
426static void pcnet32_dwio_reset(unsigned long addr)
427{
428 inl(addr + PCNET32_DWIO_RESET);
429}
430
431static int pcnet32_dwio_check(unsigned long addr)
432{
433 outl(88, addr + PCNET32_DWIO_RAP);
434 return (inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88;
435}
436
437static const struct pcnet32_access pcnet32_dwio = {
438 .read_csr = pcnet32_dwio_read_csr,
439 .write_csr = pcnet32_dwio_write_csr,
440 .read_bcr = pcnet32_dwio_read_bcr,
441 .write_bcr = pcnet32_dwio_write_bcr,
442 .read_rap = pcnet32_dwio_read_rap,
443 .write_rap = pcnet32_dwio_write_rap,
444 .reset = pcnet32_dwio_reset
445};
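pcnet32_probe1(), later in the file, decides which of these two tables
to install by resetting the chip and testing which access width echoes
the RAP register.  A condensed sketch of that selection, using the
check helpers above:

	/* Sketch: prefer 16-bit word I/O if the chip answers in that mode
	   (CSR0 reads 4, the STOP bit, right after a reset); otherwise
	   fall back to 32-bit double-word I/O. */
	static const struct pcnet32_access *pcnet32_pick_access(unsigned long ioaddr)
	{
		pcnet32_wio_reset(ioaddr);
		if (pcnet32_wio_check(ioaddr) &&
		    pcnet32_wio_read_csr(ioaddr, CSR0) == 4)
			return &pcnet32_wio;

		pcnet32_dwio_reset(ioaddr);
		if (pcnet32_dwio_check(ioaddr) &&
		    pcnet32_dwio_read_csr(ioaddr, CSR0) == 4)
			return &pcnet32_dwio;

		return NULL;	/* nothing PCnet32-like at this address */
	}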
446
447static void pcnet32_netif_stop(struct net_device *dev)
448{
449 struct pcnet32_private *lp = netdev_priv(dev);
450
451 dev->trans_start = jiffies; /* prevent tx timeout */
452 napi_disable(&lp->napi);
453 netif_tx_disable(dev);
454}
455
456static void pcnet32_netif_start(struct net_device *dev)
457{
458 struct pcnet32_private *lp = netdev_priv(dev);
459 ulong ioaddr = dev->base_addr;
460 u16 val;
461
462 netif_wake_queue(dev);
463 val = lp->a->read_csr(ioaddr, CSR3);
464 val &= 0x00ff;
465 lp->a->write_csr(ioaddr, CSR3, val);
466 napi_enable(&lp->napi);
467}
468
469/*
470 * Allocate space for the new sized tx ring.
471 * Free old resources.
472 * Save new resources.
473 * Any failure keeps old resources.
474 * Must be called with lp->lock held.
475 */
476static void pcnet32_realloc_tx_ring(struct net_device *dev,
477 struct pcnet32_private *lp,
478 unsigned int size)
479{
480 dma_addr_t new_ring_dma_addr;
481 dma_addr_t *new_dma_addr_list;
482 struct pcnet32_tx_head *new_tx_ring;
483 struct sk_buff **new_skb_list;
484
485 pcnet32_purge_tx_ring(dev);
486
487 new_tx_ring = pci_alloc_consistent(lp->pci_dev,
488 sizeof(struct pcnet32_tx_head) *
489 (1 << size),
490 &new_ring_dma_addr);
491 if (new_tx_ring == NULL) {
492 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
493 return;
494 }
495 memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
496
497 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
498 GFP_ATOMIC);
499 if (!new_dma_addr_list) {
500 netif_err(lp, drv, dev, "Memory allocation failed\n");
501 goto free_new_tx_ring;
502 }
503
504 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
505 GFP_ATOMIC);
506 if (!new_skb_list) {
507 netif_err(lp, drv, dev, "Memory allocation failed\n");
508 goto free_new_lists;
509 }
510
511 kfree(lp->tx_skbuff);
512 kfree(lp->tx_dma_addr);
513 pci_free_consistent(lp->pci_dev,
514 sizeof(struct pcnet32_tx_head) *
515 lp->tx_ring_size, lp->tx_ring,
516 lp->tx_ring_dma_addr);
517
518 lp->tx_ring_size = (1 << size);
519 lp->tx_mod_mask = lp->tx_ring_size - 1;
520 lp->tx_len_bits = (size << 12);
521 lp->tx_ring = new_tx_ring;
522 lp->tx_ring_dma_addr = new_ring_dma_addr;
523 lp->tx_dma_addr = new_dma_addr_list;
524 lp->tx_skbuff = new_skb_list;
525 return;
526
527free_new_lists:
528 kfree(new_dma_addr_list);
529free_new_tx_ring:
530 pci_free_consistent(lp->pci_dev,
531 sizeof(struct pcnet32_tx_head) *
532 (1 << size),
533 new_tx_ring,
534 new_ring_dma_addr);
535}
536
537/*
538 * Allocate space for the new sized rx ring.
539 * Re-use old receive buffers.
540 * alloc extra buffers
541 * free unneeded buffers
543 * Save new resources.
544 * Any failure keeps old resources.
545 * Must be called with lp->lock held.
546 */
547static void pcnet32_realloc_rx_ring(struct net_device *dev,
548 struct pcnet32_private *lp,
549 unsigned int size)
550{
551 dma_addr_t new_ring_dma_addr;
552 dma_addr_t *new_dma_addr_list;
553 struct pcnet32_rx_head *new_rx_ring;
554 struct sk_buff **new_skb_list;
555 int new, overlap;
556
557 new_rx_ring = pci_alloc_consistent(lp->pci_dev,
558 sizeof(struct pcnet32_rx_head) *
559 (1 << size),
560 &new_ring_dma_addr);
561 if (new_rx_ring == NULL) {
562 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
563 return;
564 }
565 memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
566
567 new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
568 GFP_ATOMIC);
569 if (!new_dma_addr_list) {
570 netif_err(lp, drv, dev, "Memory allocation failed\n");
571 goto free_new_rx_ring;
572 }
573
574 new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
575 GFP_ATOMIC);
576 if (!new_skb_list) {
577 netif_err(lp, drv, dev, "Memory allocation failed\n");
578 goto free_new_lists;
579 }
580
581 /* first copy the current receive buffers */
582 overlap = min(1U << size, lp->rx_ring_size); /* "size" is a log2 value */
583 for (new = 0; new < overlap; new++) {
584 new_rx_ring[new] = lp->rx_ring[new];
585 new_dma_addr_list[new] = lp->rx_dma_addr[new];
586 new_skb_list[new] = lp->rx_skbuff[new];
587 }
588 /* now allocate any new buffers needed */
589 for (; new < (1 << size); new++) {
590 struct sk_buff *rx_skbuff;
591 new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
592 rx_skbuff = new_skb_list[new];
593 if (!rx_skbuff) {
594 /* keep the original lists and buffers */
595 netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
596 __func__);
597 goto free_all_new;
598 }
599 skb_reserve(rx_skbuff, NET_IP_ALIGN);
600
601 new_dma_addr_list[new] =
602 pci_map_single(lp->pci_dev, rx_skbuff->data,
603 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
604 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
605 new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
606 new_rx_ring[new].status = cpu_to_le16(0x8000);
607 }
608 /* and free any unneeded buffers */
609 for (; new < lp->rx_ring_size; new++) {
610 if (lp->rx_skbuff[new]) {
611 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
612 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
613 dev_kfree_skb(lp->rx_skbuff[new]);
614 }
615 }
616
617 kfree(lp->rx_skbuff);
618 kfree(lp->rx_dma_addr);
619 pci_free_consistent(lp->pci_dev,
620 sizeof(struct pcnet32_rx_head) *
621 lp->rx_ring_size, lp->rx_ring,
622 lp->rx_ring_dma_addr);
623
624 lp->rx_ring_size = (1 << size);
625 lp->rx_mod_mask = lp->rx_ring_size - 1;
626 lp->rx_len_bits = (size << 4);
627 lp->rx_ring = new_rx_ring;
628 lp->rx_ring_dma_addr = new_ring_dma_addr;
629 lp->rx_dma_addr = new_dma_addr_list;
630 lp->rx_skbuff = new_skb_list;
631 return;
632
633free_all_new:
634 while (--new >= overlap) {
635 if (new_skb_list[new]) {
636 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
637 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
638 dev_kfree_skb(new_skb_list[new]);
639 }
640 }
641 kfree(new_skb_list);
642free_new_lists:
643 kfree(new_dma_addr_list);
644free_new_rx_ring:
645 pci_free_consistent(lp->pci_dev,
646 sizeof(struct pcnet32_rx_head) *
647 (1 << size),
648 new_rx_ring,
649 new_ring_dma_addr);
650}
651
652static void pcnet32_purge_rx_ring(struct net_device *dev)
653{
654 struct pcnet32_private *lp = netdev_priv(dev);
655 int i;
656
657 /* free all allocated skbuffs */
658 for (i = 0; i < lp->rx_ring_size; i++) {
659 lp->rx_ring[i].status = 0; /* CPU owns buffer */
660 wmb(); /* Make sure adapter sees owner change */
661 if (lp->rx_skbuff[i]) {
662 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
663 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
664 dev_kfree_skb_any(lp->rx_skbuff[i]);
665 }
666 lp->rx_skbuff[i] = NULL;
667 lp->rx_dma_addr[i] = 0;
668 }
669}
670
671#ifdef CONFIG_NET_POLL_CONTROLLER
672static void pcnet32_poll_controller(struct net_device *dev)
673{
674 disable_irq(dev->irq);
675 pcnet32_interrupt(0, dev);
676 enable_irq(dev->irq);
677}
678#endif
679
680static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
681{
682 struct pcnet32_private *lp = netdev_priv(dev);
683 unsigned long flags;
684 int r = -EOPNOTSUPP;
685
686 if (lp->mii) {
687 spin_lock_irqsave(&lp->lock, flags);
688 mii_ethtool_gset(&lp->mii_if, cmd);
689 spin_unlock_irqrestore(&lp->lock, flags);
690 r = 0;
691 }
692 return r;
693}
694
695static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
696{
697 struct pcnet32_private *lp = netdev_priv(dev);
698 unsigned long flags;
699 int r = -EOPNOTSUPP;
700
701 if (lp->mii) {
702 spin_lock_irqsave(&lp->lock, flags);
703 r = mii_ethtool_sset(&lp->mii_if, cmd);
704 spin_unlock_irqrestore(&lp->lock, flags);
705 }
706 return r;
707}
708
709static void pcnet32_get_drvinfo(struct net_device *dev,
710 struct ethtool_drvinfo *info)
711{
712 struct pcnet32_private *lp = netdev_priv(dev);
713
714 strcpy(info->driver, DRV_NAME);
715 strcpy(info->version, DRV_VERSION);
716 if (lp->pci_dev)
717 strcpy(info->bus_info, pci_name(lp->pci_dev));
718 else
719 sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
720}
721
722static u32 pcnet32_get_link(struct net_device *dev)
723{
724 struct pcnet32_private *lp = netdev_priv(dev);
725 unsigned long flags;
726 int r;
727
728 spin_lock_irqsave(&lp->lock, flags);
729 if (lp->mii) {
730 r = mii_link_ok(&lp->mii_if);
731 } else if (lp->chip_version >= PCNET32_79C970A) {
732 ulong ioaddr = dev->base_addr; /* card base I/O address */
733 r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
734	} else {	/* cannot detect link on really old chips */
735 r = 1;
736 }
737 spin_unlock_irqrestore(&lp->lock, flags);
738
739 return r;
740}
741
742static u32 pcnet32_get_msglevel(struct net_device *dev)
743{
744 struct pcnet32_private *lp = netdev_priv(dev);
745 return lp->msg_enable;
746}
747
748static void pcnet32_set_msglevel(struct net_device *dev, u32 value)
749{
750 struct pcnet32_private *lp = netdev_priv(dev);
751 lp->msg_enable = value;
752}
753
754static int pcnet32_nway_reset(struct net_device *dev)
755{
756 struct pcnet32_private *lp = netdev_priv(dev);
757 unsigned long flags;
758 int r = -EOPNOTSUPP;
759
760 if (lp->mii) {
761 spin_lock_irqsave(&lp->lock, flags);
762 r = mii_nway_restart(&lp->mii_if);
763 spin_unlock_irqrestore(&lp->lock, flags);
764 }
765 return r;
766}
767
768static void pcnet32_get_ringparam(struct net_device *dev,
769 struct ethtool_ringparam *ering)
770{
771 struct pcnet32_private *lp = netdev_priv(dev);
772
773 ering->tx_max_pending = TX_MAX_RING_SIZE;
774 ering->tx_pending = lp->tx_ring_size;
775 ering->rx_max_pending = RX_MAX_RING_SIZE;
776 ering->rx_pending = lp->rx_ring_size;
777}
778
779static int pcnet32_set_ringparam(struct net_device *dev,
780 struct ethtool_ringparam *ering)
781{
782 struct pcnet32_private *lp = netdev_priv(dev);
783 unsigned long flags;
784 unsigned int size;
785 ulong ioaddr = dev->base_addr;
786 int i;
787
788 if (ering->rx_mini_pending || ering->rx_jumbo_pending)
789 return -EINVAL;
790
791 if (netif_running(dev))
792 pcnet32_netif_stop(dev);
793
794 spin_lock_irqsave(&lp->lock, flags);
795 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
796
797 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
798
799 /* set the minimum ring size to 4, to allow the loopback test to work
800 * unchanged.
801 */
802 for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
803 if (size <= (1 << i))
804 break;
805 }
806 if ((1 << i) != lp->tx_ring_size)
807 pcnet32_realloc_tx_ring(dev, lp, i);
808
809 size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
810 for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
811 if (size <= (1 << i))
812 break;
813 }
814 if ((1 << i) != lp->rx_ring_size)
815 pcnet32_realloc_rx_ring(dev, lp, i);
816
817 lp->napi.weight = lp->rx_ring_size / 2;
818
819 if (netif_running(dev)) {
820 pcnet32_netif_start(dev);
821 pcnet32_restart(dev, CSR0_NORMAL);
822 }
823
824 spin_unlock_irqrestore(&lp->lock, flags);
825
826 netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
827 lp->rx_ring_size, lp->tx_ring_size);
828
829 return 0;
830}
831
832static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
833 u8 *data)
834{
835 memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
836}
837
838static int pcnet32_get_sset_count(struct net_device *dev, int sset)
839{
840 switch (sset) {
841 case ETH_SS_TEST:
842 return PCNET32_TEST_LEN;
843 default:
844 return -EOPNOTSUPP;
845 }
846}
847
848static void pcnet32_ethtool_test(struct net_device *dev,
849 struct ethtool_test *test, u64 * data)
850{
851 struct pcnet32_private *lp = netdev_priv(dev);
852 int rc;
853
854 if (test->flags == ETH_TEST_FL_OFFLINE) {
855 rc = pcnet32_loopback_test(dev, data);
856 if (rc) {
857 netif_printk(lp, hw, KERN_DEBUG, dev,
858 "Loopback test failed\n");
859 test->flags |= ETH_TEST_FL_FAILED;
860 } else
861 netif_printk(lp, hw, KERN_DEBUG, dev,
862 "Loopback test passed\n");
863 } else
864 netif_printk(lp, hw, KERN_DEBUG, dev,
865 "No tests to run (specify 'Offline' on ethtool)\n");
866} /* end pcnet32_ethtool_test */
867
868static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
869{
870 struct pcnet32_private *lp = netdev_priv(dev);
871 const struct pcnet32_access *a = lp->a; /* access to registers */
872 ulong ioaddr = dev->base_addr; /* card base I/O address */
873 struct sk_buff *skb; /* sk buff */
874 int x, i; /* counters */
875 int numbuffs = 4; /* number of TX/RX buffers and descs */
876	u16 status = 0x8300;	/* TX descriptor status: OWN | STP | ENP */
877 __le16 teststatus; /* test of ring status */
878 int rc; /* return code */
879 int size; /* size of packets */
880 unsigned char *packet; /* source packet data */
881 static const int data_len = 60; /* length of source packets */
882 unsigned long flags;
883 unsigned long ticks;
884
885 rc = 1; /* default to fail */
886
887 if (netif_running(dev))
888 pcnet32_netif_stop(dev);
889
890 spin_lock_irqsave(&lp->lock, flags);
891 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
892
893 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
894
895 /* Reset the PCNET32 */
896 lp->a->reset(ioaddr);
897 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
898
899 /* switch pcnet32 to 32bit mode */
900 lp->a->write_bcr(ioaddr, 20, 2);
901
902 /* purge & init rings but don't actually restart */
903 pcnet32_restart(dev, 0x0000);
904
905 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
906
907 /* Initialize Transmit buffers. */
908 size = data_len + 15;
909 for (x = 0; x < numbuffs; x++) {
910 skb = dev_alloc_skb(size);
911 if (!skb) {
912 netif_printk(lp, hw, KERN_DEBUG, dev,
913 "Cannot allocate skb at line: %d!\n",
914 __LINE__);
915 goto clean_up;
916 }
917 packet = skb->data;
918 skb_put(skb, size); /* create space for data */
919 lp->tx_skbuff[x] = skb;
920 lp->tx_ring[x].length = cpu_to_le16(-skb->len);
921 lp->tx_ring[x].misc = 0;
922
923 /* put DA and SA into the skb */
924 for (i = 0; i < 6; i++)
925 *packet++ = dev->dev_addr[i];
926 for (i = 0; i < 6; i++)
927 *packet++ = dev->dev_addr[i];
928	/* Ethertype 0x0806 */
929 *packet++ = 0x08;
930 *packet++ = 0x06;
931 /* packet number */
932 *packet++ = x;
933 /* fill packet with data */
934 for (i = 0; i < data_len; i++)
935 *packet++ = i;
936
937 lp->tx_dma_addr[x] =
938 pci_map_single(lp->pci_dev, skb->data, skb->len,
939 PCI_DMA_TODEVICE);
940 lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
941 wmb(); /* Make sure owner changes after all others are visible */
942 lp->tx_ring[x].status = cpu_to_le16(status);
943 }
944
945 x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
946 a->write_bcr(ioaddr, 32, x | 0x0002);
947
948 /* set int loopback in CSR15 */
949 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
950 lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
951
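	/* 0x8000 is the descriptor OWN bit; the chip clears it once it has
	 * written a received frame into the buffer.
	 */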
952 teststatus = cpu_to_le16(0x8000);
953 lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
954
955 /* Check status of descriptors */
956 for (x = 0; x < numbuffs; x++) {
957 ticks = 0;
958 rmb();
959 while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) {
960 spin_unlock_irqrestore(&lp->lock, flags);
961 msleep(1);
962 spin_lock_irqsave(&lp->lock, flags);
963 rmb();
964 ticks++;
965 }
966 if (ticks == 200) {
967 netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
968 break;
969 }
970 }
971
972 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
973 wmb();
974 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
975 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
976
977 for (x = 0; x < numbuffs; x++) {
978 netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
979 skb = lp->rx_skbuff[x];
980 for (i = 0; i < size; i++)
981 pr_cont(" %02x", *(skb->data + i));
982 pr_cont("\n");
983 }
984 }
985
986 x = 0;
987 rc = 0;
988 while (x < numbuffs && !rc) {
989 skb = lp->rx_skbuff[x];
990 packet = lp->tx_skbuff[x]->data;
991 for (i = 0; i < size; i++) {
992 if (*(skb->data + i) != packet[i]) {
993 netif_printk(lp, hw, KERN_DEBUG, dev,
994 "Error in compare! %2x - %02x %02x\n",
995 i, *(skb->data + i), packet[i]);
996 rc = 1;
997 break;
998 }
999 }
1000 x++;
1001 }
1002
1003clean_up:
1004 *data1 = rc;
1005 pcnet32_purge_tx_ring(dev);
1006
1007 x = a->read_csr(ioaddr, CSR15);
1008 a->write_csr(ioaddr, CSR15, (x & ~0x0044)); /* reset bits 6 and 2 */
1009
1010 x = a->read_bcr(ioaddr, 32); /* reset internal loopback */
1011 a->write_bcr(ioaddr, 32, (x & ~0x0002));
1012
1013 if (netif_running(dev)) {
1014 pcnet32_netif_start(dev);
1015 pcnet32_restart(dev, CSR0_NORMAL);
1016 } else {
1017 pcnet32_purge_rx_ring(dev);
1018 lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
1019 }
1020 spin_unlock_irqrestore(&lp->lock, flags);
1021
1022 return rc;
1023} /* end pcnet32_loopback_test */
1024
1025static int pcnet32_set_phys_id(struct net_device *dev,
1026 enum ethtool_phys_id_state state)
1027{
1028 struct pcnet32_private *lp = netdev_priv(dev);
1029 const struct pcnet32_access *a = lp->a;
1030 ulong ioaddr = dev->base_addr;
1031 unsigned long flags;
1032 int i;
1033
1034 switch (state) {
1035 case ETHTOOL_ID_ACTIVE:
1036 /* Save the current value of the bcrs */
1037 spin_lock_irqsave(&lp->lock, flags);
1038 for (i = 4; i < 8; i++)
1039 lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
1040 spin_unlock_irqrestore(&lp->lock, flags);
1041 return 2; /* cycle on/off twice per second */
1042
1043 case ETHTOOL_ID_ON:
1044 case ETHTOOL_ID_OFF:
1045 /* Blink the led */
1046 spin_lock_irqsave(&lp->lock, flags);
1047 for (i = 4; i < 8; i++)
1048 a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
1049 spin_unlock_irqrestore(&lp->lock, flags);
1050 break;
1051
1052 case ETHTOOL_ID_INACTIVE:
1053 /* Restore the original value of the bcrs */
1054 spin_lock_irqsave(&lp->lock, flags);
1055 for (i = 4; i < 8; i++)
1056 a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
1057 spin_unlock_irqrestore(&lp->lock, flags);
1058 }
1059 return 0;
1060}
1061
1062/*
1063 * lp->lock must be held.
1064 */
1065static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1066 int can_sleep)
1067{
1068 int csr5;
1069 struct pcnet32_private *lp = netdev_priv(dev);
1070 const struct pcnet32_access *a = lp->a;
1071 ulong ioaddr = dev->base_addr;
1072 int ticks;
1073
1074 /* really old chips have to be stopped. */
1075 if (lp->chip_version < PCNET32_79C970A)
1076 return 0;
1077
1078 /* set SUSPEND (SPND) - CSR5 bit 0 */
1079 csr5 = a->read_csr(ioaddr, CSR5);
1080 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
1081
1082 /* poll waiting for bit to be set */
1083 ticks = 0;
1084 while (!(a->read_csr(ioaddr, CSR5) & CSR5_SUSPEND)) {
1085 spin_unlock_irqrestore(&lp->lock, *flags);
1086 if (can_sleep)
1087 msleep(1);
1088 else
1089 mdelay(1);
1090 spin_lock_irqsave(&lp->lock, *flags);
1091 ticks++;
1092 if (ticks > 200) {
1093 netif_printk(lp, hw, KERN_DEBUG, dev,
1094 "Error getting into suspend!\n");
1095 return 0;
1096 }
1097 }
1098 return 1;
1099}
1100
1101/*
1102 * process one receive descriptor entry
1103 */
1104
1105static void pcnet32_rx_entry(struct net_device *dev,
1106 struct pcnet32_private *lp,
1107 struct pcnet32_rx_head *rxp,
1108 int entry)
1109{
1110 int status = (short)le16_to_cpu(rxp->status) >> 8;
1111 int rx_in_place = 0;
1112 struct sk_buff *skb;
1113 short pkt_len;
1114
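	/* After the sign-extending shift, 'status' holds the descriptor's top
	 * byte; 0x03 (STP | ENP) means a complete, error-free frame contained
	 * in a single buffer.
	 */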
1115 if (status != 0x03) { /* There was an error. */
1116 /*
1117 * There is a tricky error noted by John Murphy,
1118 * <murf@perftech.com> to Russ Nelson: Even with full-sized
1119 * buffers it's possible for a jabber packet to use two
1120 * buffers, with only the last correctly noting the error.
1121 */
1122 if (status & 0x01) /* Only count a general error at the */
1123 dev->stats.rx_errors++; /* end of a packet. */
1124 if (status & 0x20)
1125 dev->stats.rx_frame_errors++;
1126 if (status & 0x10)
1127 dev->stats.rx_over_errors++;
1128 if (status & 0x08)
1129 dev->stats.rx_crc_errors++;
1130 if (status & 0x04)
1131 dev->stats.rx_fifo_errors++;
1132 return;
1133 }
1134
1135 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4;
1136
1137 /* Discard oversize frames. */
1138 if (unlikely(pkt_len > PKT_BUF_SIZE)) {
1139 netif_err(lp, drv, dev, "Impossible packet size %d!\n",
1140 pkt_len);
1141 dev->stats.rx_errors++;
1142 return;
1143 }
1144 if (pkt_len < 60) {
1145 netif_err(lp, rx_err, dev, "Runt packet!\n");
1146 dev->stats.rx_errors++;
1147 return;
1148 }
1149
1150 if (pkt_len > rx_copybreak) {
1151 struct sk_buff *newskb;
1152
1153 newskb = dev_alloc_skb(PKT_BUF_SKB);
1154 if (newskb) {
1155 skb_reserve(newskb, NET_IP_ALIGN);
1156 skb = lp->rx_skbuff[entry];
1157 pci_unmap_single(lp->pci_dev,
1158 lp->rx_dma_addr[entry],
1159 PKT_BUF_SIZE,
1160 PCI_DMA_FROMDEVICE);
1161 skb_put(skb, pkt_len);
1162 lp->rx_skbuff[entry] = newskb;
1163 lp->rx_dma_addr[entry] =
1164 pci_map_single(lp->pci_dev,
1165 newskb->data,
1166 PKT_BUF_SIZE,
1167 PCI_DMA_FROMDEVICE);
1168 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
1169 rx_in_place = 1;
1170 } else
1171 skb = NULL;
1172 } else
1173 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1174
1175 if (skb == NULL) {
1176 netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
1177 dev->stats.rx_dropped++;
1178 return;
1179 }
1180 if (!rx_in_place) {
1181 skb_reserve(skb, NET_IP_ALIGN);
1182 skb_put(skb, pkt_len); /* Make room */
1183 pci_dma_sync_single_for_cpu(lp->pci_dev,
1184 lp->rx_dma_addr[entry],
1185 pkt_len,
1186 PCI_DMA_FROMDEVICE);
1187 skb_copy_to_linear_data(skb,
1188 (unsigned char *)(lp->rx_skbuff[entry]->data),
1189 pkt_len);
1190 pci_dma_sync_single_for_device(lp->pci_dev,
1191 lp->rx_dma_addr[entry],
1192 pkt_len,
1193 PCI_DMA_FROMDEVICE);
1194 }
1195 dev->stats.rx_bytes += skb->len;
1196 skb->protocol = eth_type_trans(skb, dev);
1197 netif_receive_skb(skb);
1198 dev->stats.rx_packets++;
1199}
1200
1201static int pcnet32_rx(struct net_device *dev, int budget)
1202{
1203 struct pcnet32_private *lp = netdev_priv(dev);
1204 int entry = lp->cur_rx & lp->rx_mod_mask;
1205 struct pcnet32_rx_head *rxp = &lp->rx_ring[entry];
1206 int npackets = 0;
1207
1208 /* If we own the next entry, it's a new packet. Send it up. */
1209 while (npackets < budget && (short)le16_to_cpu(rxp->status) >= 0) {
1210 pcnet32_rx_entry(dev, lp, rxp, entry);
1211 npackets += 1;
1212 /*
1213 * The docs say that the buffer length isn't touched, but Andrew
1214 * Boyd of QNX reports that some revs of the 79C965 clear it.
1215 */
1216 rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE);
1217 wmb(); /* Make sure owner changes after others are visible */
1218 rxp->status = cpu_to_le16(0x8000);
1219 entry = (++lp->cur_rx) & lp->rx_mod_mask;
1220 rxp = &lp->rx_ring[entry];
1221 }
1222
1223 return npackets;
1224}
1225
1226static int pcnet32_tx(struct net_device *dev)
1227{
1228 struct pcnet32_private *lp = netdev_priv(dev);
1229 unsigned int dirty_tx = lp->dirty_tx;
1230 int delta;
1231 int must_restart = 0;
1232
1233 while (dirty_tx != lp->cur_tx) {
1234 int entry = dirty_tx & lp->tx_mod_mask;
1235 int status = (short)le16_to_cpu(lp->tx_ring[entry].status);
1236
1237 if (status < 0)
1238 break; /* It still hasn't been Txed */
1239
1240 lp->tx_ring[entry].base = 0;
1241
1242 if (status & 0x4000) {
1243 /* There was a major error, log it. */
1244 int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
1245 dev->stats.tx_errors++;
1246 netif_err(lp, tx_err, dev,
1247 "Tx error status=%04x err_status=%08x\n",
1248 status, err_status);
1249 if (err_status & 0x04000000)
1250 dev->stats.tx_aborted_errors++;
1251 if (err_status & 0x08000000)
1252 dev->stats.tx_carrier_errors++;
1253 if (err_status & 0x10000000)
1254 dev->stats.tx_window_errors++;
1255#ifndef DO_DXSUFLO
1256 if (err_status & 0x40000000) {
1257 dev->stats.tx_fifo_errors++;
1258 /* Ackk! On FIFO errors the Tx unit is turned off! */
1259 /* Remove this verbosity later! */
1260 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1261 must_restart = 1;
1262 }
1263#else
1264 if (err_status & 0x40000000) {
1265 dev->stats.tx_fifo_errors++;
1266 if (!lp->dxsuflo) { /* If controller doesn't recover ... */
1267 /* Ackk! On FIFO errors the Tx unit is turned off! */
1268 /* Remove this verbosity later! */
1269 netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
1270 must_restart = 1;
1271 }
1272 }
1273#endif
1274 } else {
1275 if (status & 0x1800)
1276 dev->stats.collisions++;
1277 dev->stats.tx_packets++;
1278 }
1279
1280 /* We must free the original skb */
1281 if (lp->tx_skbuff[entry]) {
1282 pci_unmap_single(lp->pci_dev,
1283 lp->tx_dma_addr[entry],
1284 lp->tx_skbuff[entry]->
1285 len, PCI_DMA_TODEVICE);
1286 dev_kfree_skb_any(lp->tx_skbuff[entry]);
1287 lp->tx_skbuff[entry] = NULL;
1288 lp->tx_dma_addr[entry] = 0;
1289 }
1290 dirty_tx++;
1291 }
1292
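	/* Outstanding descriptors, counted modulo twice the ring size
	 * (tx_mod_mask + tx_ring_size == 2 * tx_ring_size - 1), so the
	 * free-running cur_tx/dirty_tx counters still yield the right
	 * distance after wrapping.
	 */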
1293 delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
1294 if (delta > lp->tx_ring_size) {
1295 netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
1296 dirty_tx, lp->cur_tx, lp->tx_full);
1297 dirty_tx += lp->tx_ring_size;
1298 delta -= lp->tx_ring_size;
1299 }
1300
1301 if (lp->tx_full &&
1302 netif_queue_stopped(dev) &&
1303 delta < lp->tx_ring_size - 2) {
1304 /* The ring is no longer full, clear tbusy. */
1305 lp->tx_full = 0;
1306 netif_wake_queue(dev);
1307 }
1308 lp->dirty_tx = dirty_tx;
1309
1310 return must_restart;
1311}
1312
1313static int pcnet32_poll(struct napi_struct *napi, int budget)
1314{
1315 struct pcnet32_private *lp = container_of(napi, struct pcnet32_private, napi);
1316 struct net_device *dev = lp->dev;
1317 unsigned long ioaddr = dev->base_addr;
1318 unsigned long flags;
1319 int work_done;
1320 u16 val;
1321
1322 work_done = pcnet32_rx(dev, budget);
1323
1324 spin_lock_irqsave(&lp->lock, flags);
1325 if (pcnet32_tx(dev)) {
1326 /* reset the chip to clear the error condition, then restart */
1327 lp->a->reset(ioaddr);
1328 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
1329 pcnet32_restart(dev, CSR0_START);
1330 netif_wake_queue(dev);
1331 }
1332 spin_unlock_irqrestore(&lp->lock, flags);
1333
1334 if (work_done < budget) {
1335 spin_lock_irqsave(&lp->lock, flags);
1336
1337 __napi_complete(napi);
1338
1339 /* clear interrupt masks */
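		/* CSR3 bits 0x5f00 are the BABL/MISS/MERR/RINT/TINT/IDON
		 * mask bits; clearing them re-enables those interrupt
		 * sources now that polling is done.
		 */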
1340 val = lp->a->read_csr(ioaddr, CSR3);
1341 val &= 0x00ff;
1342 lp->a->write_csr(ioaddr, CSR3, val);
1343
1344 /* Set interrupt enable. */
1345 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
1346
1347 spin_unlock_irqrestore(&lp->lock, flags);
1348 }
1349 return work_done;
1350}
1351
1352#define PCNET32_REGS_PER_PHY 32
1353#define PCNET32_MAX_PHYS 32
1354static int pcnet32_get_regs_len(struct net_device *dev)
1355{
1356 struct pcnet32_private *lp = netdev_priv(dev);
1357 int j = lp->phycount * PCNET32_REGS_PER_PHY;
1358
1359 return (PCNET32_NUM_REGS + j) * sizeof(u16);
1360}
1361
1362static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1363 void *ptr)
1364{
1365 int i, csr0;
1366 u16 *buff = ptr;
1367 struct pcnet32_private *lp = netdev_priv(dev);
1368 const struct pcnet32_access *a = lp->a;
1369 ulong ioaddr = dev->base_addr;
1370 unsigned long flags;
1371
1372 spin_lock_irqsave(&lp->lock, flags);
1373
1374 csr0 = a->read_csr(ioaddr, CSR0);
1375 if (!(csr0 & CSR0_STOP)) /* If not stopped */
1376 pcnet32_suspend(dev, &flags, 1);
1377
1378 /* read address PROM */
1379 for (i = 0; i < 16; i += 2)
1380 *buff++ = inw(ioaddr + i);
1381
1382 /* read control and status registers */
1383 for (i = 0; i < 90; i++)
1384 *buff++ = a->read_csr(ioaddr, i);
1385
1386 *buff++ = a->read_csr(ioaddr, 112);
1387 *buff++ = a->read_csr(ioaddr, 114);
1388
1389 /* read bus configuration registers */
1390 for (i = 0; i < 30; i++)
1391 *buff++ = a->read_bcr(ioaddr, i);
1392
1393 *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
1394
1395 for (i = 31; i < 36; i++)
1396 *buff++ = a->read_bcr(ioaddr, i);
1397
1398 /* read mii phy registers */
1399 if (lp->mii) {
1400 int j;
1401 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
1402 if (lp->phymask & (1 << j)) {
1403 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
1404 lp->a->write_bcr(ioaddr, 33,
1405 (j << 5) | i);
1406 *buff++ = lp->a->read_bcr(ioaddr, 34);
1407 }
1408 }
1409 }
1410 }
1411
1412 if (!(csr0 & CSR0_STOP)) { /* If not stopped */
1413 int csr5;
1414
1415 /* clear SUSPEND (SPND) - CSR5 bit 0 */
1416 csr5 = a->read_csr(ioaddr, CSR5);
1417 a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
1418 }
1419
1420 spin_unlock_irqrestore(&lp->lock, flags);
1421}
1422
1423static const struct ethtool_ops pcnet32_ethtool_ops = {
1424 .get_settings = pcnet32_get_settings,
1425 .set_settings = pcnet32_set_settings,
1426 .get_drvinfo = pcnet32_get_drvinfo,
1427 .get_msglevel = pcnet32_get_msglevel,
1428 .set_msglevel = pcnet32_set_msglevel,
1429 .nway_reset = pcnet32_nway_reset,
1430 .get_link = pcnet32_get_link,
1431 .get_ringparam = pcnet32_get_ringparam,
1432 .set_ringparam = pcnet32_set_ringparam,
1433 .get_strings = pcnet32_get_strings,
1434 .self_test = pcnet32_ethtool_test,
1435 .set_phys_id = pcnet32_set_phys_id,
1436 .get_regs_len = pcnet32_get_regs_len,
1437 .get_regs = pcnet32_get_regs,
1438 .get_sset_count = pcnet32_get_sset_count,
1439};
1440
1441/* Probes only for non-PCI devices; the rest are handled by
1442 * pci_register_driver via pcnet32_probe_pci. */
1443
1444static void __devinit pcnet32_probe_vlbus(unsigned int *pcnet32_portlist)
1445{
1446 unsigned int *port, ioaddr;
1447
1448 /* search for PCnet32 VLB cards at known addresses */
1449 for (port = pcnet32_portlist; (ioaddr = *port); port++) {
1450 if (request_region
1451 (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) {
1452 /* check if there is really a pcnet chip on that ioaddr */
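			/* bytes 14 and 15 of the address PROM carry the
			 * 0x57,0x57 signature common to LANCE boards
			 */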
1453 if ((inb(ioaddr + 14) == 0x57) &&
1454 (inb(ioaddr + 15) == 0x57)) {
1455 pcnet32_probe1(ioaddr, 0, NULL);
1456 } else {
1457 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1458 }
1459 }
1460 }
1461}
1462
1463static int __devinit
1464pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
1465{
1466 unsigned long ioaddr;
1467 int err;
1468
1469 err = pci_enable_device(pdev);
1470 if (err < 0) {
1471 if (pcnet32_debug & NETIF_MSG_PROBE)
1472 pr_err("failed to enable device -- err=%d\n", err);
1473 return err;
1474 }
1475 pci_set_master(pdev);
1476
1477 ioaddr = pci_resource_start(pdev, 0);
1478 if (!ioaddr) {
1479 if (pcnet32_debug & NETIF_MSG_PROBE)
1480 pr_err("card has no PCI IO resources, aborting\n");
1481 return -ENODEV;
1482 }
1483
1484 if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
1485 if (pcnet32_debug & NETIF_MSG_PROBE)
1486 pr_err("architecture does not support 32bit PCI busmaster DMA\n");
1487 return -ENODEV;
1488 }
1489 if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
1490 if (pcnet32_debug & NETIF_MSG_PROBE)
1491 pr_err("io address range already allocated\n");
1492 return -EBUSY;
1493 }
1494
1495 err = pcnet32_probe1(ioaddr, 1, pdev);
1496 if (err < 0)
1497 pci_disable_device(pdev);
1498
1499 return err;
1500}
1501
1502static const struct net_device_ops pcnet32_netdev_ops = {
1503 .ndo_open = pcnet32_open,
1504 .ndo_stop = pcnet32_close,
1505 .ndo_start_xmit = pcnet32_start_xmit,
1506 .ndo_tx_timeout = pcnet32_tx_timeout,
1507 .ndo_get_stats = pcnet32_get_stats,
1508 .ndo_set_rx_mode = pcnet32_set_multicast_list,
1509 .ndo_do_ioctl = pcnet32_ioctl,
1510 .ndo_change_mtu = eth_change_mtu,
1511 .ndo_set_mac_address = eth_mac_addr,
1512 .ndo_validate_addr = eth_validate_addr,
1513#ifdef CONFIG_NET_POLL_CONTROLLER
1514 .ndo_poll_controller = pcnet32_poll_controller,
1515#endif
1516};
1517
1518/* pcnet32_probe1
1519 * Called from both pcnet32_probe_vlbus and pcnet32_probe_pci.
1520 * pdev will be NULL when called from pcnet32_probe_vlbus.
1521 */
1522static int __devinit
1523pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1524{
1525 struct pcnet32_private *lp;
1526 int i, media;
1527 int fdx, mii, fset, dxsuflo;
1528 int chip_version;
1529 char *chipname;
1530 struct net_device *dev;
1531 const struct pcnet32_access *a = NULL;
1532 u8 promaddr[6];
1533 int ret = -ENODEV;
1534
1535 /* reset the chip */
1536 pcnet32_wio_reset(ioaddr);
1537
1538 /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
1539 if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
1540 a = &pcnet32_wio;
1541 } else {
1542 pcnet32_dwio_reset(ioaddr);
1543 if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
1544 pcnet32_dwio_check(ioaddr)) {
1545 a = &pcnet32_dwio;
1546 } else {
1547 if (pcnet32_debug & NETIF_MSG_PROBE)
1548 pr_err("No access methods\n");
1549 goto err_release_region;
1550 }
1551 }
1552
1553 chip_version =
1554 a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
1555 if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
1556 pr_info(" PCnet chip version is %#x\n", chip_version);
1557 if ((chip_version & 0xfff) != 0x003) {
1558 if (pcnet32_debug & NETIF_MSG_PROBE)
1559 pr_info("Unsupported chip version\n");
1560 goto err_release_region;
1561 }
1562
1563 /* initialize variables */
1564 fdx = mii = fset = dxsuflo = 0;
1565 chip_version = (chip_version >> 12) & 0xffff;
1566
1567 switch (chip_version) {
1568 case 0x2420:
1569 chipname = "PCnet/PCI 79C970"; /* PCI */
1570 break;
1571 case 0x2430:
1572 if (shared)
1573 chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
1574 else
1575 chipname = "PCnet/32 79C965"; /* 486/VL bus */
1576 break;
1577 case 0x2621:
1578 chipname = "PCnet/PCI II 79C970A"; /* PCI */
1579 fdx = 1;
1580 break;
1581 case 0x2623:
1582 chipname = "PCnet/FAST 79C971"; /* PCI */
1583 fdx = 1;
1584 mii = 1;
1585 fset = 1;
1586 break;
1587 case 0x2624:
1588 chipname = "PCnet/FAST+ 79C972"; /* PCI */
1589 fdx = 1;
1590 mii = 1;
1591 fset = 1;
1592 break;
1593 case 0x2625:
1594 chipname = "PCnet/FAST III 79C973"; /* PCI */
1595 fdx = 1;
1596 mii = 1;
1597 break;
1598 case 0x2626:
1599 chipname = "PCnet/Home 79C978"; /* PCI */
1600 fdx = 1;
1601 /*
1602 * This is based on specs published at www.amd.com. This section
1603 * assumes that a card with a 79C978 wants to go into standard
1604 * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode,
1605 * and the module option homepna=1 can select this instead.
1606 */
1607 media = a->read_bcr(ioaddr, 49);
1608 media &= ~3; /* default to 10Mb ethernet */
1609 if (cards_found < MAX_UNITS && homepna[cards_found])
1610 media |= 1; /* switch to home wiring mode */
1611 if (pcnet32_debug & NETIF_MSG_PROBE)
1612 printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
1613 (media & 1) ? "1" : "10");
1614 a->write_bcr(ioaddr, 49, media);
1615 break;
1616 case 0x2627:
1617 chipname = "PCnet/FAST III 79C975"; /* PCI */
1618 fdx = 1;
1619 mii = 1;
1620 break;
1621 case 0x2628:
1622 chipname = "PCnet/PRO 79C976";
1623 fdx = 1;
1624 mii = 1;
1625 break;
1626 default:
1627 if (pcnet32_debug & NETIF_MSG_PROBE)
1628 pr_info("PCnet version %#x, no PCnet32 chip\n",
1629 chip_version);
1630 goto err_release_region;
1631 }
1632
1633 /*
1634 * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
1635 * from starting until the whole packet is loaded. Strike one for
1636 * reliability, lose one for latency - although on PCI this isn't a big
1637 * loss. Older chips have FIFOs smaller than a packet, so you can't do this.
1638 * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
1639 */
1640
1641 if (fset) {
1642 a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860));
1643 a->write_csr(ioaddr, 80,
1644 (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
1645 dxsuflo = 1;
1646 }
1647
1648 dev = alloc_etherdev(sizeof(*lp));
1649 if (!dev) {
1650 if (pcnet32_debug & NETIF_MSG_PROBE)
1651 pr_err("Memory allocation failed\n");
1652 ret = -ENOMEM;
1653 goto err_release_region;
1654 }
1655
1656 if (pdev)
1657 SET_NETDEV_DEV(dev, &pdev->dev);
1658
1659 if (pcnet32_debug & NETIF_MSG_PROBE)
1660 pr_info("%s at %#3lx,", chipname, ioaddr);
1661
1662 /* In most chips, after a chip reset, the ethernet address is read from the
1663 * station address PROM at the base address and programmed into the
1664 * "Physical Address Registers" CSR12-14.
1665 * As a precautionary measure, we read the PROM values and complain if
1666 * they disagree with the CSRs. If they miscompare, and the PROM addr
1667 * is valid, then the PROM addr is used.
1668 */
1669 for (i = 0; i < 3; i++) {
1670 unsigned int val;
1671 val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
1672 /* There may be endianness issues here. */
1673 dev->dev_addr[2 * i] = val & 0x0ff;
1674 dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
1675 }
1676
1677 /* read PROM address and compare with CSR address */
1678 for (i = 0; i < 6; i++)
1679 promaddr[i] = inb(ioaddr + i);
1680
1681 if (memcmp(promaddr, dev->dev_addr, 6) ||
1682 !is_valid_ether_addr(dev->dev_addr)) {
1683 if (is_valid_ether_addr(promaddr)) {
1684 if (pcnet32_debug & NETIF_MSG_PROBE) {
1685 pr_cont(" warning: CSR address invalid,\n");
1686 pr_info(" using instead PROM address of");
1687 }
1688 memcpy(dev->dev_addr, promaddr, 6);
1689 }
1690 }
1691 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1692
1693 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
1694 if (!is_valid_ether_addr(dev->perm_addr))
1695 memset(dev->dev_addr, 0, ETH_ALEN);
1696
1697 if (pcnet32_debug & NETIF_MSG_PROBE) {
1698 pr_cont(" %pM", dev->dev_addr);
1699
1700 /* Version 0x2623 and 0x2624 */
1701 if (((chip_version + 1) & 0xfffe) == 0x2624) {
1702 i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
1703 pr_info(" tx_start_pt(0x%04x):", i);
1704 switch (i >> 10) {
1705 case 0:
1706 pr_cont(" 20 bytes,");
1707 break;
1708 case 1:
1709 pr_cont(" 64 bytes,");
1710 break;
1711 case 2:
1712 pr_cont(" 128 bytes,");
1713 break;
1714 case 3:
1715 pr_cont("~220 bytes,");
1716 break;
1717 }
1718 i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
1719 pr_cont(" BCR18(%x):", i & 0xffff);
1720 if (i & (1 << 5))
1721 pr_cont("BurstWrEn ");
1722 if (i & (1 << 6))
1723 pr_cont("BurstRdEn ");
1724 if (i & (1 << 7))
1725 pr_cont("DWordIO ");
1726 if (i & (1 << 11))
1727 pr_cont("NoUFlow ");
1728 i = a->read_bcr(ioaddr, 25);
1729 pr_info(" SRAMSIZE=0x%04x,", i << 8);
1730 i = a->read_bcr(ioaddr, 26);
1731 pr_cont(" SRAM_BND=0x%04x,", i << 8);
1732 i = a->read_bcr(ioaddr, 27);
1733 if (i & (1 << 14))
1734 pr_cont("LowLatRx");
1735 }
1736 }
1737
1738 dev->base_addr = ioaddr;
1739 lp = netdev_priv(dev);
1740 /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
1741 lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
1742 &lp->init_dma_addr);
1743 if (!lp->init_block) {
1744 if (pcnet32_debug & NETIF_MSG_PROBE)
1745 pr_err("Consistent memory allocation failed\n");
1746 ret = -ENOMEM;
1747 goto err_free_netdev;
1748 }
1749 lp->pci_dev = pdev;
1750
1751 lp->dev = dev;
1752
1753 spin_lock_init(&lp->lock);
1754
1755 lp->name = chipname;
1756 lp->shared_irq = shared;
1757 lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
1758 lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */
1759 lp->tx_mod_mask = lp->tx_ring_size - 1;
1760 lp->rx_mod_mask = lp->rx_ring_size - 1;
1761 lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12);
1762 lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4);
1763 lp->mii_if.full_duplex = fdx;
1764 lp->mii_if.phy_id_mask = 0x1f;
1765 lp->mii_if.reg_num_mask = 0x1f;
1766 lp->dxsuflo = dxsuflo;
1767 lp->mii = mii;
1768 lp->chip_version = chip_version;
1769 lp->msg_enable = pcnet32_debug;
1770 if ((cards_found >= MAX_UNITS) ||
1771 (options[cards_found] >= sizeof(options_mapping)))
1772 lp->options = PCNET32_PORT_ASEL;
1773 else
1774 lp->options = options_mapping[options[cards_found]];
1775 lp->mii_if.dev = dev;
1776 lp->mii_if.mdio_read = mdio_read;
1777 lp->mii_if.mdio_write = mdio_write;
1778
1779 /* napi.weight is used in both the napi and non-napi cases */
1780 lp->napi.weight = lp->rx_ring_size / 2;
1781
1782 netif_napi_add(dev, &lp->napi, pcnet32_poll, lp->rx_ring_size / 2);
1783
1784 if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
1785 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
1786 lp->options |= PCNET32_PORT_FD;
1787
1788 lp->a = a;
1789
1790 /* prior to register_netdev, dev->name is not yet correct */
1791 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
1792 ret = -ENOMEM;
1793 goto err_free_ring;
1794 }
1795 /* detect special T1/E1 WAN card by checking for MAC address */
1796 if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
1797 dev->dev_addr[2] == 0x75)
1798 lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
1799
1800 lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
1801 lp->init_block->tlen_rlen =
1802 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
1803 for (i = 0; i < 6; i++)
1804 lp->init_block->phys_addr[i] = dev->dev_addr[i];
1805 lp->init_block->filter[0] = 0x00000000;
1806 lp->init_block->filter[1] = 0x00000000;
1807 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
1808 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
1809
1810 /* switch pcnet32 to 32bit mode */
1811 a->write_bcr(ioaddr, 20, 2);
1812
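	/* CSR1/CSR2 take the low and high 16 bits of the init block's bus
	 * address; the chip fetches the block when CSR0_INIT is set.
	 */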
1813 a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
1814 a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
1815
1816 if (pdev) { /* use the IRQ provided by PCI */
1817 dev->irq = pdev->irq;
1818 if (pcnet32_debug & NETIF_MSG_PROBE)
1819 pr_cont(" assigned IRQ %d\n", dev->irq);
1820 } else {
1821 unsigned long irq_mask = probe_irq_on();
1822
1823 /*
1824 * To auto-IRQ we enable the initialization-done and DMA error
1825 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
1826 * boards will work.
1827 */
1828 /* Trigger an initialization just for the interrupt. */
1829 a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_INIT);
1830 mdelay(1);
1831
1832 dev->irq = probe_irq_off(irq_mask);
1833 if (!dev->irq) {
1834 if (pcnet32_debug & NETIF_MSG_PROBE)
1835 pr_cont(", failed to detect IRQ line\n");
1836 ret = -ENODEV;
1837 goto err_free_ring;
1838 }
1839 if (pcnet32_debug & NETIF_MSG_PROBE)
1840 pr_cont(", probed IRQ %d\n", dev->irq);
1841 }
1842
1843 /* Set the mii phy_id so that we can query the link state */
1844 if (lp->mii) {
1845	/* lp->phycount and lp->phymask start out zero, since
1846	 * alloc_etherdev zeroes the private area */
1846
1847 lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
1848 /* scan for PHYs */
1849 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
1850 unsigned short id1, id2;
1851
1852 id1 = mdio_read(dev, i, MII_PHYSID1);
1853 if (id1 == 0xffff)
1854 continue;
1855 id2 = mdio_read(dev, i, MII_PHYSID2);
1856 if (id2 == 0xffff)
1857 continue;
1858 if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624)
1859 continue; /* 79C971 & 79C972 have phantom phy at id 31 */
1860 lp->phycount++;
1861 lp->phymask |= (1 << i);
1862 lp->mii_if.phy_id = i;
1863 if (pcnet32_debug & NETIF_MSG_PROBE)
1864 pr_info("Found PHY %04x:%04x at address %d\n",
1865 id1, id2, i);
1866 }
1867 lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
1868 if (lp->phycount > 1)
1869 lp->options |= PCNET32_PORT_MII;
1870 }
1871
1872 init_timer(&lp->watchdog_timer);
1873 lp->watchdog_timer.data = (unsigned long)dev;
1874 lp->watchdog_timer.function = (void *)&pcnet32_watchdog;
1875
1876 /* The PCNET32-specific entries in the device structure. */
1877 dev->netdev_ops = &pcnet32_netdev_ops;
1878 dev->ethtool_ops = &pcnet32_ethtool_ops;
1879 dev->watchdog_timeo = (5 * HZ);
1880
1881 /* Fill in the generic fields of the device structure. */
1882 if (register_netdev(dev))
1883 goto err_free_ring;
1884
1885 if (pdev) {
1886 pci_set_drvdata(pdev, dev);
1887 } else {
1888 lp->next = pcnet32_dev;
1889 pcnet32_dev = dev;
1890 }
1891
1892 if (pcnet32_debug & NETIF_MSG_PROBE)
1893 pr_info("%s: registered as %s\n", dev->name, lp->name);
1894 cards_found++;
1895
1896 /* enable LED writes */
1897 a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000);
1898
1899 return 0;
1900
1901err_free_ring:
1902 pcnet32_free_ring(dev);
1903 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
1904 lp->init_block, lp->init_dma_addr);
1905err_free_netdev:
1906 free_netdev(dev);
1907err_release_region:
1908 release_region(ioaddr, PCNET32_TOTAL_SIZE);
1909 return ret;
1910}
1911
1912/* if any allocation fails, caller must also call pcnet32_free_ring */
1913static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
1914{
1915 struct pcnet32_private *lp = netdev_priv(dev);
1916
1917 lp->tx_ring = pci_alloc_consistent(lp->pci_dev,
1918 sizeof(struct pcnet32_tx_head) *
1919 lp->tx_ring_size,
1920 &lp->tx_ring_dma_addr);
1921 if (lp->tx_ring == NULL) {
1922 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
1923 return -ENOMEM;
1924 }
1925
1926 lp->rx_ring = pci_alloc_consistent(lp->pci_dev,
1927 sizeof(struct pcnet32_rx_head) *
1928 lp->rx_ring_size,
1929 &lp->rx_ring_dma_addr);
1930 if (lp->rx_ring == NULL) {
1931 netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
1932 return -ENOMEM;
1933 }
1934
1935 lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
1936 GFP_ATOMIC);
1937 if (!lp->tx_dma_addr) {
1938 netif_err(lp, drv, dev, "Memory allocation failed\n");
1939 return -ENOMEM;
1940 }
1941
1942 lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
1943 GFP_ATOMIC);
1944 if (!lp->rx_dma_addr) {
1945 netif_err(lp, drv, dev, "Memory allocation failed\n");
1946 return -ENOMEM;
1947 }
1948
1949 lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
1950 GFP_ATOMIC);
1951 if (!lp->tx_skbuff) {
1952 netif_err(lp, drv, dev, "Memory allocation failed\n");
1953 return -ENOMEM;
1954 }
1955
1956 lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
1957 GFP_ATOMIC);
1958 if (!lp->rx_skbuff) {
1959 netif_err(lp, drv, dev, "Memory allocation failed\n");
1960 return -ENOMEM;
1961 }
1962
1963 return 0;
1964}
1965
1966static void pcnet32_free_ring(struct net_device *dev)
1967{
1968 struct pcnet32_private *lp = netdev_priv(dev);
1969
1970 kfree(lp->tx_skbuff);
1971 lp->tx_skbuff = NULL;
1972
1973 kfree(lp->rx_skbuff);
1974 lp->rx_skbuff = NULL;
1975
1976 kfree(lp->tx_dma_addr);
1977 lp->tx_dma_addr = NULL;
1978
1979 kfree(lp->rx_dma_addr);
1980 lp->rx_dma_addr = NULL;
1981
1982 if (lp->tx_ring) {
1983 pci_free_consistent(lp->pci_dev,
1984 sizeof(struct pcnet32_tx_head) *
1985 lp->tx_ring_size, lp->tx_ring,
1986 lp->tx_ring_dma_addr);
1987 lp->tx_ring = NULL;
1988 }
1989
1990 if (lp->rx_ring) {
1991 pci_free_consistent(lp->pci_dev,
1992 sizeof(struct pcnet32_rx_head) *
1993 lp->rx_ring_size, lp->rx_ring,
1994 lp->rx_ring_dma_addr);
1995 lp->rx_ring = NULL;
1996 }
1997}
1998
1999static int pcnet32_open(struct net_device *dev)
2000{
2001 struct pcnet32_private *lp = netdev_priv(dev);
2002 struct pci_dev *pdev = lp->pci_dev;
2003 unsigned long ioaddr = dev->base_addr;
2004 u16 val;
2005 int i;
2006 int rc;
2007 unsigned long flags;
2008
2009 if (request_irq(dev->irq, pcnet32_interrupt,
2010 lp->shared_irq ? IRQF_SHARED : 0, dev->name,
2011 (void *)dev)) {
2012 return -EAGAIN;
2013 }
2014
2015 spin_lock_irqsave(&lp->lock, flags);
2016 /* Check for a valid station address */
2017 if (!is_valid_ether_addr(dev->dev_addr)) {
2018 rc = -EINVAL;
2019 goto err_free_irq;
2020 }
2021
2022 /* Reset the PCNET32 */
2023 lp->a->reset(ioaddr);
2024
2025 /* switch pcnet32 to 32bit mode */
2026 lp->a->write_bcr(ioaddr, 20, 2);
2027
2028 netif_printk(lp, ifup, KERN_DEBUG, dev,
2029 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
2030 __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
2031 (u32) (lp->rx_ring_dma_addr),
2032 (u32) (lp->init_dma_addr));
2033
2034 /* set/reset autoselect bit */
2035 val = lp->a->read_bcr(ioaddr, 2) & ~2;
2036 if (lp->options & PCNET32_PORT_ASEL)
2037 val |= 2;
2038 lp->a->write_bcr(ioaddr, 2, val);
2039
2040 /* handle full duplex setting */
2041 if (lp->mii_if.full_duplex) {
2042 val = lp->a->read_bcr(ioaddr, 9) & ~3;
2043 if (lp->options & PCNET32_PORT_FD) {
2044 val |= 1;
2045 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
2046 val |= 2;
2047 } else if (lp->options & PCNET32_PORT_ASEL) {
2048	/* workaround for the xSeries 250; turn on for 79C975 only */
2049 if (lp->chip_version == 0x2627)
2050 val |= 3;
2051 }
2052 lp->a->write_bcr(ioaddr, 9, val);
2053 }
2054
2055 /* set/reset GPSI bit in test register */
2056 val = lp->a->read_csr(ioaddr, 124) & ~0x10;
2057 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
2058 val |= 0x10;
2059 lp->a->write_csr(ioaddr, 124, val);
2060
2061 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
2062 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
2063 (pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
2064 pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
2065 if (lp->options & PCNET32_PORT_ASEL) {
2066 lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
2067 netif_printk(lp, link, KERN_DEBUG, dev,
2068 "Setting 100Mb-Full Duplex\n");
2069 }
2070 }
2071 if (lp->phycount < 2) {
2072 /*
2073	 * 24 Jun 2004: according to AMD, in order to change the PHY,
2074 * DANAS (or DISPM for 79C976) must be set; then select the speed,
2075 * duplex, and/or enable auto negotiation, and clear DANAS
2076 */
2077 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
2078 lp->a->write_bcr(ioaddr, 32,
2079 lp->a->read_bcr(ioaddr, 32) | 0x0080);
2080	/* disable Auto Negotiation, set 10Mbps, HD */
2081 val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
2082 if (lp->options & PCNET32_PORT_FD)
2083 val |= 0x10;
2084 if (lp->options & PCNET32_PORT_100)
2085 val |= 0x08;
2086 lp->a->write_bcr(ioaddr, 32, val);
2087 } else {
2088 if (lp->options & PCNET32_PORT_ASEL) {
2089 lp->a->write_bcr(ioaddr, 32,
2090 lp->a->read_bcr(ioaddr,
2091 32) | 0x0080);
2092 /* enable auto negotiate, setup, disable fd */
2093 val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
2094 val |= 0x20;
2095 lp->a->write_bcr(ioaddr, 32, val);
2096 }
2097 }
2098 } else {
2099 int first_phy = -1;
2100 u16 bmcr;
2101 u32 bcr9;
2102 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
2103
2104 /*
2105	 * There is really no good way to handle multiple PHYs
2106	 * other than turning off all the automatics
2107 */
2108 val = lp->a->read_bcr(ioaddr, 2);
2109 lp->a->write_bcr(ioaddr, 2, val & ~2);
2110 val = lp->a->read_bcr(ioaddr, 32);
2111 lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
2112
2113 if (!(lp->options & PCNET32_PORT_ASEL)) {
2114 /* setup ecmd */
2115 ecmd.port = PORT_MII;
2116 ecmd.transceiver = XCVR_INTERNAL;
2117 ecmd.autoneg = AUTONEG_DISABLE;
2118 ethtool_cmd_speed_set(&ecmd,
2119 (lp->options & PCNET32_PORT_100) ?
2120 SPEED_100 : SPEED_10);
2121 bcr9 = lp->a->read_bcr(ioaddr, 9);
2122
2123 if (lp->options & PCNET32_PORT_FD) {
2124 ecmd.duplex = DUPLEX_FULL;
2125 bcr9 |= (1 << 0);
2126 } else {
2127 ecmd.duplex = DUPLEX_HALF;
2128	bcr9 &= ~(1 << 0);	/* clear FDEN for half duplex */
2129 }
2130 lp->a->write_bcr(ioaddr, 9, bcr9);
2131 }
2132
2133 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2134 if (lp->phymask & (1 << i)) {
2135 /* isolate all but the first PHY */
2136 bmcr = mdio_read(dev, i, MII_BMCR);
2137 if (first_phy == -1) {
2138 first_phy = i;
2139 mdio_write(dev, i, MII_BMCR,
2140 bmcr & ~BMCR_ISOLATE);
2141 } else {
2142 mdio_write(dev, i, MII_BMCR,
2143 bmcr | BMCR_ISOLATE);
2144 }
2145 /* use mii_ethtool_sset to setup PHY */
2146 lp->mii_if.phy_id = i;
2147 ecmd.phy_address = i;
2148 if (lp->options & PCNET32_PORT_ASEL) {
2149 mii_ethtool_gset(&lp->mii_if, &ecmd);
2150 ecmd.autoneg = AUTONEG_ENABLE;
2151 }
2152 mii_ethtool_sset(&lp->mii_if, &ecmd);
2153 }
2154 }
2155 lp->mii_if.phy_id = first_phy;
2156 netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
2157 }
2158
2159#ifdef DO_DXSUFLO
2160 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
2161 val = lp->a->read_csr(ioaddr, CSR3);
2162 val |= 0x40;
2163 lp->a->write_csr(ioaddr, CSR3, val);
2164 }
2165#endif
2166
2167 lp->init_block->mode =
2168 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2169 pcnet32_load_multicast(dev);
2170
2171 if (pcnet32_init_ring(dev)) {
2172 rc = -ENOMEM;
2173 goto err_free_ring;
2174 }
2175
2176 napi_enable(&lp->napi);
2177
2178 /* Re-initialize the PCNET32, and start it when done. */
2179 lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
2180 lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
2181
2182 lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
2183 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
2184
2185 netif_start_queue(dev);
2186
2187 if (lp->chip_version >= PCNET32_79C970A) {
2188 /* Print the link status and start the watchdog */
2189 pcnet32_check_media(dev, 1);
2190 mod_timer(&lp->watchdog_timer, PCNET32_WATCHDOG_TIMEOUT);
2191 }
2192
2193 i = 0;
2194 while (i++ < 100)
2195 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
2196 break;
2197 /*
2198 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
2199 * reports that doing so triggers a bug in the '974.
2200 */
2201 lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
2202
2203 netif_printk(lp, ifup, KERN_DEBUG, dev,
2204 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
2205 i,
2206 (u32) (lp->init_dma_addr),
2207 lp->a->read_csr(ioaddr, CSR0));
2208
2209 spin_unlock_irqrestore(&lp->lock, flags);
2210
2211 return 0; /* Always succeed */
2212
2213err_free_ring:
2214 /* free any allocated skbuffs */
2215 pcnet32_purge_rx_ring(dev);
2216
2217 /*
2218 * Switch back to 16bit mode to avoid problems with dumb
2219 * DOS packet driver after a warm reboot
2220 */
2221 lp->a->write_bcr(ioaddr, 20, 4);
2222
2223err_free_irq:
2224 spin_unlock_irqrestore(&lp->lock, flags);
2225 free_irq(dev->irq, dev);
2226 return rc;
2227}
2228
2229/*
2230 * The LANCE has been halted for one reason or another (busmaster memory
2231 * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
2232 * etc.). Modern LANCE variants always reload their ring-buffer
2233 * configuration when restarted, so we must reinitialize our ring
2234 * context before restarting. As part of this reinitialization,
2235 * find all packets still on the Tx ring and pretend that they had been
2236 * sent (in effect, drop the packets on the floor) - the higher-level
2237 * protocols will time out and retransmit. It'd be better to shuffle
2238 * these skbs to a temp list and then actually re-Tx them after
2239 * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
2240 */
2241
2242static void pcnet32_purge_tx_ring(struct net_device *dev)
2243{
2244 struct pcnet32_private *lp = netdev_priv(dev);
2245 int i;
2246
2247 for (i = 0; i < lp->tx_ring_size; i++) {
2248 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2249 wmb(); /* Make sure adapter sees owner change */
2250 if (lp->tx_skbuff[i]) {
2251 pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
2252 lp->tx_skbuff[i]->len,
2253 PCI_DMA_TODEVICE);
2254 dev_kfree_skb_any(lp->tx_skbuff[i]);
2255 }
2256 lp->tx_skbuff[i] = NULL;
2257 lp->tx_dma_addr[i] = 0;
2258 }
2259}
2260
2261/* Initialize the PCNET32 Rx and Tx rings. */
2262static int pcnet32_init_ring(struct net_device *dev)
2263{
2264 struct pcnet32_private *lp = netdev_priv(dev);
2265 int i;
2266
2267 lp->tx_full = 0;
2268 lp->cur_rx = lp->cur_tx = 0;
2269 lp->dirty_rx = lp->dirty_tx = 0;
2270
2271 for (i = 0; i < lp->rx_ring_size; i++) {
2272 struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
2273 if (rx_skbuff == NULL) {
2274 lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
2275 rx_skbuff = lp->rx_skbuff[i];
2276 if (!rx_skbuff) {
2277 /* there is not much we can do at this point */
2278 netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
2279 __func__);
2280 return -1;
2281 }
2282 skb_reserve(rx_skbuff, NET_IP_ALIGN);
2283 }
2284
2285 rmb();
2286 if (lp->rx_dma_addr[i] == 0)
2287 lp->rx_dma_addr[i] =
2288 pci_map_single(lp->pci_dev, rx_skbuff->data,
2289 PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
2290 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
2291 lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
2292 wmb(); /* Make sure owner changes after all others are visible */
2293 lp->rx_ring[i].status = cpu_to_le16(0x8000);
2294 }
2295 /* The Tx buffer address is filled in as needed, but we do need to clear
2296 * the upper ownership bit. */
2297 for (i = 0; i < lp->tx_ring_size; i++) {
2298 lp->tx_ring[i].status = 0; /* CPU owns buffer */
2299 wmb(); /* Make sure adapter sees owner change */
2300 lp->tx_ring[i].base = 0;
2301 lp->tx_dma_addr[i] = 0;
2302 }
2303
2304 lp->init_block->tlen_rlen =
2305 cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
2306 for (i = 0; i < 6; i++)
2307 lp->init_block->phys_addr[i] = dev->dev_addr[i];
2308 lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
2309 lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
2310 wmb(); /* Make sure all changes are visible */
2311 return 0;
2312}
2313
2314/* the pcnet32 has been issued a stop or reset. Wait for the stop bit
2315 * then flush the pending transmit operations, re-initialize the ring,
2316 * and tell the chip to initialize.
2317 */
2318static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
2319{
2320 struct pcnet32_private *lp = netdev_priv(dev);
2321 unsigned long ioaddr = dev->base_addr;
2322 int i;
2323
2324 /* wait for stop */
2325 for (i = 0; i < 100; i++)
2326 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
2327 break;
2328
2329 if (i >= 100)
2330 netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
2331 __func__);
2332
2333 pcnet32_purge_tx_ring(dev);
2334 if (pcnet32_init_ring(dev))
2335 return;
2336
2337 /* ReInit Ring */
2338 lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
2339 i = 0;
2340 while (i++ < 1000)
2341 if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
2342 break;
2343
2344 lp->a->write_csr(ioaddr, CSR0, csr0_bits);
2345}
2346
2347static void pcnet32_tx_timeout(struct net_device *dev)
2348{
2349 struct pcnet32_private *lp = netdev_priv(dev);
2350 unsigned long ioaddr = dev->base_addr, flags;
2351
2352 spin_lock_irqsave(&lp->lock, flags);
2353 /* Transmitter timeout, serious problems. */
2354 if (pcnet32_debug & NETIF_MSG_DRV)
2355 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
2356 dev->name, lp->a->read_csr(ioaddr, CSR0));
2357 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2358 dev->stats.tx_errors++;
2359 if (netif_msg_tx_err(lp)) {
2360 int i;
2361 printk(KERN_DEBUG
2362 " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
2363 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
2364 lp->cur_rx);
2365 for (i = 0; i < lp->rx_ring_size; i++)
2366 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2367 le32_to_cpu(lp->rx_ring[i].base),
2368 (-le16_to_cpu(lp->rx_ring[i].buf_length)) &
2369 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length),
2370 le16_to_cpu(lp->rx_ring[i].status));
2371 for (i = 0; i < lp->tx_ring_size; i++)
2372 printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ",
2373 le32_to_cpu(lp->tx_ring[i].base),
2374 (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff,
2375 le32_to_cpu(lp->tx_ring[i].misc),
2376 le16_to_cpu(lp->tx_ring[i].status));
2377 printk("\n");
2378 }
2379 pcnet32_restart(dev, CSR0_NORMAL);
2380
2381 dev->trans_start = jiffies; /* prevent tx timeout */
2382 netif_wake_queue(dev);
2383
2384 spin_unlock_irqrestore(&lp->lock, flags);
2385}
2386
2387static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2388 struct net_device *dev)
2389{
2390 struct pcnet32_private *lp = netdev_priv(dev);
2391 unsigned long ioaddr = dev->base_addr;
2392 u16 status;
2393 int entry;
2394 unsigned long flags;
2395
2396 spin_lock_irqsave(&lp->lock, flags);
2397
2398 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
2399 "%s() called, csr0 %4.4x\n",
2400 __func__, lp->a->read_csr(ioaddr, CSR0));
2401
2402 /* Default status -- will not enable Successful-TxDone
2403 * interrupt when that option is available to us.
2404 */
2405 status = 0x8300;
2406
2407 /* Fill in a Tx ring entry */
2408
2409 /* Mask to ring buffer boundary. */
2410 entry = lp->cur_tx & lp->tx_mod_mask;
2411
2412 /* Caution: the write order is important here, set the status
2413 * with the "ownership" bits last. */
2414
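	/* The chip expects buffer byte counts as two's-complement
	 * negative values.
	 */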
2415 lp->tx_ring[entry].length = cpu_to_le16(-skb->len);
2416
2417 lp->tx_ring[entry].misc = 0x00000000;
2418
2419 lp->tx_skbuff[entry] = skb;
2420 lp->tx_dma_addr[entry] =
2421 pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
2422 lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
2423 wmb(); /* Make sure owner changes after all others are visible */
2424 lp->tx_ring[entry].status = cpu_to_le16(status);
2425
2426 lp->cur_tx++;
2427 dev->stats.tx_bytes += skb->len;
2428
2429 /* Trigger an immediate send poll. */
2430 lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
2431
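	/* pcnet32_tx() zeroes 'base' when it reclaims a descriptor, so a
	 * nonzero base on the next slot means the ring is now full.
	 */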
2432 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2433 lp->tx_full = 1;
2434 netif_stop_queue(dev);
2435 }
2436 spin_unlock_irqrestore(&lp->lock, flags);
2437 return NETDEV_TX_OK;
2438}
2439
2440/* The PCNET32 interrupt handler. */
2441static irqreturn_t
2442pcnet32_interrupt(int irq, void *dev_id)
2443{
2444 struct net_device *dev = dev_id;
2445 struct pcnet32_private *lp;
2446 unsigned long ioaddr;
2447 u16 csr0;
2448 int boguscnt = max_interrupt_work;
2449
2450 ioaddr = dev->base_addr;
2451 lp = netdev_priv(dev);
2452
2453 spin_lock(&lp->lock);
2454
2455 csr0 = lp->a->read_csr(ioaddr, CSR0);
2456 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
2457 if (csr0 == 0xffff)
2458 break; /* PCMCIA remove happened */
2459 /* Acknowledge all of the current interrupt sources ASAP. */
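		/* Writing the set status bits back clears them; masking off
		 * 0x004f keeps INIT/STRT/STOP/TDMD/INEA from being set as a
		 * side effect.
		 */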
2460 lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
2461
2462 netif_printk(lp, intr, KERN_DEBUG, dev,
2463 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
2464 csr0, lp->a->read_csr(ioaddr, CSR0));
2465
2466 /* Log misc errors. */
2467 if (csr0 & 0x4000)
2468 dev->stats.tx_errors++; /* Tx babble. */
2469 if (csr0 & 0x1000) {
2470 /*
2471 * This happens when our receive ring is full. This
2472 * shouldn't be a problem as we will see normal rx
2473 * interrupts for the frames in the receive ring. But
2474 * there are some PCI chipsets (I can reproduce this
2475		 * on SP3G with Intel saturn chipset) which sometimes
2476		 * have problems and will fill up the receive
2477 * ring with error descriptors. In this situation we
2478 * don't get a rx interrupt, but a missed frame
2479 * interrupt sooner or later.
2480 */
2481 dev->stats.rx_errors++; /* Missed a Rx frame. */
2482 }
2483 if (csr0 & 0x0800) {
2484 netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
2485 csr0);
2486 /* unlike for the lance, there is no restart needed */
2487 }
2488 if (napi_schedule_prep(&lp->napi)) {
2489 u16 val;
2490 /* set interrupt masks */
2491 val = lp->a->read_csr(ioaddr, CSR3);
2492 val |= 0x5f00;
2493 lp->a->write_csr(ioaddr, CSR3, val);
2494
2495 __napi_schedule(&lp->napi);
2496 break;
2497 }
2498 csr0 = lp->a->read_csr(ioaddr, CSR0);
2499 }
2500
2501 netif_printk(lp, intr, KERN_DEBUG, dev,
2502 "exiting interrupt, csr0=%#4.4x\n",
2503 lp->a->read_csr(ioaddr, CSR0));
2504
2505 spin_unlock(&lp->lock);
2506
2507 return IRQ_HANDLED;
2508}
2509
2510static int pcnet32_close(struct net_device *dev)
2511{
2512 unsigned long ioaddr = dev->base_addr;
2513 struct pcnet32_private *lp = netdev_priv(dev);
2514 unsigned long flags;
2515
2516 del_timer_sync(&lp->watchdog_timer);
2517
2518 netif_stop_queue(dev);
2519 napi_disable(&lp->napi);
2520
2521 spin_lock_irqsave(&lp->lock, flags);
2522
2523 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
2524
2525 netif_printk(lp, ifdown, KERN_DEBUG, dev,
2526 "Shutting down ethercard, status was %2.2x\n",
2527 lp->a->read_csr(ioaddr, CSR0));
2528
2529 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
2530 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2531
2532 /*
2533 * Switch back to 16bit mode to avoid problems with dumb
2534 * DOS packet driver after a warm reboot
2535 */
2536 lp->a->write_bcr(ioaddr, 20, 4);
2537
2538 spin_unlock_irqrestore(&lp->lock, flags);
2539
2540 free_irq(dev->irq, dev);
2541
2542 spin_lock_irqsave(&lp->lock, flags);
2543
2544 pcnet32_purge_rx_ring(dev);
2545 pcnet32_purge_tx_ring(dev);
2546
2547 spin_unlock_irqrestore(&lp->lock, flags);
2548
2549 return 0;
2550}
2551
2552static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
2553{
2554 struct pcnet32_private *lp = netdev_priv(dev);
2555 unsigned long ioaddr = dev->base_addr;
2556 unsigned long flags;
2557
2558 spin_lock_irqsave(&lp->lock, flags);
2559 dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
2560 spin_unlock_irqrestore(&lp->lock, flags);
2561
2562 return &dev->stats;
2563}
2564
2565/* taken from the sunlance driver, which it took from the depca driver */
2566static void pcnet32_load_multicast(struct net_device *dev)
2567{
2568 struct pcnet32_private *lp = netdev_priv(dev);
2569 volatile struct pcnet32_init_block *ib = lp->init_block;
2570 volatile __le16 *mcast_table = (__le16 *)ib->filter;
2571 struct netdev_hw_addr *ha;
2572 unsigned long ioaddr = dev->base_addr;
2573 int i;
2574 u32 crc;
2575
2576 /* set all multicast bits */
2577 if (dev->flags & IFF_ALLMULTI) {
2578 ib->filter[0] = cpu_to_le32(~0U);
2579 ib->filter[1] = cpu_to_le32(~0U);
2580 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
2581 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
2582 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
2583 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
2584 return;
2585 }
2586 /* clear the multicast filter */
2587 ib->filter[0] = 0;
2588 ib->filter[1] = 0;
2589
2590 /* Add addresses */
2591 netdev_for_each_mc_addr(ha, dev) {
2592 crc = ether_crc_le(6, ha->addr);
2593 crc = crc >> 26;
2594 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
2595 }
2596 for (i = 0; i < 4; i++)
2597 lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
2598 le16_to_cpu(mcast_table[i]));
2599}
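/*
 * Editorial sketch (not part of the driver): how one multicast address
 * maps to a bit of the 64-bit logical address filter loaded above. The
 * top 6 bits of the little-endian CRC-32 of the MAC select one of 64
 * filter bits, spread across four 16-bit CSR words. The helper name and
 * return-value layout are illustrative only.
 */
static u16 example_mcast_filter_slot(const u8 *addr, unsigned int *bit)
{
	u32 crc = ether_crc_le(6, addr);	/* CRC-32 over the 6-byte MAC */

	crc >>= 26;		/* keep the 6 most significant bits (0..63) */
	*bit = crc & 0xf;	/* bit position within one 16-bit word */
	return crc >> 4;	/* which word: PCNET32_MC_FILTER + 0..3 */
}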
2600
2601/*
2602 * Set or clear the multicast filter for this adaptor.
2603 */
2604static void pcnet32_set_multicast_list(struct net_device *dev)
2605{
2606 unsigned long ioaddr = dev->base_addr, flags;
2607 struct pcnet32_private *lp = netdev_priv(dev);
2608 int csr15, suspended;
2609
2610 spin_lock_irqsave(&lp->lock, flags);
2611 suspended = pcnet32_suspend(dev, &flags, 0);
2612 csr15 = lp->a->read_csr(ioaddr, CSR15);
2613 if (dev->flags & IFF_PROMISC) {
2614 /* Log any net taps. */
2615 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
2616 lp->init_block->mode =
2617 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
2618 7);
2619 lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
2620 } else {
2621 lp->init_block->mode =
2622 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
2623 lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
2624 pcnet32_load_multicast(dev);
2625 }
2626
2627 if (suspended) {
2628 int csr5;
2629 /* clear SUSPEND (SPND) - CSR5 bit 0 */
2630 csr5 = lp->a->read_csr(ioaddr, CSR5);
2631 lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
2632 } else {
2633 lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
2634 pcnet32_restart(dev, CSR0_NORMAL);
2635 netif_wake_queue(dev);
2636 }
2637
2638 spin_unlock_irqrestore(&lp->lock, flags);
2639}
2640
2641/* This routine assumes that the lp->lock is held */
2642static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
2643{
2644 struct pcnet32_private *lp = netdev_priv(dev);
2645 unsigned long ioaddr = dev->base_addr;
2646 u16 val_out;
2647
2648 if (!lp->mii)
2649 return 0;
2650
2651 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2652 val_out = lp->a->read_bcr(ioaddr, 34);
2653
2654 return val_out;
2655}
2656
2657/* This routine assumes that the lp->lock is held */
2658static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
2659{
2660 struct pcnet32_private *lp = netdev_priv(dev);
2661 unsigned long ioaddr = dev->base_addr;
2662
2663 if (!lp->mii)
2664 return;
2665
2666 lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
2667 lp->a->write_bcr(ioaddr, 34, val);
2668}
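/*
 * Editorial sketch: the two helpers above implement MII access through
 * BCR33 (PHY address in bits 9:5, register number in bits 4:0) and BCR34
 * (data). A caller already holding lp->lock could check link state like
 * this; the helper name is hypothetical, MII_BMSR/BMSR_LSTATUS come from
 * <linux/mii.h>.
 */
static int example_phy_link_up(struct net_device *dev, int phy_id)
{
	/* BMSR latches link-down events, so read twice for current state */
	mdio_read(dev, phy_id, MII_BMSR);
	return !!(mdio_read(dev, phy_id, MII_BMSR) & BMSR_LSTATUS);
}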
2669
2670static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2671{
2672 struct pcnet32_private *lp = netdev_priv(dev);
2673 int rc;
2674 unsigned long flags;
2675
2676 /* SIOC[GS]MIIxxx ioctls */
2677 if (lp->mii) {
2678 spin_lock_irqsave(&lp->lock, flags);
2679 rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL);
2680 spin_unlock_irqrestore(&lp->lock, flags);
2681 } else {
2682 rc = -EOPNOTSUPP;
2683 }
2684
2685 return rc;
2686}
2687
2688static int pcnet32_check_otherphy(struct net_device *dev)
2689{
2690 struct pcnet32_private *lp = netdev_priv(dev);
2691 struct mii_if_info mii = lp->mii_if;
2692 u16 bmcr;
2693 int i;
2694
2695 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
2696 if (i == lp->mii_if.phy_id)
2697 continue; /* skip active phy */
2698 if (lp->phymask & (1 << i)) {
2699 mii.phy_id = i;
2700 if (mii_link_ok(&mii)) {
2701 /* found PHY with active link */
2702 netif_info(lp, link, dev, "Using PHY number %d\n",
2703 i);
2704
2705 /* isolate inactive phy */
2706 bmcr =
2707 mdio_read(dev, lp->mii_if.phy_id, MII_BMCR);
2708 mdio_write(dev, lp->mii_if.phy_id, MII_BMCR,
2709 bmcr | BMCR_ISOLATE);
2710
2711 /* de-isolate new phy */
2712 bmcr = mdio_read(dev, i, MII_BMCR);
2713 mdio_write(dev, i, MII_BMCR,
2714 bmcr & ~BMCR_ISOLATE);
2715
2716 /* set new phy address */
2717 lp->mii_if.phy_id = i;
2718 return 1;
2719 }
2720 }
2721 }
2722 return 0;
2723}
2724
2725/*
2726 * Show the status of the media. Similar to mii_check_media however it
2727 * correctly shows the link speed for all (tested) pcnet32 variants.
2728 * Devices with no mii just report link state without speed.
2729 *
2730 * Caller is assumed to hold and release the lp->lock.
2731 */
2732
2733static void pcnet32_check_media(struct net_device *dev, int verbose)
2734{
2735 struct pcnet32_private *lp = netdev_priv(dev);
2736 int curr_link;
2737 int prev_link = netif_carrier_ok(dev) ? 1 : 0;
2738 u32 bcr9;
2739
2740 if (lp->mii) {
2741 curr_link = mii_link_ok(&lp->mii_if);
2742 } else {
2743 ulong ioaddr = dev->base_addr; /* card base I/O address */
2744 curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
2745 }
2746 if (!curr_link) {
2747 if (prev_link || verbose) {
2748 netif_carrier_off(dev);
2749 netif_info(lp, link, dev, "link down\n");
2750 }
2751 if (lp->phycount > 1) {
2752 curr_link = pcnet32_check_otherphy(dev);
2753 prev_link = 0;
2754 }
2755 } else if (verbose || !prev_link) {
2756 netif_carrier_on(dev);
2757 if (lp->mii) {
2758 if (netif_msg_link(lp)) {
2759 struct ethtool_cmd ecmd = {
2760 .cmd = ETHTOOL_GSET };
2761 mii_ethtool_gset(&lp->mii_if, &ecmd);
2762 netdev_info(dev, "link up, %uMbps, %s-duplex\n",
2763 ethtool_cmd_speed(&ecmd),
2764 (ecmd.duplex == DUPLEX_FULL)
2765 ? "full" : "half");
2766 }
2767 bcr9 = lp->a->read_bcr(dev->base_addr, 9);
2768 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
2769 if (lp->mii_if.full_duplex)
2770 bcr9 |= (1 << 0);
2771 else
2772 bcr9 &= ~(1 << 0);
2773 lp->a->write_bcr(dev->base_addr, 9, bcr9);
2774 }
2775 } else {
2776 netif_info(lp, link, dev, "link up\n");
2777 }
2778 }
2779}
2780
2781/*
2782 * Check for loss of link and link establishment.
2783 * Cannot use mii_check_media because it does nothing if the mode is forced.
2784 */
2785
2786static void pcnet32_watchdog(struct net_device *dev)
2787{
2788 struct pcnet32_private *lp = netdev_priv(dev);
2789 unsigned long flags;
2790
2791 /* Print the link status if it has changed */
2792 spin_lock_irqsave(&lp->lock, flags);
2793 pcnet32_check_media(dev, 0);
2794 spin_unlock_irqrestore(&lp->lock, flags);
2795
2796 mod_timer(&lp->watchdog_timer, round_jiffies(PCNET32_WATCHDOG_TIMEOUT));
2797}
2798
2799static int pcnet32_pm_suspend(struct pci_dev *pdev, pm_message_t state)
2800{
2801 struct net_device *dev = pci_get_drvdata(pdev);
2802
2803 if (netif_running(dev)) {
2804 netif_device_detach(dev);
2805 pcnet32_close(dev);
2806 }
2807 pci_save_state(pdev);
2808 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2809 return 0;
2810}
2811
2812static int pcnet32_pm_resume(struct pci_dev *pdev)
2813{
2814 struct net_device *dev = pci_get_drvdata(pdev);
2815
2816 pci_set_power_state(pdev, PCI_D0);
2817 pci_restore_state(pdev);
2818
2819 if (netif_running(dev)) {
2820 pcnet32_open(dev);
2821 netif_device_attach(dev);
2822 }
2823 return 0;
2824}
2825
2826static void __devexit pcnet32_remove_one(struct pci_dev *pdev)
2827{
2828 struct net_device *dev = pci_get_drvdata(pdev);
2829
2830 if (dev) {
2831 struct pcnet32_private *lp = netdev_priv(dev);
2832
2833 unregister_netdev(dev);
2834 pcnet32_free_ring(dev);
2835 release_region(dev->base_addr, PCNET32_TOTAL_SIZE);
2836 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2837 lp->init_block, lp->init_dma_addr);
2838 free_netdev(dev);
2839 pci_disable_device(pdev);
2840 pci_set_drvdata(pdev, NULL);
2841 }
2842}
2843
2844static struct pci_driver pcnet32_driver = {
2845 .name = DRV_NAME,
2846 .probe = pcnet32_probe_pci,
2847 .remove = __devexit_p(pcnet32_remove_one),
2848 .id_table = pcnet32_pci_tbl,
2849 .suspend = pcnet32_pm_suspend,
2850 .resume = pcnet32_pm_resume,
2851};
2852
2853/* An additional parameter that may be passed in... */
2854static int debug = -1;
2855static int tx_start_pt = -1;
2856static int pcnet32_have_pci;
2857
2858module_param(debug, int, 0);
2859MODULE_PARM_DESC(debug, DRV_NAME " debug level");
2860module_param(max_interrupt_work, int, 0);
2861MODULE_PARM_DESC(max_interrupt_work,
2862 DRV_NAME " maximum events handled per interrupt");
2863module_param(rx_copybreak, int, 0);
2864MODULE_PARM_DESC(rx_copybreak,
2865 DRV_NAME " copy breakpoint for copy-only-tiny-frames");
2866module_param(tx_start_pt, int, 0);
2867MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
2868module_param(pcnet32vlb, int, 0);
2869MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
2870module_param_array(options, int, NULL, 0);
2871MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
2872module_param_array(full_duplex, int, NULL, 0);
2873MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
2874/* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */
2875module_param_array(homepna, int, NULL, 0);
2876MODULE_PARM_DESC(homepna,
2877 DRV_NAME
2878 " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet");
2879
2880MODULE_AUTHOR("Thomas Bogendoerfer");
2881MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards");
2882MODULE_LICENSE("GPL");
2883
2884#define PCNET32_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
2885
2886static int __init pcnet32_init_module(void)
2887{
2888 pr_info("%s", version);
2889
2890 pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
2891
2892 if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
2893 tx_start = tx_start_pt;
2894
2895 /* find the PCI devices */
2896 if (!pci_register_driver(&pcnet32_driver))
2897 pcnet32_have_pci = 1;
2898
2899 /* should we find any remaining VLbus devices ? */
2900 if (pcnet32vlb)
2901 pcnet32_probe_vlbus(pcnet32_portlist);
2902
2903 if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
2904 pr_info("%d cards found\n", cards_found);
2905
2906 return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
2907}
2908
2909static void __exit pcnet32_cleanup_module(void)
2910{
2911 struct net_device *next_dev;
2912
2913 while (pcnet32_dev) {
2914 struct pcnet32_private *lp = netdev_priv(pcnet32_dev);
2915 next_dev = lp->next;
2916 unregister_netdev(pcnet32_dev);
2917 pcnet32_free_ring(pcnet32_dev);
2918 release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
2919 pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
2920 lp->init_block, lp->init_dma_addr);
2921 free_netdev(pcnet32_dev);
2922 pcnet32_dev = next_dev;
2923 }
2924
2925 if (pcnet32_have_pci)
2926 pci_unregister_driver(&pcnet32_driver);
2927}
2928
2929module_init(pcnet32_init_module);
2930module_exit(pcnet32_cleanup_module);
2931
2932/*
2933 * Local variables:
2934 * c-indent-level: 4
2935 * tab-width: 8
2936 * End:
2937 */
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
new file mode 100644
index 000000000000..080b71fcc683
--- /dev/null
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -0,0 +1,961 @@
1/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
2/*
3
4 Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
5 This driver is a part of the linux kernel, and is thus distributed
6 under the GNU General Public License.
7
8 The values used for LANCE_OBIO and LANCE_IRQ were determined
9 empirically and appear to be the correct address and IRQ for the
10 lance registers. They have not been widely tested, however. What we
11 probably need is a "proper" way to search for a device in the sun3's
12 prom, but, alas, linux has no such thing.
13
14 This driver is largely based on atarilance.c, by Roman Hodek. Other
15 sources of inspiration were the NetBSD sun3 am7990 driver, and the
16 linux sparc lance driver (sunlance.c).
17
18 There are more assumptions made throughout this driver; it almost
19 certainly still needs work, but it does work at least for RARP/BOOTP and
20 mounting the root NFS filesystem.
21
22*/
23
24static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n";
25
26#include <linux/module.h>
27#include <linux/stddef.h>
28#include <linux/kernel.h>
29#include <linux/string.h>
30#include <linux/errno.h>
31#include <linux/interrupt.h>
32#include <linux/init.h>
33#include <linux/ioport.h>
34#include <linux/delay.h>
35#include <linux/netdevice.h>
36#include <linux/etherdevice.h>
37#include <linux/skbuff.h>
38#include <linux/bitops.h>
39
40#include <asm/cacheflush.h>
41#include <asm/setup.h>
42#include <asm/irq.h>
43#include <asm/io.h>
44#include <asm/pgtable.h>
45#include <asm/dvma.h>
46#include <asm/idprom.h>
47#include <asm/machines.h>
48
49#ifdef CONFIG_SUN3
50#include <asm/sun3mmu.h>
51#else
52#include <asm/sun3xprom.h>
53#endif
54
55/* sun3/60 addr/irq for the lance chip. If your sun is different,
56 change this. */
57#define LANCE_OBIO 0x120000
58#define LANCE_IRQ IRQ_AUTO_3
59
60/* Debug level:
61 * 0 = silent, print only serious errors
62 * 1 = normal, print error messages
63 * 2 = debug, print debug infos
64 * 3 = debug, print even more debug infos (packet data)
65 */
66
67#define LANCE_DEBUG 0
68
69#ifdef LANCE_DEBUG
70static int lance_debug = LANCE_DEBUG;
71#else
72static int lance_debug = 1;
73#endif
74module_param(lance_debug, int, 0);
75MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
76MODULE_LICENSE("GPL");
77
78#define DPRINTK(n,a) \
79 do { \
80 if (lance_debug >= n) \
81 printk a; \
82 } while( 0 )
83
84
85/* We only have a limited amount of DVMA memory, so we use 8 TX
86 buffers and 32 RX buffers. These counts are expressed below as log2. */
87
88#define TX_LOG_RING_SIZE 3
89#define RX_LOG_RING_SIZE 5
90
91/* These are the derived values */
92
93#define TX_RING_SIZE (1 << TX_LOG_RING_SIZE)
94#define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5)
95#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
96
97#define RX_RING_SIZE (1 << RX_LOG_RING_SIZE)
98#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
99#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
100
101/* Definitions for packet buffer access: */
102#define PKT_BUF_SZ 1544
103
104/* Get the address of a packet buffer corresponding to a given buffer head */
105#define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base)
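/*
 * Editorial note: PKTBUF_ADDR() works because the whole lance_memory
 * block is allocated 64K-aligned (see the dvma_malloc_align() call in
 * lance_probe()), so a descriptor's 16-bit 'base' is exactly the low
 * half of the buffer address and OR-ing it into MEM recovers the full
 * pointer. Hypothetical values for illustration:
 *
 *	MEM         = 0xfe880000   (low 16 bits zero, 64K aligned)
 *	head->base  = 0x1a00
 *	PKTBUF_ADDR = 0xfe880000 | 0x1a00 = 0xfe881a00
 */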
106
107
108/* The LANCE Rx and Tx ring descriptors. */
109struct lance_rx_head {
110 unsigned short base; /* Low word of base addr */
111 volatile unsigned char flag;
112 unsigned char base_hi; /* High word of base addr (unused) */
113 short buf_length; /* This length is 2s complement! */
114 volatile short msg_length; /* This length is "normal". */
115};
116
117struct lance_tx_head {
118 unsigned short base; /* Low word of base addr */
119 volatile unsigned char flag;
120 unsigned char base_hi; /* High word of base addr (unused) */
121 short length; /* Length is 2s complement! */
122 volatile short misc;
123};
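/*
 * Editorial note on the "2s complement" lengths above: the chip wants
 * buffer sizes stored as negative 16-bit values, and this driver also
 * ORs in 0xf000 (see lance_init_ring()/lance_start_xmit() below) so the
 * top four bits of the field are always ones. Worked example for a
 * 64-byte frame:
 *
 *	len             = 64      (0x0040)
 *	-len            = 0xffc0  (as an unsigned 16-bit pattern)
 *	(-len) | 0xf000 = 0xffc0  (top nibble already set for len < 4096)
 *
 * Decoding is simply len = -(short)field, which is why the debug dumps
 * print -buf_length and -length.
 */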
124
125/* The LANCE initialization block, described in databook. */
126struct lance_init_block {
127 unsigned short mode; /* Pre-set mode */
128 unsigned char hwaddr[6]; /* Physical ethernet address */
129 unsigned int filter[2]; /* Multicast filter (unused). */
130 /* Receive and transmit ring base, along with length bits. */
131 unsigned short rdra;
132 unsigned short rlen;
133 unsigned short tdra;
134 unsigned short tlen;
135 unsigned short pad[4]; /* is this needed? */
136};
137
138/* The whole layout of the Lance shared memory */
139struct lance_memory {
140 struct lance_init_block init;
141 struct lance_tx_head tx_head[TX_RING_SIZE];
142 struct lance_rx_head rx_head[RX_RING_SIZE];
143 char rx_data[RX_RING_SIZE][PKT_BUF_SZ];
144 char tx_data[TX_RING_SIZE][PKT_BUF_SZ];
145};
146
147/* The driver's private device structure */
148
149struct lance_private {
150 volatile unsigned short *iobase;
151 struct lance_memory *mem;
152 int new_rx, new_tx; /* The next free ring entry */
153 int old_tx, old_rx; /* ring entry to be processed */
154/* These two must be longs for set_bit() */
155 long tx_full;
156 long lock;
157};
158
159/* I/O register access macros */
160
161#define MEM lp->mem
162#define DREG lp->iobase[0]
163#define AREG lp->iobase[1]
164#define REGA(a) (*( AREG = (a), &DREG ))
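/*
 * Editorial sketch: the LANCE has just two ports -- an address port
 * (AREG/RAP) and a data port (DREG/RDP). REGA(a) relies on the comma
 * operator: it latches CSR number 'a' into RAP and then evaluates to an
 * lvalue for RDP, so REGA(CSR0) = CSR0_STOP expands to the two-step
 * sequence below (the helper name is illustrative only):
 */
static inline void example_csr_write(struct lance_private *lp,
				     unsigned short csr, unsigned short val)
{
	lp->iobase[1] = csr;	/* select the CSR via RAP (AREG) */
	lp->iobase[0] = val;	/* then access it via RDP (DREG) */
}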
165
166/* Definitions for the Lance */
167
168/* tx_head flags */
169#define TMD1_ENP 0x01 /* end of packet */
170#define TMD1_STP 0x02 /* start of packet */
171#define TMD1_DEF 0x04 /* deferred */
172#define TMD1_ONE 0x08 /* one retry needed */
173#define TMD1_MORE 0x10 /* more than one retry needed */
174#define TMD1_ERR 0x40 /* error summary */
175#define TMD1_OWN 0x80 /* ownership (set: chip owns) */
176
177#define TMD1_OWN_CHIP TMD1_OWN
178#define TMD1_OWN_HOST 0
179
180/* tx_head misc field */
181#define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */
182#define TMD3_RTRY 0x0400 /* failed after 16 retries */
183#define TMD3_LCAR 0x0800 /* carrier lost */
184#define TMD3_LCOL 0x1000 /* late collision */
185#define TMD3_UFLO 0x4000 /* underflow (late memory) */
186#define TMD3_BUFF 0x8000 /* buffering error (no ENP) */
187
188/* rx_head flags */
189#define RMD1_ENP 0x01 /* end of packet */
190#define RMD1_STP 0x02 /* start of packet */
191#define RMD1_BUFF 0x04 /* buffer error */
192#define RMD1_CRC 0x08 /* CRC error */
193#define RMD1_OFLO 0x10 /* overflow */
194#define RMD1_FRAM 0x20 /* framing error */
195#define RMD1_ERR 0x40 /* error summary */
196#define RMD1_OWN 0x80 /* ownership (set: chip owns) */
197
198#define RMD1_OWN_CHIP RMD1_OWN
199#define RMD1_OWN_HOST 0
200
201/* register names */
202#define CSR0 0 /* mode/status */
203#define CSR1 1 /* init block addr (low) */
204#define CSR2 2 /* init block addr (high) */
205#define CSR3 3 /* misc */
206#define CSR8 8 /* address filter */
207#define CSR15 15 /* promiscuous mode */
208
209/* CSR0 */
210/* (R=readable, W=writeable, S=set on write, C=clear on write) */
211#define CSR0_INIT 0x0001 /* initialize (RS) */
212#define CSR0_STRT 0x0002 /* start (RS) */
213#define CSR0_STOP 0x0004 /* stop (RS) */
214#define CSR0_TDMD 0x0008 /* transmit demand (RS) */
215#define CSR0_TXON 0x0010 /* transmitter on (R) */
216#define CSR0_RXON 0x0020 /* receiver on (R) */
217#define CSR0_INEA 0x0040 /* interrupt enable (RW) */
218#define CSR0_INTR 0x0080 /* interrupt active (R) */
219#define CSR0_IDON 0x0100 /* initialization done (RC) */
220#define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */
221#define CSR0_RINT 0x0400 /* receiver interrupt (RC) */
222#define CSR0_MERR 0x0800 /* memory error (RC) */
223#define CSR0_MISS 0x1000 /* missed frame (RC) */
224#define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */
225#define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */
226#define CSR0_ERR 0x8000 /* error (RC) */
227
228/* CSR3 */
229#define CSR3_BCON 0x0001 /* byte control */
230#define CSR3_ACON 0x0002 /* ALE control */
231#define CSR3_BSWP 0x0004 /* byte swap (1=big endian) */
232
233/***************************** Prototypes *****************************/
234
235static int lance_probe( struct net_device *dev);
236static int lance_open( struct net_device *dev );
237static void lance_init_ring( struct net_device *dev );
238static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev );
239static irqreturn_t lance_interrupt( int irq, void *dev_id);
240static int lance_rx( struct net_device *dev );
241static int lance_close( struct net_device *dev );
242static void set_multicast_list( struct net_device *dev );
243
244/************************* End of Prototypes **************************/
245
246struct net_device * __init sun3lance_probe(int unit)
247{
248 struct net_device *dev;
249 static int found;
250 int err = -ENODEV;
251
252 if (!MACH_IS_SUN3 && !MACH_IS_SUN3X)
253 return ERR_PTR(-ENODEV);
254
255 /* check that this machine has an onboard lance */
256 switch(idprom->id_machtype) {
257 case SM_SUN3|SM_3_50:
258 case SM_SUN3|SM_3_60:
259 case SM_SUN3X|SM_3_80:
260 /* these machines have lance */
261 break;
262
263 default:
264 return ERR_PTR(-ENODEV);
265 }
266
267 if (found)
268 return ERR_PTR(-ENODEV);
269
270 dev = alloc_etherdev(sizeof(struct lance_private));
271 if (!dev)
272 return ERR_PTR(-ENOMEM);
273 if (unit >= 0) {
274 sprintf(dev->name, "eth%d", unit);
275 netdev_boot_setup_check(dev);
276 }
277
278 if (!lance_probe(dev))
279 goto out;
280
281 err = register_netdev(dev);
282 if (err)
283 goto out1;
284 found = 1;
285 return dev;
286
287out1:
288#ifdef CONFIG_SUN3
289 iounmap((void __iomem *)dev->base_addr);
290#endif
291out:
292 free_netdev(dev);
293 return ERR_PTR(err);
294}
295
296static const struct net_device_ops lance_netdev_ops = {
297 .ndo_open = lance_open,
298 .ndo_stop = lance_close,
299 .ndo_start_xmit = lance_start_xmit,
300 .ndo_set_rx_mode = set_multicast_list,
301 .ndo_set_mac_address = NULL,
302 .ndo_change_mtu = eth_change_mtu,
303 .ndo_validate_addr = eth_validate_addr,
304};
305
306static int __init lance_probe( struct net_device *dev)
307{
308 unsigned long ioaddr;
309
310 struct lance_private *lp;
311 int i;
312 static int did_version;
313 volatile unsigned short *ioaddr_probe;
314 unsigned short tmp1, tmp2;
315
316#ifdef CONFIG_SUN3
317 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
318 if (!ioaddr)
319 return 0;
320#else
321 ioaddr = SUN3X_LANCE;
322#endif
323
324 /* test to see if there's really a lance here */
325 /* (CSR0_INIT shouldn't be readable) */
326
327 ioaddr_probe = (volatile unsigned short *)ioaddr;
328 tmp1 = ioaddr_probe[0];
329 tmp2 = ioaddr_probe[1];
330
331 ioaddr_probe[1] = CSR0;
332 ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;
333
334 if(ioaddr_probe[0] != CSR0_STOP) {
335 ioaddr_probe[0] = tmp1;
336 ioaddr_probe[1] = tmp2;
337
338#ifdef CONFIG_SUN3
339 iounmap((void __iomem *)ioaddr);
340#endif
341 return 0;
342 }
343
344 lp = netdev_priv(dev);
345
346 /* XXX - leak? */
347 MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
348 if (MEM == NULL) {
349#ifdef CONFIG_SUN3
350 iounmap((void __iomem *)ioaddr);
351#endif
352 printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n");
353 return 0;
354 }
355
356 lp->iobase = (volatile unsigned short *)ioaddr;
357 dev->base_addr = (unsigned long)ioaddr; /* informational only */
358
359 REGA(CSR0) = CSR0_STOP;
360
361 if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) {
362#ifdef CONFIG_SUN3
363 iounmap((void __iomem *)ioaddr);
364#endif
365 dvma_free((void *)MEM);
366 printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n");
367 return 0;
368 }
369 dev->irq = (unsigned short)LANCE_IRQ;
370
371
372 printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
373 dev->name,
374 (unsigned long)ioaddr,
375 (unsigned long)MEM,
376 dev->irq);
377
378 /* copy in the ethernet address from the prom */
379 for(i = 0; i < 6 ; i++)
380 dev->dev_addr[i] = idprom->id_ethaddr[i];
381
382 /* tell the card its ether address, bytes swapped */
383 MEM->init.hwaddr[0] = dev->dev_addr[1];
384 MEM->init.hwaddr[1] = dev->dev_addr[0];
385 MEM->init.hwaddr[2] = dev->dev_addr[3];
386 MEM->init.hwaddr[3] = dev->dev_addr[2];
387 MEM->init.hwaddr[4] = dev->dev_addr[5];
388 MEM->init.hwaddr[5] = dev->dev_addr[4];
389
390 printk("%pM\n", dev->dev_addr);
391
392 MEM->init.mode = 0x0000;
393 MEM->init.filter[0] = 0x00000000;
394 MEM->init.filter[1] = 0x00000000;
395 MEM->init.rdra = dvma_vtob(MEM->rx_head);
396 MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
397 (dvma_vtob(MEM->rx_head) >> 16);
398 MEM->init.tdra = dvma_vtob(MEM->tx_head);
399 MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
400 (dvma_vtob(MEM->tx_head) >> 16);
401
402 DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
403 dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
404 (dvma_vtob(MEM->tx_head))));
405
406 if (did_version++ == 0)
407 printk( version );
408
409 dev->netdev_ops = &lance_netdev_ops;
410// KLUDGE -- REMOVE ME
411 set_bit(__LINK_STATE_PRESENT, &dev->state);
412
413
414 return 1;
415}
416
417static int lance_open( struct net_device *dev )
418{
419 struct lance_private *lp = netdev_priv(dev);
420 int i;
421
422 DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));
423
424 REGA(CSR0) = CSR0_STOP;
425
426 lance_init_ring(dev);
427
428 /* From now on, AREG is kept to point to CSR0 */
429 REGA(CSR0) = CSR0_INIT;
430
431 i = 1000000;
432 while (--i > 0)
433 if (DREG & CSR0_IDON)
434 break;
435 if (i <= 0 || (DREG & CSR0_ERR)) {
436 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
437 dev->name, i, DREG ));
438 DREG = CSR0_STOP;
439 return -EIO;
440 }
441
442 DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;
443
444 netif_start_queue(dev);
445
446 DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));
447
448 return 0;
449}
450
451
452/* Initialize the LANCE Rx and Tx rings. */
453
454static void lance_init_ring( struct net_device *dev )
455{
456 struct lance_private *lp = netdev_priv(dev);
457 int i;
458
459 lp->lock = 0;
460 lp->tx_full = 0;
461 lp->new_rx = lp->new_tx = 0;
462 lp->old_rx = lp->old_tx = 0;
463
464 for( i = 0; i < TX_RING_SIZE; i++ ) {
465 MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
466 MEM->tx_head[i].flag = 0;
467 MEM->tx_head[i].base_hi =
468 (dvma_vtob(MEM->tx_data[i])) >>16;
469 MEM->tx_head[i].length = 0;
470 MEM->tx_head[i].misc = 0;
471 }
472
473 for( i = 0; i < RX_RING_SIZE; i++ ) {
474 MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
475 MEM->rx_head[i].flag = RMD1_OWN_CHIP;
476 MEM->rx_head[i].base_hi =
477 (dvma_vtob(MEM->rx_data[i])) >> 16;
478 MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000;
479 MEM->rx_head[i].msg_length = 0;
480 }
481
482 /* tell the card its ether address, bytes swapped */
483 MEM->init.hwaddr[0] = dev->dev_addr[1];
484 MEM->init.hwaddr[1] = dev->dev_addr[0];
485 MEM->init.hwaddr[2] = dev->dev_addr[3];
486 MEM->init.hwaddr[3] = dev->dev_addr[2];
487 MEM->init.hwaddr[4] = dev->dev_addr[5];
488 MEM->init.hwaddr[5] = dev->dev_addr[4];
489
490 MEM->init.mode = 0x0000;
491 MEM->init.filter[0] = 0x00000000;
492 MEM->init.filter[1] = 0x00000000;
493 MEM->init.rdra = dvma_vtob(MEM->rx_head);
494 MEM->init.rlen = (RX_LOG_RING_SIZE << 13) |
495 (dvma_vtob(MEM->rx_head) >> 16);
496 MEM->init.tdra = dvma_vtob(MEM->tx_head);
497 MEM->init.tlen = (TX_LOG_RING_SIZE << 13) |
498 (dvma_vtob(MEM->tx_head) >> 16);
499
500
501 /* tell the lance the address of its init block */
502 REGA(CSR1) = dvma_vtob(&(MEM->init));
503 REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;
504
505#ifdef CONFIG_SUN3X
506 REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
507#else
508 REGA(CSR3) = CSR3_BSWP;
509#endif
510
511}
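/*
 * Editorial note: the init block splits each 24-bit ring address across
 * two 16-bit words -- rdra/tdra hold address bits 15:0, while rlen/tlen
 * pack the ring-length code into bits 15:13 and address bits 23:16 into
 * the low byte. That is what the (LOG_RING_SIZE << 13) | (addr >> 16)
 * expressions above construct.
 */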
512
513
514static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
515{
516 struct lance_private *lp = netdev_priv(dev);
517 int entry, len;
518 struct lance_tx_head *head;
519 unsigned long flags;
520
521 DPRINTK( 1, ( "%s: transmit start.\n",
522 dev->name));
523
524 /* Transmitter timeout, serious problems. */
525 if (netif_queue_stopped(dev)) {
526 int tickssofar = jiffies - dev_trans_start(dev);
527 if (tickssofar < HZ/5)
528 return NETDEV_TX_BUSY;
529
530 DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
531 dev->name, DREG ));
532 DREG = CSR0_STOP;
533 /*
534 * Always set BSWP after a STOP as STOP puts it back into
535 * little endian mode.
536 */
537 REGA(CSR3) = CSR3_BSWP;
538 dev->stats.tx_errors++;
539
540 if(lance_debug >= 2) {
541 int i;
542 printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
543 lp->old_tx, lp->new_tx,
544 lp->tx_full ? " (full)" : "",
545 lp->new_rx );
546 for( i = 0 ; i < RX_RING_SIZE; i++ )
547 printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
548 i, MEM->rx_head[i].base,
549 -MEM->rx_head[i].buf_length,
550 MEM->rx_head[i].msg_length);
551 for( i = 0 ; i < TX_RING_SIZE; i++ )
552 printk("tx #%d: base=%04x len=%04x misc=%04x\n",
553 i, MEM->tx_head[i].base,
554 -MEM->tx_head[i].length,
555 MEM->tx_head[i].misc );
556 }
557
558 lance_init_ring(dev);
559 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
560
561 netif_start_queue(dev);
562
563 return NETDEV_TX_OK;
564 }
565
566
567 /* Block a timer-based transmit from overlapping with us by
568 stopping the queue for a bit. (This could better be done with
569 atomic_swap(1, dev->tbusy), but set_bit() works as well.) */
570
571
572
573 netif_stop_queue(dev);
574
575 if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
576 printk( "%s: tx queue lock!\n", dev->name);
577 /* don't clear dev->tbusy flag. */
578 return NETDEV_TX_BUSY;
579 }
580
581 AREG = CSR0;
582 DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
583 dev->name, DREG ));
584
585#ifdef CONFIG_SUN3X
586 /* this weirdness doesn't appear on sun3... */
587 if(!(DREG & CSR0_INIT)) {
588 DPRINTK( 1, ("INIT not set, reinitializing...\n"));
589 REGA( CSR0 ) = CSR0_STOP;
590 lance_init_ring(dev);
591 REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
592 }
593#endif
594
595 /* Fill in a Tx ring entry */
596#if 0
597 if (lance_debug >= 2) {
598 printk( "%s: TX pkt %d type 0x%04x"
599 " from %s to %s"
600 " data at 0x%08x len %d\n",
601 dev->name, lp->new_tx, ((u_short *)skb->data)[6],
602 DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
603 (int)skb->data, (int)skb->len );
604 }
605#endif
606 /* We're not prepared for the int until the last flags are set/reset.
607 * And the int may happen already after setting the OWN_CHIP... */
608 local_irq_save(flags);
609
610 /* Mask to ring buffer boundary. */
611 entry = lp->new_tx;
612 head = &(MEM->tx_head[entry]);
613
614 /* Caution: the write order is important here, set the "ownership" bits
615 * last.
616 */
617
618 /* the sun3's lance needs its buffer padded to the minimum
619 size */
620 len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
621
622// head->length = -len;
623 head->length = (-len) | 0xf000;
624 head->misc = 0;
625
626 skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
627 if (len != skb->len)
628 memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);
629
630 head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
631 lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
632 dev->stats.tx_bytes += skb->len;
633
634 /* Trigger an immediate send poll. */
635 REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
636 AREG = CSR0;
637 DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
638 dev->name, DREG ));
639 dev_kfree_skb(skb);
640
641 lp->lock = 0;
642 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
643 TMD1_OWN_HOST)
644 netif_start_queue(dev);
645
646 local_irq_restore(flags);
647
648 return NETDEV_TX_OK;
649}
650
651/* The LANCE interrupt handler. */
652
653static irqreturn_t lance_interrupt( int irq, void *dev_id)
654{
655 struct net_device *dev = dev_id;
656 struct lance_private *lp = netdev_priv(dev);
657 int csr0;
658 static int in_interrupt;
659
660 if (dev == NULL) {
661 DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
662 return IRQ_NONE;
663 }
664
665 if (in_interrupt)
666 DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
667 in_interrupt = 1;
668
669 still_more:
670 flush_cache_all();
671
672 AREG = CSR0;
673 csr0 = DREG;
674
675 /* ack interrupts */
676 DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
677
678 /* clear errors */
679 if(csr0 & CSR0_ERR)
680 DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;
681
682
683 DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n",
684 dev->name, csr0, DREG ));
685
686 if (csr0 & CSR0_TINT) { /* Tx-done interrupt */
687 int old_tx = lp->old_tx;
688
689// if(lance_debug >= 3) {
690// int i;
691//
692// printk("%s: tx int\n", dev->name);
693//
694// for(i = 0; i < TX_RING_SIZE; i++)
695// printk("ring %d flag=%04x\n", i,
696// MEM->tx_head[i].flag);
697// }
698
699 while( old_tx != lp->new_tx) {
700 struct lance_tx_head *head = &(MEM->tx_head[old_tx]);
701
702 DPRINTK(3, ("on tx_ring %d\n", old_tx));
703
704 if (head->flag & TMD1_OWN_CHIP)
705 break; /* It still hasn't been Txed */
706
707 if (head->flag & TMD1_ERR) {
708 int status = head->misc;
709 dev->stats.tx_errors++;
710 if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
711 if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
712 if (status & TMD3_LCOL) dev->stats.tx_window_errors++;
713 if (status & (TMD3_UFLO | TMD3_BUFF)) {
714 dev->stats.tx_fifo_errors++;
715 printk("%s: Tx FIFO error\n",
716 dev->name);
717 REGA(CSR0) = CSR0_STOP;
718 REGA(CSR3) = CSR3_BSWP;
719 lance_init_ring(dev);
720 REGA(CSR0) = CSR0_STRT | CSR0_INEA;
721 return IRQ_HANDLED;
722 }
723 } else if(head->flag & (TMD1_ENP | TMD1_STP)) {
724
725 head->flag &= ~(TMD1_ENP | TMD1_STP);
726 if(head->flag & (TMD1_ONE | TMD1_MORE))
727 dev->stats.collisions++;
728
729 dev->stats.tx_packets++;
730 DPRINTK(3, ("cleared tx ring %d\n", old_tx));
731 }
732 old_tx = (old_tx +1) & TX_RING_MOD_MASK;
733 }
734
735 lp->old_tx = old_tx;
736 }
737
738
739 if (netif_queue_stopped(dev)) {
740 /* The ring is no longer full, clear tbusy. */
741 netif_start_queue(dev);
742 netif_wake_queue(dev);
743 }
744
745 if (csr0 & CSR0_RINT) /* Rx interrupt */
746 lance_rx( dev );
747
748 /* Log misc errors. */
749 if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
750 if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
751 if (csr0 & CSR0_MERR) {
752 DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
753 "status %04x.\n", dev->name, csr0 ));
754 /* Restart the chip. */
755 REGA(CSR0) = CSR0_STOP;
756 REGA(CSR3) = CSR3_BSWP;
757 lance_init_ring(dev);
758 REGA(CSR0) = CSR0_STRT | CSR0_INEA;
759 }
760
761
762 /* Clear any other interrupt, and set interrupt enable. */
763// DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
764// CSR0_IDON | CSR0_INEA;
765
766 REGA(CSR0) = CSR0_INEA;
767
768 if(DREG & (CSR0_RINT | CSR0_TINT)) {
769 DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
770 goto still_more;
771 }
772
773 DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
774 dev->name, DREG ));
775 in_interrupt = 0;
776 return IRQ_HANDLED;
777}
778
779/* get packet, toss into skbuff */
780static int lance_rx( struct net_device *dev )
781{
782 struct lance_private *lp = netdev_priv(dev);
783 int entry = lp->new_rx;
784
785 /* If we own the next entry, it's a new packet. Send it up. */
786 while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
787 struct lance_rx_head *head = &(MEM->rx_head[entry]);
788 int status = head->flag;
789
790 if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */
791 /* There is a tricky error noted by John Murphy,
792 <murf@perftech.com> to Russ Nelson: Even with
793 full-sized buffers it's possible for a jabber packet to use two
794 buffers, with only the last correctly noting the error. */
795 if (status & RMD1_ENP) /* Only count a general error at the */
796 dev->stats.rx_errors++; /* end of a packet.*/
797 if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
798 if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
799 if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
800 if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
801 head->flag &= (RMD1_ENP|RMD1_STP);
802 } else {
803 /* Malloc up new buffer, compatible with net-3. */
804// short pkt_len = head->msg_length;// & 0xfff;
805 short pkt_len = (head->msg_length & 0xfff) - 4;
806 struct sk_buff *skb;
807
808 if (pkt_len < 60) {
809 printk( "%s: Runt packet!\n", dev->name );
810 dev->stats.rx_errors++;
811 }
812 else {
813 skb = dev_alloc_skb( pkt_len+2 );
814 if (skb == NULL) {
815 DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
816 dev->name ));
817
818 dev->stats.rx_dropped++;
819 head->msg_length = 0;
820 head->flag |= RMD1_OWN_CHIP;
821 lp->new_rx = (lp->new_rx+1) &
822 RX_RING_MOD_MASK;
823 break; } /* entry already recycled above -- don't fall through with a NULL skb */
824
825#if 0
826 if (lance_debug >= 3) {
827 u_char *data = PKTBUF_ADDR(head);
828 printk("%s: RX pkt %d type 0x%04x"
829 " from %pM to %pM",
830 dev->name, lp->new_tx, ((u_short *)data)[6],
831 &data[6], data);
832
833 printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
834 "len %d at %08x\n",
835 data[15], data[16], data[17], data[18],
836 data[19], data[20], data[21], data[22],
837 pkt_len, data);
838 }
839#endif
840 if (lance_debug >= 3) {
841 u_char *data = PKTBUF_ADDR(head);
842 printk( "%s: RX pkt %d type 0x%04x len %d\n", dev->name, entry, ((u_short *)data)[6], pkt_len);
843 }
844
845
846 skb_reserve( skb, 2 ); /* 16 byte align */
847 skb_put( skb, pkt_len ); /* Make room */
848 skb_copy_to_linear_data(skb,
849 PKTBUF_ADDR(head),
850 pkt_len);
851
852 skb->protocol = eth_type_trans( skb, dev );
853 netif_rx( skb );
854 dev->stats.rx_packets++;
855 dev->stats.rx_bytes += pkt_len;
856 }
857 }
858
859// head->buf_length = -PKT_BUF_SZ | 0xf000;
860 head->msg_length = 0;
861 head->flag = RMD1_OWN_CHIP;
862
863 entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
864 }
865
866 /* From lance.c (Donald Becker): */
867 /* We should check that at least two ring entries are free.
868 If not, we should free one and mark stats->rx_dropped++. */
869
870 return 0;
871}
872
873
874static int lance_close( struct net_device *dev )
875{
876 struct lance_private *lp = netdev_priv(dev);
877
878 netif_stop_queue(dev);
879
880 AREG = CSR0;
881
882 DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
883 dev->name, DREG ));
884
885 /* We stop the LANCE here -- it occasionally polls
886 memory if we don't. */
887 DREG = CSR0_STOP;
888 return 0;
889}
890
891
892/* Set or clear the multicast filter for this adaptor.
893 num_addrs == -1 Promiscuous mode, receive all packets
894 num_addrs == 0 Normal mode, clear multicast list
895 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
896 best-effort filtering.
897 */
898
899/* completely untested on a sun3 */
900static void set_multicast_list( struct net_device *dev )
901{
902 struct lance_private *lp = netdev_priv(dev);
903
904 if(netif_queue_stopped(dev))
905 /* Only possible if board is already started */
906 return;
907
908 /* We take the simple way out and always enable promiscuous mode. */
909 DREG = CSR0_STOP; /* Temporarily stop the lance. */
910
911 if (dev->flags & IFF_PROMISC) {
912 /* Log any net taps. */
913 DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
914 REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
915 } else {
916 short multicast_table[4];
917 int num_addrs = netdev_mc_count(dev);
918 int i;
919 /* We don't use the multicast table, but rely on upper-layer
920 * filtering. */
921 memset( multicast_table, (num_addrs == 0) ? 0 : -1,
922 sizeof(multicast_table) );
923 for( i = 0; i < 4; i++ )
924 REGA( CSR8+i ) = multicast_table[i];
925 REGA( CSR15 ) = 0; /* Unset promiscuous mode */
926 }
927
928 /*
929 * Always set BSWP after a STOP as STOP puts it back into
930 * little endian mode.
931 */
932 REGA( CSR3 ) = CSR3_BSWP;
933
934 /* Resume normal operation and reset AREG to CSR0 */
935 REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
936}
937
938
939#ifdef MODULE
940
941static struct net_device *sun3lance_dev;
942
943int __init init_module(void)
944{
945 sun3lance_dev = sun3lance_probe(-1);
946 if (IS_ERR(sun3lance_dev))
947 return PTR_ERR(sun3lance_dev);
948 return 0;
949}
950
951void __exit cleanup_module(void)
952{
953 unregister_netdev(sun3lance_dev);
954#ifdef CONFIG_SUN3
955 iounmap((void __iomem *)sun3lance_dev->base_addr);
956#endif
957 free_netdev(sun3lance_dev);
958}
959
960#endif /* MODULE */
961
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
new file mode 100644
index 000000000000..8fda457f94cf
--- /dev/null
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -0,0 +1,1556 @@
1/* $Id: sunlance.c,v 1.112 2002/01/15 06:48:55 davem Exp $
2 * lance.c: Linux/Sparc/Lance driver
3 *
4 * Written 1995, 1996 by Miguel de Icaza
5 * Sources:
6 * The Linux depca driver
7 * The Linux lance driver.
8 * The Linux skeleton driver.
9 * The NetBSD Sparc/Lance driver.
10 * Theo de Raadt (deraadt@openbsd.org)
11 * NCR92C990 Lan Controller manual
12 *
13 * 1.4:
14 * Added support to run with a ledma on the Sun4m
15 *
16 * 1.5:
17 * Added multiple card detection.
18 *
19 * 4/17/96: Burst sizes and tpe selection on sun4m by Eddie C. Dost
20 * (ecd@skynet.be)
21 *
22 * 5/15/96: auto carrier detection on sun4m by Eddie C. Dost
23 * (ecd@skynet.be)
24 *
25 * 5/17/96: lebuffer on scsi/ether cards now work David S. Miller
26 * (davem@caip.rutgers.edu)
27 *
28 * 5/29/96: override option 'tpe-link-test?', if it is 'false', as
29 * this disables auto carrier detection on sun4m. Eddie C. Dost
30 * (ecd@skynet.be)
31 *
32 * 1.7:
33 * 6/26/96: Bug fix for multiple ledmas, miguel.
34 *
35 * 1.8:
36 * Stole multicast code from depca.c, fixed lance_tx.
37 *
38 * 1.9:
39 * 8/21/96: Fixed the multicast code (Pedro Roque)
40 *
41 * 8/28/96: Send fake packet in lance_open() if auto_select is true,
42 * so we can detect the carrier loss condition in time.
43 * Eddie C. Dost (ecd@skynet.be)
44 *
45 * 9/15/96: Align rx_buf so that eth_copy_and_sum() won't cause an
46 * MNA trap during chksum_partial_copy(). (ecd@skynet.be)
47 *
48 * 11/17/96: Handle LE_C0_MERR in lance_interrupt(). (ecd@skynet.be)
49 *
50 * 12/22/96: Don't loop forever in lance_rx() on incomplete packets.
51 * This was the sun4c killer. Shit, stupid bug.
52 * (ecd@skynet.be)
53 *
54 * 1.10:
55 * 1/26/97: Modularize driver. (ecd@skynet.be)
56 *
57 * 1.11:
58 * 12/27/97: Added sun4d support. (jj@sunsite.mff.cuni.cz)
59 *
60 * 1.12:
61 * 11/3/99: Fixed SMP race in lance_start_xmit found by davem.
62 * Anton Blanchard (anton@progsoc.uts.edu.au)
63 * 2.00: 11/9/99: Massive overhaul and port to new SBUS driver interfaces.
64 * David S. Miller (davem@redhat.com)
65 * 2.01:
66 * 11/08/01: Use library crc32 functions (Matt_Domsch@dell.com)
67 *
68 */
69
70#undef DEBUG_DRIVER
71
72static char lancestr[] = "LANCE";
73
74#include <linux/module.h>
75#include <linux/kernel.h>
76#include <linux/types.h>
77#include <linux/fcntl.h>
78#include <linux/interrupt.h>
79#include <linux/ioport.h>
80#include <linux/in.h>
81#include <linux/string.h>
82#include <linux/delay.h>
83#include <linux/init.h>
84#include <linux/crc32.h>
85#include <linux/errno.h>
86#include <linux/socket.h> /* Used for the temporal inet entries and routing */
87#include <linux/route.h>
88#include <linux/netdevice.h>
89#include <linux/etherdevice.h>
90#include <linux/skbuff.h>
91#include <linux/ethtool.h>
92#include <linux/bitops.h>
93#include <linux/dma-mapping.h>
94#include <linux/of.h>
95#include <linux/of_device.h>
96#include <linux/gfp.h>
97
98#include <asm/system.h>
99#include <asm/io.h>
100#include <asm/dma.h>
101#include <asm/pgtable.h>
102#include <asm/byteorder.h> /* Used by the checksum routines */
103#include <asm/idprom.h>
104#include <asm/prom.h>
105#include <asm/auxio.h> /* For tpe-link-test? setting */
106#include <asm/irq.h>
107
108#define DRV_NAME "sunlance"
109#define DRV_VERSION "2.02"
110#define DRV_RELDATE "8/24/03"
111#define DRV_AUTHOR "Miguel de Icaza (miguel@nuclecu.unam.mx)"
112
113static char version[] =
114 DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
115
116MODULE_VERSION(DRV_VERSION);
117MODULE_AUTHOR(DRV_AUTHOR);
118MODULE_DESCRIPTION("Sun Lance ethernet driver");
119MODULE_LICENSE("GPL");
120
121/* Define: 2^4 Tx buffers and 2^4 Rx buffers */
122#ifndef LANCE_LOG_TX_BUFFERS
123#define LANCE_LOG_TX_BUFFERS 4
124#define LANCE_LOG_RX_BUFFERS 4
125#endif
126
127#define LE_CSR0 0
128#define LE_CSR1 1
129#define LE_CSR2 2
130#define LE_CSR3 3
131
132#define LE_MO_PROM 0x8000 /* Enable promiscuous mode */
133
134#define LE_C0_ERR 0x8000 /* Error: set if BAB, SQE, MISS or ME is set */
135#define LE_C0_BABL 0x4000 /* BAB: Babble: tx timeout. */
136#define LE_C0_CERR 0x2000 /* SQE: Signal quality error */
137#define LE_C0_MISS 0x1000 /* MISS: Missed a packet */
138#define LE_C0_MERR 0x0800 /* ME: Memory error */
139#define LE_C0_RINT 0x0400 /* Received interrupt */
140#define LE_C0_TINT 0x0200 /* Transmitter Interrupt */
141#define LE_C0_IDON 0x0100 /* IFIN: Init finished. */
142#define LE_C0_INTR 0x0080 /* Interrupt or error */
143#define LE_C0_INEA 0x0040 /* Interrupt enable */
144#define LE_C0_RXON 0x0020 /* Receiver on */
145#define LE_C0_TXON 0x0010 /* Transmitter on */
146#define LE_C0_TDMD 0x0008 /* Transmitter demand */
147#define LE_C0_STOP 0x0004 /* Stop the card */
148#define LE_C0_STRT 0x0002 /* Start the card */
149#define LE_C0_INIT 0x0001 /* Init the card */
150
151#define LE_C3_BSWP 0x4 /* SWAP */
152#define LE_C3_ACON 0x2 /* ALE Control */
153#define LE_C3_BCON 0x1 /* Byte control */
154
155/* Receive message descriptor 1 */
156#define LE_R1_OWN 0x80 /* Who owns the entry */
157#define LE_R1_ERR 0x40 /* Error: if FRA, OFL, CRC or BUF is set */
158#define LE_R1_FRA 0x20 /* FRA: Frame error */
159#define LE_R1_OFL 0x10 /* OFL: Frame overflow */
160#define LE_R1_CRC 0x08 /* CRC error */
161#define LE_R1_BUF 0x04 /* BUF: Buffer error */
162#define LE_R1_SOP 0x02 /* Start of packet */
163#define LE_R1_EOP 0x01 /* End of packet */
164#define LE_R1_POK 0x03 /* Packet is complete: SOP + EOP */
165
166#define LE_T1_OWN 0x80 /* Lance owns the packet */
167#define LE_T1_ERR 0x40 /* Error summary */
168#define LE_T1_EMORE 0x10 /* Error: more than one retry needed */
169#define LE_T1_EONE 0x08 /* Error: one retry needed */
170#define LE_T1_EDEF 0x04 /* Error: deferred */
171#define LE_T1_SOP 0x02 /* Start of packet */
172#define LE_T1_EOP 0x01 /* End of packet */
173#define LE_T1_POK 0x03 /* Packet is complete: SOP + EOP */
174
175#define LE_T3_BUF 0x8000 /* Buffer error */
176#define LE_T3_UFL 0x4000 /* Error underflow */
177#define LE_T3_LCOL 0x1000 /* Error late collision */
178#define LE_T3_CLOS 0x0800 /* Error carrier loss */
179#define LE_T3_RTY 0x0400 /* Error retry */
180#define LE_T3_TDR 0x03ff /* Time Domain Reflectometry counter */
181
182#define TX_RING_SIZE (1 << (LANCE_LOG_TX_BUFFERS))
183#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
184#define TX_RING_LEN_BITS ((LANCE_LOG_TX_BUFFERS) << 29)
185#define TX_NEXT(__x) (((__x)+1) & TX_RING_MOD_MASK)
186
187#define RX_RING_SIZE (1 << (LANCE_LOG_RX_BUFFERS))
188#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
189#define RX_RING_LEN_BITS ((LANCE_LOG_RX_BUFFERS) << 29)
190#define RX_NEXT(__x) (((__x)+1) & RX_RING_MOD_MASK)
191
192#define PKT_BUF_SZ 1544
193#define RX_BUFF_SIZE PKT_BUF_SZ
194#define TX_BUFF_SIZE PKT_BUF_SZ
195
196struct lance_rx_desc {
197 u16 rmd0; /* low address of packet */
198 u8 rmd1_bits; /* descriptor bits */
199 u8 rmd1_hadr; /* high address of packet */
200 s16 length; /* This length is 2s complement (negative)!
201 * Buffer length
202 */
203 u16 mblength; /* This is the actual number of bytes received */
204};
205
206struct lance_tx_desc {
207 u16 tmd0; /* low address of packet */
208 u8 tmd1_bits; /* descriptor bits */
209 u8 tmd1_hadr; /* high address of packet */
210 s16 length; /* Length is 2s complement (negative)! */
211 u16 misc;
212};
213
214/* The LANCE initialization block, described in databook. */
215/* On the Sparc, this block should be on a DMA region */
216struct lance_init_block {
217 u16 mode; /* Pre-set mode (reg. 15) */
218 u8 phys_addr[6]; /* Physical ethernet address */
219 u32 filter[2]; /* Multicast filter. */
220
221 /* Receive and transmit ring base, along with extra bits. */
222 u16 rx_ptr; /* receive descriptor addr */
223 u16 rx_len; /* receive len and high addr */
224 u16 tx_ptr; /* transmit descriptor addr */
225 u16 tx_len; /* transmit len and high addr */
226
227 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
228 struct lance_rx_desc brx_ring[RX_RING_SIZE];
229 struct lance_tx_desc btx_ring[TX_RING_SIZE];
230
231 u8 tx_buf [TX_RING_SIZE][TX_BUFF_SIZE];
232 u8 pad[2]; /* align rx_buf for copy_and_sum(). */
233 u8 rx_buf [RX_RING_SIZE][RX_BUFF_SIZE];
234};
235
236#define libdesc_offset(rt, elem) \
237((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem])))))
238
239#define libbuff_offset(rt, elem) \
240((__u32)(((unsigned long)(&(((struct lance_init_block *)0)->rt[elem][0])))))
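/*
 * Editorial note: libdesc_offset()/libbuff_offset() are hand-rolled
 * offsetof() -- the NULL lance_init_block pointer is used purely for
 * address arithmetic. With <linux/stddef.h> the first could equally be
 * written as:
 *
 *	#define libdesc_offset(rt, elem) \
 *		((__u32)offsetof(struct lance_init_block, rt[elem]))
 *
 * Either form yields the byte offset of a descriptor (or buffer) from
 * the start of the init block, from which the 24-bit LANCE pointers
 * are built.
 */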
241
242struct lance_private {
243 void __iomem *lregs; /* Lance RAP/RDP regs. */
244 void __iomem *dregs; /* DMA controller regs. */
245 struct lance_init_block __iomem *init_block_iomem;
246 struct lance_init_block *init_block_mem;
247
248 spinlock_t lock;
249
250 int rx_new, tx_new;
251 int rx_old, tx_old;
252
253 struct platform_device *ledma; /* If set this points to ledma */
254 char tpe; /* cable-selection is TPE */
255 char auto_select; /* cable-selection by carrier */
256 char burst_sizes; /* ledma SBus burst sizes */
257 char pio_buffer; /* init block in PIO space? */
258
259 unsigned short busmaster_regval;
260
261 void (*init_ring)(struct net_device *);
262 void (*rx)(struct net_device *);
263 void (*tx)(struct net_device *);
264
265 char *name;
266 dma_addr_t init_block_dvma;
267 struct net_device *dev; /* Backpointer */
268 struct platform_device *op;
269 struct platform_device *lebuffer;
270 struct timer_list multicast_timer;
271};
272
273#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
274 lp->tx_old+TX_RING_MOD_MASK-lp->tx_new:\
275 lp->tx_old - lp->tx_new-1)
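/*
 * Editorial note: TX_BUFFS_AVAIL counts free slots while reserving one
 * entry so that tx_old == tx_new unambiguously means "ring empty".
 * Worked examples with TX_RING_SIZE = 16 (TX_RING_MOD_MASK = 15):
 *
 *	tx_old = 3, tx_new = 3  ->  3 + 15 - 3 = 15 free   (empty ring)
 *	tx_old = 3, tx_new = 7  ->  3 + 15 - 7 = 11 free   (4 in flight)
 *	tx_old = 9, tx_new = 4  ->  9 - 4 - 1  =  4 free   (wrapped)
 */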
276
277/* Lance registers. */
278#define RDP 0x00UL /* register data port */
279#define RAP 0x02UL /* register address port */
280#define LANCE_REG_SIZE 0x04UL
281
282#define STOP_LANCE(__lp) \
283do { void __iomem *__base = (__lp)->lregs; \
284 sbus_writew(LE_CSR0, __base + RAP); \
285 sbus_writew(LE_C0_STOP, __base + RDP); \
286} while (0)
287
288int sparc_lance_debug = 2;
289
290/* The Lance uses 24 bit addresses */
291/* On the Sun4c the DVMA will provide the remaining bytes for us */
292/* On the Sun4m we have to instruct the ledma to provide them */
293/* Even worse, on scsi/ether SBUS cards, the init block and the
294 * transmit/receive buffers are addressed as offsets from absolute
295 * zero on the lebuffer PIO area. -DaveM
296 */
297
298#define LANCE_ADDR(x) ((long)(x) & ~0xff000000)
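/*
 * Editorial note: LANCE_ADDR() keeps only the low 24 bits because the
 * chip drives a 24-bit address bus; as the comment above says, the
 * DVMA/ledma hardware supplies the rest. Hypothetical value:
 *
 *	0xfff01000  ->  LANCE_ADDR()  ->  0x00f01000
 */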
299
300/* Load the CSR registers */
301static void load_csrs(struct lance_private *lp)
302{
303 u32 leptr;
304
305 if (lp->pio_buffer)
306 leptr = 0;
307 else
308 leptr = LANCE_ADDR(lp->init_block_dvma);
309
310 sbus_writew(LE_CSR1, lp->lregs + RAP);
311 sbus_writew(leptr & 0xffff, lp->lregs + RDP);
312 sbus_writew(LE_CSR2, lp->lregs + RAP);
313 sbus_writew(leptr >> 16, lp->lregs + RDP);
314 sbus_writew(LE_CSR3, lp->lregs + RAP);
315 sbus_writew(lp->busmaster_regval, lp->lregs + RDP);
316
317 /* Point back to csr0 */
318 sbus_writew(LE_CSR0, lp->lregs + RAP);
319}
320
321/* Setup the Lance Rx and Tx rings */
322static void lance_init_ring_dvma(struct net_device *dev)
323{
324 struct lance_private *lp = netdev_priv(dev);
325 struct lance_init_block *ib = lp->init_block_mem;
326 dma_addr_t aib = lp->init_block_dvma;
327 __u32 leptr;
328 int i;
329
330 /* Lock out other processes while setting up hardware */
331 netif_stop_queue(dev);
332 lp->rx_new = lp->tx_new = 0;
333 lp->rx_old = lp->tx_old = 0;
334
335 /* Copy the ethernet address to the lance init block
336 * Note that on the sparc you need to swap the ethernet address.
337 */
338 ib->phys_addr [0] = dev->dev_addr [1];
339 ib->phys_addr [1] = dev->dev_addr [0];
340 ib->phys_addr [2] = dev->dev_addr [3];
341 ib->phys_addr [3] = dev->dev_addr [2];
342 ib->phys_addr [4] = dev->dev_addr [5];
343 ib->phys_addr [5] = dev->dev_addr [4];
344
345 /* Setup the Tx ring entries */
346 for (i = 0; i < TX_RING_SIZE; i++) {
347 leptr = LANCE_ADDR(aib + libbuff_offset(tx_buf, i));
348 ib->btx_ring [i].tmd0 = leptr;
349 ib->btx_ring [i].tmd1_hadr = leptr >> 16;
350 ib->btx_ring [i].tmd1_bits = 0;
351 ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */
352 ib->btx_ring [i].misc = 0;
353 }
354
355 /* Setup the Rx ring entries */
356 for (i = 0; i < RX_RING_SIZE; i++) {
357 leptr = LANCE_ADDR(aib + libbuff_offset(rx_buf, i));
358
359 ib->brx_ring [i].rmd0 = leptr;
360 ib->brx_ring [i].rmd1_hadr = leptr >> 16;
361 ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
362 ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000;
363 ib->brx_ring [i].mblength = 0;
364 }
365
366 /* Setup the initialization block */
367
368 /* Setup rx descriptor pointer */
369 leptr = LANCE_ADDR(aib + libdesc_offset(brx_ring, 0));
370 ib->rx_len = (LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16);
371 ib->rx_ptr = leptr;
372
373 /* Setup tx descriptor pointer */
374 leptr = LANCE_ADDR(aib + libdesc_offset(btx_ring, 0));
375 ib->tx_len = (LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16);
376 ib->tx_ptr = leptr;
377}
378
379static void lance_init_ring_pio(struct net_device *dev)
380{
381 struct lance_private *lp = netdev_priv(dev);
382 struct lance_init_block __iomem *ib = lp->init_block_iomem;
383 u32 leptr;
384 int i;
385
386 /* Lock out other processes while setting up hardware */
387 netif_stop_queue(dev);
388 lp->rx_new = lp->tx_new = 0;
389 lp->rx_old = lp->tx_old = 0;
390
391 /* Copy the ethernet address to the lance init block
392 * Note that on the sparc you need to swap the ethernet address.
393 */
394 sbus_writeb(dev->dev_addr[1], &ib->phys_addr[0]);
395 sbus_writeb(dev->dev_addr[0], &ib->phys_addr[1]);
396 sbus_writeb(dev->dev_addr[3], &ib->phys_addr[2]);
397 sbus_writeb(dev->dev_addr[2], &ib->phys_addr[3]);
398 sbus_writeb(dev->dev_addr[5], &ib->phys_addr[4]);
399 sbus_writeb(dev->dev_addr[4], &ib->phys_addr[5]);
400
401 /* Setup the Tx ring entries */
402 for (i = 0; i < TX_RING_SIZE; i++) {
403 leptr = libbuff_offset(tx_buf, i);
404 sbus_writew(leptr, &ib->btx_ring [i].tmd0);
405 sbus_writeb(leptr >> 16,&ib->btx_ring [i].tmd1_hadr);
406 sbus_writeb(0, &ib->btx_ring [i].tmd1_bits);
407
408 /* The ones required by tmd2 */
409 sbus_writew(0xf000, &ib->btx_ring [i].length);
410 sbus_writew(0, &ib->btx_ring [i].misc);
411 }
412
413 /* Setup the Rx ring entries */
414 for (i = 0; i < RX_RING_SIZE; i++) {
415 leptr = libbuff_offset(rx_buf, i);
416
417 sbus_writew(leptr, &ib->brx_ring [i].rmd0);
418 sbus_writeb(leptr >> 16,&ib->brx_ring [i].rmd1_hadr);
419 sbus_writeb(LE_R1_OWN, &ib->brx_ring [i].rmd1_bits);
420 sbus_writew(-RX_BUFF_SIZE|0xf000,
421 &ib->brx_ring [i].length);
422 sbus_writew(0, &ib->brx_ring [i].mblength);
423 }
424
425 /* Setup the initialization block */
426
427 /* Setup rx descriptor pointer */
428 leptr = libdesc_offset(brx_ring, 0);
429 sbus_writew((LANCE_LOG_RX_BUFFERS << 13) | (leptr >> 16),
430 &ib->rx_len);
431 sbus_writew(leptr, &ib->rx_ptr);
432
433 /* Setup tx descriptor pointer */
434 leptr = libdesc_offset(btx_ring, 0);
435 sbus_writew((LANCE_LOG_TX_BUFFERS << 13) | (leptr >> 16),
436 &ib->tx_len);
437 sbus_writew(leptr, &ib->tx_ptr);
438}
439
440static void init_restart_ledma(struct lance_private *lp)
441{
442 u32 csr = sbus_readl(lp->dregs + DMA_CSR);
443
444 if (!(csr & DMA_HNDL_ERROR)) {
445 /* E-Cache draining */
446 while (sbus_readl(lp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN)
447 barrier();
448 }
449
450 csr = sbus_readl(lp->dregs + DMA_CSR);
451 csr &= ~DMA_E_BURSTS;
452 if (lp->burst_sizes & DMA_BURST32)
453 csr |= DMA_E_BURST32;
454 else
455 csr |= DMA_E_BURST16;
456
457 csr |= (DMA_DSBL_RD_DRN | DMA_DSBL_WR_INV | DMA_FIFO_INV);
458
459 if (lp->tpe)
460 csr |= DMA_EN_ENETAUI;
461 else
462 csr &= ~DMA_EN_ENETAUI;
463 udelay(20);
464 sbus_writel(csr, lp->dregs + DMA_CSR);
465 udelay(200);
466}
467
468static int init_restart_lance(struct lance_private *lp)
469{
470 u16 regval = 0;
471 int i;
472
473 if (lp->dregs)
474 init_restart_ledma(lp);
475
476 sbus_writew(LE_CSR0, lp->lregs + RAP);
477 sbus_writew(LE_C0_INIT, lp->lregs + RDP);
478
479 /* Wait for the lance to complete initialization */
480 for (i = 0; i < 100; i++) {
481 regval = sbus_readw(lp->lregs + RDP);
482
483 if (regval & (LE_C0_ERR | LE_C0_IDON))
484 break;
485 barrier();
486 }
487 if (i == 100 || (regval & LE_C0_ERR)) {
488 printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
489 i, regval);
490 if (lp->dregs)
491 printk("dcsr=%8.8x\n", sbus_readl(lp->dregs + DMA_CSR));
492 return -1;
493 }
494
495 /* Clear IDON by writing a "1", enable interrupts and start lance */
496 sbus_writew(LE_C0_IDON, lp->lregs + RDP);
497 sbus_writew(LE_C0_INEA | LE_C0_STRT, lp->lregs + RDP);
498
499 if (lp->dregs) {
500 u32 csr = sbus_readl(lp->dregs + DMA_CSR);
501
502 csr |= DMA_INT_ENAB;
503 sbus_writel(csr, lp->dregs + DMA_CSR);
504 }
505
506 return 0;
507}
508
509static void lance_rx_dvma(struct net_device *dev)
510{
511 struct lance_private *lp = netdev_priv(dev);
512 struct lance_init_block *ib = lp->init_block_mem;
513 struct lance_rx_desc *rd;
514 u8 bits;
515 int len, entry = lp->rx_new;
516 struct sk_buff *skb;
517
518 for (rd = &ib->brx_ring [entry];
519 !((bits = rd->rmd1_bits) & LE_R1_OWN);
520 rd = &ib->brx_ring [entry]) {
521
522 /* We got an incomplete frame? */
523 if ((bits & LE_R1_POK) != LE_R1_POK) {
524 dev->stats.rx_over_errors++;
525 dev->stats.rx_errors++;
526 } else if (bits & LE_R1_ERR) {
527 /* Count only the end frame as a rx error,
528 * not the beginning
529 */
530 if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
531 if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
532 if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
533 if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
534 if (bits & LE_R1_EOP) dev->stats.rx_errors++;
535 } else {
536 len = (rd->mblength & 0xfff) - 4;
537 skb = dev_alloc_skb(len + 2);
538
539 if (skb == NULL) {
540 printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n",
541 dev->name);
542 dev->stats.rx_dropped++;
543 rd->mblength = 0;
544 rd->rmd1_bits = LE_R1_OWN;
545 lp->rx_new = RX_NEXT(entry);
546 return;
547 }
548
549 dev->stats.rx_bytes += len;
550
551 skb_reserve(skb, 2); /* align IP header on 16-byte boundary */
552 skb_put(skb, len); /* make room */
553 skb_copy_to_linear_data(skb,
554 (unsigned char *)&(ib->rx_buf [entry][0]),
555 len);
556 skb->protocol = eth_type_trans(skb, dev);
557 netif_rx(skb);
558 dev->stats.rx_packets++;
559 }
560
561 /* Return the packet to the pool */
562 rd->mblength = 0;
563 rd->rmd1_bits = LE_R1_OWN;
564 entry = RX_NEXT(entry);
565 }
566
567 lp->rx_new = entry;
568}
569
570static void lance_tx_dvma(struct net_device *dev)
571{
572 struct lance_private *lp = netdev_priv(dev);
573 struct lance_init_block *ib = lp->init_block_mem;
574 int i, j;
575
576 spin_lock(&lp->lock);
577
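	/* Walk the ring from tx_old toward tx_new, reclaiming descriptors
	 * the chip has released; j trails the scan so tx_old ends up at the
	 * first still-owned slot.  lance_tx_pio() below uses the same idiom.
	 */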
578 j = lp->tx_old;
579 for (i = j; i != lp->tx_new; i = j) {
580 struct lance_tx_desc *td = &ib->btx_ring [i];
581 u8 bits = td->tmd1_bits;
582
583 /* If we hit a packet not owned by us, stop */
584 if (bits & LE_T1_OWN)
585 break;
586
587 if (bits & LE_T1_ERR) {
588 u16 status = td->misc;
589
590 dev->stats.tx_errors++;
591 if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
592 if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
593
594 if (status & LE_T3_CLOS) {
595 dev->stats.tx_carrier_errors++;
596 if (lp->auto_select) {
597 lp->tpe = 1 - lp->tpe;
598 printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
599 dev->name, lp->tpe ? "TPE" : "AUI");
600 STOP_LANCE(lp);
601 lp->init_ring(dev);
602 load_csrs(lp);
603 init_restart_lance(lp);
604 goto out;
605 }
606 }
607
608 /* Buffer errors and underflows turn off the
609 * transmitter, restart the adapter.
610 */
611 if (status & (LE_T3_BUF|LE_T3_UFL)) {
612 dev->stats.tx_fifo_errors++;
613
614 printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
615 dev->name);
616 STOP_LANCE(lp);
617 lp->init_ring(dev);
618 load_csrs(lp);
619 init_restart_lance(lp);
620 goto out;
621 }
622 } else if ((bits & LE_T1_POK) == LE_T1_POK) {
623 /*
624 * So we don't count the packet more than once.
625 */
626 td->tmd1_bits = bits & ~(LE_T1_POK);
627
628 /* One collision before packet was sent. */
629 if (bits & LE_T1_EONE)
630 dev->stats.collisions++;
631
632 /* More than one collision, be optimistic. */
633 if (bits & LE_T1_EMORE)
634 dev->stats.collisions += 2;
635
636 dev->stats.tx_packets++;
637 }
638
639 j = TX_NEXT(j);
640 }
641 lp->tx_old = j;
642out:
643 if (netif_queue_stopped(dev) &&
644 TX_BUFFS_AVAIL > 0)
645 netif_wake_queue(dev);
646
647 spin_unlock(&lp->lock);
648}
649
650static void lance_piocopy_to_skb(struct sk_buff *skb, void __iomem *piobuf, int len)
651{
652 u16 *p16 = (u16 *) skb->data;
653 u32 *p32;
654 u8 *p8;
655 void __iomem *pbuf = piobuf;
656
657 /* We know here that both src and dest are on a 16bit boundary. */
658 *p16++ = sbus_readw(pbuf);
659 p32 = (u32 *) p16;
660 pbuf += 2;
661 len -= 2;
662
663 while (len >= 4) {
664 *p32++ = sbus_readl(pbuf);
665 pbuf += 4;
666 len -= 4;
667 }
668 p8 = (u8 *) p32;
669 if (len >= 2) {
670 p16 = (u16 *) p32;
671 *p16++ = sbus_readw(pbuf);
672 pbuf += 2;
673 len -= 2;
674 p8 = (u8 *) p16;
675 }
676 if (len >= 1)
677 *p8 = sbus_readb(pbuf);
678}
679
680static void lance_rx_pio(struct net_device *dev)
681{
682 struct lance_private *lp = netdev_priv(dev);
683 struct lance_init_block __iomem *ib = lp->init_block_iomem;
684 struct lance_rx_desc __iomem *rd;
685 unsigned char bits;
686 int len, entry;
687 struct sk_buff *skb;
688
689 entry = lp->rx_new;
690 for (rd = &ib->brx_ring [entry];
691 !((bits = sbus_readb(&rd->rmd1_bits)) & LE_R1_OWN);
692 rd = &ib->brx_ring [entry]) {
693
694 /* We got an incomplete frame? */
695 if ((bits & LE_R1_POK) != LE_R1_POK) {
696 dev->stats.rx_over_errors++;
697 dev->stats.rx_errors++;
698 } else if (bits & LE_R1_ERR) {
699 /* Count only the end frame as a rx error,
700 * not the beginning
701 */
702 if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
703 if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
704 if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
705 if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
706 if (bits & LE_R1_EOP) dev->stats.rx_errors++;
707 } else {
708 len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
709 skb = dev_alloc_skb(len + 2);
710
711 if (skb == NULL) {
712 printk(KERN_INFO "%s: Memory squeeze, dropping packet.\n",
713 dev->name);
714 dev->stats.rx_dropped++;
715 sbus_writew(0, &rd->mblength);
716 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
717 lp->rx_new = RX_NEXT(entry);
718 return;
719 }
720
721 dev->stats.rx_bytes += len;
722
723 skb_reserve(skb, 2); /* align IP header on 16-byte boundary */
724 skb_put(skb, len); /* make room */
725 lance_piocopy_to_skb(skb, &(ib->rx_buf[entry][0]), len);
726 skb->protocol = eth_type_trans(skb, dev);
727 netif_rx(skb);
728 dev->stats.rx_packets++;
729 }
730
731 /* Return the packet to the pool */
732 sbus_writew(0, &rd->mblength);
733 sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
734 entry = RX_NEXT(entry);
735 }
736
737 lp->rx_new = entry;
738}
739
740static void lance_tx_pio(struct net_device *dev)
741{
742 struct lance_private *lp = netdev_priv(dev);
743 struct lance_init_block __iomem *ib = lp->init_block_iomem;
744 int i, j;
745
746 spin_lock(&lp->lock);
747
748 j = lp->tx_old;
749 for (i = j; i != lp->tx_new; i = j) {
750 struct lance_tx_desc __iomem *td = &ib->btx_ring [i];
751 u8 bits = sbus_readb(&td->tmd1_bits);
752
753 /* If we hit a packet not owned by us, stop */
754 if (bits & LE_T1_OWN)
755 break;
756
757 if (bits & LE_T1_ERR) {
758 u16 status = sbus_readw(&td->misc);
759
760 dev->stats.tx_errors++;
761 if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
762 if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;
763
764 if (status & LE_T3_CLOS) {
765 dev->stats.tx_carrier_errors++;
766 if (lp->auto_select) {
767 lp->tpe = 1 - lp->tpe;
768 printk(KERN_NOTICE "%s: Carrier Lost, trying %s\n",
769 dev->name, lp->tpe ? "TPE" : "AUI");
770 STOP_LANCE(lp);
771 lp->init_ring(dev);
772 load_csrs(lp);
773 init_restart_lance(lp);
774 goto out;
775 }
776 }
777
778 /* Buffer errors and underflows turn off the
779 * transmitter, restart the adapter.
780 */
781 if (status & (LE_T3_BUF|LE_T3_UFL)) {
782 dev->stats.tx_fifo_errors++;
783
784 printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
785 dev->name);
786 STOP_LANCE(lp);
787 lp->init_ring(dev);
788 load_csrs(lp);
789 init_restart_lance(lp);
790 goto out;
791 }
792 } else if ((bits & LE_T1_POK) == LE_T1_POK) {
793 /*
794 * So we don't count the packet more than once.
795 */
796 sbus_writeb(bits & ~(LE_T1_POK), &td->tmd1_bits);
797
798 /* One collision before packet was sent. */
799 if (bits & LE_T1_EONE)
800 dev->stats.collisions++;
801
802 /* More than one collision, be optimistic. */
803 if (bits & LE_T1_EMORE)
804 dev->stats.collisions += 2;
805
806 dev->stats.tx_packets++;
807 }
808
809 j = TX_NEXT(j);
810 }
811 lp->tx_old = j;
812out:
813 if (netif_queue_stopped(dev) &&
814 TX_BUFFS_AVAIL > 0)
815 netif_wake_queue(dev);
816
817 spin_unlock(&lp->lock);
818}
819
820static irqreturn_t lance_interrupt(int irq, void *dev_id)
821{
822 struct net_device *dev = dev_id;
823 struct lance_private *lp = netdev_priv(dev);
824 int csr0;
825
826 sbus_writew(LE_CSR0, lp->lregs + RAP);
827 csr0 = sbus_readw(lp->lregs + RDP);
828
829 /* Acknowledge all the interrupt sources ASAP */
830 sbus_writew(csr0 & (LE_C0_INTR | LE_C0_TINT | LE_C0_RINT),
831 lp->lregs + RDP);
832
833 if ((csr0 & LE_C0_ERR) != 0) {
834 /* Clear the error condition */
835 sbus_writew((LE_C0_BABL | LE_C0_ERR | LE_C0_MISS |
836 LE_C0_CERR | LE_C0_MERR),
837 lp->lregs + RDP);
838 }
839
840 if (csr0 & LE_C0_RINT)
841 lp->rx(dev);
842
843 if (csr0 & LE_C0_TINT)
844 lp->tx(dev);
845
846 if (csr0 & LE_C0_BABL)
847 dev->stats.tx_errors++;
848
849 if (csr0 & LE_C0_MISS)
850 dev->stats.rx_errors++;
851
852 if (csr0 & LE_C0_MERR) {
853 if (lp->dregs) {
854 u32 addr = sbus_readl(lp->dregs + DMA_ADDR);
855
856 printk(KERN_ERR "%s: Memory error, status %04x, addr %06x\n",
857 dev->name, csr0, addr & 0xffffff);
858 } else {
859 printk(KERN_ERR "%s: Memory error, status %04x\n",
860 dev->name, csr0);
861 }
862
863 sbus_writew(LE_C0_STOP, lp->lregs + RDP);
864
865 if (lp->dregs) {
866 u32 dma_csr = sbus_readl(lp->dregs + DMA_CSR);
867
868 dma_csr |= DMA_FIFO_INV;
869 sbus_writel(dma_csr, lp->dregs + DMA_CSR);
870 }
871
872 lp->init_ring(dev);
873 load_csrs(lp);
874 init_restart_lance(lp);
875 netif_wake_queue(dev);
876 }
877
878 sbus_writew(LE_C0_INEA, lp->lregs + RDP);
879
880 return IRQ_HANDLED;
881}
882
883/* Build a fake network packet and send it to ourselves. */
884static void build_fake_packet(struct lance_private *lp)
885{
886 struct net_device *dev = lp->dev;
887 int i, entry;
888
889 entry = lp->tx_new & TX_RING_MOD_MASK;
890 if (lp->pio_buffer) {
891 struct lance_init_block __iomem *ib = lp->init_block_iomem;
892 u16 __iomem *packet = (u16 __iomem *) &(ib->tx_buf[entry][0]);
893 struct ethhdr __iomem *eth = (struct ethhdr __iomem *) packet;
894 for (i = 0; i < (ETH_ZLEN / sizeof(u16)); i++)
895 sbus_writew(0, &packet[i]);
896 for (i = 0; i < 6; i++) {
897 sbus_writeb(dev->dev_addr[i], &eth->h_dest[i]);
898 sbus_writeb(dev->dev_addr[i], &eth->h_source[i]);
899 }
900 sbus_writew((-ETH_ZLEN) | 0xf000, &ib->btx_ring[entry].length);
901 sbus_writew(0, &ib->btx_ring[entry].misc);
902 sbus_writeb(LE_T1_POK|LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
903 } else {
904 struct lance_init_block *ib = lp->init_block_mem;
905 u16 *packet = (u16 *) &(ib->tx_buf[entry][0]);
906 struct ethhdr *eth = (struct ethhdr *) packet;
907 memset(packet, 0, ETH_ZLEN);
908 for (i = 0; i < 6; i++) {
909 eth->h_dest[i] = dev->dev_addr[i];
910 eth->h_source[i] = dev->dev_addr[i];
911 }
912 ib->btx_ring[entry].length = (-ETH_ZLEN) | 0xf000;
913 ib->btx_ring[entry].misc = 0;
914 ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
915 }
916 lp->tx_new = TX_NEXT(entry);
917}
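/* Editorial note: the fake packet is a self-addressed, minimum-length
 * frame used as a loopback probe for automatic TPE/AUI selection;
 * lance_open() below queues it and kicks the transmitter with LE_C0_TDMD.
 */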
918
919static int lance_open(struct net_device *dev)
920{
921 struct lance_private *lp = netdev_priv(dev);
922 int status = 0;
923
924 STOP_LANCE(lp);
925
926 if (request_irq(dev->irq, lance_interrupt, IRQF_SHARED,
927 lancestr, (void *) dev)) {
928 printk(KERN_ERR "Lance: Can't get irq %d\n", dev->irq);
929 return -EAGAIN;
930 }
931
932 /* On the 4m, set up the ledma to provide the upper bits for buffers */
933 if (lp->dregs) {
934 u32 regval = lp->init_block_dvma & 0xff000000;
935
936 sbus_writel(regval, lp->dregs + DMA_TEST);
937 }
938
939 /* Set the mode and clear the multicast filter only at device open,
940 * so that lance_init_ring() called from any error path will not
941 * forget the multicast filters.
942 *
943 * BTW it is a common bug in all lance drivers! --ANK
944 */
945 if (lp->pio_buffer) {
946 struct lance_init_block __iomem *ib = lp->init_block_iomem;
947 sbus_writew(0, &ib->mode);
948 sbus_writel(0, &ib->filter[0]);
949 sbus_writel(0, &ib->filter[1]);
950 } else {
951 struct lance_init_block *ib = lp->init_block_mem;
952 ib->mode = 0;
953 ib->filter [0] = 0;
954 ib->filter [1] = 0;
955 }
956
957 lp->init_ring(dev);
958 load_csrs(lp);
959
960 netif_start_queue(dev);
961
962 status = init_restart_lance(lp);
963 if (!status && lp->auto_select) {
964 build_fake_packet(lp);
965 sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
966 }
967
968 return status;
969}
970
971static int lance_close(struct net_device *dev)
972{
973 struct lance_private *lp = netdev_priv(dev);
974
975 netif_stop_queue(dev);
976 del_timer_sync(&lp->multicast_timer);
977
978 STOP_LANCE(lp);
979
980 free_irq(dev->irq, (void *) dev);
981 return 0;
982}
983
984static int lance_reset(struct net_device *dev)
985{
986 struct lance_private *lp = netdev_priv(dev);
987 int status;
988
989 STOP_LANCE(lp);
990
991 /* On the 4m, reset the dma too */
992 if (lp->dregs) {
993 u32 csr, addr;
994
995 printk(KERN_ERR "resetting ledma\n");
996 csr = sbus_readl(lp->dregs + DMA_CSR);
997 sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
998 udelay(200);
999 sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
1000
1001 addr = lp->init_block_dvma & 0xff000000;
1002 sbus_writel(addr, lp->dregs + DMA_TEST);
1003 }
1004 lp->init_ring(dev);
1005 load_csrs(lp);
1006 dev->trans_start = jiffies; /* prevent tx timeout */
1007 status = init_restart_lance(lp);
1008 return status;
1009}
1010
1011static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int len)
1012{
1013 void __iomem *piobuf = dest;
1014 u32 *p32;
1015 u16 *p16;
1016 u8 *p8;
1017
1018 switch ((unsigned long)src & 0x3) {
1019 case 0:
1020 p32 = (u32 *) src;
1021 while (len >= 4) {
1022 sbus_writel(*p32, piobuf);
1023 p32++;
1024 piobuf += 4;
1025 len -= 4;
1026 }
1027 src = (char *) p32;
1028 break;
1029 case 1:
1030 case 3:
1031 p8 = (u8 *) src;
1032 while (len >= 4) {
1033 u32 val;
1034
1035 val = p8[0] << 24;
1036 val |= p8[1] << 16;
1037 val |= p8[2] << 8;
1038 val |= p8[3];
1039 sbus_writel(val, piobuf);
1040 p8 += 4;
1041 piobuf += 4;
1042 len -= 4;
1043 }
1044 src = (char *) p8;
1045 break;
1046 case 2:
1047 p16 = (u16 *) src;
1048 while (len >= 4) {
1049 u32 val = p16[0]<<16 | p16[1];
1050 sbus_writel(val, piobuf);
1051 p16 += 2;
1052 piobuf += 4;
1053 len -= 4;
1054 }
1055 src = (char *) p16;
1056 break;
1057 }
1058 if (len >= 2) {
1059 u16 val = src[0] << 8 | src[1];
1060 sbus_writew(val, piobuf);
1061 src += 2;
1062 piobuf += 2;
1063 len -= 2;
1064 }
1065 if (len >= 1)
1066 sbus_writeb(src[0], piobuf);
1067}
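/* Editorial sketch: the alignment dispatch above, condensed.  On a
 * big-endian machine each open-coded shift sequence assembles the same
 * value that get_unaligned_be32() (from <asm/unaligned.h>, assumed here)
 * would load, so an equivalent -- if less tuned -- copy loop could read:
 */
static void example_piocopy_from_skb(void __iomem *piobuf,
				     const u8 *src, int len)
{
	while (len >= 4) {
		sbus_writel(get_unaligned_be32(src), piobuf);
		src += 4;
		piobuf += 4;
		len -= 4;
	}
	/* byte tail; the driver above prefers a 16-bit store when it can */
	while (len-- > 0)
		sbus_writeb(*src++, piobuf++);
}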
1068
1069static void lance_piozero(void __iomem *dest, int len)
1070{
1071 void __iomem *piobuf = dest;
1072
1073 if ((unsigned long)piobuf & 1) {
1074 sbus_writeb(0, piobuf);
1075 piobuf += 1;
1076 len -= 1;
1077 if (len == 0)
1078 return;
1079 }
1080 if (len == 1) {
1081 sbus_writeb(0, piobuf);
1082 return;
1083 }
1084 if ((unsigned long)piobuf & 2) {
1085 sbus_writew(0, piobuf);
1086 piobuf += 2;
1087 len -= 2;
1088 if (len == 0)
1089 return;
1090 }
1091 while (len >= 4) {
1092 sbus_writel(0, piobuf);
1093 piobuf += 4;
1094 len -= 4;
1095 }
1096 if (len >= 2) {
1097 sbus_writew(0, piobuf);
1098 piobuf += 2;
1099 len -= 2;
1100 }
1101 if (len >= 1)
1102 sbus_writeb(0, piobuf);
1103}
1104
1105static void lance_tx_timeout(struct net_device *dev)
1106{
1107 struct lance_private *lp = netdev_priv(dev);
1108
1109 printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
1110 dev->name, sbus_readw(lp->lregs + RDP));
1111 lance_reset(dev);
1112 netif_wake_queue(dev);
1113}
1114
1115static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1116{
1117 struct lance_private *lp = netdev_priv(dev);
1118 int entry, skblen, len;
1119
1120 skblen = skb->len;
1121
1122 len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
1123
1124 spin_lock_irq(&lp->lock);
1125
1126 dev->stats.tx_bytes += len;
1127
1128 entry = lp->tx_new & TX_RING_MOD_MASK;
1129 if (lp->pio_buffer) {
1130 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1131 sbus_writew((-len) | 0xf000, &ib->btx_ring[entry].length);
1132 sbus_writew(0, &ib->btx_ring[entry].misc);
1133 lance_piocopy_from_skb(&ib->tx_buf[entry][0], skb->data, skblen);
1134 if (len != skblen)
1135 lance_piozero(&ib->tx_buf[entry][skblen], len - skblen);
1136 sbus_writeb(LE_T1_POK | LE_T1_OWN, &ib->btx_ring[entry].tmd1_bits);
1137 } else {
1138 struct lance_init_block *ib = lp->init_block_mem;
1139 ib->btx_ring [entry].length = (-len) | 0xf000;
1140 ib->btx_ring [entry].misc = 0;
1141 skb_copy_from_linear_data(skb, &ib->tx_buf [entry][0], skblen);
1142 if (len != skblen)
1143 memset((char *) &ib->tx_buf [entry][skblen], 0, len - skblen);
1144 ib->btx_ring [entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
1145 }
1146
1147 lp->tx_new = TX_NEXT(entry);
1148
1149 if (TX_BUFFS_AVAIL <= 0)
1150 netif_stop_queue(dev);
1151
1152 /* Kick the lance: transmit now */
1153 sbus_writew(LE_C0_INEA | LE_C0_TDMD, lp->lregs + RDP);
1154
1155 /* Read back CSR to invalidate the E-Cache.
1156 * This is needed, because DMA_DSBL_WR_INV is set.
1157 */
1158 if (lp->dregs)
1159 sbus_readw(lp->lregs + RDP);
1160
1161 spin_unlock_irq(&lp->lock);
1162
1163 dev_kfree_skb(skb);
1164
1165 return NETDEV_TX_OK;
1166}
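/* Editorial note: the (-len) | 0xf000 pattern above stores the buffer
 * length as its two's complement in the low 12 bits of the descriptor,
 * with the top four bits set to ones as the chip requires.  For example,
 * len = 60 gives (u16)((-60) | 0xf000) = 0xffc4, i.e. -60 in the low
 * 12 bits.
 */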
1167
1168/* taken from the depca driver */
1169static void lance_load_multicast(struct net_device *dev)
1170{
1171 struct lance_private *lp = netdev_priv(dev);
1172 struct netdev_hw_addr *ha;
1173 u32 crc;
1174 u32 val;
1175
1176 /* set all multicast bits */
1177 if (dev->flags & IFF_ALLMULTI)
1178 val = ~0;
1179 else
1180 val = 0;
1181
1182 if (lp->pio_buffer) {
1183 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1184 sbus_writel(val, &ib->filter[0]);
1185 sbus_writel(val, &ib->filter[1]);
1186 } else {
1187 struct lance_init_block *ib = lp->init_block_mem;
1188 ib->filter [0] = val;
1189 ib->filter [1] = val;
1190 }
1191
1192 if (dev->flags & IFF_ALLMULTI)
1193 return;
1194
1195 /* Add addresses */
1196 netdev_for_each_mc_addr(ha, dev) {
1197 crc = ether_crc_le(6, ha->addr);
1198 crc = crc >> 26;
1199 if (lp->pio_buffer) {
1200 struct lance_init_block __iomem *ib = lp->init_block_iomem;
1201 u16 __iomem *mcast_table = (u16 __iomem *) &ib->filter;
1202 u16 tmp = sbus_readw(&mcast_table[crc>>4]);
1203 tmp |= 1 << (crc & 0xf);
1204 sbus_writew(tmp, &mcast_table[crc>>4]);
1205 } else {
1206 struct lance_init_block *ib = lp->init_block_mem;
1207 u16 *mcast_table = (u16 *) &ib->filter;
1208 mcast_table [crc >> 4] |= 1 << (crc & 0xf);
1209 }
1210 }
1211}
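/* Editorial sketch of the hash used above: the top six bits of the
 * little-endian CRC select one of the 64 logical-address filter bits,
 * addressed as four 16-bit words.  The helper name is local to this
 * example.
 */
static inline void example_mcast_hash(const u8 *addr, u16 filter[4])
{
	u32 bit = ether_crc_le(6, addr) >> 26;	/* 0..63 */

	filter[bit >> 4] |= 1 << (bit & 0xf);	/* word 0..3, bit 0..15 */
}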
1212
1213static void lance_set_multicast(struct net_device *dev)
1214{
1215 struct lance_private *lp = netdev_priv(dev);
1216 struct lance_init_block *ib_mem = lp->init_block_mem;
1217 struct lance_init_block __iomem *ib_iomem = lp->init_block_iomem;
1218 u16 mode;
1219
1220 if (!netif_running(dev))
1221 return;
1222
1223 if (lp->tx_old != lp->tx_new) {
1224 mod_timer(&lp->multicast_timer, jiffies + 4);
1225 netif_wake_queue(dev);
1226 return;
1227 }
1228
1229 netif_stop_queue(dev);
1230
1231 STOP_LANCE(lp);
1232 lp->init_ring(dev);
1233
1234 if (lp->pio_buffer)
1235 mode = sbus_readw(&ib_iomem->mode);
1236 else
1237 mode = ib_mem->mode;
1238 if (dev->flags & IFF_PROMISC) {
1239 mode |= LE_MO_PROM;
1240 if (lp->pio_buffer)
1241 sbus_writew(mode, &ib_iomem->mode);
1242 else
1243 ib_mem->mode = mode;
1244 } else {
1245 mode &= ~LE_MO_PROM;
1246 if (lp->pio_buffer)
1247 sbus_writew(mode, &ib_iomem->mode);
1248 else
1249 ib_mem->mode = mode;
1250 lance_load_multicast(dev);
1251 }
1252 load_csrs(lp);
1253 init_restart_lance(lp);
1254 netif_wake_queue(dev);
1255}
1256
1257static void lance_set_multicast_retry(unsigned long _opaque)
1258{
1259 struct net_device *dev = (struct net_device *) _opaque;
1260
1261 lance_set_multicast(dev);
1262}
1263
1264static void lance_free_hwresources(struct lance_private *lp)
1265{
1266 if (lp->lregs)
1267 of_iounmap(&lp->op->resource[0], lp->lregs, LANCE_REG_SIZE);
1268 if (lp->dregs) {
1269 struct platform_device *ledma = lp->ledma;
1270
1271 of_iounmap(&ledma->resource[0], lp->dregs,
1272 resource_size(&ledma->resource[0]));
1273 }
1274 if (lp->init_block_iomem) {
1275 of_iounmap(&lp->lebuffer->resource[0], lp->init_block_iomem,
1276 sizeof(struct lance_init_block));
1277 } else if (lp->init_block_mem) {
1278 dma_free_coherent(&lp->op->dev,
1279 sizeof(struct lance_init_block),
1280 lp->init_block_mem,
1281 lp->init_block_dvma);
1282 }
1283}
1284
1285/* Ethtool support... */
1286static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1287{
1288 strlcpy(info->driver, "sunlance", sizeof(info->driver));
1289 strlcpy(info->version, "2.02", sizeof(info->version));
1290}
1291
1292static const struct ethtool_ops sparc_lance_ethtool_ops = {
1293 .get_drvinfo = sparc_lance_get_drvinfo,
1294 .get_link = ethtool_op_get_link,
1295};
1296
1297static const struct net_device_ops sparc_lance_ops = {
1298 .ndo_open = lance_open,
1299 .ndo_stop = lance_close,
1300 .ndo_start_xmit = lance_start_xmit,
1301 .ndo_set_rx_mode = lance_set_multicast,
1302 .ndo_tx_timeout = lance_tx_timeout,
1303 .ndo_change_mtu = eth_change_mtu,
1304 .ndo_set_mac_address = eth_mac_addr,
1305 .ndo_validate_addr = eth_validate_addr,
1306};
1307
1308static int __devinit sparc_lance_probe_one(struct platform_device *op,
1309 struct platform_device *ledma,
1310 struct platform_device *lebuffer)
1311{
1312 struct device_node *dp = op->dev.of_node;
1313 static unsigned version_printed;
1314 struct lance_private *lp;
1315 struct net_device *dev;
1316 int i;
1317
1318 dev = alloc_etherdev(sizeof(struct lance_private) + 8);
1319 if (!dev)
1320 return -ENOMEM;
1321
1322 lp = netdev_priv(dev);
1323
1324 if (sparc_lance_debug && version_printed++ == 0)
1325 printk (KERN_INFO "%s", version);
1326
1327 spin_lock_init(&lp->lock);
1328
1329 /* Copy the IDPROM ethernet address to the device structure; later we
1330 * will copy the address in the device structure to the lance
1331 * initialization block.
1332 */
1333 for (i = 0; i < 6; i++)
1334 dev->dev_addr[i] = idprom->id_ethaddr[i];
1335
1336 /* Get the IO region */
1337 lp->lregs = of_ioremap(&op->resource[0], 0,
1338 LANCE_REG_SIZE, lancestr);
1339 if (!lp->lregs) {
1340 printk(KERN_ERR "SunLance: Cannot map registers.\n");
1341 goto fail;
1342 }
1343
1344 lp->ledma = ledma;
1345 if (lp->ledma) {
1346 lp->dregs = of_ioremap(&ledma->resource[0], 0,
1347 resource_size(&ledma->resource[0]),
1348 "ledma");
1349 if (!lp->dregs) {
1350 printk(KERN_ERR "SunLance: Cannot map "
1351 "ledma registers.\n");
1352 goto fail;
1353 }
1354 }
1355
1356 lp->op = op;
1357 lp->lebuffer = lebuffer;
1358 if (lebuffer) {
1359 /* sanity check */
1360 if (lebuffer->resource[0].start & 7) {
1361 printk(KERN_ERR "SunLance: ERROR: Rx and Tx rings not on even boundary.\n");
1362 goto fail;
1363 }
1364 lp->init_block_iomem =
1365 of_ioremap(&lebuffer->resource[0], 0,
1366 sizeof(struct lance_init_block), "lebuffer");
1367 if (!lp->init_block_iomem) {
1368 printk(KERN_ERR "SunLance: Cannot map PIO buffer.\n");
1369 goto fail;
1370 }
1371 lp->init_block_dvma = 0;
1372 lp->pio_buffer = 1;
1373 lp->init_ring = lance_init_ring_pio;
1374 lp->rx = lance_rx_pio;
1375 lp->tx = lance_tx_pio;
1376 } else {
1377 lp->init_block_mem =
1378 dma_alloc_coherent(&op->dev,
1379 sizeof(struct lance_init_block),
1380 &lp->init_block_dvma, GFP_ATOMIC);
1381 if (!lp->init_block_mem) {
1382 printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
1383 goto fail;
1384 }
1385 lp->pio_buffer = 0;
1386 lp->init_ring = lance_init_ring_dvma;
1387 lp->rx = lance_rx_dvma;
1388 lp->tx = lance_tx_dvma;
1389 }
1390 lp->busmaster_regval = of_getintprop_default(dp, "busmaster-regval",
1391 (LE_C3_BSWP |
1392 LE_C3_ACON |
1393 LE_C3_BCON));
1394
1395 lp->name = lancestr;
1396
1397 lp->burst_sizes = 0;
1398 if (lp->ledma) {
1399 struct device_node *ledma_dp = ledma->dev.of_node;
1400 struct device_node *sbus_dp;
1401 unsigned int sbmask;
1402 const char *prop;
1403 u32 csr;
1404
1405 /* Find burst-size property for ledma */
1406 lp->burst_sizes = of_getintprop_default(ledma_dp,
1407 "burst-sizes", 0);
1408
1409 /* ledma may be capable of fast bursts, but sbus may not. */
1410 sbus_dp = ledma_dp->parent;
1411 sbmask = of_getintprop_default(sbus_dp, "burst-sizes",
1412 DMA_BURSTBITS);
1413 lp->burst_sizes &= sbmask;
1414
1415 /* Get the cable-selection property */
1416 prop = of_get_property(ledma_dp, "cable-selection", NULL);
1417 if (!prop || prop[0] == '\0') {
1418 struct device_node *nd;
1419
1420 printk(KERN_INFO "SunLance: using "
1421 "auto-carrier-detection.\n");
1422
1423 nd = of_find_node_by_path("/options");
1424 if (!nd)
1425 goto no_link_test;
1426
1427 prop = of_get_property(nd, "tpe-link-test?", NULL);
1428 if (!prop)
1429 goto no_link_test;
1430
1431 if (strcmp(prop, "true")) {
1432 printk(KERN_NOTICE "SunLance: warning: overriding option "
1433 "'tpe-link-test?'\n");
1434 printk(KERN_NOTICE "SunLance: warning: mail any problems "
1435 "to ecd@skynet.be\n");
1436 auxio_set_lte(AUXIO_LTE_ON);
1437 }
1438no_link_test:
1439 lp->auto_select = 1;
1440 lp->tpe = 0;
1441 } else if (!strcmp(prop, "aui")) {
1442 lp->auto_select = 0;
1443 lp->tpe = 0;
1444 } else {
1445 lp->auto_select = 0;
1446 lp->tpe = 1;
1447 }
1448
1449 /* Reset ledma */
1450 csr = sbus_readl(lp->dregs + DMA_CSR);
1451 sbus_writel(csr | DMA_RST_ENET, lp->dregs + DMA_CSR);
1452 udelay(200);
1453 sbus_writel(csr & ~DMA_RST_ENET, lp->dregs + DMA_CSR);
1454 } else
1455 lp->dregs = NULL;
1456
1457 lp->dev = dev;
1458 SET_NETDEV_DEV(dev, &op->dev);
1459 dev->watchdog_timeo = 5*HZ;
1460 dev->ethtool_ops = &sparc_lance_ethtool_ops;
1461 dev->netdev_ops = &sparc_lance_ops;
1462
1463 dev->irq = op->archdata.irqs[0];
1464
1465 /* We cannot sleep if the chip is busy during a
1466 * multicast list update event, because such events
1467 * can occur from interrupt context (e.g. IPv6). So we
1468 * use a timer to try again later when necessary. -DaveM
1469 */
1470 init_timer(&lp->multicast_timer);
1471 lp->multicast_timer.data = (unsigned long) dev;
1472 lp->multicast_timer.function = lance_set_multicast_retry;
1473
1474 if (register_netdev(dev)) {
1475 printk(KERN_ERR "SunLance: Cannot register device.\n");
1476 goto fail;
1477 }
1478
1479 dev_set_drvdata(&op->dev, lp);
1480
1481 printk(KERN_INFO "%s: LANCE %pM\n",
1482 dev->name, dev->dev_addr);
1483
1484 return 0;
1485
1486fail:
1487 lance_free_hwresources(lp);
1488 free_netdev(dev);
1489 return -ENODEV;
1490}
1491
1492static int __devinit sunlance_sbus_probe(struct platform_device *op)
1493{
1494 struct platform_device *parent = to_platform_device(op->dev.parent);
1495 struct device_node *parent_dp = parent->dev.of_node;
1496 int err;
1497
1498 if (!strcmp(parent_dp->name, "ledma")) {
1499 err = sparc_lance_probe_one(op, parent, NULL);
1500 } else if (!strcmp(parent_dp->name, "lebuffer")) {
1501 err = sparc_lance_probe_one(op, NULL, parent);
1502 } else
1503 err = sparc_lance_probe_one(op, NULL, NULL);
1504
1505 return err;
1506}
1507
1508static int __devexit sunlance_sbus_remove(struct platform_device *op)
1509{
1510 struct lance_private *lp = dev_get_drvdata(&op->dev);
1511 struct net_device *net_dev = lp->dev;
1512
1513 unregister_netdev(net_dev);
1514
1515 lance_free_hwresources(lp);
1516
1517 free_netdev(net_dev);
1518
1519 dev_set_drvdata(&op->dev, NULL);
1520
1521 return 0;
1522}
1523
1524static const struct of_device_id sunlance_sbus_match[] = {
1525 {
1526 .name = "le",
1527 },
1528 {},
1529};
1530
1531MODULE_DEVICE_TABLE(of, sunlance_sbus_match);
1532
1533static struct platform_driver sunlance_sbus_driver = {
1534 .driver = {
1535 .name = "sunlance",
1536 .owner = THIS_MODULE,
1537 .of_match_table = sunlance_sbus_match,
1538 },
1539 .probe = sunlance_sbus_probe,
1540 .remove = __devexit_p(sunlance_sbus_remove),
1541};
1542
1543
1544/* Find all the lance cards on the system and initialize them */
1545static int __init sparc_lance_init(void)
1546{
1547 return platform_driver_register(&sunlance_sbus_driver);
1548}
1549
1550static void __exit sparc_lance_exit(void)
1551{
1552 platform_driver_unregister(&sunlance_sbus_driver);
1553}
1554
1555module_init(sparc_lance_init);
1556module_exit(sparc_lance_exit);