about · summary · refs · log · tree · commit · diff · stats
path: root/drivers/net/ethernet/apple
diff options
context:
space:
mode:
authorJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-05-16 04:39:01 -0400
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2011-08-11 19:29:07 -0400
commit8fb6b0908176704a3ea22005e8a9fa3ebf35b5be (patch)
tree1eefa29ce73cd5be2b0fcbd12d03ac148d171cc5 /drivers/net/ethernet/apple
parent7ac6653a085b41405758bc16b2525db56ee0a23f (diff)
bmac/mace/macmace/mac89x0/cs89x0: Move the Macintosh (Apple) drivers
Move the Apple drivers into driver/net/ethernet/apple/ and make the necessary Kconfig and Makefile changes. CC: Paul Mackerras <paulus@samba.org> CC: Paul Mackerras <paulus@au.ibm.com> CC: Russell Nelson <nelson@crynwr.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet/apple')
-rw-r--r--drivers/net/ethernet/apple/Kconfig96
-rw-r--r--drivers/net/ethernet/apple/Makefile9
-rw-r--r--drivers/net/ethernet/apple/bmac.c1685
-rw-r--r--drivers/net/ethernet/apple/bmac.h164
-rw-r--r--drivers/net/ethernet/apple/cs89x0.c1913
-rw-r--r--drivers/net/ethernet/apple/cs89x0.h465
-rw-r--r--drivers/net/ethernet/apple/mac89x0.c634
-rw-r--r--drivers/net/ethernet/apple/mace.c1031
-rw-r--r--drivers/net/ethernet/apple/mace.h173
-rw-r--r--drivers/net/ethernet/apple/macmace.c799
10 files changed, 6969 insertions, 0 deletions
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
new file mode 100644
index 000000000000..fc796bc353d0
--- /dev/null
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -0,0 +1,96 @@
#
# Apple device configuration
#

config NET_VENDOR_APPLE
	bool "Apple devices"
	depends on (PPC_PMAC && PPC32) || MAC || ISA || EISA || MACH_IXDP2351 \
		|| ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440
	---help---
	  If you have a network (Ethernet) card belonging to this class, say Y
	  and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  Note that the answer to this question doesn't directly affect the
	  kernel: saying N will just cause the configurator to skip all
	  the questions about Apple devices. If you say Y, you will be asked
	  for your specific card in the following questions.

if NET_VENDOR_APPLE

config MACE
	tristate "MACE (Power Mac ethernet) support"
	depends on PPC_PMAC && PPC32
	select CRC32
	---help---
	  Power Macintoshes and clones with Ethernet built-in on the
	  motherboard will usually use a MACE (Medium Access Control for
	  Ethernet) interface. Say Y to include support for the MACE chip.

	  To compile this driver as a module, choose M here: the module
	  will be called mace.

config MACE_AAUI_PORT
	bool "Use AAUI port instead of TP by default"
	depends on MACE
	---help---
	  Some Apple machines (notably the Apple Network Server) which use the
	  MACE ethernet chip have an Apple AUI port (small 15-pin connector),
	  instead of an 8-pin RJ45 connector for twisted-pair ethernet.  Say
	  Y here if you have such a machine.  If unsure, say N.
	  The driver will default to AAUI on ANS anyway, and if you use it as
	  a module, you can provide the port_aaui=0|1 to force the driver.

config BMAC
	tristate "BMAC (G3 ethernet) support"
	depends on PPC_PMAC && PPC32
	select CRC32
	---help---
	  Say Y for support of BMAC Ethernet interfaces. These are used on G3
	  computers.

	  To compile this driver as a module, choose M here: the module
	  will be called bmac.

config MAC89x0
	tristate "Macintosh CS89x0 based ethernet cards"
	depends on MAC
	---help---
	  Support for CS89x0 chipset based Ethernet cards.  If you have a
	  Nubus or LC-PDS network (Ethernet) card of this type, say Y and
	  read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

	  To compile this driver as a module, choose M here. This module will
	  be called mac89x0.

config MACMACE
	bool "Macintosh (AV) onboard MACE ethernet"
	depends on MAC
	select CRC32
	---help---
	  Support for the onboard AMD 79C940 MACE Ethernet controller used in
	  the 660AV and 840AV Macintosh.  If you have one of these Macintoshes
	  say Y and read the Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto>.

config CS89x0
	tristate "CS89x0 support"
	depends on (ISA || EISA || MACH_IXDP2351 \
		|| ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440)
	---help---
	  Support for CS89x0 chipset based Ethernet cards. If you have a
	  network (Ethernet) card of this type, say Y and read the
	  Ethernet-HOWTO, available from
	  <http://www.tldp.org/docs.html#howto> as well as
	  <file:Documentation/networking/cs89x0.txt>.

	  To compile this driver as a module, choose M here. The module
	  will be called cs89x0.

config CS89x0_NONISA_IRQ
	def_bool y
	depends on CS89x0 != n
	depends on MACH_IXDP2351 || ARCH_IXDP2X01 || MACH_MX31ADS || MACH_QQ2440

endif # NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/apple/Makefile b/drivers/net/ethernet/apple/Makefile
new file mode 100644
index 000000000000..9d300864461f
--- /dev/null
+++ b/drivers/net/ethernet/apple/Makefile
@@ -0,0 +1,9 @@
#
# Makefile for the Apple network device drivers.
#
# Each CONFIG_* option builds a single-object module of the same name.

obj-$(CONFIG_MACE) += mace.o
obj-$(CONFIG_BMAC) += bmac.o
obj-$(CONFIG_MAC89x0) += mac89x0.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACMACE) += macmace.o
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
new file mode 100644
index 000000000000..45e45e8d3d66
--- /dev/null
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -0,0 +1,1685 @@
1/*
2 * Network device driver for the BMAC ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
4 *
5 * Copyright (C) 1998 Randy Gobbel.
6 *
7 * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to
8 * dynamic procfs inode.
9 */
10#include <linux/interrupt.h>
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/string.h>
17#include <linux/timer.h>
18#include <linux/proc_fs.h>
19#include <linux/init.h>
20#include <linux/spinlock.h>
21#include <linux/crc32.h>
22#include <linux/bitrev.h>
23#include <linux/ethtool.h>
24#include <linux/slab.h>
25#include <asm/prom.h>
26#include <asm/dbdma.h>
27#include <asm/io.h>
28#include <asm/page.h>
29#include <asm/pgtable.h>
30#include <asm/machdep.h>
31#include <asm/pmac_feature.h>
32#include <asm/macio.h>
33#include <asm/irq.h>
34
35#include "bmac.h"
36
/* Round an address down/up to a PAGE_SIZE boundary (DMA buffer alignment). */
#define trunc_page(x)	((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1))))
#define round_page(x)	trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1)))

/*
 * CRC polynomial - used in working out multicast filter bits.
 */
#define ENET_CRCPOLY 0x04c11db7

/* switch to use multicast code lifted from sunhme driver */
#define SUNHME_MULTICAST

#define N_RX_RING	64	/* receive DBDMA ring slots */
#define N_TX_RING	32	/* transmit DBDMA ring slots */
#define MAX_TX_ACTIVE	1
#define ETHERCRC	4	/* bytes of FCS at the end of each frame */
#define ETHERMINPACKET	64
#define ETHERMTU	1500
#define RX_BUFLEN	(ETHERMTU + 14 + ETHERCRC + 2)	/* MTU + ether header + FCS + 2 pad */
#define TX_TIMEOUT	HZ	/* 1 second */

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80

/* Debug logging stub: compiled out; redefine to a printk wrapper to enable. */
#define XXDEBUG(args)
61
/* Per-device private state; lives inside the PRIV_BYTES allocation below. */
struct bmac_data {
	/* volatile struct bmac *bmac; */
	struct sk_buff_head *queue;
	volatile struct dbdma_regs __iomem *tx_dma;	/* transmit DBDMA channel regs */
	int tx_dma_intr;				/* transmit DMA interrupt line */
	volatile struct dbdma_regs __iomem *rx_dma;	/* receive DBDMA channel regs */
	int rx_dma_intr;				/* receive DMA interrupt line */
	volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
	volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
	struct macio_dev *mdev;
	int is_bmac_plus;		/* chip variant; affects XCVR/PHY setup */
	struct sk_buff *rx_bufs[N_RX_RING];
	int rx_fill;			/* rx ring producer index */
	int rx_empty;			/* rx ring consumer index */
	struct sk_buff *tx_bufs[N_TX_RING];
	int tx_fill;			/* tx ring producer index */
	int tx_empty;			/* tx ring consumer index */
	unsigned char tx_fullup;	/* set when tx ring full and queue stopped */
	struct timer_list tx_timeout;
	int timeout_active;
	int sleeping;			/* set while suspended */
	int opened;
	unsigned short hash_use_count[64];	/* refcount per multicast hash bit */
	unsigned short hash_table_mask[4];	/* soft copy of 64-bit hash filter */
	spinlock_t lock;		/* protects ring indices and chip access */
};
88
#if 0 /* Move that to ethtool */

/* Name/offset pairs for dumping the chip registers (dead code; kept as a
 * reference list of the BMAC register map until an ethtool hook exists). */
typedef struct bmac_reg_entry {
	char *name;
	unsigned short reg_offset;
} bmac_reg_entry_t;

#define N_REG_ENTRIES 31

static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = {
	{"MEMADD", MEMADD},
	{"MEMDATAHI", MEMDATAHI},
	{"MEMDATALO", MEMDATALO},
	{"TXPNTR", TXPNTR},
	{"RXPNTR", RXPNTR},
	{"IPG1", IPG1},
	{"IPG2", IPG2},
	{"ALIMIT", ALIMIT},
	{"SLOT", SLOT},
	{"PALEN", PALEN},
	{"PAPAT", PAPAT},
	{"TXSFD", TXSFD},
	{"JAM", JAM},
	{"TXCFG", TXCFG},
	{"TXMAX", TXMAX},
	{"TXMIN", TXMIN},
	{"PAREG", PAREG},
	{"DCNT", DCNT},
	{"NCCNT", NCCNT},
	{"NTCNT", NTCNT},
	{"EXCNT", EXCNT},
	{"LTCNT", LTCNT},
	{"TXSM", TXSM},
	{"RXCFG", RXCFG},
	{"RXMAX", RXMAX},
	{"RXMIN", RXMIN},
	{"FRCNT", FRCNT},
	{"AECNT", AECNT},
	{"FECNT", FECNT},
	{"RXSM", RXSM},
	{"RXCV", RXCV}
};

#endif
133
/* Fallback receive buffer used when an skb allocation fails (see
 * bmac_construct_rxbuff); data landing here is simply discarded. */
static unsigned char *bmac_emergency_rxbuf;

/*
 * Number of bytes of private data per BMAC: allow enough for
 * the rx and tx dma commands plus a branch dma command each,
 * and another 16 bytes to allow us to align the dma command
 * buffers on a 16 byte boundary.
 */
#define PRIV_BYTES	(sizeof(struct bmac_data) \
	+ (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \
	+ sizeof(struct sk_buff_head))

static int bmac_open(struct net_device *dev);
static int bmac_close(struct net_device *dev);
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev);
static void bmac_set_multicast(struct net_device *dev);
static void bmac_reset_and_enable(struct net_device *dev);
static void bmac_start_chip(struct net_device *dev);
static void bmac_init_chip(struct net_device *dev);
static void bmac_init_registers(struct net_device *dev);
static void bmac_enable_and_reset_chip(struct net_device *dev);
static int bmac_set_address(struct net_device *dev, void *addr);
static irqreturn_t bmac_misc_intr(int irq, void *dev_id);
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
static void bmac_set_timeout(struct net_device *dev);
static void bmac_tx_timeout(unsigned long data);
static int bmac_output(struct sk_buff *skb, struct net_device *dev);
static void bmac_start(struct net_device *dev);

/* DBDMA control register writes pair each bit with a mask bit in the
 * upper half: SET writes bit+mask, CLEAR writes the mask only. */
#define	DBDMA_SET(x)	( ((x) | (x) << 16) )
#define	DBDMA_CLEAR(x)	( (x) << 16)
166
/* Store a 32-bit value to little-endian DBDMA register space using the
 * PowerPC byte-reversed store (the CPU is big-endian). */
static inline void
dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
{
	__asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
}
172
/* Load a 32-bit value from little-endian DBDMA register space via the
 * PowerPC byte-reversed load. */
static inline unsigned long
dbdma_ld32(volatile __u32 __iomem *a)
{
	__u32 swap;
	__asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a));
	return swap;
}
180
/* Kick the DMA channel: set RUN|WAKE and clear PAUSE|DEAD so it resumes
 * processing its command list. */
static void
dbdma_continue(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD));
	eieio();	/* order the MMIO write before anything that follows */
}
188
/* Stop and reset a DMA channel, then spin until the RUN status bit drops.
 * NOTE(review): busy-wait with no timeout — assumes the hardware always
 * acknowledges the reset. */
static void
dbdma_reset(volatile struct dbdma_regs __iomem *dmap)
{
	dbdma_st32(&dmap->control,
		   DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN));
	eieio();
	while (dbdma_ld32(&dmap->status) & RUN)
		eieio();
}
198
/* Fill in one DBDMA command descriptor (all fields little-endian) and
 * clear its status/result words so completion can be detected later. */
static void
dbdma_setcmd(volatile struct dbdma_cmd *cp,
	     unsigned short cmd, unsigned count, unsigned long addr,
	     unsigned long cmd_dep)
{
	out_le16(&cp->command, cmd);
	out_le16(&cp->req_count, count);
	out_le32(&cp->phy_addr, addr);
	out_le32(&cp->cmd_dep, cmd_dep);
	out_le16(&cp->xfer_status, 0);
	out_le16(&cp->res_count, 0);
}
211
212static inline
213void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data )
214{
215 out_le16((void __iomem *)dev->base_addr + reg_offset, data);
216}
217
218
219static inline
220unsigned short bmread(struct net_device *dev, unsigned long reg_offset )
221{
222 return in_le16((void __iomem *)dev->base_addr + reg_offset);
223}
224
/* Quiesce both DMA channels (if already mapped), then ask the platform
 * feature layer to power-cycle/enable the BMAC cell. */
static void
bmac_enable_and_reset_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	if (rd)
		dbdma_reset(rd);
	if (td)
		dbdma_reset(td);

	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1);
}
239
#define MIFDELAY	udelay(10)

/* Bit-bang @nb bits in from the MII interface, MSB first, by toggling the
 * MDC clock in MIFCSR and sampling MDIO (bit 3) on each cycle.  Ends with
 * one extra clock to release the bus. */
static unsigned int
bmac_mif_readbits(struct net_device *dev, int nb)
{
	unsigned int val = 0;

	while (--nb >= 0) {
		bmwrite(dev, MIFCSR, 0);
		MIFDELAY;
		if (bmread(dev, MIFCSR) & 8)
			val |= 1 << nb;
		bmwrite(dev, MIFCSR, 1);
		MIFDELAY;
	}
	bmwrite(dev, MIFCSR, 0);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	return val;
}
261
/* Bit-bang @nb bits of @val out to the MII interface, MSB first: drive
 * MDIO (bit 1) with output enable (bit 2), then clock it out (bit 0). */
static void
bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb)
{
	int b;

	while (--nb >= 0) {
		b = (val & (1 << nb))? 6: 4;
		bmwrite(dev, MIFCSR, b);
		MIFDELAY;
		bmwrite(dev, MIFCSR, b|1);
		MIFDELAY;
	}
}
275
/* Read one PHY register over MII.  Sequence: 32-bit preamble of ones,
 * start+read opcode (0110), 10 bits of PHY/reg address, bus turnaround,
 * then clock in 17 bits (turnaround bit + 16 data bits). */
static unsigned int
bmac_mif_read(struct net_device *dev, unsigned int addr)
{
	unsigned int val;

	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 6, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmwrite(dev, MIFCSR, 2);
	MIFDELAY;
	bmwrite(dev, MIFCSR, 1);
	MIFDELAY;
	val = bmac_mif_readbits(dev, 17);
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	return val;
}
295
/* Write one PHY register over MII: preamble, start+write opcode (0101),
 * 10 address bits, turnaround pattern (10), 16 data bits, idle (11). */
static void
bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val)
{
	bmwrite(dev, MIFCSR, 4);
	MIFDELAY;
	bmac_mif_writebits(dev, ~0U, 32);
	bmac_mif_writebits(dev, 5, 4);
	bmac_mif_writebits(dev, addr, 10);
	bmac_mif_writebits(dev, 2, 2);
	bmac_mif_writebits(dev, val, 16);
	bmac_mif_writebits(dev, 3, 2);
}
308
/* Bring the MAC out of reset and program all chip registers: transceiver
 * mode, random seed, counters, FIFOs, hash filter, station address, and
 * finally the receive config and interrupt mask.  Called with the chip
 * quiescent (from bmac_init_chip). */
static void
bmac_init_registers(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile unsigned short regValue;
	unsigned short *pWord16;
	int i;

	/* XXDEBUG(("bmac: enter init_registers\n")); */

	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, TXRST, TxResetBit);

	/* Poll up to ~1s (100 x 10ms) for the transmit reset bit to clear. */
	i = 100;
	do {
		--i;
		udelay(10000);
		regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */
	} while ((regValue & TxResetBit) && i > 0);

	/* Plain BMAC (not bmac+) needs explicit transceiver interface setup. */
	if (!bp->is_bmac_plus) {
		regValue = bmread(dev, XCVRIF);
		regValue |= ClkBit | SerialMode | COLActiveLow;
		bmwrite(dev, XCVRIF, regValue);
		udelay(10000);
	}

	bmwrite(dev, RSEED, (unsigned short)0x1968);	/* backoff random seed */

	regValue = bmread(dev, XIFC);
	regValue |= TxOutputEnable;
	bmwrite(dev, XIFC, regValue);

	bmread(dev, PAREG);

	/* set collision counters to 0 */
	bmwrite(dev, NCCNT, 0);
	bmwrite(dev, NTCNT, 0);
	bmwrite(dev, EXCNT, 0);
	bmwrite(dev, LTCNT, 0);

	/* set rx counters to 0 */
	bmwrite(dev, FRCNT, 0);
	bmwrite(dev, LECNT, 0);
	bmwrite(dev, AECNT, 0);
	bmwrite(dev, FECNT, 0);
	bmwrite(dev, RXCV, 0);

	/* set tx fifo information */
	bmwrite(dev, TXTH, 4);	/* 4 octets before tx starts */

	bmwrite(dev, TXFIFOCSR, 0);	/* first disable txFIFO */
	bmwrite(dev, TXFIFOCSR, TxFIFOEnable );

	/* set rx fifo information */
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );

	//bmwrite(dev, TXCFG, TxMACEnable);	       	/* TxNeverGiveUp maybe later */
	bmread(dev, STATUS);		/* read it just to clear it */

	/* zero out the chip Hash Filter registers */
	for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); 	/* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); 	/* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); 	/* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); 	/* bits 63 - 48 */

	/* Station address is written 16 bits at a time. */
	pWord16 = (unsigned short *)dev->dev_addr;
	bmwrite(dev, MADD0, *pWord16++);
	bmwrite(dev, MADD1, *pWord16++);
	bmwrite(dev, MADD2, *pWord16);

	bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);

	bmwrite(dev, INTDISABLE, EnableNormal);
}
386
#if 0
/* Dead code: mask/unmask all chip interrupts via INTDISABLE. */
static void
bmac_disable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, DisableAll);
}

static void
bmac_enable_interrupts(struct net_device *dev)
{
	bmwrite(dev, INTDISABLE, EnableNormal);
}
#endif
400
401
/* Start traffic flowing: resume the receive DMA channel and set the
 * MAC-enable bits in the transmit and receive config registers. */
static void
bmac_start_chip(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	unsigned short	oldConfig;

	/* enable rx dma channel */
	dbdma_continue(rd);

	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	/* turn on rx plus any other bits already on (promiscuous possibly) */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	udelay(20000);
}
420
/* Dump the PHY registers to the kernel log and, on bmac+, program the
 * advertisement register and (re)start autonegotiation. */
static void
bmac_init_phy(struct net_device *dev)
{
	unsigned int addr;
	struct bmac_data *bp = netdev_priv(dev);

	printk(KERN_DEBUG "phy registers:");
	for (addr = 0; addr < 32; ++addr) {
		if ((addr & 7) == 0)
			printk(KERN_DEBUG);
		printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr));
	}
	printk(KERN_CONT "\n");

	if (bp->is_bmac_plus) {
		unsigned int capable, ctrl;

		ctrl = bmac_mif_read(dev, 0);
		/* Derive the advertisement word from the PHY's status
		 * capability bits (reg 1), plus the CSMA selector bit. */
		capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1;
		if (bmac_mif_read(dev, 4) != capable ||
		    (ctrl & 0x1000) == 0) {
			bmac_mif_write(dev, 4, capable);
			bmac_mif_write(dev, 0, 0x1200);	/* autoneg enable + restart */
		} else
			bmac_mif_write(dev, 0, 0x1000);	/* autoneg enable */
	}
}
448
/* Full chip bring-up: PHY first, then the MAC registers. */
static void bmac_init_chip(struct net_device *dev)
{
	bmac_init_phy(dev);
	bmac_init_registers(dev);
}
454
#ifdef CONFIG_PM
/* macio suspend hook: detach the netdev, stop the timeout timer, mask
 * IRQs, disable MAC and DMA, release ring buffers, then power the cell
 * down via the platform feature call. */
static int bmac_suspend(struct macio_dev *mdev, pm_message_t state)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	unsigned short config;
	int i;

	netif_device_detach(dev);
	/* prolly should wait for dma to finish & turn off the chip */
	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active) {
		del_timer(&bp->tx_timeout);
		bp->timeout_active = 0;
	}
	disable_irq(dev->irq);
	disable_irq(bp->tx_dma_intr);
	disable_irq(bp->rx_dma_intr);
	bp->sleeping = 1;	/* makes bmac_set_multicast a no-op while down */
	spin_unlock_irqrestore(&bp->lock, flags);
	if (bp->opened) {
		volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
		volatile struct dbdma_regs __iomem *td = bp->tx_dma;

		config = bmread(dev, RXCFG);
		bmwrite(dev, RXCFG, (config & ~RxMACEnable));
		config = bmread(dev, TXCFG);
		bmwrite(dev, TXCFG, (config & ~TxMACEnable));
		bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
		/* disable rx and tx dma */
		st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE));	/* clear run bit */
		/* free some skb's */
		for (i=0; i<N_RX_RING; i++) {
			if (bp->rx_bufs[i] != NULL) {
				dev_kfree_skb(bp->rx_bufs[i]);
				bp->rx_bufs[i] = NULL;
			}
		}
		for (i = 0; i<N_TX_RING; i++) {
			if (bp->tx_bufs[i] != NULL) {
				dev_kfree_skb(bp->tx_bufs[i]);
				bp->tx_bufs[i] = NULL;
			}
		}
	}
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	return 0;
}
505
/* macio resume hook: re-enable the chip if it was open, unmask the IRQs
 * disabled in bmac_suspend, and reattach the netdev. */
static int bmac_resume(struct macio_dev *mdev)
{
	struct net_device* dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	/* see if this is enough */
	if (bp->opened)
		bmac_reset_and_enable(dev);

	enable_irq(dev->irq);
	enable_irq(bp->tx_dma_intr);
	enable_irq(bp->rx_dma_intr);
	netif_device_attach(dev);

	return 0;
}
#endif /* CONFIG_PM */
523
524static int bmac_set_address(struct net_device *dev, void *addr)
525{
526 struct bmac_data *bp = netdev_priv(dev);
527 unsigned char *p = addr;
528 unsigned short *pWord16;
529 unsigned long flags;
530 int i;
531
532 XXDEBUG(("bmac: enter set_address\n"));
533 spin_lock_irqsave(&bp->lock, flags);
534
535 for (i = 0; i < 6; ++i) {
536 dev->dev_addr[i] = p[i];
537 }
538 /* load up the hardware address */
539 pWord16 = (unsigned short *)dev->dev_addr;
540 bmwrite(dev, MADD0, *pWord16++);
541 bmwrite(dev, MADD1, *pWord16++);
542 bmwrite(dev, MADD2, *pWord16);
543
544 spin_unlock_irqrestore(&bp->lock, flags);
545 XXDEBUG(("bmac: exit set_address\n"));
546 return 0;
547}
548
/* (Re)arm the transmit watchdog timer for TX_TIMEOUT jiffies from now,
 * replacing any timer already pending. */
static inline void bmac_set_timeout(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->timeout_active)
		del_timer(&bp->tx_timeout);
	bp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	bp->tx_timeout.function = bmac_tx_timeout;
	bp->tx_timeout.data = (unsigned long) dev;
	add_timer(&bp->tx_timeout);
	bp->timeout_active = 1;
	spin_unlock_irqrestore(&bp->lock, flags);
}
564
565static void
566bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
567{
568 void *vaddr;
569 unsigned long baddr;
570 unsigned long len;
571
572 len = skb->len;
573 vaddr = skb->data;
574 baddr = virt_to_bus(vaddr);
575
576 dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0);
577}
578
579static void
580bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp)
581{
582 unsigned char *addr = skb? skb->data: bmac_emergency_rxbuf;
583
584 dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN,
585 virt_to_bus(addr), 0);
586}
587
/* Reset the transmit ring: clear all descriptors, reset the producer and
 * consumer indices, terminate the list with a branch back to its head,
 * and point the DMA channel at it (without starting it). */
static void
bmac_init_tx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;

	memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd));

	bp->tx_empty = 0;
	bp->tx_fill = 0;
	bp->tx_fullup = 0;

	/* put a branch at the end of the tx command list */
	dbdma_setcmd(&bp->tx_cmds[N_TX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds));

	/* reset tx dma */
	dbdma_reset(td);
	out_le32(&td->wait_sel, 0x00200020);
	out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds));
}
608
/* Reset the receive ring: allocate an skb for every slot that lacks one
 * (2 bytes reserved so the IP header lands 4-byte aligned), build the
 * descriptors, add the wrap-around branch, and point the DMA channel at
 * the list.  Always returns 1. */
static int
bmac_init_rx_ring(struct bmac_data *bp)
{
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	int i;
	struct sk_buff *skb;

	/* initialize list of sk_buffs for receiving and set up recv dma */
	memset((char *)bp->rx_cmds, 0,
	       (N_RX_RING + 1) * sizeof(struct dbdma_cmd));
	for (i = 0; i < N_RX_RING; i++) {
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(skb, 2);
		}
		/* A NULL skb here falls back to the emergency buffer. */
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
	}

	bp->rx_empty = 0;
	bp->rx_fill = i;

	/* Put a branch back to the beginning of the receive command list */
	dbdma_setcmd(&bp->rx_cmds[N_RX_RING],
		     (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds));

	/* start rx dma */
	dbdma_reset(rd);
	out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds));

	return 1;
}
641
642
/* Queue @skb on the transmit ring and kick the DMA channel.  Returns 0 on
 * success, -1 (with the queue stopped) when the ring is full.  A STOP
 * command is written into the next slot so the channel halts after this
 * packet until more work is queued. */
static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	int i;

	/* see if there's a free slot in the tx ring */
	/* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */
	/* 	     bp->tx_empty, bp->tx_fill)); */
	i = bp->tx_fill + 1;
	if (i >= N_TX_RING)
		i = 0;
	if (i == bp->tx_empty) {
		netif_stop_queue(dev);
		bp->tx_fullup = 1;
		XXDEBUG(("bmac_transmit_packet: tx ring full\n"));
		return -1;		/* can't take it at the moment */
	}

	dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0);

	bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]);

	bp->tx_bufs[bp->tx_fill] = skb;
	bp->tx_fill = i;

	dev->stats.tx_bytes += skb->len;

	dbdma_continue(td);

	return 0;
}
675
/* Counts rx interrupts so the XXDEBUG traces only fire for the first few. */
static int rxintcount;

/* Receive DMA interrupt: walk the ring from rx_empty, handing each
 * completed buffer (ACTIVE set in its status) up the stack, replacing it
 * with a fresh skb, and re-arming the descriptor; finally kick the DMA
 * channel again. */
static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	int i, nb, stat;
	struct sk_buff *skb;
	unsigned int residual;
	int last;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (++rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr\n"));
	}

	last = -1;
	i = bp->rx_empty;

	while (1) {
		cp = &bp->rx_cmds[i];
		stat = ld_le16(&cp->xfer_status);
		residual = ld_le16(&cp->res_count);
		if ((stat & ACTIVE) == 0)
			break;		/* descriptor not yet completed by DMA */
		/* Received byte count = buffer size - unused space - pad. */
		nb = RX_BUFLEN - residual - 2;
		if (nb < (ETHERMINPACKET - ETHERCRC)) {
			skb = NULL;	/* runt frame: count the error, drop it */
			dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
		} else {
			skb = bp->rx_bufs[i];
			bp->rx_bufs[i] = NULL;
		}
		if (skb != NULL) {
			nb -= ETHERCRC;	/* strip the FCS before handing up */
			skb_put(skb, nb);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			++dev->stats.rx_packets;
			dev->stats.rx_bytes += nb;
		} else {
			++dev->stats.rx_dropped;
		}
		/* Refill the slot; on allocation failure the descriptor will
		 * use the emergency buffer (frame discarded). */
		if ((skb = bp->rx_bufs[i]) == NULL) {
			bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2);
			if (skb != NULL)
				skb_reserve(bp->rx_bufs[i], 2);
		}
		bmac_construct_rxbuff(skb, &bp->rx_cmds[i]);
		st_le16(&cp->res_count, 0);
		st_le16(&cp->xfer_status, 0);
		last = i;
		if (++i >= N_RX_RING) i = 0;
	}

	if (last != -1) {
		bp->rx_fill = last;
		bp->rx_empty = i;
	}

	dbdma_continue(rd);
	spin_unlock_irqrestore(&bp->lock, flags);

	if (rxintcount < 10) {
		XXDEBUG(("bmac_rxdma_intr done\n"));
	}
	return IRQ_HANDLED;
}
749
/* Counts tx interrupts so the XXDEBUG traces only fire for the first few. */
static int txintcount;

/* Transmit DMA interrupt: reap completed descriptors from tx_empty
 * forward, freeing their skbs and waking the queue, then restart any
 * deferred output via bmac_start(). */
static irqreturn_t bmac_txdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_cmd *cp;
	int stat;
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);

	if (txintcount++ < 10) {
		XXDEBUG(("bmac_txdma_intr\n"));
	}

	/*     del_timer(&bp->tx_timeout); */
	/*     bp->timeout_active = 0; */

	while (1) {
		cp = &bp->tx_cmds[bp->tx_empty];
		stat = ld_le16(&cp->xfer_status);
		if (txintcount < 10) {
			XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat));
		}
		if (!(stat & ACTIVE)) {
			/*
			 * status field might not have been filled by DBDMA
			 */
			/* Only stop if the channel really is parked here. */
			if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr)))
				break;
		}

		if (bp->tx_bufs[bp->tx_empty]) {
			++dev->stats.tx_packets;
			dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]);
		}
		bp->tx_bufs[bp->tx_empty] = NULL;
		bp->tx_fullup = 0;
		netif_wake_queue(dev);
		if (++bp->tx_empty >= N_TX_RING)
			bp->tx_empty = 0;
		if (bp->tx_empty == bp->tx_fill)
			break;
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (txintcount < 10) {
		XXDEBUG(("bmac_txdma_intr done->bmac_start\n"));
	}

	bmac_start(dev);
	return IRQ_HANDLED;
}
805
#ifndef SUNHME_MULTICAST
/* Real fast bit-reversal algorithm, 6-bit values */
/* reverse6[i] is i with its low 6 bits mirrored (lookup table). */
static int reverse6[64] = {
	0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38,
	0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c,
	0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a,
	0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e,
	0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39,
	0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d,
	0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b,
	0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f
};
818
819static unsigned int
820crc416(unsigned int curval, unsigned short nxtval)
821{
822 register unsigned int counter, cur = curval, next = nxtval;
823 register int high_crc_set, low_data_set;
824
825 /* Swap bytes */
826 next = ((next & 0x00FF) << 8) | (next >> 8);
827
828 /* Compute bit-by-bit */
829 for (counter = 0; counter < 16; ++counter) {
830 /* is high CRC bit set? */
831 if ((cur & 0x80000000) == 0) high_crc_set = 0;
832 else high_crc_set = 1;
833
834 cur = cur << 1;
835
836 if ((next & 0x0001) == 0) low_data_set = 0;
837 else low_data_set = 1;
838
839 next = next >> 1;
840
841 /* do the XOR */
842 if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY;
843 }
844 return cur;
845}
846
/* CRC of a 48-bit station address, fed to crc416 16 bits at a time
 * (address bits 47-32, 31-16, 15-0). */
static unsigned int
bmac_crc(unsigned short *address)
{
	unsigned int crc = 0xffffffff;
	int word;

	XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2]));
	for (word = 0; word < 3; ++word)
		crc = crc416(crc, address[word]);

	return crc;
}
859
860/*
861 * Add requested mcast addr to BMac's hash table filter.
862 *
863 */
864
865static void
866bmac_addhash(struct bmac_data *bp, unsigned char *addr)
867{
868 unsigned int crc;
869 unsigned short mask;
870
871 if (!(*addr)) return;
872 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
873 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
874 if (bp->hash_use_count[crc]++) return; /* This bit is already set */
875 mask = crc % 16;
876 mask = (unsigned char)1 << mask;
877 bp->hash_use_count[crc/16] |= mask;
878}
879
880static void
881bmac_removehash(struct bmac_data *bp, unsigned char *addr)
882{
883 unsigned int crc;
884 unsigned char mask;
885
886 /* Now, delete the address from the filter copy, as indicated */
887 crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */
888 crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */
889 if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */
890 if (--bp->hash_use_count[crc]) return; /* That bit is still in use */
891 mask = crc % 16;
892 mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */
893 bp->hash_table_mask[crc/16] &= mask;
894}
895
/*
 * Sync the adapter with the software copy of the multicast mask
 *  (logical address filter).
 */

/* Disable the receiver and spin until the chip confirms it is off.
 * NOTE(review): busy-wait with no timeout. */
static void
bmac_rx_off(struct net_device *dev)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg &= ~RxMACEnable;
	bmwrite(dev, RXCFG, rx_cfg);
	do {
		rx_cfg = bmread(dev, RXCFG);
	}  while (rx_cfg & RxMACEnable);
}
913
/* Re-enable the receiver with hash filtering and/or promiscuous mode as
 * requested, resetting the rx block and FIFO first.  Returns the RXCFG
 * value written. */
unsigned short
bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable)
{
	unsigned short rx_cfg;

	rx_cfg = bmread(dev, RXCFG);
	rx_cfg |= RxMACEnable;
	if (hash_enable) rx_cfg |= RxHashFilterEnable;
	else rx_cfg &= ~RxHashFilterEnable;
	if (promisc_enable) rx_cfg |= RxPromiscEnable;
	else rx_cfg &= ~RxPromiscEnable;
	bmwrite(dev, RXRST, RxResetValue);
	bmwrite(dev, RXFIFOCSR, 0);	/* first disable rxFIFO */
	bmwrite(dev, RXFIFOCSR, RxFIFOEnable );
	bmwrite(dev, RXCFG, rx_cfg );
	return rx_cfg;
}
931
/* Copy the 64-bit software hash filter into the chip's four BHASH regs. */
static void
bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp)
{
	bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */
	bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */
	bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
	bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
}
940
#if 0
/* Dead code: incremental add/remove of a single multicast address,
 * superseded by the full rebuild in bmac_set_multicast. */
static void
bmac_add_multi(struct net_device *dev,
	       struct bmac_data *bp, unsigned char *addr)
{
	/* XXDEBUG(("bmac: enter bmac_add_multi\n")); */
	bmac_addhash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
	/* XXDEBUG(("bmac: exit bmac_add_multi\n")); */
}

static void
bmac_remove_multi(struct net_device *dev,
		  struct bmac_data *bp, unsigned char *addr)
{
	bmac_removehash(bp, addr);
	bmac_rx_off(dev);
	bmac_update_hash_table_mask(dev, bp);
	bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0);
}
#endif
964
965/* Set or clear the multicast filter for this adaptor.
966 num_addrs == -1 Promiscuous mode, receive all packets
967 num_addrs == 0 Normal mode, clear multicast list
968 num_addrs > 0 Multicast mode, receive normal and MC packets, and do
969 best-effort filtering.
970 */
971static void bmac_set_multicast(struct net_device *dev)
972{
973 struct netdev_hw_addr *ha;
974 struct bmac_data *bp = netdev_priv(dev);
975 int num_addrs = netdev_mc_count(dev);
976 unsigned short rx_cfg;
977 int i;
978
979 if (bp->sleeping)
980 return;
981
982 XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
983
984 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
985 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
986 bmac_update_hash_table_mask(dev, bp);
987 rx_cfg = bmac_rx_on(dev, 1, 0);
988 XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n"));
989 } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) {
990 rx_cfg = bmread(dev, RXCFG);
991 rx_cfg |= RxPromiscEnable;
992 bmwrite(dev, RXCFG, rx_cfg);
993 rx_cfg = bmac_rx_on(dev, 0, 1);
994 XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg));
995 } else {
996 for (i=0; i<4; i++) bp->hash_table_mask[i] = 0;
997 for (i=0; i<64; i++) bp->hash_use_count[i] = 0;
998 if (num_addrs == 0) {
999 rx_cfg = bmac_rx_on(dev, 0, 0);
1000 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1001 } else {
1002 netdev_for_each_mc_addr(ha, dev)
1003 bmac_addhash(bp, ha->addr);
1004 bmac_update_hash_table_mask(dev, bp);
1005 rx_cfg = bmac_rx_on(dev, 1, 0);
1006 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
1007 }
1008 }
1009 /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */
1010}
1011#else /* ifdef SUNHME_MULTICAST */
1012
1013/* The version of set_multicast below was lifted from sunhme.c */
1014
1015static void bmac_set_multicast(struct net_device *dev)
1016{
1017 struct netdev_hw_addr *ha;
1018 int i;
1019 unsigned short rx_cfg;
1020 u32 crc;
1021
1022 if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
1023 bmwrite(dev, BHASH0, 0xffff);
1024 bmwrite(dev, BHASH1, 0xffff);
1025 bmwrite(dev, BHASH2, 0xffff);
1026 bmwrite(dev, BHASH3, 0xffff);
1027 } else if(dev->flags & IFF_PROMISC) {
1028 rx_cfg = bmread(dev, RXCFG);
1029 rx_cfg |= RxPromiscEnable;
1030 bmwrite(dev, RXCFG, rx_cfg);
1031 } else {
1032 u16 hash_table[4];
1033
1034 rx_cfg = bmread(dev, RXCFG);
1035 rx_cfg &= ~RxPromiscEnable;
1036 bmwrite(dev, RXCFG, rx_cfg);
1037
1038 for(i = 0; i < 4; i++) hash_table[i] = 0;
1039
1040 netdev_for_each_mc_addr(ha, dev) {
1041 crc = ether_crc_le(6, ha->addr);
1042 crc >>= 26;
1043 hash_table[crc >> 4] |= 1 << (crc & 0xf);
1044 }
1045 bmwrite(dev, BHASH0, hash_table[0]);
1046 bmwrite(dev, BHASH1, hash_table[1]);
1047 bmwrite(dev, BHASH2, hash_table[2]);
1048 bmwrite(dev, BHASH3, hash_table[3]);
1049 }
1050}
1051#endif /* SUNHME_MULTICAST */
1052
1053static int miscintcount;
1054
1055static irqreturn_t bmac_misc_intr(int irq, void *dev_id)
1056{
1057 struct net_device *dev = (struct net_device *) dev_id;
1058 unsigned int status = bmread(dev, STATUS);
1059 if (miscintcount++ < 10) {
1060 XXDEBUG(("bmac_misc_intr\n"));
1061 }
1062 /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */
1063 /* bmac_txdma_intr_inner(irq, dev_id); */
1064 /* if (status & FrameReceived) dev->stats.rx_dropped++; */
1065 if (status & RxErrorMask) dev->stats.rx_errors++;
1066 if (status & RxCRCCntExp) dev->stats.rx_crc_errors++;
1067 if (status & RxLenCntExp) dev->stats.rx_length_errors++;
1068 if (status & RxOverFlow) dev->stats.rx_over_errors++;
1069 if (status & RxAlignCntExp) dev->stats.rx_frame_errors++;
1070
1071 /* if (status & FrameSent) dev->stats.tx_dropped++; */
1072 if (status & TxErrorMask) dev->stats.tx_errors++;
1073 if (status & TxUnderrun) dev->stats.tx_fifo_errors++;
1074 if (status & TxNormalCollExp) dev->stats.collisions++;
1075 return IRQ_HANDLED;
1076}
1077
1078/*
1079 * Procedure for reading EEPROM
1080 */
1081#define SROMAddressLength 5
1082#define DataInOn 0x0008
1083#define DataInOff 0x0000
1084#define Clk 0x0002
1085#define ChipSelect 0x0001
1086#define SDIShiftCount 3
1087#define SD0ShiftCount 2
1088#define DelayValue 1000 /* number of microseconds */
1089#define SROMStartOffset 10 /* this is in words */
1090#define SROMReadCount 3 /* number of words to read from SROM */
1091#define SROMAddressBits 6
1092#define EnetAddressOffset 20
1093
1094static unsigned char
1095bmac_clock_out_bit(struct net_device *dev)
1096{
1097 unsigned short data;
1098 unsigned short val;
1099
1100 bmwrite(dev, SROMCSR, ChipSelect | Clk);
1101 udelay(DelayValue);
1102
1103 data = bmread(dev, SROMCSR);
1104 udelay(DelayValue);
1105 val = (data >> SD0ShiftCount) & 1;
1106
1107 bmwrite(dev, SROMCSR, ChipSelect);
1108 udelay(DelayValue);
1109
1110 return val;
1111}
1112
1113static void
1114bmac_clock_in_bit(struct net_device *dev, unsigned int val)
1115{
1116 unsigned short data;
1117
1118 if (val != 0 && val != 1) return;
1119
1120 data = (val << SDIShiftCount);
1121 bmwrite(dev, SROMCSR, data | ChipSelect );
1122 udelay(DelayValue);
1123
1124 bmwrite(dev, SROMCSR, data | ChipSelect | Clk );
1125 udelay(DelayValue);
1126
1127 bmwrite(dev, SROMCSR, data | ChipSelect);
1128 udelay(DelayValue);
1129}
1130
/*
 * Reset the serial EEPROM (SROM) interface and shift in the READ
 * opcode (binary 110), leaving the device selected and ready to
 * receive an address from read_srom().
 */
static void
reset_and_select_srom(struct net_device *dev)
{
	/* first reset */
	bmwrite(dev, SROMCSR, 0);
	udelay(DelayValue);

	/* send it the read command (110) */
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 1);
	bmac_clock_in_bit(dev, 0);
}
1143
1144static unsigned short
1145read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len)
1146{
1147 unsigned short data, val;
1148 int i;
1149
1150 /* send out the address we want to read from */
1151 for (i = 0; i < addr_len; i++) {
1152 val = addr >> (addr_len-i-1);
1153 bmac_clock_in_bit(dev, val & 1);
1154 }
1155
1156 /* Now read in the 16-bit data */
1157 data = 0;
1158 for (i = 0; i < 16; i++) {
1159 val = bmac_clock_out_bit(dev);
1160 data <<= 1;
1161 data |= val;
1162 }
1163 bmwrite(dev, SROMCSR, 0);
1164
1165 return data;
1166}
1167
/*
 * It looks like Cogent and SMC use different methods for calculating
 * checksums. What a pain..
 */

/*
 * Read the stored SROM checksum word (byte-swapped, at word offset 3).
 *
 * NOTE(review): storedCS is computed but never compared against
 * anything, so this function always reports success (returns 0).  The
 * verification was evidently abandoned because the two vendors
 * disagree on the algorithm (see the comment above); callers in
 * bmac_probe() only test for a non-zero return.
 */
static int
bmac_verify_checksum(struct net_device *dev)
{
	unsigned short data, storedCS;

	reset_and_select_srom(dev);
	data = read_srom(dev, 3, SROMAddressBits);
	storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00);

	return 0;
}
1184
1185
1186static void
1187bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1188{
1189 int i;
1190 unsigned short data;
1191
1192 for (i = 0; i < 6; i++)
1193 {
1194 reset_and_select_srom(dev);
1195 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1196 ea[2*i] = bitrev8(data & 0x0ff);
1197 ea[2*i+1] = bitrev8((data >> 8) & 0x0ff);
1198 }
1199}
1200
/*
 * Full restart of the chip: reset, rebuild both DMA rings, reprogram
 * and start the MAC, then unmask the normal interrupt set.  Runs with
 * bp->lock held and interrupts off.  Called from open() and recovery
 * paths.
 */
static void bmac_reset_and_enable(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&bp->lock, flags);
	bmac_enable_and_reset_chip(dev);
	bmac_init_tx_ring(bp);
	bmac_init_rx_ring(bp);
	bmac_init_chip(dev);
	bmac_start_chip(dev);
	bmwrite(dev, INTDISABLE, EnableNormal);
	bp->sleeping = 0;

	/*
	 * It seems that the bmac can't receive until it's transmitted
	 * a packet. So we give it a dummy packet to transmit.
	 */
	skb = dev_alloc_skb(ETHERMINPACKET);
	if (skb != NULL) {
		/* Minimal frame: dst = src = our own MAC, zero payload.
		 * Allocation failure is tolerated — we just skip the kick. */
		data = skb_put(skb, ETHERMINPACKET);
		memset(data, 0, ETHERMINPACKET);
		memcpy(data, dev->dev_addr, 6);
		memcpy(data+6, dev->dev_addr, 6);
		bmac_transmit_packet(skb, dev);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
}
1231
/* ethtool: only link-state reporting is supported. */
static const struct ethtool_ops bmac_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
};

/* net_device callbacks; generic eth helpers cover MTU/address checks. */
static const struct net_device_ops bmac_netdev_ops = {
	.ndo_open		= bmac_open,
	.ndo_stop		= bmac_close,
	.ndo_start_xmit		= bmac_output,
	.ndo_set_multicast_list	= bmac_set_multicast,
	.ndo_set_mac_address	= bmac_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
1245
/*
 * macio probe: map the three MMIO ranges (MAC regs, TX DBDMA, RX
 * DBDMA), read the station address from the OF tree, wire up the
 * three interrupt lines and register the netdev.  Cleanup on failure
 * unwinds in strict reverse order via the goto chain at the bottom.
 */
static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
{
	int j, rev, ret;
	struct bmac_data *bp;
	const unsigned char *prop_addr;
	unsigned char addr[6];
	struct net_device *dev;
	int is_bmac_plus = ((int)match->data) != 0;	/* .data flags the BMAC+ variant */

	/* The cell must expose exactly 3 MMIO ranges and 3 interrupts
	 * (misc, TX DMA, RX DMA). */
	if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
		printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n");
		return -ENODEV;
	}
	/* Prefer "mac-address"; older firmware only provides
	 * "local-mac-address". */
	prop_addr = of_get_property(macio_get_of_node(mdev),
			"mac-address", NULL);
	if (prop_addr == NULL) {
		prop_addr = of_get_property(macio_get_of_node(mdev),
				"local-mac-address", NULL);
		if (prop_addr == NULL) {
			printk(KERN_ERR "BMAC: Can't get mac-address\n");
			return -ENODEV;
		}
	}
	memcpy(addr, prop_addr, sizeof(addr));

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev) {
		printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
	macio_set_drvdata(mdev, dev);

	bp->mdev = mdev;
	spin_lock_init(&bp->lock);

	if (macio_request_resources(mdev, "bmac")) {
		printk(KERN_ERR "BMAC: can't request IO resource !\n");
		goto out_free;
	}

	dev->base_addr = (unsigned long)
		ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0));
	if (dev->base_addr == 0)
		goto out_release;

	dev->irq = macio_irq(mdev, 0);

	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	/* Addresses beginning 00:A0 are stored bit-reversed in OF. */
	rev = addr[0] == 0 && addr[1] == 0xA0;
	for (j = 0; j < 6; ++j)
		dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];

	/* Enable chip without interrupts for now */
	bmac_enable_and_reset_chip(dev);
	bmwrite(dev, INTDISABLE, DisableAll);

	dev->netdev_ops = &bmac_netdev_ops;
	dev->ethtool_ops = &bmac_ethtool_ops;

	bmac_get_station_address(dev, addr);
	/* NOTE: bmac_verify_checksum() currently always returns 0. */
	if (bmac_verify_checksum(dev) != 0)
		goto err_out_iounmap;

	bp->is_bmac_plus = is_bmac_plus;
	bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1));
	if (!bp->tx_dma)
		goto err_out_iounmap;
	bp->tx_dma_intr = macio_irq(mdev, 1);
	bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2));
	if (!bp->rx_dma)
		goto err_out_iounmap_tx;
	bp->rx_dma_intr = macio_irq(mdev, 2);

	/* DBDMA command rings live in the private area right after bp,
	 * aligned for the DMA engine; the skb queue head follows them. */
	bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1);
	bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1;

	bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1);
	skb_queue_head_init(bp->queue);

	/* Timer function/expiry are set when a timeout is armed. */
	init_timer(&bp->tx_timeout);

	ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
		goto err_out_iounmap_rx;
	}
	ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr);
		goto err_out_irq0;
	}
	ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev);
	if (ret) {
		printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr);
		goto err_out_irq1;
	}

	/* Mask chip interrupts and disable chip, will be
	 * re-enabled on open()
	 */
	disable_irq(dev->irq);
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);

	if (register_netdev(dev) != 0) {
		printk(KERN_ERR "BMAC: Ethernet registration failed\n");
		goto err_out_irq2;
	}

	printk(KERN_INFO "%s: BMAC%s at %pM",
	       dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr);
	XXDEBUG((", base_addr=%#0lx", dev->base_addr));
	printk("\n");

	return 0;

err_out_irq2:
	free_irq(bp->rx_dma_intr, dev);
err_out_irq1:
	free_irq(bp->tx_dma_intr, dev);
err_out_irq0:
	free_irq(dev->irq, dev);
err_out_iounmap_rx:
	iounmap(bp->rx_dma);
err_out_iounmap_tx:
	iounmap(bp->tx_dma);
err_out_iounmap:
	iounmap((void __iomem *)dev->base_addr);
out_release:
	macio_release_resources(mdev);
out_free:
	/* Power the cell back down before freeing the netdev. */
	pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
	free_netdev(dev);

	return -ENODEV;
}
1386
/*
 * net_device open(): mark the interface up, (re)initialise the chip
 * and rings via bmac_reset_and_enable(), and re-enable the misc
 * interrupt line that probe() left disabled.  Always succeeds.
 */
static int bmac_open(struct net_device *dev)
{
	struct bmac_data *bp = netdev_priv(dev);
	/* XXDEBUG(("bmac: enter open\n")); */
	/* reset the chip */
	bp->opened = 1;
	bmac_reset_and_enable(dev);
	enable_irq(dev->irq);
	return 0;
}
1397
1398static int bmac_close(struct net_device *dev)
1399{
1400 struct bmac_data *bp = netdev_priv(dev);
1401 volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
1402 volatile struct dbdma_regs __iomem *td = bp->tx_dma;
1403 unsigned short config;
1404 int i;
1405
1406 bp->sleeping = 1;
1407
1408 /* disable rx and tx */
1409 config = bmread(dev, RXCFG);
1410 bmwrite(dev, RXCFG, (config & ~RxMACEnable));
1411
1412 config = bmread(dev, TXCFG);
1413 bmwrite(dev, TXCFG, (config & ~TxMACEnable));
1414
1415 bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */
1416
1417 /* disable rx and tx dma */
1418 st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1419 st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */
1420
1421 /* free some skb's */
1422 XXDEBUG(("bmac: free rx bufs\n"));
1423 for (i=0; i<N_RX_RING; i++) {
1424 if (bp->rx_bufs[i] != NULL) {
1425 dev_kfree_skb(bp->rx_bufs[i]);
1426 bp->rx_bufs[i] = NULL;
1427 }
1428 }
1429 XXDEBUG(("bmac: free tx bufs\n"));
1430 for (i = 0; i<N_TX_RING; i++) {
1431 if (bp->tx_bufs[i] != NULL) {
1432 dev_kfree_skb(bp->tx_bufs[i]);
1433 bp->tx_bufs[i] = NULL;
1434 }
1435 }
1436 XXDEBUG(("bmac: all bufs freed\n"));
1437
1438 bp->opened = 0;
1439 disable_irq(dev->irq);
1440 pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0);
1441
1442 return 0;
1443}
1444
1445static void
1446bmac_start(struct net_device *dev)
1447{
1448 struct bmac_data *bp = netdev_priv(dev);
1449 int i;
1450 struct sk_buff *skb;
1451 unsigned long flags;
1452
1453 if (bp->sleeping)
1454 return;
1455
1456 spin_lock_irqsave(&bp->lock, flags);
1457 while (1) {
1458 i = bp->tx_fill + 1;
1459 if (i >= N_TX_RING)
1460 i = 0;
1461 if (i == bp->tx_empty)
1462 break;
1463 skb = skb_dequeue(bp->queue);
1464 if (skb == NULL)
1465 break;
1466 bmac_transmit_packet(skb, dev);
1467 }
1468 spin_unlock_irqrestore(&bp->lock, flags);
1469}
1470
1471static int
1472bmac_output(struct sk_buff *skb, struct net_device *dev)
1473{
1474 struct bmac_data *bp = netdev_priv(dev);
1475 skb_queue_tail(bp->queue, skb);
1476 bmac_start(dev);
1477 return NETDEV_TX_OK;
1478}
1479
/*
 * TX watchdog (timer callback; @data is the net_device).  Recovers
 * from a wedged transmitter: stop MAC + TX DMA, reset the chip,
 * restart RX DMA where it left off, drop the stuck frame, and restart
 * the TX ring from the next descriptor.  Runs under bp->lock.
 */
static void bmac_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct bmac_data *bp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = bp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = bp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	unsigned short config, oldConfig;
	int i;

	XXDEBUG(("bmac: tx_timeout called\n"));
	spin_lock_irqsave(&bp->lock, flags);
	bp->timeout_active = 0;

	/* update various counters */
/*     	bmac_handle_misc_intrs(bp, 0); */

	cp = &bp->tx_cmds[bp->tx_empty];
/*	XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */
/* 	   ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */
/* 	   mb->pr, mb->xmtfs, mb->fifofc)); */

	/* turn off both tx and rx and reset the chip */
	config = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, (config & ~RxMACEnable));
	config = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, (config & ~TxMACEnable));
	out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	printk(KERN_ERR "bmac: transmit timeout - resetting\n");
	bmac_enable_and_reset_chip(dev);

	/* restart rx dma from the descriptor it was processing */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD));
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, DBDMA_SET(RUN|WAKE));

	/* fix up the transmit side: drop the frame that timed out */
	XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n",
		 bp->tx_empty, bp->tx_fill, bp->tx_fullup));
	i = bp->tx_empty;
	++dev->stats.tx_errors;
	if (i != bp->tx_fill) {
		dev_kfree_skb(bp->tx_bufs[i]);
		bp->tx_bufs[i] = NULL;
		if (++i >= N_TX_RING) i = 0;
		bp->tx_empty = i;
	}
	bp->tx_fullup = 0;
	netif_wake_queue(dev);
	if (i != bp->tx_fill) {
		/* More frames pending: restart TX DMA at the next one. */
		cp = &bp->tx_cmds[i];
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, DBDMA_SET(RUN));
		/*	bmac_set_timeout(dev); */
		XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i));
	}

	/* turn it back on */
	oldConfig = bmread(dev, RXCFG);
	bmwrite(dev, RXCFG, oldConfig | RxMACEnable );
	oldConfig = bmread(dev, TXCFG);
	bmwrite(dev, TXCFG, oldConfig | TxMACEnable );

	spin_unlock_irqrestore(&bp->lock, flags);
}
1550
#if 0
/*
 * Debug helper (compiled out): hex-dump @count DBDMA command
 * descriptors starting at @cp, reading each dbdma_cmd as four
 * little-endian 32-bit words.
 */
static void dump_dbdma(volatile struct dbdma_cmd *cp,int count)
{
	int i,*ip;

	for (i=0;i< count;i++) {
		ip = (int*)(cp+i);

		printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n",
		       ld_le32(ip+0),
		       ld_le32(ip+1),
		       ld_le32(ip+2),
		       ld_le32(ip+3));
	}

}
#endif
1568
#if 0
/*
 * Legacy /proc read handler (compiled out): formats every register
 * named in reg_entries[] into @buffer, honouring the old get_info
 * offset/length windowing protocol (begin/pos bookkeeping below).
 */
static int
bmac_proc_info(char *buffer, char **start, off_t offset, int length)
{
	int len = 0;
	off_t pos   = 0;
	off_t begin = 0;
	int i;

	if (bmac_devs == NULL)
		return -ENOSYS;

	len += sprintf(buffer, "BMAC counters & registers\n");

	for (i = 0; i<N_REG_ENTRIES; i++) {
		len += sprintf(buffer + len, "%s: %#08x\n",
			       reg_entries[i].name,
			       bmread(bmac_devs, reg_entries[i].reg_offset));
		pos = begin + len;

		if (pos < offset) {
			len = 0;
			begin = pos;
		}

		if (pos > offset+length) break;
	}

	*start = buffer + (offset - begin);
	len -= (offset - begin);

	if (len > length) len = length;

	return len;
}
#endif
1605
/*
 * macio remove: tear down in reverse order of probe — unregister the
 * netdev first so no new I/O starts, then release the three IRQs,
 * unmap the three MMIO regions, release macio resources, and finally
 * free the netdev (which owns the private data).
 */
static int __devexit bmac_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct bmac_data *bp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(bp->tx_dma_intr, dev);
	free_irq(bp->rx_dma_intr, dev);

	iounmap((void __iomem *)dev->base_addr);
	iounmap(bp->tx_dma);
	iounmap(bp->rx_dma);

	macio_release_resources(mdev);

	free_netdev(dev);

	return 0;
}
1627
/* OF match table: plain "bmac" cells and "bmac+" cells.  .data is
 * non-zero for the BMAC+ variant and is consumed by bmac_probe(). */
static struct of_device_id bmac_match[] =
{
	{
	.name 		= "bmac",
	.data		= (void *)0,
	},
	{
	.type		= "network",
	.compatible	= "bmac+",
	.data		= (void *)1,
	},
	{},
};
MODULE_DEVICE_TABLE (of, bmac_match);
1642
/* macio bus driver glue; PM callbacks are defined elsewhere in this
 * file and only wired up when CONFIG_PM is set. */
static struct macio_driver bmac_driver =
{
	.driver = {
		.name 		= "bmac",
		.owner		= THIS_MODULE,
		.of_match_table	= bmac_match,
	},
	.probe		= bmac_probe,
	.remove		= bmac_remove,
#ifdef CONFIG_PM
	.suspend	= bmac_suspend,
	.resume		= bmac_resume,
#endif
};
1657
1658
1659static int __init bmac_init(void)
1660{
1661 if (bmac_emergency_rxbuf == NULL) {
1662 bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL);
1663 if (bmac_emergency_rxbuf == NULL) {
1664 printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n");
1665 return -ENOMEM;
1666 }
1667 }
1668
1669 return macio_register_driver(&bmac_driver);
1670}
1671
1672static void __exit bmac_exit(void)
1673{
1674 macio_unregister_driver(&bmac_driver);
1675
1676 kfree(bmac_emergency_rxbuf);
1677 bmac_emergency_rxbuf = NULL;
1678}
1679
1680MODULE_AUTHOR("Randy Gobbel/Paul Mackerras");
1681MODULE_DESCRIPTION("PowerMac BMAC ethernet driver.");
1682MODULE_LICENSE("GPL");
1683
1684module_init(bmac_init);
1685module_exit(bmac_exit);
diff --git a/drivers/net/ethernet/apple/bmac.h b/drivers/net/ethernet/apple/bmac.h
new file mode 100644
index 000000000000..a1d19d867ba5
--- /dev/null
+++ b/drivers/net/ethernet/apple/bmac.h
@@ -0,0 +1,164 @@
1/*
 2 * bmac.h - definitions for the registers in the "Big Mac"
3 * Ethernet controller found in PowerMac G3 models.
4 *
5 * Copyright (C) 1998 Randy Gobbel.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13/* The "Big MAC" appears to have some parts in common with the Sun "Happy Meal"
14 * (HME) controller. See sunhme.h
15 */
16
17
18/* register offsets */
19
20/* global status and control */
21#define XIFC 0x000 /* low-level interface control */
22# define TxOutputEnable 0x0001 /* output driver enable */
23# define XIFLoopback 0x0002 /* Loopback-mode XIF enable */
24# define MIILoopback 0x0004 /* Loopback-mode MII enable */
25# define MIILoopbackBits 0x0006
26# define MIIBuffDisable 0x0008 /* MII receive buffer disable */
27# define SQETestEnable 0x0010 /* SQE test enable */
28# define SQETimeWindow 0x03e0 /* SQE time window */
29# define XIFLanceMode 0x0010 /* Lance mode enable */
30# define XIFLanceIPG0 0x03e0 /* Lance mode IPG0 */
31#define TXFIFOCSR 0x100 /* transmit FIFO control */
32# define TxFIFOEnable 0x0001
33#define TXTH 0x110 /* transmit threshold */
34# define TxThreshold 0x0004
35#define RXFIFOCSR 0x120 /* receive FIFO control */
36# define RxFIFOEnable 0x0001
37#define MEMADD 0x130 /* memory address, unknown function */
38#define MEMDATAHI 0x140 /* memory data high, presently unused in driver */
39#define MEMDATALO 0x150 /* memory data low, presently unused in driver */
40#define XCVRIF 0x160 /* transceiver interface control */
41# define COLActiveLow 0x0002
42# define SerialMode 0x0004
43# define ClkBit 0x0008
44# define LinkStatus 0x0100
45#define CHIPID 0x170 /* chip ID */
46#define MIFCSR 0x180 /* ??? */
47#define SROMCSR 0x190 /* SROM control */
48# define ChipSelect 0x0001
49# define Clk 0x0002
50#define TXPNTR 0x1a0 /* transmit pointer */
51#define RXPNTR 0x1b0 /* receive pointer */
52#define STATUS 0x200 /* status--reading this clears it */
53#define INTDISABLE 0x210 /* interrupt enable/disable control */
54/* bits below are the same in both STATUS and INTDISABLE registers */
55# define FrameReceived 0x00000001 /* Received a frame */
56# define RxFrameCntExp 0x00000002 /* Receive frame counter expired */
57# define RxAlignCntExp 0x00000004 /* Align-error counter expired */
58# define RxCRCCntExp 0x00000008 /* CRC-error counter expired */
59# define RxLenCntExp 0x00000010 /* Length-error counter expired */
60# define RxOverFlow 0x00000020 /* Receive FIFO overflow */
61# define RxCodeViolation 0x00000040 /* Code-violation counter expired */
62# define SQETestError 0x00000080 /* Test error in XIF for SQE */
63# define FrameSent 0x00000100 /* Transmitted a frame */
64# define TxUnderrun 0x00000200 /* Transmit FIFO underrun */
65# define TxMaxSizeError 0x00000400 /* Max-packet size error */
66# define TxNormalCollExp 0x00000800 /* Normal-collision counter expired */
67# define TxExcessCollExp 0x00001000 /* Excess-collision counter expired */
68# define TxLateCollExp 0x00002000 /* Late-collision counter expired */
69# define TxNetworkCollExp 0x00004000 /* First-collision counter expired */
70# define TxDeferTimerExp 0x00008000 /* Defer-timer expired */
71# define RxFIFOToHost 0x00010000 /* Data moved from FIFO to host */
72# define RxNoDescriptors 0x00020000 /* No more receive descriptors */
73# define RxDMAError 0x00040000 /* Error during receive DMA */
74# define RxDMALateErr 0x00080000 /* Receive DMA, data late */
75# define RxParityErr 0x00100000 /* Parity error during receive DMA */
76# define RxTagError 0x00200000 /* Tag error during receive DMA */
77# define TxEOPError 0x00400000 /* Tx descriptor did not have EOP set */
78# define MIFIntrEvent 0x00800000 /* MIF is signaling an interrupt */
79# define TxHostToFIFO 0x01000000 /* Data moved from host to FIFO */
80# define TxFIFOAllSent 0x02000000 /* Transmitted all packets in FIFO */
81# define TxDMAError 0x04000000 /* Error during transmit DMA */
82# define TxDMALateError 0x08000000 /* Late error during transmit DMA */
83# define TxParityError 0x10000000 /* Parity error during transmit DMA */
84# define TxTagError 0x20000000 /* Tag error during transmit DMA */
85# define PIOError 0x40000000 /* PIO access got an error */
86# define PIOParityError 0x80000000 /* PIO access got a parity error */
87# define DisableAll 0xffffffff
88# define EnableAll 0x00000000
89/* # define NormalIntEvents ~(FrameReceived | FrameSent | TxUnderrun) */
90# define EnableNormal ~(FrameReceived | FrameSent)
91# define EnableErrors (FrameReceived | FrameSent)
92# define RxErrorMask (RxFrameCntExp | RxAlignCntExp | RxCRCCntExp | \
93 RxLenCntExp | RxOverFlow | RxCodeViolation)
94# define TxErrorMask (TxUnderrun | TxMaxSizeError | TxExcessCollExp | \
95 TxLateCollExp | TxNetworkCollExp | TxDeferTimerExp)
96
97/* transmit control */
98#define TXRST 0x420 /* transmit reset */
99# define TxResetBit 0x0001
100#define TXCFG 0x430 /* transmit configuration control*/
101# define TxMACEnable 0x0001 /* output driver enable */
102# define TxSlowMode 0x0020 /* enable slow mode */
103# define TxIgnoreColl 0x0040 /* ignore transmit collisions */
104# define TxNoFCS 0x0080 /* do not emit FCS */
105# define TxNoBackoff 0x0100 /* no backoff in case of collisions */
106# define TxFullDuplex 0x0200 /* enable full-duplex */
107# define TxNeverGiveUp 0x0400 /* don't give up on transmits */
108#define IPG1 0x440 /* Inter-packet gap 1 */
109#define IPG2 0x450 /* Inter-packet gap 2 */
110#define ALIMIT 0x460 /* Transmit attempt limit */
111#define SLOT 0x470 /* Transmit slot time */
112#define PALEN 0x480 /* Size of transmit preamble */
113#define PAPAT 0x490 /* Pattern for transmit preamble */
114#define TXSFD 0x4a0 /* Transmit frame delimiter */
115#define JAM 0x4b0 /* Jam size */
116#define TXMAX 0x4c0 /* Transmit max pkt size */
117#define TXMIN 0x4d0 /* Transmit min pkt size */
118#define PAREG 0x4e0 /* Count of transmit peak attempts */
119#define DCNT 0x4f0 /* Transmit defer timer */
120#define NCCNT 0x500 /* Transmit normal-collision counter */
121#define NTCNT 0x510 /* Transmit first-collision counter */
122#define EXCNT 0x520 /* Transmit excess-collision counter */
123#define LTCNT 0x530 /* Transmit late-collision counter */
124#define RSEED 0x540 /* Transmit random number seed */
125#define TXSM 0x550 /* Transmit state machine */
126
127/* receive control */
128#define RXRST 0x620 /* receive reset */
129# define RxResetValue 0x0000
130#define RXCFG 0x630 /* receive configuration control */
131# define RxMACEnable 0x0001 /* receiver overall enable */
132# define RxCFGReserved 0x0004
133# define RxPadStripEnab 0x0020 /* enable pad byte stripping */
134# define RxPromiscEnable 0x0040 /* turn on promiscuous mode */
135# define RxNoErrCheck 0x0080 /* disable receive error checking */
136# define RxCRCNoStrip 0x0100 /* disable auto-CRC-stripping */
137# define RxRejectOwnPackets 0x0200 /* don't receive our own packets */
138# define RxGrpPromisck 0x0400 /* enable group promiscuous mode */
139# define RxHashFilterEnable 0x0800 /* enable hash filter */
140# define RxAddrFilterEnable 0x1000 /* enable address filter */
141#define RXMAX 0x640 /* Max receive packet size */
142#define RXMIN 0x650 /* Min receive packet size */
143#define MADD2 0x660 /* our enet address, high part */
144#define MADD1 0x670 /* our enet address, middle part */
145#define MADD0 0x680 /* our enet address, low part */
146#define FRCNT 0x690 /* receive frame counter */
147#define LECNT 0x6a0 /* Receive excess length error counter */
148#define AECNT 0x6b0 /* Receive misaligned error counter */
149#define FECNT 0x6c0 /* Receive CRC error counter */
150#define RXSM 0x6d0 /* Receive state machine */
151#define RXCV 0x6e0 /* Receive code violation */
152
153#define BHASH3 0x700 /* multicast hash register */
154#define BHASH2 0x710 /* multicast hash register */
155#define BHASH1 0x720 /* multicast hash register */
156#define BHASH0 0x730 /* multicast hash register */
157
158#define AFR2 0x740 /* address filtering setup? */
159#define AFR1 0x750 /* address filtering setup? */
160#define AFR0 0x760 /* address filtering setup? */
161#define AFCR 0x770 /* address filter compare register? */
162# define EnableAllCompares 0x0fff
163
164/* bits in XIFC */
diff --git a/drivers/net/ethernet/apple/cs89x0.c b/drivers/net/ethernet/apple/cs89x0.c
new file mode 100644
index 000000000000..537a4b2e2020
--- /dev/null
+++ b/drivers/net/ethernet/apple/cs89x0.c
@@ -0,0 +1,1913 @@
1/* cs89x0.c: A Crystal Semiconductor (Now Cirrus Logic) CS89[02]0
2 * driver for linux.
3 */
4
5/*
6 Written 1996 by Russell Nelson, with reference to skeleton.c
7 written 1993-1994 by Donald Becker.
8
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
11
12 The author may be reached at nelson@crynwr.com, Crynwr
13 Software, 521 Pleasant Valley Rd., Potsdam, NY 13676
14
15 Changelog:
16
17 Mike Cruse : mcruse@cti-ltd.com
18 : Changes for Linux 2.0 compatibility.
19 : Added dev_id parameter in net_interrupt(),
20 : request_irq() and free_irq(). Just NULL for now.
21
22 Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
23 : in net_open() and net_close() so kerneld would know
24 : that the module is in use and wouldn't eject the
25 : driver prematurely.
26
27 Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
28 : as an example. Disabled autoprobing in init_module(),
29 : not a good thing to do to other devices while Linux
30 : is running from all accounts.
31
32 Russ Nelson : Jul 13 1998. Added RxOnly DMA support.
33
34 Melody Lee : Aug 10 1999. Changes for Linux 2.2.5 compatibility.
35 : email: ethernet@crystal.cirrus.com
36
37 Alan Cox : Removed 1.2 support, added 2.1 extra counters.
38
39 Andrew Morton : Kernel 2.3.48
40 : Handle kmalloc() failures
41 : Other resource allocation fixes
42 : Add SMP locks
43 : Integrate Russ Nelson's ALLOW_DMA functionality back in.
44 : If ALLOW_DMA is true, make DMA runtime selectable
45 : Folded in changes from Cirrus (Melody Lee
46 : <klee@crystal.cirrus.com>)
47 : Don't call netif_wake_queue() in net_send_packet()
48 : Fixed an out-of-mem bug in dma_rx()
49 : Updated Documentation/networking/cs89x0.txt
50
51 Andrew Morton : Kernel 2.3.99-pre1
52 : Use skb_reserve to longword align IP header (two places)
53 : Remove a delay loop from dma_rx()
54 : Replace '100' with HZ
55 : Clean up a couple of skb API abuses
56 : Added 'cs89x0_dma=N' kernel boot option
57 : Correctly initialise lp->lock in non-module compile
58
59 Andrew Morton : Kernel 2.3.99-pre4-1
60 : MOD_INC/DEC race fix (see
61 : http://www.uwsg.indiana.edu/hypermail/linux/kernel/0003.3/1532.html)
62
63 Andrew Morton : Kernel 2.4.0-test7-pre2
64 : Enhanced EEPROM support to cover more devices,
65 : abstracted IRQ mapping to support CONFIG_ARCH_CLPS7500 arch
66 : (Jason Gunthorpe <jgg@ualberta.ca>)
67
68 Andrew Morton : Kernel 2.4.0-test11-pre4
69 : Use dev->name in request_*() (Andrey Panin)
70 : Fix an error-path memleak in init_module()
71 : Preserve return value from request_irq()
72 : Fix type of `media' module parm (Keith Owens)
73 : Use SET_MODULE_OWNER()
74 : Tidied up strange request_irq() abuse in net_open().
75
76 Andrew Morton : Kernel 2.4.3-pre1
77 : Request correct number of pages for DMA (Hugh Dickens)
78 : Select PP_ChipID _after_ unregister_netdev in cleanup_module()
79 : because unregister_netdev() calls get_stats.
80 : Make `version[]' __initdata
81 : Uninlined the read/write reg/word functions.
82
83 Oskar Schirmer : oskar@scara.com
84 : HiCO.SH4 (superh) support added (irq#1, cs89x0_media=)
85
86 Deepak Saxena : dsaxena@plexity.net
87 : Intel IXDP2x01 (XScale ixp2x00 NPU) platform support
88
89 Dmitry Pervushin : dpervushin@ru.mvista.com
90 : PNX010X platform support
91
92 Deepak Saxena : dsaxena@plexity.net
93 : Intel IXDP2351 platform support
94
95 Dmitry Pervushin : dpervushin@ru.mvista.com
96 : PNX010X platform support
97
98 Domenico Andreoli : cavokz@gmail.com
99 : QQ2440 platform support
100
101*/
102
103/* Always include 'config.h' first in case the user wants to turn on
104 or override something. */
105#include <linux/module.h>
106
107/*
108 * Set this to zero to disable DMA code
109 *
110 * Note that even if DMA is turned off we still support the 'dma' and 'use_dma'
111 * module options so we don't break any startup scripts.
112 */
113#ifndef CONFIG_ISA_DMA_API
114#define ALLOW_DMA 0
115#else
116#define ALLOW_DMA 1
117#endif
118
119/*
120 * Set this to zero to remove all the debug statements via
121 * dead code elimination
122 */
123#define DEBUGGING 1
124
125/*
126 Sources:
127
128 Crynwr packet driver epktisa.
129
130 Crystal Semiconductor data sheets.
131
132*/
133
134#include <linux/errno.h>
135#include <linux/netdevice.h>
136#include <linux/etherdevice.h>
137#include <linux/kernel.h>
138#include <linux/types.h>
139#include <linux/fcntl.h>
140#include <linux/interrupt.h>
141#include <linux/ioport.h>
142#include <linux/in.h>
143#include <linux/skbuff.h>
144#include <linux/spinlock.h>
145#include <linux/string.h>
146#include <linux/init.h>
147#include <linux/bitops.h>
148#include <linux/delay.h>
149#include <linux/gfp.h>
150
151#include <asm/system.h>
152#include <asm/io.h>
153#include <asm/irq.h>
154#if ALLOW_DMA
155#include <asm/dma.h>
156#endif
157
158#include "cs89x0.h"
159
160static char version[] __initdata =
161"cs89x0.c: v2.4.3-pre1 Russell Nelson <nelson@crynwr.com>, Andrew Morton\n";
162
163#define DRV_NAME "cs89x0"
164
165/* First, a few definitions that the brave might change.
166 A zero-terminated list of I/O addresses to be probed. Some special flags..
167 Addr & 1 = Read back the address port, look for signature and reset
168 the page window before probing
169 Addr & 3 = Reset the page window and probe
170 The CLPS eval board has the Cirrus chip at 0x80090300, in ARM IO space,
171 but it is possible that a Cirrus board could be plugged into the ISA
172 slots. */
173/* The cs8900 has 4 IRQ pins, software selectable. cs8900_irq_map maps
174 them to system IRQ numbers. This mapping is card specific and is set to
175 the configuration of the Cirrus Eval board for this chip. */
176#if defined(CONFIG_MACH_IXDP2351)
177static unsigned int netcard_portlist[] __used __initdata = {IXDP2351_VIRT_CS8900_BASE, 0};
178static unsigned int cs8900_irq_map[] = {IRQ_IXDP2351_CS8900, 0, 0, 0};
179#elif defined(CONFIG_ARCH_IXDP2X01)
180static unsigned int netcard_portlist[] __used __initdata = {IXDP2X01_CS8900_VIRT_BASE, 0};
181static unsigned int cs8900_irq_map[] = {IRQ_IXDP2X01_CS8900, 0, 0, 0};
182#elif defined(CONFIG_MACH_QQ2440)
183#include <mach/qq2440.h>
184static unsigned int netcard_portlist[] __used __initdata = { QQ2440_CS8900_VIRT_BASE + 0x300, 0 };
185static unsigned int cs8900_irq_map[] = { QQ2440_CS8900_IRQ, 0, 0, 0 };
186#elif defined(CONFIG_MACH_MX31ADS)
187#include <mach/board-mx31ads.h>
188static unsigned int netcard_portlist[] __used __initdata = {
189 PBC_BASE_ADDRESS + PBC_CS8900A_IOBASE + 0x300, 0
190};
191static unsigned cs8900_irq_map[] = {EXPIO_INT_ENET_INT, 0, 0, 0};
192#else
193static unsigned int netcard_portlist[] __used __initdata =
194 { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0};
195static unsigned int cs8900_irq_map[] = {10,11,12,5};
196#endif
197
198#if DEBUGGING
199static unsigned int net_debug = DEBUGGING;
200#else
201#define net_debug 0 /* gcc will remove all the debug code for us */
202#endif
203
204/* The number of low I/O ports used by the ethercard. */
205#define NETCARD_IO_EXTENT 16
206
207/* we allow the user to override various values normally set in the EEPROM */
208#define FORCE_RJ45 0x0001 /* pick one of these three */
209#define FORCE_AUI 0x0002
210#define FORCE_BNC 0x0004
211
212#define FORCE_AUTO 0x0010 /* pick one of these three */
213#define FORCE_HALF 0x0020
214#define FORCE_FULL 0x0030
215
216/* Information that need to be kept for each board. */
/* Per-device private state, carved out of the net_device allocation and
 * retrieved with netdev_priv(dev) (see cs89x0_probe1).  Zeroed for
 * non-modular probes; module code fills it in before probing. */
struct net_local {
	int chip_type;		/* one of: CS8900, CS8920, CS8920M */
	char chip_revision;	/* revision letter of the chip ('A'...) */
	int send_cmd;		/* the proper send command: TX_NOW, TX_AFTER_381, or TX_AFTER_ALL */
	int auto_neg_cnf;	/* auto-negotiation word from EEPROM */
	int adapter_cnf;	/* adapter configuration from EEPROM */
	int isa_config;		/* ISA configuration from EEPROM */
	int irq_map;		/* IRQ map from EEPROM */
	int rx_mode;		/* what mode are we in? 0, RX_MULTCAST_ACCEPT, or RX_ALL_ACCEPT */
	int curr_rx_cfg;	/* a copy of PP_RxCFG */
	int linectl;		/* either 0 or LOW_RX_SQUELCH, depending on configuration. */
	int send_underrun;	/* keep track of how many underruns in a row we get */
	int force;		/* force various values; see FORCE* above. */
	spinlock_t lock;	/* guards hardware access (see callers in tx/irq paths) */
#if ALLOW_DMA
	int use_dma;		/* Flag: we're using dma */
	int dma;		/* DMA channel */
	int dmasize;		/* 16 or 64 (KB ring size; see dma_rx wraparound) */
	unsigned char *dma_buff;	/* points to the beginning of the buffer */
	unsigned char *end_dma_buff;	/* points to the end of the buffer */
	unsigned char *rx_dma_ptr;	/* points to the next packet */
#endif
};
240
241/* Index to functions, as function prototypes. */
242
243static int cs89x0_probe1(struct net_device *dev, int ioaddr, int modular);
244static int net_open(struct net_device *dev);
245static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
246static irqreturn_t net_interrupt(int irq, void *dev_id);
247static void set_multicast_list(struct net_device *dev);
248static void net_timeout(struct net_device *dev);
249static void net_rx(struct net_device *dev);
250static int net_close(struct net_device *dev);
251static struct net_device_stats *net_get_stats(struct net_device *dev);
252static void reset_chip(struct net_device *dev);
253static int get_eeprom_data(struct net_device *dev, int off, int len, int *buffer);
254static int get_eeprom_cksum(int off, int len, int *buffer);
255static int set_mac_address(struct net_device *dev, void *addr);
256static void count_rx_errors(int status, struct net_device *dev);
257#ifdef CONFIG_NET_POLL_CONTROLLER
258static void net_poll_controller(struct net_device *dev);
259#endif
260#if ALLOW_DMA
261static void get_dma_channel(struct net_device *dev);
262static void release_dma_buff(struct net_local *lp);
263#endif
264
265/* Example routines you must write ;->. */
266#define tx_done(dev) 1
267
268/*
269 * Permit 'cs89x0_dma=N' in the kernel boot environment
270 */
271#if !defined(MODULE) && (ALLOW_DMA != 0)
272static int g_cs89x0_dma;
273
274static int __init dma_fn(char *str)
275{
276 g_cs89x0_dma = simple_strtol(str,NULL,0);
277 return 1;
278}
279
280__setup("cs89x0_dma=", dma_fn);
281#endif /* !defined(MODULE) && (ALLOW_DMA != 0) */
282
283#ifndef MODULE
284static int g_cs89x0_media__force;
285
286static int __init media_fn(char *str)
287{
288 if (!strcmp(str, "rj45")) g_cs89x0_media__force = FORCE_RJ45;
289 else if (!strcmp(str, "aui")) g_cs89x0_media__force = FORCE_AUI;
290 else if (!strcmp(str, "bnc")) g_cs89x0_media__force = FORCE_BNC;
291 return 1;
292}
293
294__setup("cs89x0_media=", media_fn);
295
296
297/* Check for a network adaptor of this type, and return '0' iff one exists.
298 If dev->base_addr == 0, probe all likely locations.
299 If dev->base_addr == 1, always return failure.
300 If dev->base_addr == 2, allocate space for the device and return success
301 (detachable devices only).
302 Return 0 on success.
303 */
304
305struct net_device * __init cs89x0_probe(int unit)
306{
307 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
308 unsigned *port;
309 int err = 0;
310 int irq;
311 int io;
312
313 if (!dev)
314 return ERR_PTR(-ENODEV);
315
316 sprintf(dev->name, "eth%d", unit);
317 netdev_boot_setup_check(dev);
318 io = dev->base_addr;
319 irq = dev->irq;
320
321 if (net_debug)
322 printk("cs89x0:cs89x0_probe(0x%x)\n", io);
323
324 if (io > 0x1ff) { /* Check a single specified location. */
325 err = cs89x0_probe1(dev, io, 0);
326 } else if (io != 0) { /* Don't probe at all. */
327 err = -ENXIO;
328 } else {
329 for (port = netcard_portlist; *port; port++) {
330 if (cs89x0_probe1(dev, *port, 0) == 0)
331 break;
332 dev->irq = irq;
333 }
334 if (!*port)
335 err = -ENODEV;
336 }
337 if (err)
338 goto out;
339 return dev;
340out:
341 free_netdev(dev);
342 printk(KERN_WARNING "cs89x0: no cs8900 or cs8920 detected. Be sure to disable PnP with SETUP\n");
343 return ERR_PTR(err);
344}
345#endif
346
/* Bus accessors, selected at compile time per platform.  On the memory-
 * mapped platforms the 16-bit ports are spaced two bytes apart, hence
 * the (portno << 1) scaling; the generic build uses ISA port I/O. */
#if defined(CONFIG_MACH_IXDP2351)
static u16
readword(unsigned long base_addr, int portno)
{
	return __raw_readw(base_addr + (portno << 1));
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	__raw_writew(value, base_addr + (portno << 1));
}
#elif defined(CONFIG_ARCH_IXDP2X01)
/* NOTE(review): this platform issues 32-bit accesses for 16-bit ports;
 * presumably the bus only supports word-width cycles — confirm against
 * the IXDP2x01 board documentation. */
static u16
readword(unsigned long base_addr, int portno)
{
	return __raw_readl(base_addr + (portno << 1));
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	__raw_writel(value, base_addr + (portno << 1));
}
#else
static u16
readword(unsigned long base_addr, int portno)
{
	return inw(base_addr + portno);
}

static void
writeword(unsigned long base_addr, int portno, u16 value)
{
	outw(value, base_addr + portno);
}
#endif
384
385static void
386readwords(unsigned long base_addr, int portno, void *buf, int length)
387{
388 u8 *buf8 = (u8 *)buf;
389
390 do {
391 u16 tmp16;
392
393 tmp16 = readword(base_addr, portno);
394 *buf8++ = (u8)tmp16;
395 *buf8++ = (u8)(tmp16 >> 8);
396 } while (--length);
397}
398
399static void
400writewords(unsigned long base_addr, int portno, void *buf, int length)
401{
402 u8 *buf8 = (u8 *)buf;
403
404 do {
405 u16 tmp16;
406
407 tmp16 = *buf8++;
408 tmp16 |= (*buf8++) << 8;
409 writeword(base_addr, portno, tmp16);
410 } while (--length);
411}
412
413static u16
414readreg(struct net_device *dev, u16 regno)
415{
416 writeword(dev->base_addr, ADD_PORT, regno);
417 return readword(dev->base_addr, DATA_PORT);
418}
419
420static void
421writereg(struct net_device *dev, u16 regno, u16 value)
422{
423 writeword(dev->base_addr, ADD_PORT, regno);
424 writeword(dev->base_addr, DATA_PORT, value);
425}
426
427static int __init
428wait_eeprom_ready(struct net_device *dev)
429{
430 int timeout = jiffies;
431 /* check to see if the EEPROM is ready, a timeout is used -
432 just in case EEPROM is ready when SI_BUSY in the
433 PP_SelfST is clear */
434 while(readreg(dev, PP_SelfST) & SI_BUSY)
435 if (jiffies - timeout >= 40)
436 return -1;
437 return 0;
438}
439
440static int __init
441get_eeprom_data(struct net_device *dev, int off, int len, int *buffer)
442{
443 int i;
444
445 if (net_debug > 3) printk("EEPROM data from %x for %x:\n",off,len);
446 for (i = 0; i < len; i++) {
447 if (wait_eeprom_ready(dev) < 0) return -1;
448 /* Now send the EEPROM read command and EEPROM location to read */
449 writereg(dev, PP_EECMD, (off + i) | EEPROM_READ_CMD);
450 if (wait_eeprom_ready(dev) < 0) return -1;
451 buffer[i] = readreg(dev, PP_EEData);
452 if (net_debug > 3) printk("%04x ", buffer[i]);
453 }
454 if (net_debug > 3) printk("\n");
455 return 0;
456}
457
458static int __init
459get_eeprom_cksum(int off, int len, int *buffer)
460{
461 int i, cksum;
462
463 cksum = 0;
464 for (i = 0; i < len; i++)
465 cksum += buffer[i];
466 cksum &= 0xffff;
467 if (cksum == 0)
468 return 0;
469 return -1;
470}
471
472#ifdef CONFIG_NET_POLL_CONTROLLER
473/*
474 * Polling receive - used by netconsole and other diagnostic tools
475 * to allow network i/o with interrupts disabled.
476 */
477static void net_poll_controller(struct net_device *dev)
478{
479 disable_irq(dev->irq);
480 net_interrupt(dev->irq, dev);
481 enable_irq(dev->irq);
482}
483#endif
484
/* net_device callbacks; installed on every probed device by
 * cs89x0_probe1().  MTU/address validation use the generic ethernet
 * helpers; everything else is implemented in this file. */
static const struct net_device_ops net_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_tx_timeout		= net_timeout,
	.ndo_start_xmit		= net_send_packet,
	.ndo_get_stats		= net_get_stats,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_set_mac_address 	= set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= net_poll_controller,
#endif
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
499
500/* This is the real probe routine. Linux has a history of friendly device
501 probes on the ISA bus. A good device probes avoids doing writes, and
502 verifies that the correct device exists and functions.
503 Return 0 on success.
504 */
505
/*
 * cs89x0_probe1 - verify and initialise one candidate board.
 *
 * @dev:     net_device whose priv area is our struct net_local
 * @ioaddr:  candidate I/O base; low two bits encode probe flags
 *           (bit0: check the EISA signature first, bit1: skip that check)
 * @modular: non-zero when called from module init (priv already set up)
 *
 * Checks the chip ID, reads configuration from the EEPROM (Cirrus or
 * extended format) or falls back to command-line forcing, resolves the
 * IRQ (and DMA channel if enabled), then registers the netdev.
 * Returns 0 on success or a negative errno; on failure the I/O region
 * is released and the chip left selected at PP_ChipID.
 */
static int __init
cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
{
	struct net_local *lp = netdev_priv(dev);
	static unsigned version_printed;
	int i;
	int tmp;
	unsigned rev_type = 0;
	int eeprom_buff[CHKSUM_LEN];
	int retval;

	/* Initialize the device structure. */
	if (!modular) {
		memset(lp, 0, sizeof(*lp));
		spin_lock_init(&lp->lock);
#ifndef MODULE
#if ALLOW_DMA
		if (g_cs89x0_dma) {
			lp->use_dma = 1;
			lp->dma = g_cs89x0_dma;
			lp->dmasize = 16;	/* Could make this an option... */
		}
#endif
		lp->force = g_cs89x0_media__force;
#endif

#if defined(CONFIG_MACH_QQ2440)
		lp->force |= FORCE_RJ45 | FORCE_FULL;
#endif
	}

	/* Grab the region so we can find another board if autoIRQ fails. */
	/* NOTE(review): the region is reserved at ioaddr & ~3 because the
	   low two address bits carry probe flags (see netcard_portlist),
	   not real address bits. */
	if (!request_region(ioaddr & ~3, NETCARD_IO_EXTENT, DRV_NAME)) {
		printk(KERN_ERR "%s: request_region(0x%x, 0x%x) failed\n",
				DRV_NAME, ioaddr, NETCARD_IO_EXTENT);
		retval = -EBUSY;
		goto out1;
	}

	/* if they give us an odd I/O address, then do ONE write to
	   the address port, to get it back to address zero, where we
	   expect to find the EISA signature word. An IO with a base of 0x3
	   will skip the test for the ADD_PORT. */
	if (ioaddr & 1) {
		if (net_debug > 1)
			printk(KERN_INFO "%s: odd ioaddr 0x%x\n", dev->name, ioaddr);
		if ((ioaddr & 2) != 2)
			if ((readword(ioaddr & ~3, ADD_PORT) & ADD_MASK) != ADD_SIG) {
				printk(KERN_ERR "%s: bad signature 0x%x\n",
						dev->name, readword(ioaddr & ~3, ADD_PORT));
				retval = -ENODEV;
				goto out2;
			}
	}

	ioaddr &= ~3;
	printk(KERN_DEBUG "PP_addr at %x[%x]: 0x%x\n",
			ioaddr, ADD_PORT, readword(ioaddr, ADD_PORT));
	writeword(ioaddr, ADD_PORT, PP_ChipID);

	tmp = readword(ioaddr, DATA_PORT);
	if (tmp != CHIP_EISA_ID_SIG) {
		printk(KERN_DEBUG "%s: incorrect signature at %x[%x]: 0x%x!="
			CHIP_EISA_ID_SIG_STR "\n",
			dev->name, ioaddr, DATA_PORT, tmp);
		retval = -ENODEV;
		goto out2;
	}

	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;

	/* get the chip type */
	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type &~ REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	/* Check the chip type and revision in order to set the correct send command
	CS8920 revision C and CS8900 revision F can use the faster send. */
	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	if (net_debug && version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#3lx ",
	       dev->name,
	       lp->chip_type==CS8900?'0':'2',
	       lp->chip_type==CS8920M?"M":"",
	       lp->chip_revision,
	       dev->base_addr);

	reset_chip(dev);

	/* Here we read the current configuration of the chip. If there
	   is no Extended EEPROM then the idea is to not disturb the chip
	   configuration, it should have been correctly setup by automatic
	   EEPROM read on reset. So, if the chip says it read the EEPROM
	   the driver will always do *something* instead of complain that
	   adapter_cnf is 0. */


	if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) ==
	      (EEPROM_OK|EEPROM_PRESENT)) {
		/* Load the MAC. */
		for (i=0; i < ETH_ALEN/2; i++) {
			unsigned int Addr;
			Addr = readreg(dev, PP_IA+i*2);
			dev->dev_addr[i*2] = Addr & 0xFF;
			dev->dev_addr[i*2+1] = Addr >> 8;
		}

		/* Load the Adapter Configuration.
		   Note:  Barring any more specific information from some
		   other source (ie EEPROM+Schematics), we would not know
		   how to operate a 10Base2 interface on the AUI port.
		   However, since we do read the status of HCB1 and use
		   settings that always result in calls to control_dc_dc(dev,0)
		   a BNC interface should work if the enable pin
		   (dc/dc converter) is on HCB1. It will be called AUI
		   however. */

		lp->adapter_cnf = 0;
		i = readreg(dev, PP_LineCTL);
		/* Preserve the setting of the HCB1 pin. */
		if ((i & (HCB1 | HCB1_ENBL)) == (HCB1 | HCB1_ENBL))
			lp->adapter_cnf |= A_CNF_DC_DC_POLARITY;
		/* Save the sqelch bit */
		if ((i & LOW_RX_SQUELCH) == LOW_RX_SQUELCH)
			lp->adapter_cnf |= A_CNF_EXTND_10B_2 | A_CNF_LOW_RX_SQUELCH;
		/* Check if the card is in 10Base-t only mode */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == 0)
			lp->adapter_cnf |= A_CNF_10B_T | A_CNF_MEDIA_10B_T;
		/* Check if the card is in AUI only mode */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUI_ONLY)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_MEDIA_AUI;
		/* Check if the card is in Auto mode. */
		if ((i & (AUI_ONLY | AUTO_AUI_10BASET)) == AUTO_AUI_10BASET)
			lp->adapter_cnf |= A_CNF_AUI | A_CNF_10B_T |
			A_CNF_MEDIA_AUI | A_CNF_MEDIA_10B_T | A_CNF_MEDIA_AUTO;

		if (net_debug > 1)
			printk(KERN_INFO "%s: PP_LineCTL=0x%x, adapter_cnf=0x%x\n",
					dev->name, i, lp->adapter_cnf);

		/* IRQ. Other chips already probe, see below. */
		if (lp->chip_type == CS8900)
			lp->isa_config = readreg(dev, PP_CS8900_ISAINT) & INT_NO_MASK;

		printk( "[Cirrus EEPROM] ");
	}

	printk("\n");

	/* First check to see if an EEPROM is attached. */

	if ((readreg(dev, PP_SelfST) & EEPROM_PRESENT) == 0)
		printk(KERN_WARNING "cs89x0: No EEPROM, relying on command line....\n");
	else if (get_eeprom_data(dev, START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
		printk(KERN_WARNING "\ncs89x0: EEPROM read failed, relying on command line.\n");
	} else if (get_eeprom_cksum(START_EEPROM_DATA,CHKSUM_LEN,eeprom_buff) < 0) {
		/* Check if the chip was able to read its own configuration starting
		   at 0 in the EEPROM*/
		if ((readreg(dev, PP_SelfST) & (EEPROM_OK | EEPROM_PRESENT)) !=
		    (EEPROM_OK|EEPROM_PRESENT))
			printk(KERN_WARNING "cs89x0: Extended EEPROM checksum bad and no Cirrus EEPROM, relying on command line\n");

	} else {
		/* This reads an extended EEPROM that is not documented
		   in the CS8900 datasheet. */

		/* get transmission control word  but keep the autonegotiation bits */
		if (!lp->auto_neg_cnf) lp->auto_neg_cnf = eeprom_buff[AUTO_NEG_CNF_OFFSET/2];
		/* Store adapter configuration */
		if (!lp->adapter_cnf) lp->adapter_cnf = eeprom_buff[ADAPTER_CNF_OFFSET/2];
		/* Store ISA configuration */
		lp->isa_config = eeprom_buff[ISA_CNF_OFFSET/2];
		dev->mem_start = eeprom_buff[PACKET_PAGE_OFFSET/2] << 8;

		/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
		/* store the initial memory base address */
		for (i = 0; i < ETH_ALEN/2; i++) {
			dev->dev_addr[i*2] = eeprom_buff[i];
			dev->dev_addr[i*2+1] = eeprom_buff[i] >> 8;
		}
		if (net_debug > 1)
			printk(KERN_DEBUG "%s: new adapter_cnf: 0x%x\n",
				dev->name, lp->adapter_cnf);
	}

	/* allow them to force multiple transceivers. If they force multiple, autosense */
	{
		int count = 0;
		if (lp->force & FORCE_RJ45)	{lp->adapter_cnf |= A_CNF_10B_T; count++; }
		if (lp->force & FORCE_AUI) 	{lp->adapter_cnf |= A_CNF_AUI; count++; }
		if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_10B_2; count++; }
		if (count > 1)			{lp->adapter_cnf |= A_CNF_MEDIA_AUTO; }
		else if (lp->force & FORCE_RJ45){lp->adapter_cnf |= A_CNF_MEDIA_10B_T; }
		else if (lp->force & FORCE_AUI)	{lp->adapter_cnf |= A_CNF_MEDIA_AUI; }
		else if (lp->force & FORCE_BNC)	{lp->adapter_cnf |= A_CNF_MEDIA_10B_2; }
	}

	if (net_debug > 1)
		printk(KERN_DEBUG "%s: after force 0x%x, adapter_cnf=0x%x\n",
			dev->name, lp->force, lp->adapter_cnf);

	/* FIXME: We don't let you set dc-dc polarity or low RX squelch from the command line: add it here */

	/* FIXME: We don't let you set the IMM bit from the command line: add it to lp->auto_neg_cnf here */

	/* FIXME: we don't set the Ethernet address on the command line.  Use
	   ifconfig IFACE hw ether AABBCCDDEEFF */

	printk(KERN_INFO "cs89x0 media %s%s%s",
	       (lp->adapter_cnf & A_CNF_10B_T)?"RJ-45,":"",
	       (lp->adapter_cnf & A_CNF_AUI)?"AUI,":"",
	       (lp->adapter_cnf & A_CNF_10B_2)?"BNC,":"");

	lp->irq_map = 0xffff;

	/* If this is a CS8900 then no pnp soft */
	if (lp->chip_type != CS8900 &&
	    /* Check if the ISA IRQ has been set  */
	    (i = readreg(dev, PP_CS8920_ISAINT) & 0xff,
	     (i != 0 && i < CS8920_NO_INTS))) {
		if (!dev->irq)
			dev->irq = i;
	} else {
		i = lp->isa_config & INT_NO_MASK;
		if (lp->chip_type == CS8900) {
		/* NOTE(review): the #ifdef below interleaves with the brace
		   structure — when CONFIG_CS89x0_NONISA_IRQ is defined, the
		   whole CS8920 EEPROM irq-map branch (the 'else' arm) is
		   compiled out along with the map translation. */
#ifdef CONFIG_CS89x0_NONISA_IRQ
			i = cs8900_irq_map[0];
#else
			/* Translate the IRQ using the IRQ mapping table. */
			if (i >= ARRAY_SIZE(cs8900_irq_map))
				printk("\ncs89x0: invalid ISA interrupt number %d\n", i);
			else
				i = cs8900_irq_map[i];

			lp->irq_map = CS8900_IRQ_MAP;	/* fixed IRQ map for CS8900 */
		} else {
			int irq_map_buff[IRQ_MAP_LEN/2];

			if (get_eeprom_data(dev, IRQ_MAP_EEPROM_DATA,
					    IRQ_MAP_LEN/2,
					    irq_map_buff) >= 0) {
				if ((irq_map_buff[0] & 0xff) == PNP_IRQ_FRMT)
					lp->irq_map = (irq_map_buff[0]>>8) | (irq_map_buff[1] << 8);
			}
#endif
		}
		if (!dev->irq)
			dev->irq = i;
	}

	printk(" IRQ %d", dev->irq);

#if ALLOW_DMA
	if (lp->use_dma) {
		get_dma_channel(dev);
		printk(", DMA %d", dev->dma);
	}
	else
#endif
	{
		printk(", programmed I/O");
	}

	/* print the ethernet address. */
	printk(", MAC %pM", dev->dev_addr);

	dev->netdev_ops	= &net_ops;
	dev->watchdog_timeo = HZ;

	printk("\n");
	if (net_debug)
		printk("cs89x0_probe1() successful\n");

	retval = register_netdev(dev);
	if (retval)
		goto out3;
	return 0;
out3:
	writeword(dev->base_addr, ADD_PORT, PP_ChipID);
out2:
	release_region(ioaddr & ~3, NETCARD_IO_EXTENT);
out1:
	return retval;
}
799
800
801/*********************************
802 * This page contains DMA routines
803**********************************/
804
805#if ALLOW_DMA
806
807#define dma_page_eq(ptr1, ptr2) ((long)(ptr1)>>17 == (long)(ptr2)>>17)
808
809static void
810get_dma_channel(struct net_device *dev)
811{
812 struct net_local *lp = netdev_priv(dev);
813
814 if (lp->dma) {
815 dev->dma = lp->dma;
816 lp->isa_config |= ISA_RxDMA;
817 } else {
818 if ((lp->isa_config & ANY_ISA_DMA) == 0)
819 return;
820 dev->dma = lp->isa_config & DMA_NO_MASK;
821 if (lp->chip_type == CS8900)
822 dev->dma += 5;
823 if (dev->dma < 5 || dev->dma > 7) {
824 lp->isa_config &= ~ANY_ISA_DMA;
825 return;
826 }
827 }
828}
829
830static void
831write_dma(struct net_device *dev, int chip_type, int dma)
832{
833 struct net_local *lp = netdev_priv(dev);
834 if ((lp->isa_config & ANY_ISA_DMA) == 0)
835 return;
836 if (chip_type == CS8900) {
837 writereg(dev, PP_CS8900_ISADMA, dma-5);
838 } else {
839 writereg(dev, PP_CS8920_ISADMA, dma);
840 }
841}
842
843static void
844set_dma_cfg(struct net_device *dev)
845{
846 struct net_local *lp = netdev_priv(dev);
847
848 if (lp->use_dma) {
849 if ((lp->isa_config & ANY_ISA_DMA) == 0) {
850 if (net_debug > 3)
851 printk("set_dma_cfg(): no DMA\n");
852 return;
853 }
854 if (lp->isa_config & ISA_RxDMA) {
855 lp->curr_rx_cfg |= RX_DMA_ONLY;
856 if (net_debug > 3)
857 printk("set_dma_cfg(): RX_DMA_ONLY\n");
858 } else {
859 lp->curr_rx_cfg |= AUTO_RX_DMA; /* not that we support it... */
860 if (net_debug > 3)
861 printk("set_dma_cfg(): AUTO_RX_DMA\n");
862 }
863 }
864}
865
866static int
867dma_bufcfg(struct net_device *dev)
868{
869 struct net_local *lp = netdev_priv(dev);
870 if (lp->use_dma)
871 return (lp->isa_config & ANY_ISA_DMA)? RX_DMA_ENBL : 0;
872 else
873 return 0;
874}
875
876static int
877dma_busctl(struct net_device *dev)
878{
879 int retval = 0;
880 struct net_local *lp = netdev_priv(dev);
881 if (lp->use_dma) {
882 if (lp->isa_config & ANY_ISA_DMA)
883 retval |= RESET_RX_DMA; /* Reset the DMA pointer */
884 if (lp->isa_config & DMA_BURST)
885 retval |= DMA_BURST_MODE; /* Does ISA config specify DMA burst ? */
886 if (lp->dmasize == 64)
887 retval |= RX_DMA_SIZE_64K; /* did they ask for 64K? */
888 retval |= MEMORY_ON; /* we need memory enabled to use DMA. */
889 }
890 return retval;
891}
892
/*
 * dma_rx - deliver the next received frame from the DMA ring buffer.
 *
 * The ring (lp->dma_buff .. lp->end_dma_buff, lp->dmasize KB) holds
 * frames prefixed by a 4-byte little-endian header: status word then
 * length word.  On success the payload is copied into a fresh skb
 * (handling wraparound) and handed to netif_rx(); lp->rx_dma_ptr is
 * advanced to the next 4-byte-aligned frame either way.
 *
 * NOTE: the error path uses a goto that jumps *into* the skb == NULL
 * branch below (label skip_this_frame) to share the pointer-advance
 * code — intentional, if unusual.
 */
static void
dma_rx(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	struct sk_buff *skb;
	int status, length;
	unsigned char *bp = lp->rx_dma_ptr;

	/* 4-byte frame header: status, then payload length (LE). */
	status = bp[0] + (bp[1]<<8);
	length = bp[2] + (bp[3]<<8);
	bp += 4;
	if (net_debug > 5) {
		printk(	"%s: receiving DMA packet at %lx, status %x, length %x\n",
			dev->name, (unsigned long)bp, status, length);
	}
	if ((status & RX_OK) == 0) {
		count_rx_errors(status, dev);
		goto skip_this_frame;
	}

	/* Malloc up new buffer. */
	skb = dev_alloc_skb(length + 2);
	if (skb == NULL) {
		if (net_debug)	/* I don't think we want to do this to a stressed system */
			printk("%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;

		/* AKPM: advance bp to the next frame */
skip_this_frame:
		bp += (length + 3) & ~3;
		if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
		lp->rx_dma_ptr = bp;
		return;
	}
	skb_reserve(skb, 2);	/* longword align L3 header */

	/* Copy out, in two pieces if the frame wraps past the ring end. */
	if (bp + length > lp->end_dma_buff) {
		int semi_cnt = lp->end_dma_buff - bp;
		memcpy(skb_put(skb,semi_cnt), bp, semi_cnt);
		memcpy(skb_put(skb,length - semi_cnt), lp->dma_buff,
		       length - semi_cnt);
	} else {
		memcpy(skb_put(skb,length), bp, length);
	}
	/* Advance past the payload, rounded up to 4 bytes, with wrap. */
	bp += (length + 3) & ~3;
	if (bp >= lp->end_dma_buff) bp -= lp->dmasize*1024;
	lp->rx_dma_ptr = bp;

	if (net_debug > 3) {
		printk(	"%s: received %d byte DMA packet of type %x\n",
			dev->name, length,
			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
	}
	skb->protocol=eth_type_trans(skb,dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
951
952#endif /* ALLOW_DMA */
953
954static void __init reset_chip(struct net_device *dev)
955{
956#if !defined(CONFIG_MACH_MX31ADS)
957#if !defined(CS89x0_NONISA_IRQ)
958 struct net_local *lp = netdev_priv(dev);
959 int ioaddr = dev->base_addr;
960#endif /* CS89x0_NONISA_IRQ */
961 int reset_start_time;
962
963 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
964
965 /* wait 30 ms */
966 msleep(30);
967
968#if !defined(CS89x0_NONISA_IRQ)
969 if (lp->chip_type != CS8900) {
970 /* Hardware problem requires PNP registers to be reconfigured after a reset */
971 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAINT);
972 outb(dev->irq, ioaddr + DATA_PORT);
973 outb(0, ioaddr + DATA_PORT + 1);
974
975 writeword(ioaddr, ADD_PORT, PP_CS8920_ISAMemB);
976 outb((dev->mem_start >> 16) & 0xff, ioaddr + DATA_PORT);
977 outb((dev->mem_start >> 8) & 0xff, ioaddr + DATA_PORT + 1);
978 }
979#endif /* CS89x0_NONISA_IRQ */
980
981 /* Wait until the chip is reset */
982 reset_start_time = jiffies;
983 while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
984 ;
985#endif /* !CONFIG_MACH_MX31ADS */
986}
987
988
989static void
990control_dc_dc(struct net_device *dev, int on_not_off)
991{
992 struct net_local *lp = netdev_priv(dev);
993 unsigned int selfcontrol;
994 int timenow = jiffies;
995 /* control the DC to DC convertor in the SelfControl register.
996 Note: This is hooked up to a general purpose pin, might not
997 always be a DC to DC convertor. */
998
999 selfcontrol = HCB1_ENBL; /* Enable the HCB1 bit as an output */
1000 if (((lp->adapter_cnf & A_CNF_DC_DC_POLARITY) != 0) ^ on_not_off)
1001 selfcontrol |= HCB1;
1002 else
1003 selfcontrol &= ~HCB1;
1004 writereg(dev, PP_SelfCTL, selfcontrol);
1005
1006 /* Wait for the DC/DC converter to power up - 500ms */
1007 while (jiffies - timenow < HZ)
1008 ;
1009}
1010
1011#define DETECTED_NONE 0
1012#define DETECTED_RJ45H 1
1013#define DETECTED_RJ45F 2
1014#define DETECTED_AUI 3
1015#define DETECTED_BNC 4
1016
/* Probe the 10Base-T (RJ-45) port.  Returns DETECTED_RJ45F /
 * DETECTED_RJ45H when a link is present (full/half duplex), or
 * DETECTED_NONE when no link pulse is seen.  Duplex is forced or
 * auto-negotiated according to lp->force; the CS8900 cannot
 * autonegotiate, so FORCE_AUTO is downgraded to half duplex there. */
1017static int
1018detect_tp(struct net_device *dev)
1019{
1020	struct net_local *lp = netdev_priv(dev);
1021	int timenow = jiffies;
1022	int fdx;
1023
1024	if (net_debug > 1) printk("%s: Attempting TP\n", dev->name);
1025
1026	/* If connected to another full duplex capable 10-Base-T card the link pulses
1027	   seem to be lost when the auto detect bit in the LineCTL is set.
1028	   To overcome this the auto detect bit will be cleared whilst testing the
1029	   10-Base-T interface.  This would not be necessary for the sparrow chip but
1030	   is simpler to do it anyway. */
1031	writereg(dev, PP_LineCTL, lp->linectl &~ AUI_ONLY);
1032	control_dc_dc(dev, 0);
1033
1034	/* Delay for the hardware to work out if the TP cable is present - 150ms */
	/* NOTE(review): 15 jiffies equals 150 ms only when HZ=100; busy-wait. */
1035	for (timenow = jiffies; jiffies - timenow < 15; )
1036		;
1037	if ((readreg(dev, PP_LineST) & LINK_OK) == 0)
1038		return DETECTED_NONE;
1039
1040	if (lp->chip_type == CS8900) {
1041		switch (lp->force & 0xf0) {
1042#if 0
1043		case FORCE_AUTO:
1044			printk("%s: cs8900 doesn't autonegotiate\n",dev->name);
1045			return DETECTED_NONE;
1046#endif
1047		/* CS8900 doesn't support AUTO, change to HALF*/
		/* The FORCE_AUTO case above is compiled out, so this one is
		 * the live handler. */
1048		case FORCE_AUTO:
1049			lp->force &= ~FORCE_AUTO;
1050			lp->force |= FORCE_HALF;
1051			break;
1052		case FORCE_HALF:
1053			break;
1054		case FORCE_FULL:
1055			writereg(dev, PP_TestCTL, readreg(dev, PP_TestCTL) | FDX_8900);
1056			break;
1057		}
1058		fdx = readreg(dev, PP_TestCTL) & FDX_8900;
1059	} else {
1060		switch (lp->force & 0xf0) {
1061		case FORCE_AUTO:
1062			lp->auto_neg_cnf = AUTO_NEG_ENABLE;
1063			break;
1064		case FORCE_HALF:
1065			lp->auto_neg_cnf = 0;
1066			break;
1067		case FORCE_FULL:
1068			lp->auto_neg_cnf = RE_NEG_NOW | ALLOW_FDX;
1069			break;
1070		}
1071
1072		writereg(dev, PP_AutoNegCTL, lp->auto_neg_cnf & AUTO_NEG_MASK);
1073
1074		if ((lp->auto_neg_cnf & AUTO_NEG_BITS) == AUTO_NEG_ENABLE) {
1075			printk(KERN_INFO "%s: negotiating duplex...\n",dev->name);
			/* Busy-poll the chip until negotiation finishes;
			 * bail out after 4000 jiffies. */
1076			while (readreg(dev, PP_AutoNegST) & AUTO_NEG_BUSY) {
1077				if (jiffies - timenow > 4000) {
1078					printk(KERN_ERR "**** Full / half duplex auto-negotiation timed out ****\n");
1079					break;
1080				}
1081			}
1082		}
1083		fdx = readreg(dev, PP_AutoNegST) & FDX_ACTIVE;
1084	}
1085	if (fdx)
1086		return DETECTED_RJ45F;
1087	else
1088		return DETECTED_RJ45H;
1089}
1090
1091/* send a test packet - return true if carrier bits are ok */
/* Transmit a minimal self-addressed 802.2 LLC TEST frame and report
 * whether the chip signalled a clean transmit (carrier/SQE ok).
 * Used by detect_aui()/detect_bnc() to see if the medium is alive.
 * Returns 1 on TX_OK, 0 on buffer-allocation timeout or TX error. */
1092static int
1093send_test_pkt(struct net_device *dev)
1094{
	/* dest + src MACs (filled in below), length 46 in network order,
	 * DSAP/SSAP 0, LLC control 0xf3 = TEST request with P bit set */
1095	char test_packet[] = { 0,0,0,0,0,0, 0,0,0,0,0,0,
1096				 0, 46, /* A 46 in network order */
1097				 0, 0, /* DSAP=0 & SSAP=0 fields */
1098				 0xf3, 0 /* Control (Test Req + P bit set) */ };
1099	long timenow = jiffies;
1100
1101	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_TX_ON);
1102
	/* Frame is sent to ourselves: our MAC as both destination and source. */
1103	memcpy(test_packet,          dev->dev_addr, ETH_ALEN);
1104	memcpy(test_packet+ETH_ALEN, dev->dev_addr, ETH_ALEN);
1105
1106	writeword(dev->base_addr, TX_CMD_PORT, TX_AFTER_ALL);
1107	writeword(dev->base_addr, TX_LEN_PORT, ETH_ZLEN);
1108
1109	/* Test to see if the chip has allocated memory for the packet */
	/* Busy-poll up to 5 jiffies for READY_FOR_TX_NOW. */
1110	while (jiffies - timenow < 5)
1111		if (readreg(dev, PP_BusST) & READY_FOR_TX_NOW)
1112			break;
1113	if (jiffies - timenow >= 5)
1114		return 0;	/* this shouldn't happen */
1115
1116	/* Write the contents of the packet */
	/* (ETH_ZLEN+1)>>1 rounds the byte count up to whole 16-bit words. */
1117	writewords(dev->base_addr, TX_FRAME_PORT,test_packet,(ETH_ZLEN+1) >>1);
1118
1119	if (net_debug > 1) printk("Sending test packet ");
1120	/* wait a couple of jiffies for packet to be received */
1121	for (timenow = jiffies; jiffies - timenow < 3; )
1122		;
1123	if ((readreg(dev, PP_TxEvent) & TX_SEND_OK_BITS) == TX_OK) {
1124		if (net_debug > 1) printk("succeeded\n");
1125		return 1;
1126	}
1127	if (net_debug > 1) printk("failed\n");
1128	return 0;
1129}
1130
1131
1132static int
1133detect_aui(struct net_device *dev)
1134{
1135 struct net_local *lp = netdev_priv(dev);
1136
1137 if (net_debug > 1) printk("%s: Attempting AUI\n", dev->name);
1138 control_dc_dc(dev, 0);
1139
1140 writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
1141
1142 if (send_test_pkt(dev))
1143 return DETECTED_AUI;
1144 else
1145 return DETECTED_NONE;
1146}
1147
1148static int
1149detect_bnc(struct net_device *dev)
1150{
1151 struct net_local *lp = netdev_priv(dev);
1152
1153 if (net_debug > 1) printk("%s: Attempting BNC\n", dev->name);
1154 control_dc_dc(dev, 1);
1155
1156 writereg(dev, PP_LineCTL, (lp->linectl &~ AUTO_AUI_10BASET) | AUI_ONLY);
1157
1158 if (send_test_pkt(dev))
1159 return DETECTED_BNC;
1160 else
1161 return DETECTED_NONE;
1162}
1163
1164
1165static void
1166write_irq(struct net_device *dev, int chip_type, int irq)
1167{
1168 int i;
1169
1170 if (chip_type == CS8900) {
1171 /* Search the mapping table for the corresponding IRQ pin. */
1172 for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++)
1173 if (cs8900_irq_map[i] == irq)
1174 break;
1175 /* Not found */
1176 if (i == ARRAY_SIZE(cs8900_irq_map))
1177 i = 3;
1178 writereg(dev, PP_CS8900_ISAINT, i);
1179 } else {
1180 writereg(dev, PP_CS8920_ISAINT, irq);
1181 }
1182}
1183
1184/* Open/initialize the board. This is called (in the current kernel)
1185 sometime after booting when the 'ifconfig' program is run.
1186
1187 This routine should set everything up anew at each open, even
1188 registers that "should" only need to be set once at boot, so that
1189 there is non-reboot way to recover if something goes wrong.
1190 */
1191
1192/* AKPM: do we need to do any locking here? */
1193
/* Bring the interface up: claim (or auto-probe) an IRQ, optionally set
 * up the ISA DMA receive buffer, program the MAC address, detect and
 * select the physical medium, then enable RX/TX and interrupts.
 * Returns 0 on success or a negative errno.  Note the unusual error
 * path: the release_dma/release_irq labels live inside the
 * "unavailable media" branch and are entered by goto from below. */
1194static int
1195net_open(struct net_device *dev)
1196{
1197	struct net_local *lp = netdev_priv(dev);
1198	int result = 0;
1199	int i;
1200	int ret;
1201
	/* irq < 2 means "not configured": probe every IRQ allowed by
	 * lp->irq_map until request_irq() succeeds. */
1202	if (dev->irq < 2) {
1203		/* Allow interrupts to be generated by the chip */
1204/* Cirrus' release had this: */
1205#if 0
1206		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
1207#endif
1208/* And 2.3.47 had this: */
1209		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
1210
1211		for (i = 2; i < CS8920_NO_INTS; i++) {
1212			if ((1 << i) & lp->irq_map) {
1213				if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
1214					dev->irq = i;
1215					write_irq(dev, lp->chip_type, i);
1216					/* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
1217					break;
1218				}
1219			}
1220		}
1221
1222		if (i >= CS8920_NO_INTS) {
1223			writereg(dev, PP_BusCTL, 0);	/* disable interrupts. */
1224			printk(KERN_ERR "cs89x0: can't get an interrupt\n");
1225			ret = -EAGAIN;
1226			goto bad_out;
1227		}
1228	}
1229	else
1230	{
	/* A specific IRQ was configured: validate it against the map
	 * (unless on a non-ISA platform) and claim it. */
1231#ifndef CONFIG_CS89x0_NONISA_IRQ
1232		if (((1 << dev->irq) & lp->irq_map) == 0) {
1233			printk(KERN_ERR "%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
1234                               dev->name, dev->irq, lp->irq_map);
1235			ret = -EAGAIN;
1236			goto bad_out;
1237		}
1238#endif
1239/* FIXME: Cirrus' release had this: */
1240		writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL)|ENABLE_IRQ );
1241/* And 2.3.47 had this: */
1242#if 0
1243		writereg(dev, PP_BusCTL, ENABLE_IRQ | MEMORY_ON);
1244#endif
1245		write_irq(dev, lp->chip_type, dev->irq);
1246		ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
1247		if (ret) {
1248			printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
1249			goto bad_out;
1250		}
1251	}
1252
	/* Optional ISA DMA receive path: allocate a physically
	 * contiguous, non-page-crossing bounce buffer and arm the
	 * channel in auto-init mode. */
1253#if ALLOW_DMA
1254	if (lp->use_dma) {
1255		if (lp->isa_config & ANY_ISA_DMA) {
1256			unsigned long flags;
1257			lp->dma_buff = (unsigned char *)__get_dma_pages(GFP_KERNEL,
1258							get_order(lp->dmasize * 1024));
1259
1260			if (!lp->dma_buff) {
1261				printk(KERN_ERR "%s: cannot get %dK memory for DMA\n", dev->name, lp->dmasize);
1262				goto release_irq;
1263			}
1264			if (net_debug > 1) {
1265				printk(	"%s: dma %lx %lx\n",
1266					dev->name,
1267					(unsigned long)lp->dma_buff,
1268					(unsigned long)isa_virt_to_bus(lp->dma_buff));
1269			}
1270			if ((unsigned long) lp->dma_buff >= MAX_DMA_ADDRESS ||
1271			    !dma_page_eq(lp->dma_buff, lp->dma_buff+lp->dmasize*1024-1)) {
1272				printk(KERN_ERR "%s: not usable as DMA buffer\n", dev->name);
1273				goto release_irq;
1274			}
1275			memset(lp->dma_buff, 0, lp->dmasize * 1024);	/* Why? */
1276			if (request_dma(dev->dma, dev->name)) {
1277				printk(KERN_ERR "%s: cannot get dma channel %d\n", dev->name, dev->dma);
1278				goto release_irq;
1279			}
1280			write_dma(dev, lp->chip_type, dev->dma);
1281			lp->rx_dma_ptr = lp->dma_buff;
1282			lp->end_dma_buff = lp->dma_buff + lp->dmasize*1024;
1283			spin_lock_irqsave(&lp->lock, flags);
1284			disable_dma(dev->dma);
1285			clear_dma_ff(dev->dma);
1286			set_dma_mode(dev->dma, DMA_RX_MODE); /* auto_init as well */
1287			set_dma_addr(dev->dma, isa_virt_to_bus(lp->dma_buff));
1288			set_dma_count(dev->dma, lp->dmasize*1024);
1289			enable_dma(dev->dma);
1290			spin_unlock_irqrestore(&lp->lock, flags);
1291		}
1292	}
1293#endif	/* ALLOW_DMA */
1294
1295	/* set the Ethernet address */
	/* Programmed one 16-bit little-endian word at a time. */
1296	for (i=0; i < ETH_ALEN/2; i++)
1297		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
1298
1299	/* while we're testing the interface, leave interrupts disabled */
1300	writereg(dev, PP_BusCTL, MEMORY_ON);
1301
1302	/* Set the LineCTL quintuplet based on adapter configuration read from EEPROM */
1303	if ((lp->adapter_cnf & A_CNF_EXTND_10B_2) && (lp->adapter_cnf & A_CNF_LOW_RX_SQUELCH))
1304		lp->linectl = LOW_RX_SQUELCH;
1305	else
1306		lp->linectl = 0;
1307
1308	/* check to make sure that they have the "right" hardware available */
1309	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
1310	case A_CNF_MEDIA_10B_T: result = lp->adapter_cnf & A_CNF_10B_T; break;
1311	case A_CNF_MEDIA_AUI:   result = lp->adapter_cnf & A_CNF_AUI; break;
1312	case A_CNF_MEDIA_10B_2: result = lp->adapter_cnf & A_CNF_10B_2; break;
1313        default: result = lp->adapter_cnf & (A_CNF_10B_T | A_CNF_AUI | A_CNF_10B_2);
1314        }
1315	if (!result) {
1316		printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name);
	/* Shared error path: entered here on config mismatch, or by goto
	 * from the media-detection failures below. */
1317release_dma:
1318#if ALLOW_DMA
1319		free_dma(dev->dma);
1320release_irq:
1321		release_dma_buff(lp);
1322#endif
1323		writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
1324		free_irq(dev->irq, dev);
1325		ret = -EAGAIN;
1326		goto bad_out;
1327	}
1328
1329	/* set the hardware to the configured choice */
1330	switch(lp->adapter_cnf & A_CNF_MEDIA_TYPE) {
1331	case A_CNF_MEDIA_10B_T:
1332		result = detect_tp(dev);
1333		if (result==DETECTED_NONE) {
1334			printk(KERN_WARNING "%s: 10Base-T (RJ-45) has no cable\n", dev->name);
1335			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1336				result = DETECTED_RJ45H; /* Yes! I don't care if I see a link pulse */
1337		}
1338		break;
1339	case A_CNF_MEDIA_AUI:
1340		result = detect_aui(dev);
1341		if (result==DETECTED_NONE) {
1342			printk(KERN_WARNING "%s: 10Base-5 (AUI) has no cable\n", dev->name);
1343			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1344				result = DETECTED_AUI; /* Yes! I don't care if I see a carrrier */
1345		}
1346		break;
1347	case A_CNF_MEDIA_10B_2:
1348		result = detect_bnc(dev);
1349		if (result==DETECTED_NONE) {
1350			printk(KERN_WARNING "%s: 10Base-2 (BNC) has no cable\n", dev->name);
1351			if (lp->auto_neg_cnf & IMM_BIT) /* check "ignore missing media" bit */
1352				result = DETECTED_BNC; /* Yes! I don't care if I can xmit a packet */
1353		}
1354		break;
1355	case A_CNF_MEDIA_AUTO:
	/* Auto: try each available medium in order TP, AUI, BNC. */
1356		writereg(dev, PP_LineCTL, lp->linectl | AUTO_AUI_10BASET);
1357		if (lp->adapter_cnf & A_CNF_10B_T)
1358			if ((result = detect_tp(dev)) != DETECTED_NONE)
1359				break;
1360		if (lp->adapter_cnf & A_CNF_AUI)
1361			if ((result = detect_aui(dev)) != DETECTED_NONE)
1362				break;
1363		if (lp->adapter_cnf & A_CNF_10B_2)
1364			if ((result = detect_bnc(dev)) != DETECTED_NONE)
1365				break;
1366		printk(KERN_ERR "%s: no media detected\n", dev->name);
1367		goto release_dma;
1368	}
1369	switch(result) {
1370	case DETECTED_NONE:
1371		printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name);
1372		goto release_dma;
1373	case DETECTED_RJ45H:
1374		printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name);
1375		break;
1376	case DETECTED_RJ45F:
1377		printk(KERN_INFO "%s: using full-duplex 10Base-T (RJ-45)\n", dev->name);
1378		break;
1379	case DETECTED_AUI:
1380		printk(KERN_INFO "%s: using 10Base-5 (AUI)\n", dev->name);
1381		break;
1382	case DETECTED_BNC:
1383		printk(KERN_INFO "%s: using 10Base-2 (BNC)\n", dev->name);
1384		break;
1385	}
1386
1387	/* Turn on both receive and transmit operations */
1388	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);
1389
1390	/* Receive only error free packets addressed to this card */
1391	lp->rx_mode = 0;
1392	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);
1393
1394	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;
1395
1396	if (lp->isa_config & STREAM_TRANSFER)
1397		lp->curr_rx_cfg |= RX_STREAM_ENBL;
1398#if ALLOW_DMA
1399	set_dma_cfg(dev);
1400#endif
1401	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);
1402
1403	writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
1404		TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);
1405
1406	writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
1407#if ALLOW_DMA
1408		 dma_bufcfg(dev) |
1409#endif
1410		 TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);
1411
1412	/* now that we've got our act together, enable everything */
1413	writereg(dev, PP_BusCTL, ENABLE_IRQ
1414		 | (dev->mem_start?MEMORY_ON : 0) /* turn memory on */
1415#if ALLOW_DMA
1416		 | dma_busctl(dev)
1417#endif
1418                 );
1419	netif_start_queue(dev);
1420	if (net_debug > 1)
1421		printk("cs89x0: net_open() succeeded\n");
1422	return 0;
1423bad_out:
1424	return ret;
1425}
1426
1427static void net_timeout(struct net_device *dev)
1428{
1429 /* If we get here, some higher level has decided we are broken.
1430 There should really be a "kick me" function call instead. */
1431 if (net_debug > 0) printk("%s: transmit timed out, %s?\n", dev->name,
1432 tx_done(dev) ? "IRQ conflict ?" : "network cable problem");
1433 /* Try to restart the adaptor. */
1434 netif_wake_queue(dev);
1435}
1436
/* ndo_start_xmit: hand one skb to the chip via PIO.  The upload runs
 * under lp->lock with the queue stopped, because the chip is told to
 * start transmitting before the whole frame has been written.  The
 * queue is restarted only from the TX-completion interrupt. */
1437static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
1438{
1439	struct net_local *lp = netdev_priv(dev);
1440	unsigned long flags;
1441
1442	if (net_debug > 3) {
1443		printk("%s: sent %d byte packet of type %x\n",
1444			dev->name, skb->len,
1445			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
1446	}
1447
1448	/* keep the upload from being interrupted, since we
                  ask the chip to start transmitting before the
                  whole packet has been completely uploaded. */
1451
1452	spin_lock_irqsave(&lp->lock, flags);
1453	netif_stop_queue(dev);
1454
1455	/* initiate a transmit sequence */
1456	writeword(dev->base_addr, TX_CMD_PORT, lp->send_cmd);
1457	writeword(dev->base_addr, TX_LEN_PORT, skb->len);
1458
1459	/* Test to see if the chip has allocated memory for the packet */
1460	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
1461		/*
1462		 * Gasp!  It hasn't.  But that shouldn't happen since
1463		 * we're waiting for TxOk, so return 1 and requeue this packet.
1464		 */
1465
1466		spin_unlock_irqrestore(&lp->lock, flags);
1467		if (net_debug) printk("cs89x0: Tx buffer not free!\n");
1468		return NETDEV_TX_BUSY;
1469	}
1470	/* Write the contents of the packet */
	/* (len+1)>>1 rounds up to whole 16-bit words for the PIO port. */
1471	writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
1472	spin_unlock_irqrestore(&lp->lock, flags);
1473	dev->stats.tx_bytes += skb->len;
1474	dev_kfree_skb (skb);
1475
1476	/*
1477	 * We DO NOT call netif_wake_queue() here.
1478	 * We also DO NOT call netif_start_queue().
1479	 *
1480	 * Either of these would cause another bottom half run through
1481	 * net_send_packet() before this packet has fully gone out.  That causes
1482	 * us to hit the "Gasp!" above and the send is rescheduled.  it runs like
1483	 * a dog.  We just return and wait for the Tx completion interrupt handler
1484	 * to restart the netdevice layer
1485	 */
1486
1487	return NETDEV_TX_OK;
1488}
1489
1490/* The typical workload of the driver:
1491 Handle the network interface interrupts. */
1492
/* Interrupt handler: drain the chip's Interrupt Status Queue (ISQ) and
 * dispatch each event - receive, transmit completion, buffer events
 * (including the DMA receive path), and the overflow counters. */
1493static irqreturn_t net_interrupt(int irq, void *dev_id)
1494{
1495	struct net_device *dev = dev_id;
1496	struct net_local *lp;
1497	int ioaddr, status;
1498 	int handled = 0;
1499
1500	ioaddr = dev->base_addr;
1501	lp = netdev_priv(dev);
1502
1503	/* we MUST read all the events out of the ISQ, otherwise we'll never
1504           get interrupted again.  As a consequence, we can't have any limit
1505           on the number of times we loop in the interrupt handler.  The
1506           hardware guarantees that eventually we'll run out of events.  Of
1507           course, if you're on a slow machine, and packets are arriving
1508           faster than you can read them off, you're screwed.  Hasta la
1509           vista, baby!  */
	/* ISQ_PORT reads 0 when the queue is empty - loop terminator. */
1510	while ((status = readword(dev->base_addr, ISQ_PORT))) {
1511		if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
1512		handled = 1;
1513		switch(status & ISQ_EVENT_MASK) {
1514		case ISQ_RECEIVER_EVENT:
1515			/* Got a packet(s). */
1516			net_rx(dev);
1517			break;
1518		case ISQ_TRANSMITTER_EVENT:
1519			dev->stats.tx_packets++;
1520			netif_wake_queue(dev);	/* Inform upper layers. */
			/* Sort the TX status bits into the error counters. */
1521			if ((status & (	TX_OK |
1522					TX_LOST_CRS |
1523					TX_SQE_ERROR |
1524					TX_LATE_COL |
1525					TX_16_COL)) != TX_OK) {
1526				if ((status & TX_OK) == 0)
1527					dev->stats.tx_errors++;
1528				if (status & TX_LOST_CRS)
1529					dev->stats.tx_carrier_errors++;
1530				if (status & TX_SQE_ERROR)
1531					dev->stats.tx_heartbeat_errors++;
1532				if (status & TX_LATE_COL)
1533					dev->stats.tx_window_errors++;
1534				if (status & TX_16_COL)
1535					dev->stats.tx_aborted_errors++;
1536			}
1537			break;
1538		case ISQ_BUFFER_EVENT:
1539			if (status & READY_FOR_TX) {
1540				/* we tried to transmit a packet earlier,
1541                                   but inexplicably ran out of buffers.
1542                                   That shouldn't happen since we only ever
1543                                   load one packet.  Shrug.  Do the right
1544                                   thing anyway. */
1545				netif_wake_queue(dev);	/* Inform upper layers. */
1546			}
1547			if (status & TX_UNDERRUN) {
1548				if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
	/* Progressively back off the "start TX after N bytes" threshold
	 * after repeated underruns. */
1549                                lp->send_underrun++;
1550                                if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
1551                                else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
1552				/* transmit cycle is done, although
1553				   frame wasn't transmitted - this
1554				   avoids having to wait for the upper
1555				   layers to timeout on us, in the
1556				   event of a tx underrun */
1557				netif_wake_queue(dev);	/* Inform upper layers. */
1558                        }
1559#if ALLOW_DMA
1560			if (lp->use_dma && (status & RX_DMA)) {
1561				int count = readreg(dev, PP_DmaFrameCnt);
1562				while(count) {
1563					if (net_debug > 5)
1564						printk("%s: receiving %d DMA frames\n", dev->name, count);
1565					if (net_debug > 2 && count >1)
1566						printk("%s: receiving %d DMA frames\n", dev->name, count);
1567					dma_rx(dev);
	/* Re-read the frame counter once this batch is exhausted in case
	 * more frames arrived while we were copying. */
1568					if (--count == 0)
1569						count = readreg(dev, PP_DmaFrameCnt);
1570					if (net_debug > 2 && count > 0)
1571						printk("%s: continuing with %d DMA frames\n", dev->name, count);
1572				}
1573			}
1574#endif
1575			break;
1576		case ISQ_RX_MISS_EVENT:
			/* Counter value lives in the high bits of the event word. */
1577			dev->stats.rx_missed_errors += (status >> 6);
1578			break;
1579		case ISQ_TX_COL_EVENT:
1580			dev->stats.collisions += (status >> 6);
1581			break;
1582		}
1583	}
1584	return IRQ_RETVAL(handled);
1585}
1586
1587static void
1588count_rx_errors(int status, struct net_device *dev)
1589{
1590 dev->stats.rx_errors++;
1591 if (status & RX_RUNT)
1592 dev->stats.rx_length_errors++;
1593 if (status & RX_EXTRA_DATA)
1594 dev->stats.rx_length_errors++;
1595 if ((status & RX_CRC_ERROR) && !(status & (RX_EXTRA_DATA|RX_RUNT)))
1596 /* per str 172 */
1597 dev->stats.rx_crc_errors++;
1598 if (status & RX_DRIBBLE)
1599 dev->stats.rx_frame_errors++;
1600}
1601
1602/* We have a good packet(s), get it/them out of the buffers. */
/* Pull one received frame out of the chip via PIO and feed it to the
 * network stack.  Status and length are the first two words at
 * RX_FRAME_PORT; errored frames are only counted, not delivered. */
1603static void
1604net_rx(struct net_device *dev)
1605{
1606	struct sk_buff *skb;
1607	int status, length;
1608
1609	int ioaddr = dev->base_addr;
1610	status = readword(ioaddr, RX_FRAME_PORT);
1611	length = readword(ioaddr, RX_FRAME_PORT);
1612
1613	if ((status & RX_OK) == 0) {
1614		count_rx_errors(status, dev);
1615		return;
1616	}
1617
1618	/* Malloc up new buffer. */
1619	skb = dev_alloc_skb(length + 2);
1620	if (skb == NULL) {
1621#if 0		/* Again, this seems a cruel thing to do */
1622		printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n", dev->name);
1623#endif
1624		dev->stats.rx_dropped++;
1625		return;
1626	}
1627	skb_reserve(skb, 2);	/* longword align L3 header */
1628
1629	readwords(ioaddr, RX_FRAME_PORT, skb_put(skb, length), length >> 1);
	/* Odd length: the final byte arrives in the low half of one more
	 * word read; the int-to-byte store keeps only that low byte. */
1630	if (length & 1)
1631		skb->data[length-1] = readword(ioaddr, RX_FRAME_PORT);
1632
1633	if (net_debug > 3) {
1634		printk(	"%s: received %d byte packet of type %x\n",
1635			dev->name, length,
1636			(skb->data[ETH_ALEN+ETH_ALEN] << 8) | skb->data[ETH_ALEN+ETH_ALEN+1]);
1637	}
1638
1639	skb->protocol=eth_type_trans(skb,dev);
1640	netif_rx(skb);
1641	dev->stats.rx_packets++;
1642	dev->stats.rx_bytes += length;
1643}
1644
#if ALLOW_DMA
/* Free the ISA DMA bounce buffer allocated in net_open(), if any. */
static void release_dma_buff(struct net_local *lp)
{
	if (!lp->dma_buff)
		return;

	free_pages((unsigned long)lp->dma_buff,
		   get_order(lp->dmasize * 1024));
	lp->dma_buff = NULL;
}
#endif
1654
1655/* The inverse routine to net_open(). */
/* Bring the interface down: stop the queue, quiesce the chip by
 * zeroing its config registers, then release the IRQ and any DMA
 * resources.  Always returns 0. */
1656static int
1657net_close(struct net_device *dev)
1658{
1659#if ALLOW_DMA
1660	struct net_local *lp = netdev_priv(dev);
1661#endif
1662
1663	netif_stop_queue(dev);
1664
	/* Disable RX/TX/buffer event generation and chip interrupts
	 * before giving the IRQ back. */
1665	writereg(dev, PP_RxCFG, 0);
1666	writereg(dev, PP_TxCFG, 0);
1667	writereg(dev, PP_BufCFG, 0);
1668	writereg(dev, PP_BusCTL, 0);
1669
1670	free_irq(dev->irq, dev);
1671
1672#if ALLOW_DMA
1673	if (lp->use_dma && lp->dma) {
1674		free_dma(dev->dma);
1675		release_dma_buff(lp);
1676	}
1677#endif
1678
1679	/* Update the statistics here. */
1680	return 0;
1681}
1682
1683/* Get the current statistics. This may be called with the card open or
1684 closed. */
1685static struct net_device_stats *
1686net_get_stats(struct net_device *dev)
1687{
1688 struct net_local *lp = netdev_priv(dev);
1689 unsigned long flags;
1690
1691 spin_lock_irqsave(&lp->lock, flags);
1692 /* Update the statistics from the device registers. */
1693 dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
1694 dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
1695 spin_unlock_irqrestore(&lp->lock, flags);
1696
1697 return &dev->stats;
1698}
1699
/* ndo_set_rx_mode: translate dev->flags / the multicast list into the
 * chip's RxCTL accept mask.  Promiscuous mode also enables interrupts
 * on errored frames so they actually get delivered. */
1700static void set_multicast_list(struct net_device *dev)
1701{
1702	struct net_local *lp = netdev_priv(dev);
1703	unsigned long flags;
1704
1705	spin_lock_irqsave(&lp->lock, flags);
1706	if(dev->flags&IFF_PROMISC)
1707	{
1708		lp->rx_mode = RX_ALL_ACCEPT;
1709	}
1710	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
1711	{
1712		/* The multicast-accept list is initialized to accept-all, and we
1713		   rely on higher-level filtering for now. */
1714		lp->rx_mode = RX_MULTCAST_ACCEPT;
1715	}
1716	else
1717		lp->rx_mode = 0;
1718
1719	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
1720
1721	/* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
1722	writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
1723	     (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
1724	spin_unlock_irqrestore(&lp->lock, flags);
1725}
1726
1727
1728static int set_mac_address(struct net_device *dev, void *p)
1729{
1730 int i;
1731 struct sockaddr *addr = p;
1732
1733 if (netif_running(dev))
1734 return -EBUSY;
1735
1736 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1737
1738 if (net_debug)
1739 printk("%s: Setting MAC address to %pM.\n",
1740 dev->name, dev->dev_addr);
1741
1742 /* set the Ethernet address */
1743 for (i=0; i < ETH_ALEN/2; i++)
1744 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
1745
1746 return 0;
1747}
1748
1749#ifdef MODULE
1750
1751static struct net_device *dev_cs89x0;
1752
1753/*
1754 * Support the 'debug' module parm even if we're compiled for non-debug to
1755 * avoid breaking someone's startup scripts
1756 */
1757
1758static int io;
1759static int irq;
1760static int debug;
1761static char media[8];
1762static int duplex=-1;
1763
1764static int use_dma; /* These generate unused var warnings if ALLOW_DMA = 0 */
1765static int dma;
1766static int dmasize=16; /* or 64 */
1767
1768module_param(io, int, 0);
1769module_param(irq, int, 0);
1770module_param(debug, int, 0);
1771module_param_string(media, media, sizeof(media), 0);
1772module_param(duplex, int, 0);
1773module_param(dma , int, 0);
1774module_param(dmasize , int, 0);
1775module_param(use_dma , int, 0);
1776MODULE_PARM_DESC(io, "cs89x0 I/O base address");
1777MODULE_PARM_DESC(irq, "cs89x0 IRQ number");
1778#if DEBUGGING
1779MODULE_PARM_DESC(debug, "cs89x0 debug level (0-6)");
1780#else
1781MODULE_PARM_DESC(debug, "(ignored)");
1782#endif
1783MODULE_PARM_DESC(media, "Set cs89x0 adapter(s) media type(s) (rj45,bnc,aui)");
1784/* No other value than -1 for duplex seems to be currently interpreted */
1785MODULE_PARM_DESC(duplex, "(ignored)");
1786#if ALLOW_DMA
1787MODULE_PARM_DESC(dma , "cs89x0 ISA DMA channel; ignored if use_dma=0");
1788MODULE_PARM_DESC(dmasize , "cs89x0 DMA size in kB (16,64); ignored if use_dma=0");
1789MODULE_PARM_DESC(use_dma , "cs89x0 using DMA (0-1)");
1790#else
1791MODULE_PARM_DESC(dma , "(ignored)");
1792MODULE_PARM_DESC(dmasize , "(ignored)");
1793MODULE_PARM_DESC(use_dma , "(ignored)");
1794#endif
1795
1796MODULE_AUTHOR("Mike Cruse, Russwll Nelson <nelson@crynwr.com>, Andrew Morton");
1797MODULE_LICENSE("GPL");
1798
1799
1800/*
1801* media=t - specify media type
1802 or media=2
1803 or media=aui
 1804   or media=auto
1805* duplex=0 - specify forced half/full/autonegotiate duplex
1806* debug=# - debug level
1807
1808
1809* Default Chip Configuration:
1810 * DMA Burst = enabled
1811 * IOCHRDY Enabled = enabled
1812 * UseSA = enabled
1813 * CS8900 defaults to half-duplex if not specified on command-line
1814 * CS8920 defaults to autoneg if not specified on command-line
1815 * Use reset defaults for other config parameters
1816
1817* Assumptions:
1818 * media type specified is supported (circuitry is present)
1819 * if memory address is > 1MB, then required mem decode hw is present
1820 * if 10B-2, then agent other than driver will enable DC/DC converter
1821 (hw or software util)
1822
1823
1824*/
1825
/* Module entry point: allocate the netdev, validate the module
 * parameters (io is mandatory, no autoprobing), and hand off to
 * cs89x0_probe1().  Returns 0 on success or a negative errno; the
 * netdev is freed on every failure path. */
1826int __init init_module(void)
1827{
1828	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1829	struct net_local *lp;
1830	int ret = 0;
1831
1832#if DEBUGGING
1833	net_debug = debug;
1834#else
1835	debug = 0;
1836#endif
1837	if (!dev)
1838		return -ENOMEM;
1839
1840	dev->irq = irq;
1841	dev->base_addr = io;
1842	lp = netdev_priv(dev);
1843
1844#if ALLOW_DMA
1845	if (use_dma) {
1846		lp->use_dma = use_dma;
1847		lp->dma = dma;
1848		lp->dmasize = dmasize;
1849	}
1850#endif
1851
1852	spin_lock_init(&lp->lock);
1853
	/* Map the "media" string parameter onto the adapter config
	 * bits; anything unrecognized falls back to 10Base-T. */
1854        /* boy, they'd better get these right */
1855	if (!strcmp(media, "rj45"))
1856		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1857	else if (!strcmp(media, "aui"))
1858		lp->adapter_cnf = A_CNF_MEDIA_AUI | A_CNF_AUI;
1859	else if (!strcmp(media, "bnc"))
1860		lp->adapter_cnf = A_CNF_MEDIA_10B_2 | A_CNF_10B_2;
1861	else
1862		lp->adapter_cnf = A_CNF_MEDIA_10B_T | A_CNF_10B_T;
1863
	/* duplex=-1 (the default) means autonegotiate. */
1864        if (duplex==-1)
1865		lp->auto_neg_cnf = AUTO_NEG_ENABLE;
1866
1867        if (io == 0) {
1868		printk(KERN_ERR "cs89x0.c: Module autoprobing not allowed.\n");
1869		printk(KERN_ERR "cs89x0.c: Append io=0xNNN\n");
1870		ret = -EPERM;
1871		goto out;
1872        } else if (io <= 0x1ff) {
1873		ret = -ENXIO;
1874		goto out;
1875	}
1876
1877#if ALLOW_DMA
1878	if (use_dma && dmasize != 16 && dmasize != 64) {
1879		printk(KERN_ERR "cs89x0.c: dma size must be either 16K or 64K, not %dK\n", dmasize);
1880		ret = -EPERM;
1881		goto out;
1882	}
1883#endif
1884	ret = cs89x0_probe1(dev, io, 1);
1885	if (ret)
1886		goto out;
1887
1888	dev_cs89x0 = dev;
1889	return 0;
1890out:
1891	free_netdev(dev);
1892	return ret;
1893}
1894
/* Module exit: unregister the single device, point the chip's address
 * port back at ChipID (NOTE(review): presumably to leave the chip in
 * its probeable state - confirm against the datasheet), then release
 * the I/O region and free the netdev. */
1895void __exit
1896cleanup_module(void)
1897{
1898	unregister_netdev(dev_cs89x0);
1899	writeword(dev_cs89x0->base_addr, ADD_PORT, PP_ChipID);
1900	release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT);
1901	free_netdev(dev_cs89x0);
1902}
1903#endif /* MODULE */
1904
1905/*
1906 * Local variables:
1907 * version-control: t
1908 * kept-new-versions: 5
1909 * c-indent-level: 8
1910 * tab-width: 8
1911 * End:
1912 *
1913 */
diff --git a/drivers/net/ethernet/apple/cs89x0.h b/drivers/net/ethernet/apple/cs89x0.h
new file mode 100644
index 000000000000..91423b70bb45
--- /dev/null
+++ b/drivers/net/ethernet/apple/cs89x0.h
@@ -0,0 +1,465 @@
1/* Copyright, 1988-1992, Russell Nelson, Crynwr Software
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation, version 1.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12 You should have received a copy of the GNU General Public License
13 along with this program; if not, write to the Free Software
14 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
15 */
16
17
18#define PP_ChipID 0x0000 /* offset 0h -> Corp -ID */
19 /* offset 2h -> Model/Product Number */
20 /* offset 3h -> Chip Revision Number */
21
22#define PP_ISAIOB 0x0020 /* IO base address */
23#define PP_CS8900_ISAINT 0x0022 /* ISA interrupt select */
24#define PP_CS8920_ISAINT 0x0370 /* ISA interrupt select */
25#define PP_CS8900_ISADMA 0x0024 /* ISA Rec DMA channel */
26#define PP_CS8920_ISADMA 0x0374 /* ISA Rec DMA channel */
27#define PP_ISASOF 0x0026 /* ISA DMA offset */
28#define PP_DmaFrameCnt 0x0028 /* ISA DMA Frame count */
29#define PP_DmaByteCnt 0x002A /* ISA DMA Byte count */
30#define PP_CS8900_ISAMemB 0x002C /* Memory base */
31#define PP_CS8920_ISAMemB 0x0348 /* */
32
33#define PP_ISABootBase 0x0030 /* Boot Prom base */
34#define PP_ISABootMask 0x0034 /* Boot Prom Mask */
35
36/* EEPROM data and command registers */
37#define PP_EECMD 0x0040 /* NVR Interface Command register */
38#define PP_EEData 0x0042 /* NVR Interface Data Register */
39#define PP_DebugReg 0x0044 /* Debug Register */
40
41#define PP_RxCFG 0x0102 /* Rx Bus config */
42#define PP_RxCTL 0x0104 /* Receive Control Register */
43#define PP_TxCFG 0x0106 /* Transmit Config Register */
44#define PP_TxCMD 0x0108 /* Transmit Command Register */
45#define PP_BufCFG 0x010A /* Bus configuration Register */
46#define PP_LineCTL 0x0112 /* Line Config Register */
47#define PP_SelfCTL 0x0114 /* Self Command Register */
48#define PP_BusCTL 0x0116 /* ISA bus control Register */
49#define PP_TestCTL 0x0118 /* Test Register */
50#define PP_AutoNegCTL 0x011C /* Auto Negotiation Ctrl */
51
52#define PP_ISQ 0x0120 /* Interrupt Status */
53#define PP_RxEvent 0x0124 /* Rx Event Register */
54#define PP_TxEvent 0x0128 /* Tx Event Register */
55#define PP_BufEvent 0x012C /* Bus Event Register */
56#define PP_RxMiss 0x0130 /* Receive Miss Count */
57#define PP_TxCol 0x0132 /* Transmit Collision Count */
58#define PP_LineST 0x0134 /* Line State Register */
59#define PP_SelfST 0x0136 /* Self State register */
60#define PP_BusST 0x0138 /* Bus Status */
61#define PP_TDR 0x013C /* Time Domain Reflectometry */
62#define PP_AutoNegST 0x013E /* Auto Neg Status */
63#define PP_TxCommand 0x0144 /* Tx Command */
64#define PP_TxLength 0x0146 /* Tx Length */
65#define PP_LAF 0x0150 /* Hash Table */
66#define PP_IA 0x0158 /* Physical Address Register */
67
68#define PP_RxStatus 0x0400 /* Receive start of frame */
69#define PP_RxLength 0x0402 /* Receive Length of frame */
70#define PP_RxFrame 0x0404 /* Receive frame pointer */
71#define PP_TxFrame 0x0A00 /* Transmit frame pointer */
72
73/* Primary I/O Base Address. If no I/O base is supplied by the user, then this */
74/* can be used as the default I/O base to access the PacketPage Area. */
75#define DEFAULTIOBASE 0x0300
76#define FIRST_IO 0x020C /* First I/O port to check */
77#define LAST_IO 0x037C /* Last I/O port to check (+10h) */
78#define ADD_MASK 0x3000 /* Mask it use of the ADD_PORT register */
79#define ADD_SIG 0x3000 /* Expected ID signature */
80
81/* On Macs, we only need use the ISA I/O stuff until we do MEMORY_ON */
82#ifdef CONFIG_MAC
83#define LCSLOTBASE 0xfee00000
84#define MMIOBASE 0x40000
85#endif
86
87#define CHIP_EISA_ID_SIG 0x630E /* Product ID Code for Crystal Chip (CS8900 spec 4.3) */
88#define CHIP_EISA_ID_SIG_STR "0x630E"
89
90#ifdef IBMEIPKT
91#define EISA_ID_SIG 0x4D24 /* IBM */
92#define PART_NO_SIG 0x1010 /* IBM */
93#define MONGOOSE_BIT 0x0000 /* IBM */
94#else
95#define EISA_ID_SIG 0x630E /* PnP Vendor ID (same as chip id for Crystal board) */
96#define PART_NO_SIG 0x4000 /* ID code CS8920 board (PnP Vendor Product code) */
97#define MONGOOSE_BIT 0x2000 /* PART_NO_SIG + MONGOOSE_BUT => ID of mongoose */
98#endif
99
100#define PRODUCT_ID_ADD 0x0002 /* Address of product ID */
101
102/* Mask to find out the types of registers */
103#define REG_TYPE_MASK 0x001F
104
105/* Eeprom Commands */
106#define ERSE_WR_ENBL 0x00F0
107#define ERSE_WR_DISABLE 0x0000
108
109/* Defines Control/Config register quintuplet numbers */
110#define RX_BUF_CFG 0x0003
111#define RX_CONTROL 0x0005
112#define TX_CFG 0x0007
113#define TX_COMMAND 0x0009
114#define BUF_CFG 0x000B
115#define LINE_CONTROL 0x0013
116#define SELF_CONTROL 0x0015
117#define BUS_CONTROL 0x0017
118#define TEST_CONTROL 0x0019
119
120/* Defines Status/Count registers quintuplet numbers */
121#define RX_EVENT 0x0004
122#define TX_EVENT 0x0008
123#define BUF_EVENT 0x000C
124#define RX_MISS_COUNT 0x0010
125#define TX_COL_COUNT 0x0012
126#define LINE_STATUS 0x0014
127#define SELF_STATUS 0x0016
128#define BUS_STATUS 0x0018
129#define TDR 0x001C
130
131/* PP_RxCFG - Receive Configuration and Interrupt Mask bit definition - Read/write */
132#define SKIP_1 0x0040
133#define RX_STREAM_ENBL 0x0080
134#define RX_OK_ENBL 0x0100
135#define RX_DMA_ONLY 0x0200
136#define AUTO_RX_DMA 0x0400
137#define BUFFER_CRC 0x0800
138#define RX_CRC_ERROR_ENBL 0x1000
139#define RX_RUNT_ENBL 0x2000
140#define RX_EXTRA_DATA_ENBL 0x4000
141
142/* PP_RxCTL - Receive Control bit definition - Read/write */
143#define RX_IA_HASH_ACCEPT 0x0040
144#define RX_PROM_ACCEPT 0x0080
145#define RX_OK_ACCEPT 0x0100
146#define RX_MULTCAST_ACCEPT 0x0200
147#define RX_IA_ACCEPT 0x0400
148#define RX_BROADCAST_ACCEPT 0x0800
149#define RX_BAD_CRC_ACCEPT 0x1000
150#define RX_RUNT_ACCEPT 0x2000
151#define RX_EXTRA_DATA_ACCEPT 0x4000
152#define RX_ALL_ACCEPT (RX_PROM_ACCEPT|RX_BAD_CRC_ACCEPT|RX_RUNT_ACCEPT|RX_EXTRA_DATA_ACCEPT)
153/* Default receive mode - individually addressed, broadcast, and error free */
154#define DEF_RX_ACCEPT (RX_IA_ACCEPT | RX_BROADCAST_ACCEPT | RX_OK_ACCEPT)
155
156/* PP_TxCFG - Transmit Configuration Interrupt Mask bit definition - Read/write */
157#define TX_LOST_CRS_ENBL 0x0040
158#define TX_SQE_ERROR_ENBL 0x0080
159#define TX_OK_ENBL 0x0100
160#define TX_LATE_COL_ENBL 0x0200
161#define TX_JBR_ENBL 0x0400
162#define TX_ANY_COL_ENBL 0x0800
163#define TX_16_COL_ENBL 0x8000
164
165/* PP_TxCMD - Transmit Command bit definition - Read-only */
166#define TX_START_4_BYTES 0x0000
167#define TX_START_64_BYTES 0x0040
168#define TX_START_128_BYTES 0x0080
169#define TX_START_ALL_BYTES 0x00C0
170#define TX_FORCE 0x0100
171#define TX_ONE_COL 0x0200
172#define TX_TWO_PART_DEFF_DISABLE 0x0400
173#define TX_NO_CRC 0x1000
174#define TX_RUNT 0x2000
175
176/* PP_BufCFG - Buffer Configuration Interrupt Mask bit definition - Read/write */
177#define GENERATE_SW_INTERRUPT 0x0040
178#define RX_DMA_ENBL 0x0080
179#define READY_FOR_TX_ENBL 0x0100
180#define TX_UNDERRUN_ENBL 0x0200
181#define RX_MISS_ENBL 0x0400
182#define RX_128_BYTE_ENBL 0x0800
183#define TX_COL_COUNT_OVRFLOW_ENBL 0x1000
184#define RX_MISS_COUNT_OVRFLOW_ENBL 0x2000
185#define RX_DEST_MATCH_ENBL 0x8000
186
187/* PP_LineCTL - Line Control bit definition - Read/write */
188#define SERIAL_RX_ON 0x0040
189#define SERIAL_TX_ON 0x0080
190#define AUI_ONLY 0x0100
191#define AUTO_AUI_10BASET 0x0200
192#define MODIFIED_BACKOFF 0x0800
193#define NO_AUTO_POLARITY 0x1000
194#define TWO_PART_DEFDIS 0x2000
195#define LOW_RX_SQUELCH 0x4000
196
197/* PP_SelfCTL - Software Self Control bit definition - Read/write */
198#define POWER_ON_RESET 0x0040
199#define SW_STOP 0x0100
200#define SLEEP_ON 0x0200
201#define AUTO_WAKEUP 0x0400
202#define HCB0_ENBL 0x1000
203#define HCB1_ENBL 0x2000
204#define HCB0 0x4000
205#define HCB1 0x8000
206
207/* PP_BusCTL - ISA Bus Control bit definition - Read/write */
208#define RESET_RX_DMA 0x0040
209#define MEMORY_ON 0x0400
210#define DMA_BURST_MODE 0x0800
211#define IO_CHANNEL_READY_ON 0x1000
212#define RX_DMA_SIZE_64K 0x2000
213#define ENABLE_IRQ 0x8000
214
215/* PP_TestCTL - Test Control bit definition - Read/write */
216#define LINK_OFF 0x0080
217#define ENDEC_LOOPBACK 0x0200
218#define AUI_LOOPBACK 0x0400
219#define BACKOFF_OFF 0x0800
220#define FDX_8900 0x4000
221#define FAST_TEST 0x8000
222
223/* PP_RxEvent - Receive Event Bit definition - Read-only */
224#define RX_IA_HASHED 0x0040
225#define RX_DRIBBLE 0x0080
226#define RX_OK 0x0100
227#define RX_HASHED 0x0200
228#define RX_IA 0x0400
229#define RX_BROADCAST 0x0800
230#define RX_CRC_ERROR 0x1000
231#define RX_RUNT 0x2000
232#define RX_EXTRA_DATA 0x4000
233
234#define HASH_INDEX_MASK 0x0FC00
235
236/* PP_TxEvent - Transmit Event Bit definition - Read-only */
237#define TX_LOST_CRS 0x0040
238#define TX_SQE_ERROR 0x0080
239#define TX_OK 0x0100
240#define TX_LATE_COL 0x0200
241#define TX_JBR 0x0400
242#define TX_16_COL 0x8000
243#define TX_SEND_OK_BITS (TX_OK|TX_LOST_CRS)
244#define TX_COL_COUNT_MASK 0x7800
245
246/* PP_BufEvent - Buffer Event Bit definition - Read-only */
247#define SW_INTERRUPT 0x0040
248#define RX_DMA 0x0080
249#define READY_FOR_TX 0x0100
250#define TX_UNDERRUN 0x0200
251#define RX_MISS 0x0400
252#define RX_128_BYTE 0x0800
253#define TX_COL_OVRFLW 0x1000
254#define RX_MISS_OVRFLW 0x2000
255#define RX_DEST_MATCH 0x8000
256
257/* PP_LineST - Ethernet Line Status bit definition - Read-only */
258#define LINK_OK 0x0080
259#define AUI_ON 0x0100
260#define TENBASET_ON 0x0200
261#define POLARITY_OK 0x1000
262#define CRS_OK 0x4000
263
264/* PP_SelfST - Chip Software Status bit definition */
265#define ACTIVE_33V 0x0040
266#define INIT_DONE 0x0080
267#define SI_BUSY 0x0100
268#define EEPROM_PRESENT 0x0200
269#define EEPROM_OK 0x0400
270#define EL_PRESENT 0x0800
271#define EE_SIZE_64 0x1000
272
273/* PP_BusST - ISA Bus Status bit definition */
274#define TX_BID_ERROR 0x0080
275#define READY_FOR_TX_NOW 0x0100
276
277/* PP_AutoNegCTL - Auto Negotiation Control bit definition */
278#define RE_NEG_NOW 0x0040
279#define ALLOW_FDX 0x0080
280#define AUTO_NEG_ENABLE 0x0100
281#define NLP_ENABLE 0x0200
282#define FORCE_FDX 0x8000
283#define AUTO_NEG_BITS (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE)
284#define AUTO_NEG_MASK (FORCE_FDX|NLP_ENABLE|AUTO_NEG_ENABLE|ALLOW_FDX|RE_NEG_NOW)
285
286/* PP_AutoNegST - Auto Negotiation Status bit definition */
287#define AUTO_NEG_BUSY 0x0080
288#define FLP_LINK 0x0100
289#define FLP_LINK_GOOD 0x0800
290#define LINK_FAULT 0x1000
291#define HDX_ACTIVE 0x4000
292#define FDX_ACTIVE 0x8000
293
294/* The following block defines the ISQ event types */
295#define ISQ_RECEIVER_EVENT 0x04
296#define ISQ_TRANSMITTER_EVENT 0x08
297#define ISQ_BUFFER_EVENT 0x0c
298#define ISQ_RX_MISS_EVENT 0x10
299#define ISQ_TX_COL_EVENT 0x12
300
301#define ISQ_EVENT_MASK 0x003F /* ISQ mask to find out type of event */
302#define ISQ_HIST 16 /* small history buffer */
303#define AUTOINCREMENT 0x8000 /* Bit mask to set bit-15 for autoincrement */
304
305#define TXRXBUFSIZE 0x0600
306#define RXDMABUFSIZE 0x8000
307#define RXDMASIZE 0x4000
308#define TXRX_LENGTH_MASK 0x07FF
309
310/* rx options bits */
311#define RCV_WITH_RXON 1 /* Set SerRx ON */
312#define RCV_COUNTS 2 /* Use Framecnt1 */
313#define RCV_PONG 4 /* Pong respondent */
314#define RCV_DONG 8 /* Dong operation */
315#define RCV_POLLING 0x10 /* Poll RxEvent */
316#define RCV_ISQ 0x20 /* Use ISQ, int */
317#define RCV_AUTO_DMA 0x100 /* Set AutoRxDMAE */
318#define RCV_DMA 0x200 /* Set RxDMA only */
319#define RCV_DMA_ALL 0x400 /* Copy all DMA'ed */
320#define RCV_FIXED_DATA 0x800 /* Every frame same */
321#define RCV_IO 0x1000 /* Use ISA IO only */
322#define RCV_MEMORY 0x2000 /* Use ISA Memory */
323
324#define RAM_SIZE 0x1000 /* The card has 4k bytes or RAM */
325#define PKT_START PP_TxFrame /* Start of packet RAM */
326
327#define RX_FRAME_PORT 0x0000
328#define TX_FRAME_PORT RX_FRAME_PORT
329#define TX_CMD_PORT 0x0004
330#define TX_NOW 0x0000 /* Tx packet after 5 bytes copied */
331#define TX_AFTER_381 0x0040 /* Tx packet after 381 bytes copied */
332#define TX_AFTER_ALL 0x00c0 /* Tx packet after all bytes copied */
333#define TX_LEN_PORT 0x0006
334#define ISQ_PORT 0x0008
335#define ADD_PORT 0x000A
336#define DATA_PORT 0x000C
337
338#define EEPROM_WRITE_EN 0x00F0
339#define EEPROM_WRITE_DIS 0x0000
340#define EEPROM_WRITE_CMD 0x0100
341#define EEPROM_READ_CMD 0x0200
342
343/* Receive Header */
344/* Description of header of each packet in receive area of memory */
345#define RBUF_EVENT_LOW 0 /* Low byte of RxEvent - status of received frame */
346#define RBUF_EVENT_HIGH 1 /* High byte of RxEvent - status of received frame */
347#define RBUF_LEN_LOW 2 /* Length of received data - low byte */
348#define RBUF_LEN_HI 3 /* Length of received data - high byte */
349#define RBUF_HEAD_LEN 4 /* Length of this header */
350
351#define CHIP_READ 0x1 /* Used to mark state of the repins code (chip or dma) */
352#define DMA_READ 0x2 /* Used to mark state of the repins code (chip or dma) */
353
354/* for bios scan */
355/* */
356#ifdef CSDEBUG
357/* use these values for debugging bios scan */
358#define BIOS_START_SEG 0x00000
359#define BIOS_OFFSET_INC 0x0010
360#else
361#define BIOS_START_SEG 0x0c000
362#define BIOS_OFFSET_INC 0x0200
363#endif
364
365#define BIOS_LAST_OFFSET 0x0fc00
366
367/* Byte offsets into the EEPROM configuration buffer */
368#define ISA_CNF_OFFSET 0x6
369#define TX_CTL_OFFSET (ISA_CNF_OFFSET + 8) /* 8900 eeprom */
370#define AUTO_NEG_CNF_OFFSET (ISA_CNF_OFFSET + 8) /* 8920 eeprom */
371
372 /* the assumption here is that the bits in the eeprom are generally */
373 /* in the same position as those in the autonegctl register. */
374 /* Of course the IMM bit is not in that register so it must be */
375 /* masked out */
376#define EE_FORCE_FDX 0x8000
377#define EE_NLP_ENABLE 0x0200
378#define EE_AUTO_NEG_ENABLE 0x0100
379#define EE_ALLOW_FDX 0x0080
380#define EE_AUTO_NEG_CNF_MASK (EE_FORCE_FDX|EE_NLP_ENABLE|EE_AUTO_NEG_ENABLE|EE_ALLOW_FDX)
381
382#define IMM_BIT 0x0040 /* ignore missing media */
383
384#define ADAPTER_CNF_OFFSET (AUTO_NEG_CNF_OFFSET + 2)
385#define A_CNF_10B_T 0x0001
386#define A_CNF_AUI 0x0002
387#define A_CNF_10B_2 0x0004
388#define A_CNF_MEDIA_TYPE 0x0070
389#define A_CNF_MEDIA_AUTO 0x0070
390#define A_CNF_MEDIA_10B_T 0x0020
391#define A_CNF_MEDIA_AUI 0x0040
392#define A_CNF_MEDIA_10B_2 0x0010
393#define A_CNF_DC_DC_POLARITY 0x0080
394#define A_CNF_NO_AUTO_POLARITY 0x2000
395#define A_CNF_LOW_RX_SQUELCH 0x4000
396#define A_CNF_EXTND_10B_2 0x8000
397
398#define PACKET_PAGE_OFFSET 0x8
399
400/* Bit definitions for the ISA configuration word from the EEPROM */
401#define INT_NO_MASK 0x000F
402#define DMA_NO_MASK 0x0070
403#define ISA_DMA_SIZE 0x0200
404#define ISA_AUTO_RxDMA 0x0400
405#define ISA_RxDMA 0x0800
406#define DMA_BURST 0x1000
407#define STREAM_TRANSFER 0x2000
408#define ANY_ISA_DMA (ISA_AUTO_RxDMA | ISA_RxDMA)
409
410/* DMA controller registers */
411#define DMA_BASE 0x00 /* DMA controller base */
412#define DMA_BASE_2 0x0C0 /* DMA controller base */
413
414#define DMA_STAT 0x0D0 /* DMA controller status register */
415#define DMA_MASK 0x0D4 /* DMA controller mask register */
416#define DMA_MODE 0x0D6 /* DMA controller mode register */
417#define DMA_RESETFF 0x0D8 /* DMA controller first/last flip flop */
418
419/* DMA data */
420#define DMA_DISABLE 0x04 /* Disable channel n */
421#define DMA_ENABLE 0x00 /* Enable channel n */
422/* Demand transfers, incr. address, auto init, writes, ch. n */
423#define DMA_RX_MODE 0x14
424/* Demand transfers, incr. address, auto init, reads, ch. n */
425#define DMA_TX_MODE 0x18
426
427#define DMA_SIZE (16*1024) /* Size of dma buffer - 16k */
428
429#define CS8900 0x0000
430#define CS8920 0x4000
431#define CS8920M 0x6000
432#define REVISON_BITS 0x1F00
433#define EEVER_NUMBER 0x12
434#define CHKSUM_LEN 0x14
435#define CHKSUM_VAL 0x0000
436#define START_EEPROM_DATA 0x001c /* Offset into eeprom for start of data */
437#define IRQ_MAP_EEPROM_DATA 0x0046 /* Offset into eeprom for the IRQ map */
438#define IRQ_MAP_LEN 0x0004 /* No of bytes to read for the IRQ map */
439#define PNP_IRQ_FRMT 0x0022 /* PNP small item IRQ format */
440#define CS8900_IRQ_MAP 0x1c20 /* This IRQ map is fixed */
441
442#define CS8920_NO_INTS 0x0F /* Max CS8920 interrupt select # */
443
444#define PNP_ADD_PORT 0x0279
445#define PNP_WRITE_PORT 0x0A79
446
447#define GET_PNP_ISA_STRUCT 0x40
448#define PNP_ISA_STRUCT_LEN 0x06
449#define PNP_CSN_CNT_OFF 0x01
450#define PNP_RD_PORT_OFF 0x02
451#define PNP_FUNCTION_OK 0x00
452#define PNP_WAKE 0x03
453#define PNP_RSRC_DATA 0x04
454#define PNP_RSRC_READY 0x01
455#define PNP_STATUS 0x05
456#define PNP_ACTIVATE 0x30
457#define PNP_CNF_IO_H 0x60
458#define PNP_CNF_IO_L 0x61
459#define PNP_CNF_INT 0x70
460#define PNP_CNF_DMA 0x74
461#define PNP_CNF_MEM 0x48
462
463#define BIT0 1
464#define BIT15 0x8000
465
diff --git a/drivers/net/ethernet/apple/mac89x0.c b/drivers/net/ethernet/apple/mac89x0.c
new file mode 100644
index 000000000000..669b317974a8
--- /dev/null
+++ b/drivers/net/ethernet/apple/mac89x0.c
@@ -0,0 +1,634 @@
1/* mac89x0.c: A Crystal Semiconductor CS89[02]0 driver for linux. */
2/*
3 Written 1996 by Russell Nelson, with reference to skeleton.c
4 written 1993-1994 by Donald Becker.
5
6 This software may be used and distributed according to the terms
7 of the GNU General Public License, incorporated herein by reference.
8
9 The author may be reached at nelson@crynwr.com, Crynwr
10 Software, 11 Grant St., Potsdam, NY 13676
11
12 Changelog:
13
14 Mike Cruse : mcruse@cti-ltd.com
15 : Changes for Linux 2.0 compatibility.
16 : Added dev_id parameter in net_interrupt(),
17 : request_irq() and free_irq(). Just NULL for now.
18
19 Mike Cruse : Added MOD_INC_USE_COUNT and MOD_DEC_USE_COUNT macros
20 : in net_open() and net_close() so kerneld would know
21 : that the module is in use and wouldn't eject the
22 : driver prematurely.
23
24 Mike Cruse : Rewrote init_module() and cleanup_module using 8390.c
25 : as an example. Disabled autoprobing in init_module(),
26 : not a good thing to do to other devices while Linux
27 : is running from all accounts.
28
29 Alan Cox : Removed 1.2 support, added 2.1 extra counters.
30
31 David Huggins-Daines <dhd@debian.org>
32
33 Split this off into mac89x0.c, and gutted it of all parts which are
34 not relevant to the existing CS8900 cards on the Macintosh
35 (i.e. basically the Daynaport CS and LC cards). To be precise:
36
37 * Removed all the media-detection stuff, because these cards are
38 TP-only.
39
40 * Lobotomized the ISA interrupt bogosity, because these cards use
41 a hardwired NuBus interrupt and a magic ISAIRQ value in the card.
42
43 * Basically eliminated everything not relevant to getting the
44 cards minimally functioning on the Macintosh.
45
46 I might add that these cards are badly designed even from the Mac
47 standpoint, in that Dayna, in their infinite wisdom, used NuBus slot
48 I/O space and NuBus interrupts for these cards, but neglected to
49 provide anything even remotely resembling a NuBus ROM. Therefore we
50 have to probe for them in a brain-damaged ISA-like fashion.
51
52 Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001
53 check kmalloc and release the allocated memory on failure in
54 mac89x0_probe and in init_module
55 use local_irq_{save,restore}(flags) in net_get_stat, not just
56 local_irq_{dis,en}able()
57*/
58
59static char *version =
60"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
61
62/* ======================= configure the driver here ======================= */
63
64/* use 0 for production, 1 for verification, >2 for debug */
65#ifndef NET_DEBUG
66#define NET_DEBUG 0
67#endif
68
69/* ======================= end of configuration ======================= */
70
71
72/* Always include 'config.h' first in case the user wants to turn on
73 or override something. */
74#include <linux/module.h>
75
76/*
77 Sources:
78
79 Crynwr packet driver epktisa.
80
81 Crystal Semiconductor data sheets.
82
83*/
84
85#include <linux/kernel.h>
86#include <linux/types.h>
87#include <linux/fcntl.h>
88#include <linux/interrupt.h>
89#include <linux/ioport.h>
90#include <linux/in.h>
91#include <linux/string.h>
92#include <linux/nubus.h>
93#include <linux/errno.h>
94#include <linux/init.h>
95#include <linux/netdevice.h>
96#include <linux/etherdevice.h>
97#include <linux/skbuff.h>
98#include <linux/delay.h>
99#include <linux/bitops.h>
100#include <linux/gfp.h>
101
102#include <asm/system.h>
103#include <asm/io.h>
104#include <asm/hwtest.h>
105#include <asm/macints.h>
106
107#include "cs89x0.h"
108
109static unsigned int net_debug = NET_DEBUG;
110
111/* Information that need to be kept for each board. */
112struct net_local {
113 int chip_type; /* one of: CS8900, CS8920, CS8920M */
114 char chip_revision; /* revision letter of the chip ('A'...) */
115 int send_cmd; /* the propercommand used to send a packet. */
116 int rx_mode;
117 int curr_rx_cfg;
118 int send_underrun; /* keep track of how many underruns in a row we get */
119 struct sk_buff *skb;
120};
121
122/* Index to functions, as function prototypes. */
123
124#if 0
125extern void reset_chip(struct net_device *dev);
126#endif
127static int net_open(struct net_device *dev);
128static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
129static irqreturn_t net_interrupt(int irq, void *dev_id);
130static void set_multicast_list(struct net_device *dev);
131static void net_rx(struct net_device *dev);
132static int net_close(struct net_device *dev);
133static struct net_device_stats *net_get_stats(struct net_device *dev);
134static int set_mac_address(struct net_device *dev, void *addr);
135
136
137/* Example routines you must write ;->. */
138#define tx_done(dev) 1
139
140/* For reading/writing registers ISA-style */
141static inline int
142readreg_io(struct net_device *dev, int portno)
143{
144 nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
145 return swab16(nubus_readw(dev->base_addr + DATA_PORT));
146}
147
148static inline void
149writereg_io(struct net_device *dev, int portno, int value)
150{
151 nubus_writew(swab16(portno), dev->base_addr + ADD_PORT);
152 nubus_writew(swab16(value), dev->base_addr + DATA_PORT);
153}
154
155/* These are for reading/writing registers in shared memory */
156static inline int
157readreg(struct net_device *dev, int portno)
158{
159 return swab16(nubus_readw(dev->mem_start + portno));
160}
161
162static inline void
163writereg(struct net_device *dev, int portno, int value)
164{
165 nubus_writew(swab16(value), dev->mem_start + portno);
166}
167
/* Standard net_device callbacks for the Daynaport CS8900/CS8920 card.
   Note there is no .ndo_tx_timeout / .ndo_do_ioctl: this Mac variant of
   the driver only implements the minimal set. */
static const struct net_device_ops mac89x0_netdev_ops = {
	.ndo_open		= net_open,
	.ndo_stop		= net_close,
	.ndo_start_xmit		= net_send_packet,
	.ndo_get_stats		= net_get_stats,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_set_mac_address	= set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
178
179/* Probe for the CS8900 card in slot E. We won't bother looking
180 anywhere else until we have a really good reason to do so. */
/* Probe for the CS8900 card in slot E.  We won't bother looking
   anywhere else until we have a really good reason to do so.

   On failure returns an ERR_PTR.  On success the device has been
   registered and NULL is returned.
   NOTE(review): returning NULL rather than `dev` on success looks
   deliberate (the old Space.c probe loop only checks IS_ERR), but
   verify against the actual caller before relying on it.

   Only the first call does any work: `once_is_enough` makes every
   subsequent call fail with -ENODEV. */
struct net_device * __init mac89x0_probe(int unit)
{
	struct net_device *dev;
	static int once_is_enough;
	struct net_local *lp;
	static unsigned version_printed;
	int i, slot;
	unsigned rev_type = 0;
	unsigned long ioaddr;
	unsigned short sig;
	int err = -ENODEV;

	if (!MACH_IS_MAC)
		return ERR_PTR(-ENODEV);

	dev = alloc_etherdev(sizeof(struct net_local));
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/* Honor an "ethN=..." boot-time name/parameter assignment. */
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	if (once_is_enough)
		goto out;
	once_is_enough = 1;

	/* We might have to parameterize this later */
	slot = 0xE;
	/* Get out now if there's a real NuBus card in slot E */
	if (nubus_find_slot(slot, NULL) != NULL)
		goto out;

	/* The pseudo-ISA bits always live at offset 0x300 (gee,
	   wonder why...) */
	ioaddr = (unsigned long)
		nubus_slot_addr(slot) | (((slot&0xf) << 20) + DEFAULTIOBASE);
	{
		unsigned long flags;
		int card_present;

		/* hwreg_present() probes possibly-absent hardware, so
		   keep interrupts off while we poke at the addresses. */
		local_irq_save(flags);
		card_present = (hwreg_present((void*) ioaddr+4) &&
				hwreg_present((void*) ioaddr + DATA_PORT));
		local_irq_restore(flags);

		if (!card_present)
			goto out;
	}

	/* Select register 0 (chip ID) and check the EISA signature;
	   the raw readw is compared against the byte-swapped constant
	   because the chip is little-endian behind a big-endian bus. */
	nubus_writew(0, ioaddr + ADD_PORT);
	sig = nubus_readw(ioaddr + DATA_PORT);
	if (sig != swab16(CHIP_EISA_ID_SIG))
		goto out;

	/* Initialize the net_device structure. */
	lp = netdev_priv(dev);

	/* Fill in the 'dev' fields. */
	dev->base_addr = ioaddr;
	dev->mem_start = (unsigned long)
		nubus_slot_addr(slot) | (((slot&0xf) << 20) + MMIOBASE);
	dev->mem_end = dev->mem_start + 0x1000;

	/* Turn on shared memory; after this, readreg()/writereg() (the
	   memory-window accessors) become usable. */
	writereg_io(dev, PP_BusCTL, MEMORY_ON);

	/* get the chip type */
	rev_type = readreg(dev, PRODUCT_ID_ADD);
	lp->chip_type = rev_type &~ REVISON_BITS;
	lp->chip_revision = ((rev_type & REVISON_BITS) >> 8) + 'A';

	/* Check the chip type and revision in order to set the correct send command
	   CS8920 revision C and CS8900 revision F can use the faster send. */
	lp->send_cmd = TX_AFTER_381;
	if (lp->chip_type == CS8900 && lp->chip_revision >= 'F')
		lp->send_cmd = TX_NOW;
	if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
		lp->send_cmd = TX_NOW;

	if (net_debug && version_printed++ == 0)
		printk(version);

	printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx",
	       dev->name,
	       lp->chip_type==CS8900?'0':'2',
	       lp->chip_type==CS8920M?"M":"",
	       lp->chip_revision,
	       dev->base_addr);

	/* Try to read the MAC address */
	if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
		printk("\nmac89x0: No EEPROM, giving up now.\n");
		goto out1;
	} else {
		for (i = 0; i < ETH_ALEN; i += 2) {
			/* Big-endian (why??!) */
			unsigned short s = readreg(dev, PP_IA + i);
			dev->dev_addr[i] = s >> 8;
			dev->dev_addr[i+1] = s & 0xff;
		}
	}

	/* NuBus slot number maps directly to the IRQ line. */
	dev->irq = SLOT2IRQ(slot);

	/* print the IRQ and ethernet address. */

	printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);

	dev->netdev_ops		= &mac89x0_netdev_ops;

	err = register_netdev(dev);
	if (err)
		goto out1;
	return NULL;
out1:
	/* Reset ADD_PORT so the chip is left in a known, parked state. */
	nubus_writew(0, dev->base_addr + ADD_PORT);
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
303
#if 0
/* This is useful for something, but I don't know what yet. */
/* Dead code: issue a software power-on reset via PP_SelfCTL, then spin
   until the chip reports INIT_DONE or ~2 jiffies elapse.
   NOTE(review): reset_start_time is `int` while jiffies is
   `unsigned long`; the `jiffies - reset_start_time < 2` comparison can
   misbehave near wraparound — harmless only because this is #if 0'd. */
void __init reset_chip(struct net_device *dev)
{
	int reset_start_time;

	writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);

	/* wait 30 ms */
	msleep_interruptible(30);

	/* Wait until the chip is reset */
	reset_start_time = jiffies;
	while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
		;
}
#endif
321
322/* Open/initialize the board. This is called (in the current kernel)
323 sometime after booting when the 'ifconfig' program is run.
324
325 This routine should set everything up anew at each open, even
326 registers that "should" only need to be set once at boot, so that
327 there is non-reboot way to recover if something goes wrong.
328 */
/* Open/initialize the board.  Programs the full register set from
   scratch on every open (IRQ routing, station address, RX/TX enables,
   event masks), so a close/open cycle fully reinitializes the chip.
   Returns 0 on success or -EAGAIN if the IRQ line cannot be grabbed.
   The register-write ordering below matters: interrupts are masked
   first and only re-enabled as the very last step. */
static int
net_open(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	int i;

	/* Disable the interrupt for now */
	writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);

	/* Grab the interrupt */
	if (request_irq(dev->irq, net_interrupt, 0, "cs89x0", dev))
		return -EAGAIN;

	/* Set up the IRQ - Apparently magic */
	if (lp->chip_type == CS8900)
		writereg(dev, PP_CS8900_ISAINT, 0);
	else
		writereg(dev, PP_CS8920_ISAINT, 0);

	/* set the Ethernet address: two bytes per 16-bit IA register,
	   low byte first (matches how probe read them back out). */
	for (i=0; i < ETH_ALEN/2; i++)
		writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));

	/* Turn on both receive and transmit operations */
	writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) | SERIAL_RX_ON | SERIAL_TX_ON);

	/* Receive only error free packets addressed to this card */
	lp->rx_mode = 0;
	writereg(dev, PP_RxCTL, DEF_RX_ACCEPT);

	lp->curr_rx_cfg = RX_OK_ENBL | RX_CRC_ERROR_ENBL;

	writereg(dev, PP_RxCFG, lp->curr_rx_cfg);

	/* Unmask every TX completion/error event we account for in
	   net_interrupt(). */
	writereg(dev, PP_TxCFG, TX_LOST_CRS_ENBL | TX_SQE_ERROR_ENBL | TX_OK_ENBL |
	       TX_LATE_COL_ENBL | TX_JBR_ENBL | TX_ANY_COL_ENBL | TX_16_COL_ENBL);

	writereg(dev, PP_BufCFG, READY_FOR_TX_ENBL | RX_MISS_COUNT_OVRFLOW_ENBL |
		 TX_COL_COUNT_OVRFLOW_ENBL | TX_UNDERRUN_ENBL);

	/* now that we've got our act together, enable everything */
	writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) | ENABLE_IRQ);
	netif_start_queue(dev);
	return 0;
}
374
/* Transmit one packet.  The chip is told the length and asked to start
   (lp->send_cmd may let it begin transmitting before the upload
   completes), then the frame is copied into the shared-memory TX
   window.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY to requeue when the
   chip has no buffer space (the queue stays stopped until the
   ISQ_BUFFER_EVENT/READY_FOR_TX path in net_interrupt() wakes it). */
static int
net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	unsigned long flags;

	if (net_debug > 3)
		printk("%s: sent %d byte packet of type %x\n",
		       dev->name, skb->len,
		       (skb->data[ETH_ALEN+ETH_ALEN] << 8)
		       | skb->data[ETH_ALEN+ETH_ALEN+1]);

	/* keep the upload from being interrupted, since we
	   ask the chip to start transmitting before the
	   whole packet has been completely uploaded. */
	local_irq_save(flags);
	netif_stop_queue(dev);

	/* initiate a transmit sequence */
	writereg(dev, PP_TxCMD, lp->send_cmd);
	writereg(dev, PP_TxLength, skb->len);

	/* Test to see if the chip has allocated memory for the packet */
	if ((readreg(dev, PP_BusST) & READY_FOR_TX_NOW) == 0) {
		/* Gasp!  It hasn't. But that shouldn't happen since
		   we're waiting for TxOk, so return 1 and requeue this packet. */
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}

	/* Write the contents of the packet */
	/* NOTE(review): copies skb->len+1 bytes — presumably rounding odd
	   lengths up for 16-bit-wide writes into the window, but it reads
	   one byte past the skb's nominal data length (safe only thanks
	   to skb tailroom).  Confirm before changing. */
	skb_copy_from_linear_data(skb, (void *)(dev->mem_start + PP_TxFrame),
				  skb->len+1);

	local_irq_restore(flags);
	dev_kfree_skb (skb);

	return NETDEV_TX_OK;
}
414
415/* The typical workload of the driver:
416 Handle the network interface interrupts. */
417static irqreturn_t net_interrupt(int irq, void *dev_id)
418{
419 struct net_device *dev = dev_id;
420 struct net_local *lp;
421 int ioaddr, status;
422
423 if (dev == NULL) {
424 printk ("net_interrupt(): irq %d for unknown device.\n", irq);
425 return IRQ_NONE;
426 }
427
428 ioaddr = dev->base_addr;
429 lp = netdev_priv(dev);
430
431 /* we MUST read all the events out of the ISQ, otherwise we'll never
432 get interrupted again. As a consequence, we can't have any limit
433 on the number of times we loop in the interrupt handler. The
434 hardware guarantees that eventually we'll run out of events. Of
435 course, if you're on a slow machine, and packets are arriving
436 faster than you can read them off, you're screwed. Hasta la
437 vista, baby! */
438 while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) {
439 if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
440 switch(status & ISQ_EVENT_MASK) {
441 case ISQ_RECEIVER_EVENT:
442 /* Got a packet(s). */
443 net_rx(dev);
444 break;
445 case ISQ_TRANSMITTER_EVENT:
446 dev->stats.tx_packets++;
447 netif_wake_queue(dev);
448 if ((status & TX_OK) == 0)
449 dev->stats.tx_errors++;
450 if (status & TX_LOST_CRS)
451 dev->stats.tx_carrier_errors++;
452 if (status & TX_SQE_ERROR)
453 dev->stats.tx_heartbeat_errors++;
454 if (status & TX_LATE_COL)
455 dev->stats.tx_window_errors++;
456 if (status & TX_16_COL)
457 dev->stats.tx_aborted_errors++;
458 break;
459 case ISQ_BUFFER_EVENT:
460 if (status & READY_FOR_TX) {
461 /* we tried to transmit a packet earlier,
462 but inexplicably ran out of buffers.
463 That shouldn't happen since we only ever
464 load one packet. Shrug. Do the right
465 thing anyway. */
466 netif_wake_queue(dev);
467 }
468 if (status & TX_UNDERRUN) {
469 if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
470 lp->send_underrun++;
471 if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
472 else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
473 }
474 break;
475 case ISQ_RX_MISS_EVENT:
476 dev->stats.rx_missed_errors += (status >> 6);
477 break;
478 case ISQ_TX_COL_EVENT:
479 dev->stats.collisions += (status >> 6);
480 break;
481 }
482 }
483 return IRQ_HANDLED;
484}
485
/* We have a good packet(s), get it/them out of the buffers.

   Reads the receive status/length registers, accounts any error bits,
   then copies the frame out of the card's shared-memory window into a
   freshly allocated skb and hands it to the stack.  Runs in interrupt
   context (called from net_interrupt), hence GFP_ATOMIC. */
static void
net_rx(struct net_device *dev)
{
	struct sk_buff *skb;
	int status, length;

	status = readreg(dev, PP_RxStatus);
	if ((status & RX_OK) == 0) {
		/* Bad frame: classify the error and bail out. */
		dev->stats.rx_errors++;
		if (status & RX_RUNT)
			dev->stats.rx_length_errors++;
		if (status & RX_EXTRA_DATA)
			dev->stats.rx_length_errors++;
		if ((status & RX_CRC_ERROR) &&
		    !(status & (RX_EXTRA_DATA|RX_RUNT)))
			/* per str 172 */
			dev->stats.rx_crc_errors++;
		if (status & RX_DRIBBLE)
			dev->stats.rx_frame_errors++;
		return;
	}

	length = readreg(dev, PP_RxLength);
	/* Malloc up new buffer. */
	skb = alloc_skb(length, GFP_ATOMIC);
	if (skb == NULL) {
		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
		dev->stats.rx_dropped++;
		return;
	}
	skb_put(skb, length);

	/* Copy the frame out of the card's memory-mapped receive buffer. */
	skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame),
				length);

	/* Debug: dump the EtherType (the two bytes after src+dst MAC). */
	if (net_debug > 3)printk("%s: received %d byte packet of type %x\n",
				 dev->name, length,
				 (skb->data[ETH_ALEN+ETH_ALEN] << 8)
				 | skb->data[ETH_ALEN+ETH_ALEN+1]);

	skb->protocol=eth_type_trans(skb,dev);
	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += length;
}
532
/* The inverse routine to net_open().

   Quiesce the chip by zeroing its receive, transmit, buffer and bus
   configuration registers, stop the transmit queue, and release the
   interrupt line. */
static int
net_close(struct net_device *dev)
{

	writereg(dev, PP_RxCFG, 0);
	writereg(dev, PP_TxCFG, 0);
	writereg(dev, PP_BufCFG, 0);
	writereg(dev, PP_BusCTL, 0);

	netif_stop_queue(dev);

	free_irq(dev->irq, dev);

	/* Update the statistics here. */

	return 0;

}
552
/* Get the current statistics. This may be called with the card open or
   closed.

   The miss/collision counts live in the upper bits of the registers
   (hence the >> 6, matching the ISQ event handling).  Interrupts are
   disabled so we don't race net_interrupt(), which updates the same
   stats fields. */
static struct net_device_stats *
net_get_stats(struct net_device *dev)
{
	unsigned long flags;

	local_irq_save(flags);
	/* Update the statistics from the device registers. */
	dev->stats.rx_missed_errors += (readreg(dev, PP_RxMiss) >> 6);
	dev->stats.collisions += (readreg(dev, PP_TxCol) >> 6);
	local_irq_restore(flags);

	return &dev->stats;
}
568
569static void set_multicast_list(struct net_device *dev)
570{
571 struct net_local *lp = netdev_priv(dev);
572
573 if(dev->flags&IFF_PROMISC)
574 {
575 lp->rx_mode = RX_ALL_ACCEPT;
576 } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
577 /* The multicast-accept list is initialized to accept-all, and we
578 rely on higher-level filtering for now. */
579 lp->rx_mode = RX_MULTCAST_ACCEPT;
580 }
581 else
582 lp->rx_mode = 0;
583
584 writereg(dev, PP_RxCTL, DEF_RX_ACCEPT | lp->rx_mode);
585
586 /* in promiscuous mode, we accept errored packets, so we have to enable interrupts on them also */
587 writereg(dev, PP_RxCFG, lp->curr_rx_cfg |
588 (lp->rx_mode == RX_ALL_ACCEPT? (RX_CRC_ERROR_ENBL|RX_RUNT_ENBL|RX_EXTRA_DATA_ENBL) : 0));
589}
590
591
592static int set_mac_address(struct net_device *dev, void *addr)
593{
594 int i;
595 printk("%s: Setting MAC address to ", dev->name);
596 for (i = 0; i < 6; i++)
597 printk(" %2.2x", dev->dev_addr[i] = ((unsigned char *)addr)[i]);
598 printk(".\n");
599 /* set the Ethernet address */
600 for (i=0; i < ETH_ALEN/2; i++)
601 writereg(dev, PP_IA+i*2, dev->dev_addr[i*2] | (dev->dev_addr[i*2+1] << 8));
602
603 return 0;
604}
605
#ifdef MODULE

/* The single probed device, kept so cleanup_module() can release it. */
static struct net_device *dev_cs89x0;
/* Module-level debug knob, copied into net_debug on load. */
static int debug;

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
MODULE_LICENSE("GPL");

/* Module entry: probe for the (single) on-board CS89x0. */
int __init
init_module(void)
{
	net_debug = debug;
	dev_cs89x0 = mac89x0_probe(-1);
	if (IS_ERR(dev_cs89x0)) {
		printk(KERN_WARNING "mac89x0.c: No card found\n");
		return PTR_ERR(dev_cs89x0);
	}
	return 0;
}

/* Module exit: unregister the device and release it.  The ADD_PORT
   write presumably parks/disables the chip's register window —
   NOTE(review): confirm against the CS8900 datasheet. */
void
cleanup_module(void)
{
	unregister_netdev(dev_cs89x0);
	nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT);
	free_netdev(dev_cs89x0);
}
#endif /* MODULE */
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
new file mode 100644
index 000000000000..2074e9724ba3
--- /dev/null
+++ b/drivers/net/ethernet/apple/mace.c
@@ -0,0 +1,1031 @@
1/*
2 * Network device driver for the MACE ethernet controller on
3 * Apple Powermacs. Assumes it's under a DBDMA controller.
4 *
5 * Copyright (C) 1996 Paul Mackerras.
6 */
7
8#include <linux/module.h>
9#include <linux/kernel.h>
10#include <linux/netdevice.h>
11#include <linux/etherdevice.h>
12#include <linux/delay.h>
13#include <linux/string.h>
14#include <linux/timer.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/crc32.h>
18#include <linux/spinlock.h>
19#include <linux/bitrev.h>
20#include <linux/slab.h>
21#include <asm/prom.h>
22#include <asm/dbdma.h>
23#include <asm/io.h>
24#include <asm/pgtable.h>
25#include <asm/macio.h>
26
27#include "mace.h"
28
/* Module parameter: transceiver port override.  -1 = auto-select in
   mace_probe(); 1 forces the AAUI port, 0 the default port. */
static int port_aaui = -1;

/* Ring geometry and limits. */
#define N_RX_RING 8
#define N_TX_RING 6
#define MAX_TX_ACTIVE 1
#define NCMDS_TX 1	/* dma commands per element in tx ring */
#define RX_BUFLEN (ETH_FRAME_LEN + 8)
#define TX_TIMEOUT HZ	/* 1 second */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* Bits in transmit DMA status */
#define TX_DMA_ERR	0x80
43
/* Per-interface driver state: mapped chip and DBDMA register ranges,
   the rx/tx DMA command lists with their ring bookkeeping, and the
   transmit-watchdog machinery.  Ring state is protected by 'lock'. */
struct mace_data {
    volatile struct mace __iomem *mace;		/* chip registers */
    volatile struct dbdma_regs __iomem *tx_dma;	/* tx DMA channel */
    int tx_dma_intr;
    volatile struct dbdma_regs __iomem *rx_dma;	/* rx DMA channel */
    int rx_dma_intr;
    volatile struct dbdma_cmd *tx_cmds;	/* xmit dma command list */
    volatile struct dbdma_cmd *rx_cmds;	/* recv dma command list */
    struct sk_buff *rx_bufs[N_RX_RING];
    int rx_fill;		/* next rx slot to (re)fill with a buffer */
    int rx_empty;		/* oldest rx slot still owned by the DMA engine */
    struct sk_buff *tx_bufs[N_TX_RING];
    int tx_fill;		/* next free tx slot */
    int tx_empty;		/* oldest tx slot not yet reaped */
    unsigned char maccc;	/* cached value for the MACCC register */
    unsigned char tx_fullup;	/* queue stopped: tx ring was full */
    unsigned char tx_active;	/* frames currently handed to the DMA engine */
    unsigned char tx_bad_runt;	/* recovering from a runt-transmit error */
    struct timer_list tx_timeout;	/* transmit watchdog */
    int timeout_active;		/* watchdog currently armed */
    int port_aaui;		/* nonzero => AAUI port, else GPSI */
    int chipid;			/* chip revision, see BROKEN_ADDRCHG_REV */
    struct macio_dev *mdev;
    spinlock_t lock;
};
69
70/*
71 * Number of bytes of private data per MACE: allow enough for
72 * the rx and tx dma commands plus a branch dma command each,
73 * and another 16 bytes to allow us to align the dma command
74 * buffers on a 16 byte boundary.
75 */
76#define PRIV_BYTES (sizeof(struct mace_data) \
77 + (N_RX_RING + NCMDS_TX * N_TX_RING + 3) * sizeof(struct dbdma_cmd))
78
79static int mace_open(struct net_device *dev);
80static int mace_close(struct net_device *dev);
81static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
82static void mace_set_multicast(struct net_device *dev);
83static void mace_reset(struct net_device *dev);
84static int mace_set_address(struct net_device *dev, void *addr);
85static irqreturn_t mace_interrupt(int irq, void *dev_id);
86static irqreturn_t mace_txdma_intr(int irq, void *dev_id);
87static irqreturn_t mace_rxdma_intr(int irq, void *dev_id);
88static void mace_set_timeout(struct net_device *dev);
89static void mace_tx_timeout(unsigned long data);
90static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
91static inline void mace_clean_rings(struct mace_data *mp);
92static void __mace_set_address(struct net_device *dev, void *addr);
93
94/*
95 * If we can't get a skbuff when we need it, we use this area for DMA.
96 */
97static unsigned char *dummy_buf;
98
99static const struct net_device_ops mace_netdev_ops = {
100 .ndo_open = mace_open,
101 .ndo_stop = mace_close,
102 .ndo_start_xmit = mace_xmit_start,
103 .ndo_set_multicast_list = mace_set_multicast,
104 .ndo_set_mac_address = mace_set_address,
105 .ndo_change_mtu = eth_change_mtu,
106 .ndo_validate_addr = eth_validate_addr,
107};
108
109static int __devinit mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
110{
111 struct device_node *mace = macio_get_of_node(mdev);
112 struct net_device *dev;
113 struct mace_data *mp;
114 const unsigned char *addr;
115 int j, rev, rc = -EBUSY;
116
117 if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
118 printk(KERN_ERR "can't use MACE %s: need 3 addrs and 3 irqs\n",
119 mace->full_name);
120 return -ENODEV;
121 }
122
123 addr = of_get_property(mace, "mac-address", NULL);
124 if (addr == NULL) {
125 addr = of_get_property(mace, "local-mac-address", NULL);
126 if (addr == NULL) {
127 printk(KERN_ERR "Can't get mac-address for MACE %s\n",
128 mace->full_name);
129 return -ENODEV;
130 }
131 }
132
133 /*
134 * lazy allocate the driver-wide dummy buffer. (Note that we
135 * never have more than one MACE in the system anyway)
136 */
137 if (dummy_buf == NULL) {
138 dummy_buf = kmalloc(RX_BUFLEN+2, GFP_KERNEL);
139 if (dummy_buf == NULL) {
140 printk(KERN_ERR "MACE: couldn't allocate dummy buffer\n");
141 return -ENOMEM;
142 }
143 }
144
145 if (macio_request_resources(mdev, "mace")) {
146 printk(KERN_ERR "MACE: can't request IO resources !\n");
147 return -EBUSY;
148 }
149
150 dev = alloc_etherdev(PRIV_BYTES);
151 if (!dev) {
152 printk(KERN_ERR "MACE: can't allocate ethernet device !\n");
153 rc = -ENOMEM;
154 goto err_release;
155 }
156 SET_NETDEV_DEV(dev, &mdev->ofdev.dev);
157
158 mp = netdev_priv(dev);
159 mp->mdev = mdev;
160 macio_set_drvdata(mdev, dev);
161
162 dev->base_addr = macio_resource_start(mdev, 0);
163 mp->mace = ioremap(dev->base_addr, 0x1000);
164 if (mp->mace == NULL) {
165 printk(KERN_ERR "MACE: can't map IO resources !\n");
166 rc = -ENOMEM;
167 goto err_free;
168 }
169 dev->irq = macio_irq(mdev, 0);
170
171 rev = addr[0] == 0 && addr[1] == 0xA0;
172 for (j = 0; j < 6; ++j) {
173 dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
174 }
175 mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
176 in_8(&mp->mace->chipid_lo);
177
178
179 mp = netdev_priv(dev);
180 mp->maccc = ENXMT | ENRCV;
181
182 mp->tx_dma = ioremap(macio_resource_start(mdev, 1), 0x1000);
183 if (mp->tx_dma == NULL) {
184 printk(KERN_ERR "MACE: can't map TX DMA resources !\n");
185 rc = -ENOMEM;
186 goto err_unmap_io;
187 }
188 mp->tx_dma_intr = macio_irq(mdev, 1);
189
190 mp->rx_dma = ioremap(macio_resource_start(mdev, 2), 0x1000);
191 if (mp->rx_dma == NULL) {
192 printk(KERN_ERR "MACE: can't map RX DMA resources !\n");
193 rc = -ENOMEM;
194 goto err_unmap_tx_dma;
195 }
196 mp->rx_dma_intr = macio_irq(mdev, 2);
197
198 mp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(mp + 1);
199 mp->rx_cmds = mp->tx_cmds + NCMDS_TX * N_TX_RING + 1;
200
201 memset((char *) mp->tx_cmds, 0,
202 (NCMDS_TX*N_TX_RING + N_RX_RING + 2) * sizeof(struct dbdma_cmd));
203 init_timer(&mp->tx_timeout);
204 spin_lock_init(&mp->lock);
205 mp->timeout_active = 0;
206
207 if (port_aaui >= 0)
208 mp->port_aaui = port_aaui;
209 else {
210 /* Apple Network Server uses the AAUI port */
211 if (of_machine_is_compatible("AAPL,ShinerESB"))
212 mp->port_aaui = 1;
213 else {
214#ifdef CONFIG_MACE_AAUI_PORT
215 mp->port_aaui = 1;
216#else
217 mp->port_aaui = 0;
218#endif
219 }
220 }
221
222 dev->netdev_ops = &mace_netdev_ops;
223
224 /*
225 * Most of what is below could be moved to mace_open()
226 */
227 mace_reset(dev);
228
229 rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
230 if (rc) {
231 printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
232 goto err_unmap_rx_dma;
233 }
234 rc = request_irq(mp->tx_dma_intr, mace_txdma_intr, 0, "MACE-txdma", dev);
235 if (rc) {
236 printk(KERN_ERR "MACE: can't get irq %d\n", mp->tx_dma_intr);
237 goto err_free_irq;
238 }
239 rc = request_irq(mp->rx_dma_intr, mace_rxdma_intr, 0, "MACE-rxdma", dev);
240 if (rc) {
241 printk(KERN_ERR "MACE: can't get irq %d\n", mp->rx_dma_intr);
242 goto err_free_tx_irq;
243 }
244
245 rc = register_netdev(dev);
246 if (rc) {
247 printk(KERN_ERR "MACE: Cannot register net device, aborting.\n");
248 goto err_free_rx_irq;
249 }
250
251 printk(KERN_INFO "%s: MACE at %pM, chip revision %d.%d\n",
252 dev->name, dev->dev_addr,
253 mp->chipid >> 8, mp->chipid & 0xff);
254
255 return 0;
256
257 err_free_rx_irq:
258 free_irq(macio_irq(mdev, 2), dev);
259 err_free_tx_irq:
260 free_irq(macio_irq(mdev, 1), dev);
261 err_free_irq:
262 free_irq(macio_irq(mdev, 0), dev);
263 err_unmap_rx_dma:
264 iounmap(mp->rx_dma);
265 err_unmap_tx_dma:
266 iounmap(mp->tx_dma);
267 err_unmap_io:
268 iounmap(mp->mace);
269 err_free:
270 free_netdev(dev);
271 err_release:
272 macio_release_resources(mdev);
273
274 return rc;
275}
276
/* Tear down one interface: undo everything mace_probe() set up, in
   reverse order of acquisition. */
static int __devexit mace_remove(struct macio_dev *mdev)
{
	struct net_device *dev = macio_get_drvdata(mdev);
	struct mace_data *mp;

	BUG_ON(dev == NULL);

	macio_set_drvdata(mdev, NULL);

	mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(mp->tx_dma_intr, dev);
	free_irq(mp->rx_dma_intr, dev);

	iounmap(mp->rx_dma);
	iounmap(mp->tx_dma);
	iounmap(mp->mace);

	free_netdev(dev);

	macio_release_resources(mdev);

	return 0;
}
304
/* Hard-stop a DBDMA channel by clearing all of its control bits, then
   give the engine a short while to actually come to rest. */
static void dbdma_reset(volatile struct dbdma_regs __iomem *dma)
{
	int i;

	/* writing bits in the high half clears the corresponding
	   control bits in the low half */
	out_le32(&dma->control, (WAKE|FLUSH|PAUSE|RUN) << 16);

	/*
	 * Yes this looks peculiar, but apparently it needs to be this
	 * way on some machines.
	 */
	for (i = 200; i > 0; --i)
		if (ld_le32(&dma->control) & RUN)
			udelay(1);
}
319
/* Soft-reset the MACE and bring it to a known quiescent state:
   interrupts masked, tx/rx disabled, FIFO/transmit parameters set,
   station address loaded, multicast filter cleared, transceiver port
   selected.  Does not enable reception — callers restore MACCC. */
static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	int i;

	/* soft-reset the chip, polling until SWRST reads back clear */
	i = 200;
	while (--i) {
		out_8(&mb->biucc, SWRST);
		if (in_8(&mb->biucc) & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "mace: cannot reset chip!\n");
		return;
	}

	out_8(&mb->imr, 0xff);	/* disable all intrs for now */
	i = in_8(&mb->ir);	/* read ir to ack any latched interrupts */
	out_8(&mb->maccc, 0);	/* turn off tx, rx */

	out_8(&mb->biucc, XMTSP_64);
	out_8(&mb->utr, RTRD);
	out_8(&mb->fifocc, RCVFW_32 | XMTFW_16 | XMTFWU | RCVFWU | XMTBRST);
	out_8(&mb->xmtfc, AUTO_PAD_XMIT); /* auto-pad short frames */
	out_8(&mb->rcvfc, 0);

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter; the broken chip revision must not
	   use the ADDRCHG handshake */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, LOGADDR);
	else {
		out_8(&mb->iac, ADDRCHG | LOGADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		out_8(&mb->ladrf, 0);

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);

	if (mp->port_aaui)
		out_8(&mb->plscc, PORTSEL_AUI + ENPLSIO);
	else
		out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
374
/* Write the station address into the chip's physical address regs and
   mirror it into dev->dev_addr.  The BROKEN_ADDRCHG_REV revision must
   skip the ADDRCHG handshake, hence the two paths.  Callers either
   hold mp->lock or run during reset. */
static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, PHYADDR);
	else {
		out_8(&mb->iac, ADDRCHG | PHYADDR);
		while ((in_8(&mb->iac) & ADDRCHG) != 0)
			;
	}
	/* successive writes to padr apparently step through the 6
	   address bytes */
	for (i = 0; i < 6; ++i)
		out_8(&mb->padr, dev->dev_addr[i] = p[i]);
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		out_8(&mb->iac, 0);
}
395
/* ndo_set_mac_address: update the hardware address under the driver
   lock, then rewrite MACCC since the address-change sequence disables
   the receiver. */
static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);

	__mace_set_address(dev, addr);

	/* note: setting ADDRCHG clears ENRCV */
	out_8(&mb->maccc, mp->maccc);

	spin_unlock_irqrestore(&mp->lock, flags);
	return 0;
}
412
413static inline void mace_clean_rings(struct mace_data *mp)
414{
415 int i;
416
417 /* free some skb's */
418 for (i = 0; i < N_RX_RING; ++i) {
419 if (mp->rx_bufs[i] != NULL) {
420 dev_kfree_skb(mp->rx_bufs[i]);
421 mp->rx_bufs[i] = NULL;
422 }
423 }
424 for (i = mp->tx_empty; i != mp->tx_fill; ) {
425 dev_kfree_skb(mp->tx_bufs[i]);
426 if (++i >= N_TX_RING)
427 i = 0;
428 }
429}
430
/* ndo_open: reset the chip, build the receive DBDMA command ring,
   start the rx channel, prime the tx ring bookkeeping and finally
   enable the chip.  One rx slot (rx_fill stops one short of rx_empty)
   is deliberately left as a stop marker. */
static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int i;
	struct sk_buff *skb;
	unsigned char *data;

	/* reset the chip */
	mace_reset(dev);

	/* initialize list of sk_buffs for receiving and set up recv dma */
	mace_clean_rings(mp);
	memset((char *)mp->rx_cmds, 0, N_RX_RING * sizeof(struct dbdma_cmd));
	cp = mp->rx_cmds;
	for (i = 0; i < N_RX_RING - 1; ++i) {
		skb = dev_alloc_skb(RX_BUFLEN + 2);
		if (!skb) {
			/* fall back to the shared dummy buffer; the frame
			   landing there will be counted as dropped */
			data = dummy_buf;
		} else {
			skb_reserve(skb, 2);	/* so IP header lands on 4-byte bdry */
			data = skb->data;
		}
		mp->rx_bufs[i] = skb;
		st_le16(&cp->req_count, RX_BUFLEN);
		st_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
		st_le32(&cp->phy_addr, virt_to_bus(data));
		cp->xfer_status = 0;
		++cp;
	}
	mp->rx_bufs[i] = NULL;
	st_le16(&cp->command, DBDMA_STOP);
	mp->rx_fill = i;
	mp->rx_empty = 0;

	/* Put a branch back to the beginning of the receive command list */
	++cp;
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, virt_to_bus(mp->rx_cmds));

	/* start rx dma */
	out_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	out_le32(&rd->cmdptr, virt_to_bus(mp->rx_cmds));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* put a branch at the end of the tx command list */
	cp = mp->tx_cmds + NCMDS_TX * N_TX_RING;
	st_le16(&cp->command, DBDMA_NOP + BR_ALWAYS);
	st_le32(&cp->cmd_dep, virt_to_bus(mp->tx_cmds));

	/* reset tx dma */
	out_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16);
	out_le32(&td->cmdptr, virt_to_bus(mp->tx_cmds));
	mp->tx_fill = 0;
	mp->tx_empty = 0;
	mp->tx_fullup = 0;
	mp->tx_active = 0;
	mp->tx_bad_runt = 0;

	/* turn it on! */
	out_8(&mb->maccc, mp->maccc);
	/* enable all interrupts except receive interrupts */
	out_8(&mb->imr, RCVINT);

	return 0;
}
500
/* ndo_stop: disable the chip and both DMA channels, then release all
   ring buffers.  NOTE(review): the tx watchdog timer is not cancelled
   here — it appears to rely on tx being quiescent; confirm. */
static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

	/* disable rx and tx */
	out_8(&mb->maccc, 0);
	out_8(&mb->imr, 0xff);		/* disable all intrs */

	/* disable rx and tx dma */
	st_le32(&rd->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */
	st_le32(&td->control, (RUN|PAUSE|FLUSH|WAKE) << 16); /* clear run bit */

	mace_clean_rings(mp);

	return 0;
}
520
/* (Re)arm the transmit watchdog to fire TX_TIMEOUT jiffies from now.
   Called with mp->lock held. */
static inline void mace_set_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);

	if (mp->timeout_active)
		del_timer(&mp->tx_timeout);
	mp->tx_timeout.expires = jiffies + TX_TIMEOUT;
	mp->tx_timeout.function = mace_tx_timeout;
	mp->tx_timeout.data = (unsigned long) dev;
	add_timer(&mp->tx_timeout);
	mp->timeout_active = 1;
}
533
/* ndo_start_xmit: place one frame on the tx DBDMA ring and kick the
   channel if it may run (not recovering from a runt and below the
   active-frame limit).  Returns NETDEV_TX_BUSY only if the ring is
   already full; the queue is stopped pre-emptively when this frame
   fills it. */
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp, *np;
	unsigned long flags;
	int fill, next, len;

	/* see if there's a free slot in the tx ring */
	spin_lock_irqsave(&mp->lock, flags);
	fill = mp->tx_fill;
	next = fill + 1;
	if (next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty) {
		netif_stop_queue(dev);
		mp->tx_fullup = 1;
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;	/* can't take it at the moment */
	}
	spin_unlock_irqrestore(&mp->lock, flags);

	/* partially fill in the dma command block */
	len = skb->len;
	if (len > ETH_FRAME_LEN) {
		printk(KERN_DEBUG "mace: xmit frame too long (%d)\n", len);
		len = ETH_FRAME_LEN;
	}
	mp->tx_bufs[fill] = skb;
	cp = mp->tx_cmds + NCMDS_TX * fill;
	st_le16(&cp->req_count, len);
	st_le32(&cp->phy_addr, virt_to_bus(skb->data));

	/* mark the following slot as a stop so the DMA engine halts
	   there until the next frame is queued */
	np = mp->tx_cmds + NCMDS_TX * next;
	out_le16(&np->command, DBDMA_STOP);

	/* poke the tx dma channel */
	spin_lock_irqsave(&mp->lock, flags);
	mp->tx_fill = next;
	if (!mp->tx_bad_runt && mp->tx_active < MAX_TX_ACTIVE) {
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		++mp->tx_active;
		mace_set_timeout(dev);
	}
	/* stop the queue now if this frame left the ring full */
	if (++next >= N_TX_RING)
		next = 0;
	if (next == mp->tx_empty)
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&mp->lock, flags);

	return NETDEV_TX_OK;
}
588
589static void mace_set_multicast(struct net_device *dev)
590{
591 struct mace_data *mp = netdev_priv(dev);
592 volatile struct mace __iomem *mb = mp->mace;
593 int i;
594 u32 crc;
595 unsigned long flags;
596
597 spin_lock_irqsave(&mp->lock, flags);
598 mp->maccc &= ~PROM;
599 if (dev->flags & IFF_PROMISC) {
600 mp->maccc |= PROM;
601 } else {
602 unsigned char multicast_filter[8];
603 struct netdev_hw_addr *ha;
604
605 if (dev->flags & IFF_ALLMULTI) {
606 for (i = 0; i < 8; i++)
607 multicast_filter[i] = 0xff;
608 } else {
609 for (i = 0; i < 8; i++)
610 multicast_filter[i] = 0;
611 netdev_for_each_mc_addr(ha, dev) {
612 crc = ether_crc_le(6, ha->addr);
613 i = crc >> 26; /* bit number in multicast_filter */
614 multicast_filter[i >> 3] |= 1 << (i & 7);
615 }
616 }
617#if 0
618 printk("Multicast filter :");
619 for (i = 0; i < 8; i++)
620 printk("%02x ", multicast_filter[i]);
621 printk("\n");
622#endif
623
624 if (mp->chipid == BROKEN_ADDRCHG_REV)
625 out_8(&mb->iac, LOGADDR);
626 else {
627 out_8(&mb->iac, ADDRCHG | LOGADDR);
628 while ((in_8(&mb->iac) & ADDRCHG) != 0)
629 ;
630 }
631 for (i = 0; i < 8; ++i)
632 out_8(&mb->ladrf, multicast_filter[i]);
633 if (mp->chipid != BROKEN_ADDRCHG_REV)
634 out_8(&mb->iac, 0);
635 }
636 /* reset maccc */
637 out_8(&mb->maccc, mp->maccc);
638 spin_unlock_irqrestore(&mp->lock, flags);
639}
640
/* Fold the miscellaneous error causes into the stats counters.  The
   mpc/rntpc hardware counters clear on read, so they are sampled every
   time; the MPCO/RNTPCO bits apparently flag counter overflow, for
   which the code credits 256 extra events. */
static void mace_handle_misc_intrs(struct mace_data *mp, int intr, struct net_device *dev)
{
	volatile struct mace __iomem *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += in_8(&mb->mpc);	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += in_8(&mb->rntpc);	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	/* complain about babble/jabber only a few times, then go quiet */
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "mace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "mace: jabbering transceiver\n");
}
661
/* Main chip interrupt handler: account misc error interrupts, reap
   completed transmit frames (with the chip's runt-packet workaround),
   restart the tx DMA engine for any still-queued frames, and wake the
   transmit queue when ring space frees up.  Receive completion is
   handled separately by mace_rxdma_intr(). */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_cmd *cp;
	int intr, fs, i, stat, x;
	int xcount, dstat;
	unsigned long flags;
	/* static int mace_last_fs, mace_last_xcount; */

	spin_lock_irqsave(&mp->lock, flags);
	intr = in_8(&mb->ir); /* read interrupt register */
	in_8(&mb->xmtrc);	/* get retries */
	mace_handle_misc_intrs(mp, intr, dev);

	/* reap completed transmits, starting at the oldest slot */
	i = mp->tx_empty;
	while (in_8(&mb->pr) & XMTSV) {
		del_timer(&mp->tx_timeout);
		mp->timeout_active = 0;
		/*
		 * Clear any interrupt indication associated with this status
		 * word.  This appears to unlatch any error indication from
		 * the DMA controller.
		 */
		intr = in_8(&mb->ir);
		if (intr != 0)
			mace_handle_misc_intrs(mp, intr, dev);
		if (mp->tx_bad_runt) {
			/* second half of the runt recovery: read the status
			   and restore auto-padding */
			fs = in_8(&mb->xmtfs);
			mp->tx_bad_runt = 0;
			out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			continue;
		}
		dstat = ld_le32(&td->status);
		/* stop DMA controller */
		out_le32(&td->control, RUN << 16);
		/*
		 * xcount is the number of complete frames which have been
		 * written to the fifo but for which status has not been read.
		 */
		xcount = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
		if (xcount == 0 || (dstat & DEAD)) {
			/*
			 * If a packet was aborted before the DMA controller has
			 * finished transferring it, it seems that there are 2 bytes
			 * which are stuck in some buffer somewhere.  These will get
			 * transmitted as soon as we read the frame status (which
			 * reenables the transmit data transfer request).  Turning
			 * off the DMA controller and/or resetting the MACE doesn't
			 * help.  So we disable auto-padding and FCS transmission
			 * so the two bytes will only be a runt packet which should
			 * be ignored by other stations.
			 */
			out_8(&mb->xmtfc, DXMTFCS);
		}
		fs = in_8(&mb->xmtfs);
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "mace: xmtfs not valid! (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			mace_reset(dev);
				/*
				 * XXX mace likes to hang the machine after a xmtfs error.
				 * This is hard to reproduce, reseting *may* help
				 */
		}
		cp = mp->tx_cmds + NCMDS_TX * i;
		stat = ld_le16(&cp->xfer_status);
		if ((fs & (UFLO|LCOL|LCAR|RTRY)) || (dstat & DEAD) || xcount == 0) {
			/*
			 * Check whether there were in fact 2 bytes written to
			 * the transmit FIFO.
			 */
			udelay(1);
			x = (in_8(&mb->fifofc) >> XMTFC_SH) & XMTFC_MASK;
			if (x != 0) {
				/* there were two bytes with an end-of-packet indication */
				mp->tx_bad_runt = 1;
				mace_set_timeout(dev);
			} else {
				/*
				 * Either there weren't the two bytes buffered up, or they
				 * didn't have an end-of-packet indication.
				 * We flush the transmit FIFO just in case (by setting the
				 * XMTFWU bit with the transmitter disabled).
				 */
				out_8(&mb->maccc, in_8(&mb->maccc) & ~ENXMT);
				out_8(&mb->fifocc, in_8(&mb->fifocc) | XMTFWU);
				udelay(1);
				out_8(&mb->maccc, in_8(&mb->maccc) | ENXMT);
				out_8(&mb->xmtfc, AUTO_PAD_XMIT);
			}
		}
		/* dma should have finished */
		if (i == mp->tx_fill) {
			printk(KERN_DEBUG "mace: tx ring ran out? (fs=%x xc=%d ds=%x)\n",
			       fs, xcount, dstat);
			continue;
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			if (fs & (UFLO|LCOL|RTRY))
				++dev->stats.tx_aborted_errors;
		} else {
			dev->stats.tx_bytes += mp->tx_bufs[i]->len;
			++dev->stats.tx_packets;
		}
		dev_kfree_skb_irq(mp->tx_bufs[i]);
		--mp->tx_active;
		if (++i >= N_TX_RING)
			i = 0;
#if 0
		mace_last_fs = fs;
		mace_last_xcount = xcount;
#endif
	}

	/* reaped at least one frame: the queue may run again */
	if (i != mp->tx_empty) {
		mp->tx_fullup = 0;
		netif_wake_queue(dev);
	}
	mp->tx_empty = i;
	/* resubmit queued-but-not-yet-active frames, up to the limit */
	i += mp->tx_active;
	if (i >= N_TX_RING)
		i -= N_TX_RING;
	if (!mp->tx_bad_runt && i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE) {
		do {
			/* set up the next one */
			cp = mp->tx_cmds + NCMDS_TX * i;
			out_le16(&cp->xfer_status, 0);
			out_le16(&cp->command, OUTPUT_LAST);
			++mp->tx_active;
			if (++i >= N_TX_RING)
				i = 0;
		} while (i != mp->tx_fill && mp->tx_active < MAX_TX_ACTIVE);
		out_le32(&td->control, ((RUN|WAKE) << 16) + (RUN|WAKE));
		mace_set_timeout(dev);
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
807
/* Transmit watchdog (timer callback): a transmit failed to complete
   within TX_TIMEOUT.  Reset the chip and the tx DMA channel, restart
   receive DMA where it left off, drop the stuck frame, and resubmit
   the next queued frame if any. */
static void mace_tx_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *) data;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace __iomem *mb = mp->mace;
	volatile struct dbdma_regs __iomem *td = mp->tx_dma;
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mp->lock, flags);
	mp->timeout_active = 0;
	/* the interrupt handler may have finished the frame meanwhile */
	if (mp->tx_active == 0 && !mp->tx_bad_runt)
		goto out;

	/* update various counters */
	mace_handle_misc_intrs(mp, in_8(&mb->ir), dev);

	cp = mp->tx_cmds + NCMDS_TX * mp->tx_empty;

	/* turn off both tx and rx and reset the chip */
	out_8(&mb->maccc, 0);
	printk(KERN_ERR "mace: transmit timeout - resetting\n");
	dbdma_reset(td);
	mace_reset(dev);

	/* restart rx dma from the command it was working on */
	cp = bus_to_virt(ld_le32(&rd->cmdptr));
	dbdma_reset(rd);
	out_le16(&cp->xfer_status, 0);
	out_le32(&rd->cmdptr, virt_to_bus(cp));
	out_le32(&rd->control, (RUN << 16) | RUN);

	/* fix up the transmit side: drop the frame that timed out */
	i = mp->tx_empty;
	mp->tx_active = 0;
	++dev->stats.tx_errors;
	if (mp->tx_bad_runt) {
		mp->tx_bad_runt = 0;
	} else if (i != mp->tx_fill) {
		dev_kfree_skb(mp->tx_bufs[i]);
		if (++i >= N_TX_RING)
			i = 0;
		mp->tx_empty = i;
	}
	mp->tx_fullup = 0;
	netif_wake_queue(dev);
	/* resubmit the next queued frame, if there is one */
	if (i != mp->tx_fill) {
		cp = mp->tx_cmds + NCMDS_TX * i;
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, OUTPUT_LAST);
		out_le32(&td->cmdptr, virt_to_bus(cp));
		out_le32(&td->control, (RUN << 16) | RUN);
		++mp->tx_active;
		mace_set_timeout(dev);
	}

	/* turn it back on */
	out_8(&mb->imr, RCVINT);
	out_8(&mb->maccc, mp->maccc);

out:
	spin_unlock_irqrestore(&mp->lock, flags);
}
873
/* Transmit completion is fully handled from mace_interrupt(), so the
   dedicated tx DMA interrupt has nothing left to do. */
static irqreturn_t mace_txdma_intr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
878
/* Receive DMA interrupt: reap completed receive slots (rx_empty up to
   rx_fill), hand good frames to the stack, then refill the ring with
   fresh buffers and wake the DMA engine.  Slots whose skb allocation
   failed point at the shared dummy_buf and are counted as dropped. */
static irqreturn_t mace_rxdma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct dbdma_regs __iomem *rd = mp->rx_dma;
	volatile struct dbdma_cmd *cp, *np;
	int i, nb, stat, next;
	struct sk_buff *skb;
	unsigned frame_status;
	static int mace_lost_status;
	unsigned char *data;
	unsigned long flags;

	spin_lock_irqsave(&mp->lock, flags);
	for (i = mp->rx_empty; i != mp->rx_fill; ) {
		cp = mp->rx_cmds + i;
		stat = ld_le16(&cp->xfer_status);
		if ((stat & ACTIVE) == 0) {
			/* This slot's status hasn't been written yet; if the
			   following slot is already done, the hardware
			   skipped writing ours — note it and press on. */
			next = i + 1;
			if (next >= N_RX_RING)
				next = 0;
			np = mp->rx_cmds + next;
			if (next != mp->rx_fill &&
			    (ld_le16(&np->xfer_status) & ACTIVE) != 0) {
				printk(KERN_DEBUG "mace: lost a status word\n");
				++mace_lost_status;
			} else
				break;
		}
		/* bytes actually transferred into the buffer */
		nb = ld_le16(&cp->req_count) - ld_le16(&cp->res_count);
		out_le16(&cp->command, DBDMA_STOP);
		/* got a packet, have a look at it */
		skb = mp->rx_bufs[i];
		if (!skb) {
			++dev->stats.rx_dropped;
		} else if (nb > 8) {
			data = skb->data;
			/* the chip appends a status trailer to the frame */
			frame_status = (data[nb-3] << 8) + data[nb-4];
			if (frame_status & (RS_OFLO|RS_CLSN|RS_FRAMERR|RS_FCSERR)) {
				++dev->stats.rx_errors;
				if (frame_status & RS_OFLO)
					++dev->stats.rx_over_errors;
				if (frame_status & RS_FRAMERR)
					++dev->stats.rx_frame_errors;
				if (frame_status & RS_FCSERR)
					++dev->stats.rx_crc_errors;
			} else {
				/* Mace feature AUTO_STRIP_RCV is on by default, dropping the
				 * FCS on frames with 802.3 headers. This means that Ethernet
				 * frames have 8 extra octets at the end, while 802.3 frames
				 * have only 4. We need to correctly account for this. */
				if (*(unsigned short *)(data+12) < 1536) /* 802.3 header */
					nb -= 4;
				else	/* Ethernet header; mace includes FCS */
					nb -= 8;
				skb_put(skb, nb);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_bytes += skb->len;
				netif_rx(skb);
				mp->rx_bufs[i] = NULL;
				++dev->stats.rx_packets;
			}
		} else {
			++dev->stats.rx_errors;
			++dev->stats.rx_length_errors;
		}

		/* advance to next */
		if (++i >= N_RX_RING)
			i = 0;
	}
	mp->rx_empty = i;

	/* refill the ring, keeping one slot as the stop marker */
	i = mp->rx_fill;
	for (;;) {
		next = i + 1;
		if (next >= N_RX_RING)
			next = 0;
		if (next == mp->rx_empty)
			break;
		cp = mp->rx_cmds + i;
		skb = mp->rx_bufs[i];
		if (!skb) {
			skb = dev_alloc_skb(RX_BUFLEN + 2);
			if (skb) {
				skb_reserve(skb, 2);
				mp->rx_bufs[i] = skb;
			}
		}
		st_le16(&cp->req_count, RX_BUFLEN);
		/* fall back to dummy_buf if allocation failed */
		data = skb? skb->data: dummy_buf;
		st_le32(&cp->phy_addr, virt_to_bus(data));
		out_le16(&cp->xfer_status, 0);
		out_le16(&cp->command, INPUT_LAST + INTR_ALWAYS);
#if 0
		if ((ld_le32(&rd->status) & ACTIVE) != 0) {
			out_le32(&rd->control, (PAUSE << 16) | PAUSE);
			while ((in_le32(&rd->status) & ACTIVE) != 0)
				;
		}
#endif
		i = next;
	}
	if (i != mp->rx_fill) {
		/* wake the engine so it notices the refilled slots */
		out_le32(&rd->control, ((RUN|WAKE) << 16) | (RUN|WAKE));
		mp->rx_fill = i;
	}
	spin_unlock_irqrestore(&mp->lock, flags);
	return IRQ_HANDLED;
}
989
/* Match any macio cell named "mace" in the device tree. */
static struct of_device_id mace_match[] =
{
	{
	.name 		= "mace",
	},
	{},
};
MODULE_DEVICE_TABLE (of, mace_match);

/* macio bus glue: probe/remove entry points for matching devices. */
static struct macio_driver mace_driver =
{
	.driver = {
		.name 		= "mace",
		.owner		= THIS_MODULE,
		.of_match_table	= mace_match,
	},
	.probe		= mace_probe,
	.remove		= mace_remove,
};
1009
1010
/* Module entry: register the macio driver; devices are then probed
   individually via mace_probe(). */
static int __init mace_init(void)
{
	return macio_register_driver(&mace_driver);
}
1015
/* Module exit: unregister the driver and free the driver-wide dummy
   DMA buffer that mace_probe() allocated lazily. */
static void __exit mace_cleanup(void)
{
	macio_unregister_driver(&mace_driver);

	kfree(dummy_buf);
	dummy_buf = NULL;
}
1023
1024MODULE_AUTHOR("Paul Mackerras");
1025MODULE_DESCRIPTION("PowerMac MACE driver.");
1026module_param(port_aaui, int, 0);
1027MODULE_PARM_DESC(port_aaui, "MACE uses AAUI port (0-1)");
1028MODULE_LICENSE("GPL");
1029
1030module_init(mace_init);
1031module_exit(mace_cleanup);
diff --git a/drivers/net/ethernet/apple/mace.h b/drivers/net/ethernet/apple/mace.h
new file mode 100644
index 000000000000..30b7ec0cedb5
--- /dev/null
+++ b/drivers/net/ethernet/apple/mace.h
@@ -0,0 +1,173 @@
1/*
2 * mace.h - definitions for the registers in the Am79C940 MACE
3 * (Medium Access Control for Ethernet) controller.
4 *
5 * Copyright (C) 1996 Paul Mackerras.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
/*
 * Each MACE register occupies only the first byte of a 16-byte slot in
 * the chip's address space: REG(x) declares the register itself plus
 * 15 bytes of padding so struct mace mirrors the hardware layout.
 */
#define REG(x)	volatile unsigned char x; char x ## _pad[15]

/* Register map of the Am79C940 MACE, in chip register order. */
struct mace {
	REG(rcvfifo);		/* receive FIFO */
	REG(xmtfifo);		/* transmit FIFO */
	REG(xmtfc);		/* transmit frame control */
	REG(xmtfs);		/* transmit frame status */
	REG(xmtrc);		/* transmit retry count */
	REG(rcvfc);		/* receive frame control */
	REG(rcvfs);		/* receive frame status (4 bytes) */
	REG(fifofc);		/* FIFO frame count */
	REG(ir);		/* interrupt register */
	REG(imr);		/* interrupt mask register */
	REG(pr);		/* poll register */
	REG(biucc);		/* bus interface unit config control */
	REG(fifocc);		/* FIFO configuration control */
	REG(maccc);		/* medium access control config control */
	REG(plscc);		/* phys layer signalling config control */
	REG(phycc);		/* physical configuration control */
	REG(chipid_lo);		/* chip ID, lsb */
	REG(chipid_hi);		/* chip ID, msb */
	REG(iac);		/* internal address config */
	REG(reg19);
	REG(ladrf);		/* logical address filter (8 bytes) */
	REG(padr);		/* physical address (6 bytes) */
	REG(reg22);
	REG(reg23);
	REG(mpc);		/* missed packet count (clears when read) */
	REG(reg25);
	REG(rntpc);		/* runt packet count (clears when read) */
	REG(rcvcc);		/* recv collision count (clears when read) */
	REG(reg28);
	REG(utr);		/* user test reg */
	REG(reg30);
	REG(reg31);
};
49
/* Bits in XMTFC */
#define DRTRY		0x80	/* don't retry transmission after collision */
#define DXMTFCS		0x08	/* don't append FCS to transmitted frame */
#define AUTO_PAD_XMIT	0x01	/* auto-pad short packets on transmission */

/* Bits in XMTFS: only valid when XMTSV is set in PR and XMTFS */
#define XMTSV		0x80	/* transmit status (i.e. XMTFS) valid */
#define UFLO		0x40	/* underflow - xmit fifo ran dry */
#define LCOL		0x20	/* late collision (transmission aborted) */
#define MORE		0x10	/* 2 or more retries needed to xmit frame */
#define ONE		0x08	/* 1 retry needed to xmit frame */
#define DEFER		0x04	/* MACE had to defer xmission (enet busy) */
#define LCAR		0x02	/* loss of carrier (transmission aborted) */
#define RTRY		0x01	/* too many retries (transmission aborted) */

/* Bits in XMTRC: only valid when XMTSV is set in PR (and XMTFS) */
#define EXDEF		0x80	/* had to defer for excessive time */
#define RETRY_MASK	0x0f	/* number of retries (0 - 15) */

/* Bits in RCVFC */
#define LLRCV		0x08	/* low latency receive: early DMA request */
#define M_RBAR		0x04	/* sets function of EAM/R pin */
#define AUTO_STRIP_RCV	0x01	/* auto-strip short LLC frames on recv */

/*
 * Bits in RCVFS.  After a frame is received, four bytes of status
 * are automatically read from this register and appended to the frame
 * data in memory.  These are:
 * Byte 0 and 1: message byte count and frame status
 * Byte 2: runt packet count
 * Byte 3: receive collision count
 * (The RS_* values below are 16-bit masks over bytes 0-1; RS_COUNT is
 * the 12-bit received byte count.)
 */
#define RS_OFLO		0x8000	/* receive FIFO overflowed */
#define RS_CLSN		0x4000	/* received frame suffered (late) collision */
#define RS_FRAMERR	0x2000	/* framing error flag */
#define RS_FCSERR	0x1000	/* frame had FCS error */
#define RS_COUNT	0x0fff	/* mask for byte count field */

/* Bits (fields) in FIFOFC: two 4-bit frame counters packed in one byte */
#define RCVFC_SH	4	/* receive frame count in FIFO */
#define RCVFC_MASK	0x0f
#define XMTFC_SH	0	/* transmit frame count in FIFO */
#define XMTFC_MASK	0x0f

/*
 * Bits in IR and IMR.  The IR clears itself when read.
 * Setting a bit in the IMR will disable the corresponding interrupt.
 */
#define JABBER		0x80	/* jabber error - 10baseT xmission too long */
#define BABBLE		0x40	/* babble - xmitter xmitting for too long */
#define CERR		0x20	/* collision err - no SQE test (heartbeat) */
#define RCVCCO		0x10	/* RCVCC overflow */
#define RNTPCO		0x08	/* RNTPC overflow */
#define MPCO		0x04	/* MPC overflow */
#define RCVINT		0x02	/* receive interrupt */
#define XMTINT		0x01	/* transmitter interrupt */

/* Bits in PR */
#define XMTSV		0x80	/* XMTFS valid (same as in XMTFS) */
#define TDTREQ		0x40	/* set when xmit fifo is requesting data */
#define RDTREQ		0x20	/* set when recv fifo requests data xfer */

/* Bits in BIUCC */
#define BSWP		0x40	/* byte swap, i.e. big-endian bus */
#define XMTSP_4		0x00	/* start xmitting when 4 bytes in FIFO */
#define XMTSP_16	0x10	/* start xmitting when 16 bytes in FIFO */
#define XMTSP_64	0x20	/* start xmitting when 64 bytes in FIFO */
#define XMTSP_112	0x30	/* start xmitting when 112 bytes in FIFO */
#define SWRST		0x01	/* software reset */

/* Bits in FIFOCC */
#define XMTFW_8		0x00	/* xmit fifo watermark = 8 words free */
#define XMTFW_16	0x40	/*  16 words free */
#define XMTFW_32	0x80	/*  32 words free */
#define RCVFW_16	0x00	/* recv fifo watermark = 16 bytes avail */
#define RCVFW_32	0x10	/*  32 bytes avail */
#define RCVFW_64	0x20	/*  64 bytes avail */
#define XMTFWU		0x08	/* xmit fifo watermark update enable */
#define RCVFWU		0x04	/* recv fifo watermark update enable */
#define XMTBRST		0x02	/* enable transmit burst mode */
#define RCVBRST		0x01	/* enable receive burst mode */

/* Bits in MACCC */
#define PROM		0x80	/* promiscuous mode */
#define DXMT2PD		0x40	/* disable xmit two-part deferral algorithm */
#define EMBA		0x20	/* enable modified backoff algorithm */
#define DRCVPA		0x08	/* disable receiving physical address */
#define DRCVBC		0x04	/* disable receiving broadcasts */
#define ENXMT		0x02	/* enable transmitter */
#define ENRCV		0x01	/* enable receiver */

/* Bits in PLSCC */
#define XMTSEL		0x08	/* select DO+/DO- state when idle */
#define PORTSEL_AUI	0x00	/* select AUI port */
#define PORTSEL_10T	0x02	/* select 10Base-T port */
#define PORTSEL_DAI	0x04	/* select DAI port */
#define PORTSEL_GPSI	0x06	/* select GPSI port */
#define ENPLSIO		0x01	/* enable optional PLS I/O pins */

/* Bits in PHYCC */
#define LNKFL		0x80	/* reports 10Base-T link failure */
#define DLNKTST		0x40	/* disable 10Base-T link test */
#define REVPOL		0x20	/* 10Base-T receiver polarity reversed */
#define DAPC		0x10	/* disable auto receiver polarity correction */
#define LRT		0x08	/* low receive threshold for long links */
#define ASEL		0x04	/* auto-select AUI or 10Base-T port */
#define RWAKE		0x02	/* remote wake function */
#define AWAKE		0x01	/* auto wake function */

/* Bits in IAC */
#define ADDRCHG		0x80	/* request address change */
#define PHYADDR		0x04	/* access physical address */
#define LOGADDR		0x02	/* access multicast filter */

/* Bits in UTR */
#define RTRE		0x80	/* reserved test register enable. DON'T SET. */
#define RTRD		0x40	/* reserved test register disable. Sticky */
#define RPAC		0x20	/* accept runt packets */
#define FCOLL		0x10	/* force collision */
#define RCVFCSE		0x08	/* receive FCS enable */
#define LOOP_NONE	0x00	/* no loopback */
#define LOOP_EXT	0x02	/* external loopback */
#define LOOP_INT	0x04	/* internal loopback, excludes MENDEC */
#define LOOP_MENDEC	0x06	/* internal loopback, includes MENDEC */
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
new file mode 100644
index 000000000000..4286e67f9634
--- /dev/null
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -0,0 +1,799 @@
1/*
2 * Driver for the Macintosh 68K onboard MACE controller with PSC
3 * driven DMA. The MACE driver code is derived from mace.c. The
4 * Mac68k theory of operation is courtesy of the MacBSD wizards.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Copyright (C) 1996 Paul Mackerras.
12 * Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
13 *
14 * Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
15 *
16 * Copyright (C) 2007 Finn Thain
17 *
18 * Converted to DMA API, converted to unified driver model,
19 * sync'd some routines with mace.c and fixed various bugs.
20 */
21
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/delay.h>
28#include <linux/string.h>
29#include <linux/crc32.h>
30#include <linux/bitrev.h>
31#include <linux/dma-mapping.h>
32#include <linux/platform_device.h>
33#include <linux/gfp.h>
34#include <asm/io.h>
35#include <asm/irq.h>
36#include <asm/macintosh.h>
37#include <asm/macints.h>
38#include <asm/mac_psc.h>
39#include <asm/page.h>
40#include "mace.h"
41
static char mac_mace_string[] = "macmace";	/* platform driver name */

/* Ring geometry: ring sizes are powers of two of the *_ORDER values. */
#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)	/* single TX buffer */
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)	/* 8 RX buffers */

#define TX_TIMEOUT	HZ	/* netdev watchdog timeout */

#define MACE_BUFF_SIZE	0x800	/* 2 KB per DMA ring slot */

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)	/* MACE register window */
#define MACE_PROM	(void *)(0x50F08001)	/* MAC address PROM */
60
/* Per-device driver state, stored in the net_device private area. */
struct mace_data {
	volatile struct mace *mace;	/* chip register window (MACE_BASE) */
	unsigned char *tx_ring;		/* coherent TX DMA buffer */
	dma_addr_t tx_ring_phys;	/* its bus address */
	unsigned char *rx_ring;		/* coherent RX DMA ring */
	dma_addr_t rx_ring_phys;	/* its bus address */
	int dma_intr;			/* PSC DMA interrupt number */
	int rx_slot, rx_tail;		/* active RX register set / next frame index */
	int tx_slot, tx_sloti, tx_count; /* TX fill set, reclaim set, free buffers */
	int chipid;			/* chip revision, for BROKEN_ADDRCHG_REV */
	struct device *device;		/* platform device, for DMA API calls */
};
73
/*
 * In-memory layout of one received frame in the RX ring.  The leading
 * bytes carry the MACE's auto-appended RCVFS status (byte count, frame
 * status, runt count, collision count -- see mace.h), each followed by
 * a pad byte; the packet data starts at 'data'.
 * NOTE(review): the pad placement presumably comes from how the PSC DMA
 * engine stores the status bytes -- layout inherited from the NetBSD
 * driver, not independently verified here.
 */
struct mace_frame {
	u8	rcvcnt;		/* bits 0-7 of the received byte count */
	u8	pad1;
	u8	rcvsts;		/* frame status; low nibble = count bits 8-11 */
	u8	pad2;
	u8	rntpc;		/* runt packet count */
	u8	pad3;
	u8	rcvcc;		/* receive collision count */
	u8	pad4;
	u32	pad5;
	u32	pad6;
	u8	data[1];	/* frame data starts here */
	/* And frame continues.. */
};

#define PRIV_BYTES	sizeof(struct mace_data)
90
91static int mace_open(struct net_device *dev);
92static int mace_close(struct net_device *dev);
93static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
94static void mace_set_multicast(struct net_device *dev);
95static int mace_set_address(struct net_device *dev, void *addr);
96static void mace_reset(struct net_device *dev);
97static irqreturn_t mace_interrupt(int irq, void *dev_id);
98static irqreturn_t mace_dma_intr(int irq, void *dev_id);
99static void mace_tx_timeout(struct net_device *dev);
100static void __mace_set_address(struct net_device *dev, void *addr);
101
102/*
103 * Load a receive DMA channel with a base address and ring length
104 */
105
/*
 * Load a receive DMA channel with a base address and ring length
 *
 * 'set' selects one of the two PSC register sets (0x00 or 0x10).
 * NOTE(review): the 0x0100/0x9800 command words are undocumented PSC
 * magic inherited from the MacBSD/NetBSD drivers; the write order here
 * (stop, program address/length, restart) must not be changed.
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}
116
117/*
118 * Reset the receive DMA subsystem
119 */
120
/*
 * Reset the receive DMA subsystem
 *
 * Temporarily disables the receiver (ENRCV) while both PSC register
 * sets are reprogrammed, then restores the previous MACCC value.
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;		/* save current config to restore below */

	mace->maccc = maccc & ~ENRCV;	/* stop receiving while we rewire DMA */

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;	/* start on register set 0 */

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}
143
144/*
145 * Reset the transmit DMA subsystem
146 */
147
/*
 * Reset the transmit DMA subsystem
 *
 * Disables the transmitter (ENXMT) while the TX bookkeeping is reset,
 * then restores the previous MACCC value.
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;		/* save config to restore below */
	mace->maccc = maccc & ~ENXMT;	/* stop transmitting during reset */

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;	/* all TX buffers free again */

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}
165
166/*
167 * Disable DMA
168 */
169
/*
 * Disable DMA
 *
 * Shuts down both PSC register sets of the read and write channels.
 * NOTE(review): the control/command words are undocumented PSC magic.
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}
182
/* net_device callbacks; MTU and MAC-address validation use the generic
 * ethernet helpers. */
static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_multicast_list	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};
193
194/*
195 * Not really much of a probe. The hardware table tells us if this
196 * model of Macintrash has a MACE (AV macintoshes)
197 */
198
199static int __devinit mace_probe(struct platform_device *pdev)
200{
201 int j;
202 struct mace_data *mp;
203 unsigned char *addr;
204 struct net_device *dev;
205 unsigned char checksum = 0;
206 static int found = 0;
207 int err;
208
209 if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
210 return -ENODEV;
211
212 found = 1; /* prevent 'finding' one on every device probe */
213
214 dev = alloc_etherdev(PRIV_BYTES);
215 if (!dev)
216 return -ENOMEM;
217
218 mp = netdev_priv(dev);
219
220 mp->device = &pdev->dev;
221 SET_NETDEV_DEV(dev, &pdev->dev);
222
223 dev->base_addr = (u32)MACE_BASE;
224 mp->mace = MACE_BASE;
225
226 dev->irq = IRQ_MAC_MACE;
227 mp->dma_intr = IRQ_MAC_MACE_DMA;
228
229 mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
230
231 /*
232 * The PROM contains 8 bytes which total 0xFF when XOR'd
233 * together. Due to the usual peculiar apple brain damage
234 * the bytes are spaced out in a strange boundary and the
235 * bits are reversed.
236 */
237
238 addr = (void *)MACE_PROM;
239
240 for (j = 0; j < 6; ++j) {
241 u8 v = bitrev8(addr[j<<4]);
242 checksum ^= v;
243 dev->dev_addr[j] = v;
244 }
245 for (; j < 8; ++j) {
246 checksum ^= bitrev8(addr[j<<4]);
247 }
248
249 if (checksum != 0xFF) {
250 free_netdev(dev);
251 return -ENODEV;
252 }
253
254 dev->netdev_ops = &mace_netdev_ops;
255 dev->watchdog_timeo = TX_TIMEOUT;
256
257 printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
258 dev->name, dev->dev_addr);
259
260 err = register_netdev(dev);
261 if (!err)
262 return 0;
263
264 free_netdev(dev);
265 return err;
266}
267
268/*
269 * Reset the chip.
270 */
271
/*
 * Reset the chip.
 *
 * Performs a software reset, masks interrupts, programs transmit/FIFO
 * configuration, reloads the station address, clears the multicast
 * filter and selects the AUI port.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip; poll up to 200 times for SWRST to clear */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;	/* IR clears on read: flush stale interrupts */

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;	/* lock out the reserved test registers */
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;	/* buggy rev: skip ADDRCHG handshake */
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;	/* wait for the chip to acknowledge */
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;	/* 8 sequential writes fill the filter */

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}
324
325/*
326 * Load the address on a mace controller.
327 */
328
/*
 * Load the address on a mace controller.
 *
 * Caller must hold off interrupts and handle MACCC save/restore; this
 * helper only performs the IAC/PADR register dance (with the
 * BROKEN_ADDRCHG_REV workaround) and mirrors the address into
 * dev->dev_addr.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;	/* buggy rev: skip ADDRCHG handshake */
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;	/* wait for the chip to acknowledge */
	}
	/* 6 sequential writes to PADR load the station address */
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}
349
/*
 * ndo_set_mac_address callback: update the station address with
 * interrupts disabled, preserving the current MACCC configuration
 * around the change.  Always returns 0.
 */
static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;	/* save config; __mace_set_address may disturb it */

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}
369
370/*
371 * Open the Macintosh MACE. Most of this is playing with the DMA
372 * engine. The ethernet chip is quite friendly.
373 */
374
/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 *
 * Order: reset chip, grab both interrupts, allocate coherent TX/RX
 * rings, quiesce and reinitialise the PSC DMA channels, then enable
 * the transmitter/receiver and unmask everything except RCVINT
 * (receive completion is delivered via the DMA interrupt instead).
 *
 * Returns 0 on success, -EAGAIN if an IRQ is unavailable, -ENOMEM if
 * a ring allocation fails.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
			N_TX_RING * MACE_BUFF_SIZE,
			&mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
			N_RX_RING * MACE_BUFF_SIZE,
			&mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
	                  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}
437
438/*
439 * Shut down the mace and its interrupt channel
440 */
441
/*
 * Shut down the mace and its interrupt channel
 *
 * NOTE(review): the IRQs and DMA rings acquired in mace_open() are not
 * released here; they are freed in mac_mace_device_remove().
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}
453
454/*
455 * Transmit a frame
456 */
457
/*
 * Transmit a frame
 *
 * Single-buffer TX: the queue is stopped on every transmit and re-woken
 * from the interrupt handlers once the DMA completes.  Returns
 * NETDEV_TX_OK, or NETDEV_TX_BUSY if no TX buffer is free (which the
 * queue discipline should normally prevent).
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;	/* claim the buffer before re-enabling interrupts */
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32)  mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;	/* alternate between the two PSC register sets */

	dev_kfree_skb(skb);	/* data already copied; skb no longer needed */

	return NETDEV_TX_OK;
}
493
/*
 * ndo_set_multicast_list callback: program promiscuous mode or the
 * 64-bit logical (multicast hash) address filter, honouring the
 * BROKEN_ADDRCHG_REV workaround.  Runs with interrupts disabled and
 * restores the saved MACCC value on exit.
 */
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;	/* save config to restore at the end */
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			/* accept every multicast frame */
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;	/* buggy rev: skip handshake */
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;	/* wait for chip acknowledge */
		}
		/* 8 sequential writes load the 64-bit filter */
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}
544
/*
 * Account for the error/counter interrupt causes in 'intr' (a snapshot
 * of the IR register).  MPC and RNTPC are hardware counters that clear
 * on read, so they must be read (and folded into the stats) every time;
 * the *CO bits signal that a counter already wrapped past 256.
 */
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;	/* rate-limit log spam */

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;	/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc; /* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}
566
/*
 * MACE chip interrupt: handles counter/error causes and transmit
 * completion status.  Receive completion arrives through the PSC DMA
 * interrupt (mace_dma_intr), not here.
 */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register (clears on read) */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, reseting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	/* restart the queue if a TX buffer is free again */
	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}
618
/*
 * ndo_tx_timeout callback: the watchdog fired, so reset the chip and
 * both DMA subsystems, mark all TX buffers free, restart the queue and
 * re-enable the transmitter/receiver (mirroring mace_open()).
 */
static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;	/* everything in flight was lost */
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}
646
647/*
648 * Handle a newly arrived frame
649 */
650
/*
 * Handle a newly arrived frame
 *
 * 'mf' points into the RX DMA ring.  On error, only the stats are
 * updated; otherwise the frame is copied into a fresh skb (12-bit
 * length reassembled from rcvcnt plus the low nibble of rcvsts) and
 * handed to the network stack.
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		/* low 8 bits from rcvcnt, bits 8-11 from the status byte */
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );

		skb = dev_alloc_skb(frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);	/* align IP header on 16-byte boundary */
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}
685
686/*
687 * The PSC has passed us a DMA interrupt event.
688 */
689
/*
 * The PSC has passed us a DMA interrupt event.
 *
 * Processes completed RX frames from the active ring half (flipping to
 * the other PSC register set when one is exhausted) and reclaims the
 * TX buffer on write completion.  Status bit 0x2000 appears to signal
 * a channel error and triggers a full reset of that direction.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	/* re-read until stable, then check the 'ours' bits (PSC magic) */
	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000)) return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		/* remaining slots tell us how far DMA has filled the ring */
		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packages */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;	/* next completion is the other set */
		mp->tx_count++;		/* TX buffer is free again */
	}
	return IRQ_HANDLED;
}
751
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");	/* autoload on the "macmace" platform device */
755
756static int __devexit mac_mace_device_remove (struct platform_device *pdev)
757{
758 struct net_device *dev = platform_get_drvdata(pdev);
759 struct mace_data *mp = netdev_priv(dev);
760
761 unregister_netdev(dev);
762
763 free_irq(dev->irq, dev);
764 free_irq(IRQ_MAC_MACE_DMA, dev);
765
766 dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
767 mp->rx_ring, mp->rx_ring_phys);
768 dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
769 mp->tx_ring, mp->tx_ring_phys);
770
771 free_netdev(dev);
772
773 return 0;
774}
775
/* Platform driver glue; matched by name against the "macmace" device. */
static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name	= mac_mace_string,
		.owner	= THIS_MODULE,
	},
};
784
/* Module entry point: bail out on non-Mac hardware, else register the
 * platform driver (probing happens when the device is matched). */
static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

/* Module exit: unregister the platform driver; remove() does the rest. */
static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);