Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/Kconfig	4
-rw-r--r--	drivers/net/Space.c	4
-rw-r--r--	drivers/net/macmace.c	591
3 files changed, 364 insertions, 235 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 092e4cf07430..69dba62e0bad 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -337,8 +337,8 @@ config MACSONIC
 	  be called macsonic.
 
 config MACMACE
-	bool "Macintosh (AV) onboard MACE ethernet (EXPERIMENTAL)"
-	depends on NET_ETHERNET && MAC && EXPERIMENTAL
+	bool "Macintosh (AV) onboard MACE ethernet"
+	depends on NET_ETHERNET && MAC
 	select CRC32
 	help
 	  Support for the onboard AMD 79C940 MACE Ethernet controller used in
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index dd8ed456c8b2..1c3e293fbaf7 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -83,7 +83,6 @@ extern struct net_device *bagetlance_probe(int unit);
 extern struct net_device *mvme147lance_probe(int unit);
 extern struct net_device *tc515_probe(int unit);
 extern struct net_device *lance_probe(int unit);
-extern struct net_device *mace_probe(int unit);
 extern struct net_device *mac8390_probe(int unit);
 extern struct net_device *mac89x0_probe(int unit);
 extern struct net_device *mc32_probe(int unit);
@@ -274,9 +273,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
 #ifdef CONFIG_MVME147_NET	/* MVME147 internal Ethernet */
 	{mvme147lance_probe, 0},
 #endif
-#ifdef CONFIG_MACMACE	/* Mac 68k Quadra AV builtin Ethernet */
-	{mace_probe, 0},
-#endif
 #ifdef CONFIG_MAC8390	/* NuBus NS8390-based cards */
 	{mac8390_probe, 0},
 #endif
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 27911c07558d..fef3193121f9 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -12,6 +12,11 @@
  *	Copyright (C) 1998 Alan Cox <alan@redhat.com>
  *
  *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
+ *
+ *	Copyright (C) 2007 Finn Thain
+ *
+ *	Converted to DMA API, converted to unified driver model,
+ *	sync'd some routines with mace.c and fixed various bugs.
  */
 
 
@@ -23,8 +28,9 @@
 #include <linux/string.h>
 #include <linux/crc32.h>
 #include <linux/bitrev.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
 #include <asm/io.h>
-#include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/macintosh.h>
 #include <asm/macints.h>
@@ -32,13 +38,20 @@
 #include <asm/page.h>
 #include "mace.h"
 
-#define N_TX_RING	1
-#define N_RX_RING	8
-#define N_RX_PAGES	((N_RX_RING * 0x0800 + PAGE_SIZE - 1) / PAGE_SIZE)
+static char mac_mace_string[] = "macmace";
+static struct platform_device *mac_mace_device;
+
+#define N_TX_BUFF_ORDER	0
+#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
+#define N_RX_BUFF_ORDER	3
+#define N_RX_RING	(1 << N_RX_BUFF_ORDER)
+
 #define TX_TIMEOUT	HZ
 
-/* Bits in transmit DMA status */
-#define TX_DMA_ERR	0x80
+#define MACE_BUFF_SIZE	0x800
+
+/* Chip rev needs workaround on HW & multicast addr change */
+#define BROKEN_ADDRCHG_REV	0x0941
 
 /* The MACE is simply wired down on a Mac68K box */
 
@@ -47,40 +60,46 @@
 
 struct mace_data {
 	volatile struct mace *mace;
-	volatile unsigned char *tx_ring;
-	volatile unsigned char *tx_ring_phys;
-	volatile unsigned char *rx_ring;
-	volatile unsigned char *rx_ring_phys;
+	unsigned char *tx_ring;
+	dma_addr_t tx_ring_phys;
+	unsigned char *rx_ring;
+	dma_addr_t rx_ring_phys;
 	int dma_intr;
 	struct net_device_stats stats;
 	int rx_slot, rx_tail;
 	int tx_slot, tx_sloti, tx_count;
+	int chipid;
+	struct device *device;
 };
 
 struct mace_frame {
-	u16 len;
-	u16 status;
-	u16 rntpc;
-	u16 rcvcc;
-	u32 pad1;
-	u32 pad2;
+	u8 rcvcnt;
+	u8 pad1;
+	u8 rcvsts;
+	u8 pad2;
+	u8 rntpc;
+	u8 pad3;
+	u8 rcvcc;
+	u8 pad4;
+	u32 pad5;
+	u32 pad6;
 	u8 data[1];
 	/* And frame continues.. */
 };
 
 #define PRIV_BYTES	sizeof(struct mace_data)
 
-extern void psc_debug_dump(void);
-
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static struct net_device_stats *mace_stats(struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
+static void mace_reset(struct net_device *dev);
 static irqreturn_t mace_interrupt(int irq, void *dev_id);
 static irqreturn_t mace_dma_intr(int irq, void *dev_id);
 static void mace_tx_timeout(struct net_device *dev);
+static void __mace_set_address(struct net_device *dev, void *addr);
 
 /*
  * Load a receive DMA channel with a base address and ring length
@@ -88,7 +107,7 @@ static void mace_tx_timeout(struct net_device *dev);
 
 static void mace_load_rxdma_base(struct net_device *dev, int set)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 
 	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
 	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
@@ -103,7 +122,7 @@ static void mace_load_rxdma_base(struct net_device *dev, int set)
 
 static void mace_rxdma_reset(struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mace = mp->mace;
 	u8 maccc = mace->maccc;
 
@@ -130,7 +149,7 @@ static void mace_rxdma_reset(struct net_device *dev)
 
 static void mace_txdma_reset(struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mace = mp->mace;
 	u8 maccc;
 
@@ -168,7 +187,7 @@ static void mace_dma_off(struct net_device *dev)
  * model of Macintrash has a MACE (AV macintoshes)
  */
 
-struct net_device *mace_probe(int unit)
+static int __devinit mace_probe(struct platform_device *pdev)
 {
 	int j;
 	struct mace_data *mp;
@@ -179,24 +198,28 @@ struct net_device *mace_probe(int unit)
 	int err;
 
 	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	found = 1;	/* prevent 'finding' one on every device probe */
 
 	dev = alloc_etherdev(PRIV_BYTES);
 	if (!dev)
-		return ERR_PTR(-ENOMEM);
+		return -ENOMEM;
 
-	if (unit >= 0)
-		sprintf(dev->name, "eth%d", unit);
+	mp = netdev_priv(dev);
+
+	mp->device = &pdev->dev;
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	SET_MODULE_OWNER(dev);
 
-	mp = (struct mace_data *) dev->priv;
 	dev->base_addr = (u32)MACE_BASE;
 	mp->mace = (volatile struct mace *) MACE_BASE;
 
 	dev->irq = IRQ_MAC_MACE;
 	mp->dma_intr = IRQ_MAC_MACE_DMA;
 
+	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;
+
 	/*
 	 * The PROM contains 8 bytes which total 0xFF when XOR'd
 	 * together. Due to the usual peculiar apple brain damage
@@ -217,7 +240,7 @@ struct net_device *mace_probe(int unit)
 
 	if (checksum != 0xFF) {
 		free_netdev(dev);
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 	}
 
 	memset(&mp->stats, 0, sizeof(mp->stats));
@@ -237,22 +260,98 @@ struct net_device *mace_probe(int unit)
 
 	err = register_netdev(dev);
 	if (!err)
-		return dev;
+		return 0;
 
 	free_netdev(dev);
-	return ERR_PTR(err);
+	return err;
+}
+
+/*
+ *	Reset the chip.
+ */
+
+static void mace_reset(struct net_device *dev)
+{
+	struct mace_data *mp = netdev_priv(dev);
+	volatile struct mace *mb = mp->mace;
+	int i;
+
+	/* soft-reset the chip */
+	i = 200;
+	while (--i) {
+		mb->biucc = SWRST;
+		if (mb->biucc & SWRST) {
+			udelay(10);
+			continue;
+		}
+		break;
+	}
+	if (!i) {
+		printk(KERN_ERR "macmace: cannot reset chip!\n");
+		return;
+	}
+
+	mb->maccc = 0;	/* turn off tx, rx */
+	mb->imr = 0xFF;	/* disable all intrs for now */
+	i = mb->ir;
+
+	mb->biucc = XMTSP_64;
+	mb->utr = RTRD;
+	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;
+
+	mb->xmtfc = AUTO_PAD_XMIT; /* auto-pad short frames */
+	mb->rcvfc = 0;
+
+	/* load up the hardware address */
+	__mace_set_address(dev, dev->dev_addr);
+
+	/* clear the multicast filter */
+	if (mp->chipid == BROKEN_ADDRCHG_REV)
+		mb->iac = LOGADDR;
+	else {
+		mb->iac = ADDRCHG | LOGADDR;
+		while ((mb->iac & ADDRCHG) != 0)
+			;
+	}
+	for (i = 0; i < 8; ++i)
+		mb->ladrf = 0;
+
+	/* done changing address */
+	if (mp->chipid != BROKEN_ADDRCHG_REV)
+		mb->iac = 0;
+
+	mb->plscc = PORTSEL_AUI;
 }
 
 /*
  *	Load the address on a mace controller.
  */
 
-static int mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, void *addr)
 {
-	unsigned char *p = addr;
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
+	unsigned char *p = addr;
 	int i;
+
+	/* load up the hardware address */
+	if (mp->chipid == BROKEN_ADDRCHG_REV)
+		mb->iac = PHYADDR;
+	else {
+		mb->iac = ADDRCHG | PHYADDR;
+		while ((mb->iac & ADDRCHG) != 0)
+			;
+	}
+	for (i = 0; i < 6; ++i)
+		mb->padr = dev->dev_addr[i] = p[i];
+	if (mp->chipid != BROKEN_ADDRCHG_REV)
+		mb->iac = 0;
+}
+
+static int mace_set_address(struct net_device *dev, void *addr)
+{
+	struct mace_data *mp = netdev_priv(dev);
+	volatile struct mace *mb = mp->mace;
 	unsigned long flags;
 	u8 maccc;
 
@@ -260,15 +359,10 @@ static int mace_set_address(struct net_device *dev, void *addr)
 
 	maccc = mb->maccc;
 
-	/* load up the hardware address */
-	mb->iac = ADDRCHG | PHYADDR;
-	while ((mb->iac & ADDRCHG) != 0);
-
-	for (i = 0; i < 6; ++i) {
-		mb->padr = dev->dev_addr[i] = p[i];
-	}
+	__mace_set_address(dev, addr);
 
 	mb->maccc = maccc;
+
 	local_irq_restore(flags);
 
 	return 0;
@@ -281,31 +375,11 @@ static int mace_set_address(struct net_device *dev, void *addr)
 
 static int mace_open(struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
-#if 0
-	int i;
 
-	i = 200;
-	while (--i) {
-		mb->biucc = SWRST;
-		if (mb->biucc & SWRST) {
-			udelay(10);
-			continue;
-		}
-		break;
-	}
-	if (!i) {
-		printk(KERN_ERR "%s: software reset failed!!\n", dev->name);
-		return -EAGAIN;
-	}
-#endif
-
-	mb->biucc = XMTSP_64;
-	mb->fifocc = XMTFW_16 | RCVFW_64 | XMTFWU | RCVFWU | XMTBRST | RCVBRST;
-	mb->xmtfc = AUTO_PAD_XMIT;
-	mb->plscc = PORTSEL_AUI;
-	/* mb->utr = RTRD; */
+	/* reset the chip */
+	mace_reset(dev);
 
 	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
 		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
@@ -319,25 +393,21 @@ static int mace_open(struct net_device *dev)
 
 	/* Allocate the DMA ring buffers */
 
-	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
-	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);
-
-	if (mp->tx_ring==NULL || mp->rx_ring==NULL) {
-		if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
-		if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
-		free_irq(dev->irq, dev);
-		free_irq(mp->dma_intr, dev);
-		printk(KERN_ERR "%s: unable to allocate DMA buffers\n", dev->name);
-		return -ENOMEM;
+	mp->tx_ring = dma_alloc_coherent(mp->device,
+			N_TX_RING * MACE_BUFF_SIZE,
+			&mp->tx_ring_phys, GFP_KERNEL);
+	if (mp->tx_ring == NULL) {
+		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+		goto out1;
 	}
 
-	mp->rx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->rx_ring);
-	mp->tx_ring_phys = (unsigned char *) virt_to_bus((void *)mp->tx_ring);
-
-	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */
-
-	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
-	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);
+	mp->rx_ring = dma_alloc_coherent(mp->device,
+			N_RX_RING * MACE_BUFF_SIZE,
+			&mp->rx_ring_phys, GFP_KERNEL);
+	if (mp->rx_ring == NULL) {
+		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+		goto out2;
+	}
 
 	mace_dma_off(dev);
 
@@ -348,34 +418,22 @@ static int mace_open(struct net_device *dev)
 	psc_write_word(PSC_ENETWR_CTL, 0x0400);
 	psc_write_word(PSC_ENETRD_CTL, 0x0400);
 
-#if 0
-	/* load up the hardware address */
-
-	mb->iac = ADDRCHG | PHYADDR;
-
-	while ((mb->iac & ADDRCHG) != 0);
-
-	for (i = 0; i < 6; ++i)
-		mb->padr = dev->dev_addr[i];
-
-	/* clear the multicast filter */
-	mb->iac = ADDRCHG | LOGADDR;
-
-	while ((mb->iac & ADDRCHG) != 0);
-
-	for (i = 0; i < 8; ++i)
-		mb->ladrf = 0;
-
-	mb->plscc = PORTSEL_GPSI + ENPLSIO;
-
-	mb->maccc = ENXMT | ENRCV;
-	mb->imr = RCVINT;
-#endif
-
 	mace_rxdma_reset(dev);
 	mace_txdma_reset(dev);
 
+	/* turn it on! */
+	mb->maccc = ENXMT | ENRCV;
+	/* enable all interrupts except receive interrupts */
+	mb->imr = RCVINT;
 	return 0;
+
+out2:
+	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
+			  mp->tx_ring, mp->tx_ring_phys);
+out1:
+	free_irq(dev->irq, dev);
+	free_irq(mp->dma_intr, dev);
+	return -ENOMEM;
 }
 
 /*
@@ -384,19 +442,13 @@ static int mace_open(struct net_device *dev)
 
 static int mace_close(struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
 
 	mb->maccc = 0;		/* disable rx and tx */
 	mb->imr = 0xFF;		/* disable all irqs */
 	mace_dma_off(dev);	/* disable rx and tx dma */
 
-	free_irq(dev->irq, dev);
-	free_irq(IRQ_MAC_MACE_DMA, dev);
-
-	free_pages((u32) mp->rx_ring, N_RX_PAGES);
-	free_pages((u32) mp->tx_ring, 0);
-
 	return 0;
 }
 
@@ -406,15 +458,20 @@ static int mace_close(struct net_device *dev)
 
 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
+	unsigned long flags;
 
-	/* Stop the queue if the buffer is full */
+	/* Stop the queue since there's only the one buffer */
 
+	local_irq_save(flags);
+	netif_stop_queue(dev);
 	if (!mp->tx_count) {
-		netif_stop_queue(dev);
-		return 1;
+		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
+		local_irq_restore(flags);
+		return NETDEV_TX_BUSY;
 	}
 	mp->tx_count--;
+	local_irq_restore(flags);
 
 	mp->stats.tx_packets++;
 	mp->stats.tx_bytes += skb->len;
@@ -432,23 +489,26 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 
 	dev_kfree_skb(skb);
 
-	return 0;
+	dev->trans_start = jiffies;
+	return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *mace_stats(struct net_device *dev)
 {
-	struct mace_data *p = (struct mace_data *) dev->priv;
-	return &p->stats;
+	struct mace_data *mp = netdev_priv(dev);
+	return &mp->stats;
 }
 
 static void mace_set_multicast(struct net_device *dev)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
 	int i, j;
 	u32 crc;
 	u8 maccc;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	maccc = mb->maccc;
 	mb->maccc &= ~PROM;
 
@@ -473,116 +533,122 @@ static void mace_set_multicast(struct net_device *dev)
 		}
 	}
 
-		mb->iac = ADDRCHG | LOGADDR;
-		while (mb->iac & ADDRCHG);
-
-		for (i = 0; i < 8; ++i) {
-			mb->ladrf = multicast_filter[i];
+		if (mp->chipid == BROKEN_ADDRCHG_REV)
+			mb->iac = LOGADDR;
+		else {
+			mb->iac = ADDRCHG | LOGADDR;
+			while ((mb->iac & ADDRCHG) != 0)
+				;
 		}
+		for (i = 0; i < 8; ++i)
+			mb->ladrf = multicast_filter[i];
+		if (mp->chipid != BROKEN_ADDRCHG_REV)
+			mb->iac = 0;
 	}
 
 	mb->maccc = maccc;
+	local_irq_restore(flags);
 }
 
-/*
- *	Miscellaneous interrupts are handled here. We may end up
- *	having to bash the chip on the head for bad errors
- */
-
 static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
 {
 	volatile struct mace *mb = mp->mace;
 	static int mace_babbles, mace_jabbers;
 
-	if (intr & MPCO) {
+	if (intr & MPCO)
 		mp->stats.rx_missed_errors += 256;
-	}
-	mp->stats.rx_missed_errors += mb->mpc;	/* reading clears it */
-
-	if (intr & RNTPCO) {
+	mp->stats.rx_missed_errors += mb->mpc;	/* reading clears it */
+	if (intr & RNTPCO)
 		mp->stats.rx_length_errors += 256;
-	}
-	mp->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
-
-	if (intr & CERR) {
+	mp->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
+	if (intr & CERR)
 		++mp->stats.tx_heartbeat_errors;
-	}
-	if (intr & BABBLE) {
-		if (mace_babbles++ < 4) {
-			printk(KERN_DEBUG "mace: babbling transmitter\n");
-		}
-	}
-	if (intr & JABBER) {
-		if (mace_jabbers++ < 4) {
-			printk(KERN_DEBUG "mace: jabbering transceiver\n");
-		}
-	}
+	if (intr & BABBLE)
+		if (mace_babbles++ < 4)
+			printk(KERN_DEBUG "macmace: babbling transmitter\n");
+	if (intr & JABBER)
+		if (mace_jabbers++ < 4)
+			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
 }
 
-/*
- * A transmit error has occurred. (We kick the transmit side from
- * the DMA completion)
- */
-
-static void mace_xmit_error(struct net_device *dev)
+static irqreturn_t mace_interrupt(int irq, void *dev_id)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
-	u8 xmtfs, xmtrc;
+	int intr, fs;
+	unsigned int flags;
 
-	xmtfs = mb->xmtfs;
-	xmtrc = mb->xmtrc;
+	/* don't want the dma interrupt handler to fire */
+	local_irq_save(flags);
 
-	if (xmtfs & XMTSV) {
-		if (xmtfs & UFLO) {
-			printk("%s: DMA underrun.\n", dev->name);
-			mp->stats.tx_errors++;
-			mp->stats.tx_fifo_errors++;
-			mace_txdma_reset(dev);
+	intr = mb->ir; /* read interrupt register */
+	mace_handle_misc_intrs(mp, intr);
+
+	if (intr & XMTINT) {
+		fs = mb->xmtfs;
+		if ((fs & XMTSV) == 0) {
+			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
+			mace_reset(dev);
+			/*
+			 * XXX mace likes to hang the machine after a xmtfs error.
+			 * This is hard to reproduce, reseting *may* help
+			 */
 		}
-		if (xmtfs & RTRY) {
-			mp->stats.collisions++;
+		/* dma should have finished */
+		if (!mp->tx_count) {
+			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
+		}
+		/* Update stats */
+		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
+			++mp->stats.tx_errors;
+			if (fs & LCAR)
+				++mp->stats.tx_carrier_errors;
+			else if (fs & (UFLO|LCOL|RTRY)) {
+				++mp->stats.tx_aborted_errors;
+				if (mb->xmtfs & UFLO) {
+					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
+					mp->stats.tx_fifo_errors++;
+					mace_txdma_reset(dev);
+				}
+			}
 		}
 	}
-}
 
-/*
- * A receive interrupt occurred.
- */
+	if (mp->tx_count)
+		netif_wake_queue(dev);
 
-static void mace_recv_interrupt(struct net_device *dev)
-{
-/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
-//	volatile struct mace *mb = mp->mace;
-}
+	local_irq_restore(flags);
 
-/*
- * Process the chip interrupt
- */
+	return IRQ_HANDLED;
+}
 
-static irqreturn_t mace_interrupt(int irq, void *dev_id)
+static void mace_tx_timeout(struct net_device *dev)
 {
-	struct net_device *dev = (struct net_device *) dev_id;
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	volatile struct mace *mb = mp->mace;
-	u8 ir;
+	unsigned long flags;
 
-	ir = mb->ir;
-	mace_handle_misc_intrs(mp, ir);
+	local_irq_save(flags);
 
-	if (ir & XMTINT) {
-		mace_xmit_error(dev);
-	}
-	if (ir & RCVINT) {
-		mace_recv_interrupt(dev);
-	}
-	return IRQ_HANDLED;
-}
+	/* turn off both tx and rx and reset the chip */
+	mb->maccc = 0;
+	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
+	mace_txdma_reset(dev);
+	mace_reset(dev);
 
-static void mace_tx_timeout(struct net_device *dev)
-{
-/*	struct mace_data *mp = (struct mace_data *) dev->priv; */
-//	volatile struct mace *mb = mp->mace;
+	/* restart rx dma */
+	mace_rxdma_reset(dev);
+
+	mp->tx_count = N_TX_RING;
+	netif_wake_queue(dev);
+
+	/* turn it on! */
+	mb->maccc = ENXMT | ENRCV;
+	/* enable all interrupts except receive interrupts */
+	mb->imr = RCVINT;
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -591,40 +657,39 @@ static void mace_tx_timeout(struct net_device *dev)
 
 static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 {
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	struct sk_buff *skb;
+	unsigned int frame_status = mf->rcvsts;
 
-	if (mf->status & RS_OFLO) {
-		printk("%s: fifo overflow.\n", dev->name);
-		mp->stats.rx_errors++;
-		mp->stats.rx_fifo_errors++;
-	}
-	if (mf->status&(RS_CLSN|RS_FRAMERR|RS_FCSERR))
+	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
 		mp->stats.rx_errors++;
+		if (frame_status & RS_OFLO) {
+			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
+			mp->stats.rx_fifo_errors++;
+		}
+		if (frame_status & RS_CLSN)
+			mp->stats.collisions++;
+		if (frame_status & RS_FRAMERR)
+			mp->stats.rx_frame_errors++;
+		if (frame_status & RS_FCSERR)
+			mp->stats.rx_crc_errors++;
+	} else {
+		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8 );
 
-	if (mf->status&RS_CLSN) {
-		mp->stats.collisions++;
-	}
-	if (mf->status&RS_FRAMERR) {
-		mp->stats.rx_frame_errors++;
-	}
-	if (mf->status&RS_FCSERR) {
-		mp->stats.rx_crc_errors++;
-	}
-
-	skb = dev_alloc_skb(mf->len+2);
-	if (!skb) {
-		mp->stats.rx_dropped++;
-		return;
+		skb = dev_alloc_skb(frame_length + 2);
+		if (!skb) {
+			mp->stats.rx_dropped++;
+			return;
+		}
+		skb_reserve(skb, 2);
+		memcpy(skb_put(skb, frame_length), mf->data, frame_length);
+
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
+		dev->last_rx = jiffies;
+		mp->stats.rx_packets++;
+		mp->stats.rx_bytes += frame_length;
 	}
-	skb_reserve(skb,2);
-	memcpy(skb_put(skb, mf->len), mf->data, mf->len);
-
-	skb->protocol = eth_type_trans(skb, dev);
-	netif_rx(skb);
-	dev->last_rx = jiffies;
-	mp->stats.rx_packets++;
-	mp->stats.rx_bytes += mf->len;
 }
 
 /*
@@ -634,7 +699,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 static irqreturn_t mace_dma_intr(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
-	struct mace_data *mp = (struct mace_data *) dev->priv;
+	struct mace_data *mp = netdev_priv(dev);
 	int left, head;
 	u16 status;
 	u32 baka;
@@ -661,7 +726,8 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id)
 	/* Loop through the ring buffer and process new packages */
 
 	while (mp->rx_tail < head) {
-		mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
+		mace_dma_rx_frame(dev, (struct mace_frame*) (mp->rx_ring
+				+ (mp->rx_tail * MACE_BUFF_SIZE)));
 		mp->rx_tail++;
 	}
 
@@ -688,9 +754,76 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id)
 		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
 		mp->tx_sloti ^= 0x10;
 		mp->tx_count++;
-		netif_wake_queue(dev);
 	}
 	return IRQ_HANDLED;
 }
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
+
+static int __devexit mac_mace_device_remove (struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct mace_data *mp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	free_irq(dev->irq, dev);
+	free_irq(IRQ_MAC_MACE_DMA, dev);
+
+	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
+			  mp->rx_ring, mp->rx_ring_phys);
+	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
+			  mp->tx_ring, mp->tx_ring_phys);
+
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver mac_mace_driver = {
+	.probe  = mace_probe,
+	.remove = __devexit_p(mac_mace_device_remove),
+	.driver	= {
+		.name	= mac_mace_string,
+	},
+};
+
+static int __init mac_mace_init_module(void)
+{
+	int err;
+
+	if ((err = platform_driver_register(&mac_mace_driver))) {
+		printk(KERN_ERR "Driver registration failed\n");
+		return err;
+	}
+
+	mac_mace_device = platform_device_alloc(mac_mace_string, 0);
+	if (!mac_mace_device)
+		goto out_unregister;
+
+	if (platform_device_add(mac_mace_device)) {
+		platform_device_put(mac_mace_device);
+		mac_mace_device = NULL;
+	}
+
+	return 0;
+
+out_unregister:
+	platform_driver_unregister(&mac_mace_driver);
+
+	return -ENOMEM;
+}
+
+static void __exit mac_mace_cleanup_module(void)
+{
+	platform_driver_unregister(&mac_mace_driver);
+
+	if (mac_mace_device) {
+		platform_device_unregister(mac_mace_device);
+		mac_mace_device = NULL;
+	}
+}
+
+module_init(mac_mace_init_module);
+module_exit(mac_mace_cleanup_module);