Diffstat (limited to 'drivers/net/macmace.c')
-rw-r--r--  drivers/net/macmace.c  |  88
1 file changed, 44 insertions, 44 deletions
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 79a6fc139757..696d5513e558 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -63,7 +63,7 @@ struct mace_frame {
 	u16 rcvcc;
 	u32 pad1;
 	u32 pad2;
 	u8 data[1];
 	/* And frame continues.. */
 };

@@ -118,17 +118,17 @@ static void mace_rxdma_reset(struct net_device *dev)
 	struct mace_data *mp = (struct mace_data *) dev->priv;
 	volatile struct mace *mace = mp->mace;
 	u8 maccc = mace->maccc;

 	mace->maccc = maccc & ~ENRCV;

 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
 	mace_load_rxdma_base(dev, 0x00);
 	psc_write_word(PSC_ENETRD_CTL, 0x0400);

 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
 	mace_load_rxdma_base(dev, 0x10);
 	psc_write_word(PSC_ENETRD_CTL, 0x0400);

 	mace->maccc = maccc;
 	mp->rx_slot = 0;

@@ -139,7 +139,7 @@ static void mace_rxdma_reset(struct net_device *dev)
 /*
  * Reset the transmit DMA subsystem
  */

 static void mace_txdma_reset(struct net_device *dev)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
@@ -161,7 +161,7 @@ static void mace_txdma_reset(struct net_device *dev)
 /*
  * Disable DMA
  */

 static void mace_dma_off(struct net_device *dev)
 {
 	psc_write_word(PSC_ENETRD_CTL, 0x8800);
@@ -179,7 +179,7 @@ static void mace_dma_off(struct net_device *dev)
  * Not really much of a probe. The hardware table tells us if this
  * model of Macintrash has a MACE (AV macintoshes)
  */

 struct net_device *mace_probe(int unit)
 {
 	int j;
@@ -189,7 +189,7 @@ struct net_device *mace_probe(int unit)
 	unsigned char checksum = 0;
 	static int found = 0;
 	int err;

 	if (found || macintosh_config->ether_type != MAC_ETHER_MACE)
 		return ERR_PTR(-ENODEV);

@@ -205,7 +205,7 @@ struct net_device *mace_probe(int unit)
 	mp = (struct mace_data *) dev->priv;
 	dev->base_addr = (u32)MACE_BASE;
 	mp->mace = (volatile struct mace *) MACE_BASE;

 	dev->irq = IRQ_MAC_MACE;
 	mp->dma_intr = IRQ_MAC_MACE_DMA;

@@ -217,7 +217,7 @@ struct net_device *mace_probe(int unit)
 	 */

 	addr = (void *)MACE_PROM;

 	for (j = 0; j < 6; ++j) {
 		u8 v=bitrev(addr[j<<4]);
 		checksum ^= v;
@@ -226,7 +226,7 @@ struct net_device *mace_probe(int unit)
 	for (; j < 8; ++j) {
 		checksum ^= bitrev(addr[j<<4]);
 	}

 	if (checksum != 0xFF) {
 		free_netdev(dev);
 		return ERR_PTR(-ENODEV);
@@ -275,7 +275,7 @@ static int mace_set_address(struct net_device *dev, void *addr)
 	/* load up the hardware address */
 	mb->iac = ADDRCHG | PHYADDR;
 	while ((mb->iac & ADDRCHG) != 0);

 	for (i = 0; i < 6; ++i) {
 		mb->padr = dev->dev_addr[i] = p[i];
 	}
@@ -290,7 +290,7 @@ static int mace_set_address(struct net_device *dev, void *addr)
  * Open the Macintosh MACE. Most of this is playing with the DMA
  * engine. The ethernet chip is quite friendly.
  */

 static int mace_open(struct net_device *dev)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
@@ -333,7 +333,7 @@ static int mace_open(struct net_device *dev)

 	mp->rx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, N_RX_PAGES);
 	mp->tx_ring = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, 0);

 	if (mp->tx_ring==NULL || mp->rx_ring==NULL) {
 		if (mp->rx_ring) free_pages((u32) mp->rx_ring, N_RX_PAGES);
 		if (mp->tx_ring) free_pages((u32) mp->tx_ring, 0);
@@ -348,7 +348,7 @@ static int mace_open(struct net_device *dev)

 	/* We want the Rx buffer to be uncached and the Tx buffer to be writethrough */

 	kernel_set_cachemode((void *)mp->rx_ring, N_RX_PAGES * PAGE_SIZE, IOMAP_NOCACHE_NONSER);
 	kernel_set_cachemode((void *)mp->tx_ring, PAGE_SIZE, IOMAP_WRITETHROUGH);

 	mace_dma_off(dev);
@@ -362,11 +362,11 @@ static int mace_open(struct net_device *dev)

 #if 0
 	/* load up the hardware address */

 	mb->iac = ADDRCHG | PHYADDR;

 	while ((mb->iac & ADDRCHG) != 0);

 	for (i = 0; i < 6; ++i)
 		mb->padr = dev->dev_addr[i];

@@ -374,7 +374,7 @@ static int mace_open(struct net_device *dev)
 	mb->iac = ADDRCHG | LOGADDR;

 	while ((mb->iac & ADDRCHG) != 0);

 	for (i = 0; i < 8; ++i)
 		mb->ladrf = 0;

@@ -386,14 +386,14 @@ static int mace_open(struct net_device *dev)

 	mace_rxdma_reset(dev);
 	mace_txdma_reset(dev);

 	return 0;
 }

 /*
  * Shut down the mace and its interrupt channel
  */

 static int mace_close(struct net_device *dev)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
@@ -415,7 +415,7 @@ static int mace_close(struct net_device *dev)
 /*
  * Transmit a frame
  */

 static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
@@ -427,7 +427,7 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 		return 1;
 	}
 	mp->tx_count--;

 	mp->stats.tx_packets++;
 	mp->stats.tx_bytes += skb->len;

@@ -488,7 +488,7 @@ static void mace_set_multicast(struct net_device *dev)

 	mb->iac = ADDRCHG | LOGADDR;
 	while (mb->iac & ADDRCHG);

 	for (i = 0; i < 8; ++i) {
 		mb->ladrf = multicast_filter[i];
 	}
@@ -498,10 +498,10 @@ static void mace_set_multicast(struct net_device *dev)
 }

 /*
  * Miscellaneous interrupts are handled here. We may end up
  * having to bash the chip on the head for bad errors
  */

 static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
 {
 	volatile struct mace *mb = mp->mace;
@@ -536,16 +536,16 @@ static void mace_handle_misc_intrs(struct mace_data *mp, int intr)
  * A transmit error has occurred. (We kick the transmit side from
  * the DMA completion)
  */

 static void mace_xmit_error(struct net_device *dev)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
 	volatile struct mace *mb = mp->mace;
 	u8 xmtfs, xmtrc;

 	xmtfs = mb->xmtfs;
 	xmtrc = mb->xmtrc;

 	if (xmtfs & XMTSV) {
 		if (xmtfs & UFLO) {
 			printk("%s: DMA underrun.\n", dev->name);
@@ -556,13 +556,13 @@ static void mace_xmit_error(struct net_device *dev)
 		if (xmtfs & RTRY) {
 			mp->stats.collisions++;
 		}
 	}
 }

 /*
  * A receive interrupt occurred.
  */

 static void mace_recv_interrupt(struct net_device *dev)
 {
 /* struct mace_data *mp = (struct mace_data *) dev->priv; */
@@ -572,17 +572,17 @@ static void mace_recv_interrupt(struct net_device *dev)
 /*
  * Process the chip interrupt
  */

 static irqreturn_t mace_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
 	struct mace_data *mp = (struct mace_data *) dev->priv;
 	volatile struct mace *mb = mp->mace;
 	u8 ir;

 	ir = mb->ir;
 	mace_handle_misc_intrs(mp, ir);

 	if (ir & XMTINT) {
 		mace_xmit_error(dev);
 	}
@@ -601,7 +601,7 @@ static void mace_tx_timeout(struct net_device *dev)
 /*
  * Handle a newly arrived frame
  */

 static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 {
 	struct mace_data *mp = (struct mace_data *) dev->priv;
@@ -614,7 +614,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 	}
 	if (mf->status&(RS_CLSN|RS_FRAMERR|RS_FCSERR))
 		mp->stats.rx_errors++;

 	if (mf->status&RS_CLSN) {
 		mp->stats.collisions++;
 	}
@@ -624,7 +624,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 	if (mf->status&RS_FCSERR) {
 		mp->stats.rx_crc_errors++;
 	}

 	skb = dev_alloc_skb(mf->len+2);
 	if (!skb) {
 		mp->stats.rx_dropped++;
@@ -632,7 +632,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 	}
 	skb_reserve(skb,2);
 	memcpy(skb_put(skb, mf->len), mf->data, mf->len);

 	skb->dev = dev;
 	skb->protocol = eth_type_trans(skb, dev);
 	netif_rx(skb);
@@ -644,7 +644,7 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
 /*
  * The PSC has passed us a DMA interrupt event.
  */

 static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) dev_id;
@@ -661,9 +661,9 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
 	/*
 	 * Process the read queue
 	 */

 	status = psc_read_word(PSC_ENETRD_CTL);

 	if (status & 0x2000) {
 		mace_rxdma_reset(dev);
 	} else if (status & 0x0100) {
@@ -678,7 +678,7 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
 			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring + (mp->rx_tail * 0x0800)));
 			mp->rx_tail++;
 		}

 		/* If we're out of buffers in this ring then switch to */
 		/* the other set, otherwise just reactivate this one. */

@@ -689,7 +689,7 @@ static irqreturn_t mace_dma_intr(int irq, void *dev_id, struct pt_regs *regs)
 			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
 		}
 	}

 	/*
 	 * Process the write queue
 	 */