author:		Sascha Hauer <s.hauer@pengutronix.de>	2009-04-14 21:32:18 -0400
committer:	David S. Miller <davem@davemloft.net>	2009-04-16 05:34:50 -0400
commit:		22f6b860da25abe2c3e33347ccb806e6bcc57390
tree:		555737e3658ec576f20a7fe40325511b0b254d39 /drivers
parent:		8d4dd5cff892e18a34422852c05a88b79ff978ed
fec: Codingstyle cleanups
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/fec.c | 354
1 file changed, 139 insertions(+), 215 deletions(-)
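The bulk of the patch is mechanical: two-line block comments are collapsed onto one line, `#ifndef final_version`, `#if 0` and `#if 1` scaffolding is dropped, braces on single-statement branches are removed, and spacing around binary operators is normalized. A representative before/after, condensed from the hunks below for illustration:

	/* before */
	/* Set buffer length and buffer pointer.
	 */
	#define TX_TIMEOUT (2*HZ)

	/* after */
	/* Set buffer length and buffer pointer */
	#define TX_TIMEOUT (2 * HZ)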
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index e03b1773966d..672566b89ecf 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -86,8 +86,7 @@ static unsigned char fec_mac_default[] = {
 #endif
 #endif /* CONFIG_M5272 */
 
-/* Forward declarations of some structures to support different PHYs
- */
+/* Forward declarations of some structures to support different PHYs */
 
 typedef struct {
 	uint mii_data;
@@ -123,8 +122,7 @@ typedef struct {
 #error "FEC: descriptor ring size constants too large"
 #endif
 
-/* Interrupt events/masks.
- */
+/* Interrupt events/masks. */
 #define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
 #define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
 #define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
@@ -177,15 +175,14 @@ struct fec_enet_private {
 	ushort	skb_cur;
 	ushort	skb_dirty;
 
-	/* CPM dual port RAM relative addresses.
-	 */
+	/* CPM dual port RAM relative addresses */
 	dma_addr_t	bd_dma;
-	/* Address of Rx and Tx buffers. */
+	/* Address of Rx and Tx buffers */
 	struct bufdesc	*rx_bd_base;
 	struct bufdesc	*tx_bd_base;
 	/* The next free ring entry */
 	struct bufdesc	*cur_rx, *cur_tx;
-	/* The ring entries to be free()ed. */
+	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;
 
 	uint	tx_full;
@@ -245,19 +242,16 @@ static mii_list_t	*mii_tail;
 static int	mii_queue(struct net_device *dev, int request,
 		void (*func)(uint, struct net_device *));
 
-/* Make MII read/write commands for the FEC.
- */
+/* Make MII read/write commands for the FEC */
 #define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
 #define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
 						(VAL & 0xffff))
 #define mk_mii_end	0
 
-/* Transmitter timeout.
- */
-#define TX_TIMEOUT (2*HZ)
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
 
-/* Register definitions for the PHY.
- */
+/* Register definitions for the PHY */
 
 #define MII_REG_CR	0	/* Control Register */
 #define MII_REG_SR	1	/* Status Register */
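For reference, these macros build the 32-bit management frame the FEC shifts out on MDIO. Expanding them by hand (the values follow directly from the macro arithmetic; the field layout, start code 01, opcode, PHY address in bits 27:23, register in bits 22:18, turnaround 10, is per the FEC reference manuals and should be double-checked there):

	mk_mii_read(1)            /* 0x60020000 | (1 << 18)   = 0x60060000 */
	mk_mii_write(0, 0x2100)   /* 0x50020000 | 0 | 0x2100  = 0x50022100 */

Note that neither macro fills in the PHY address field; mii_queue() ORs it in later (see the "Add PHY address to register command" comment further down).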
@@ -307,7 +301,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	bdp = fep->cur_tx;
 
 	status = bdp->cbd_sc;
-#ifndef final_version
+
 	if (status & BD_ENET_TX_READY) {
 		/* Ooops. All transmit buffers are full. Bail out.
 		 * This should not happen, since dev->tbusy should be set.
@@ -316,21 +310,18 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		spin_unlock_irqrestore(&fep->hw_lock, flags);
 		return 1;
 	}
-#endif
 
-	/* Clear all of the status flags.
-	 */
+	/* Clear all of the status flags */
 	status &= ~BD_ENET_TX_STATS;
 
-	/* Set buffer length and buffer pointer.
-	 */
+	/* Set buffer length and buffer pointer */
 	bdp->cbd_bufaddr = __pa(skb->data);
 	bdp->cbd_datlen = skb->len;
 
 	/*
 	 * On some FEC implementations data must be aligned on
 	 * 4-byte boundaries. Use bounce buffers to copy data
 	 * and get it aligned. Ugh.
 	 */
 	if (bdp->cbd_bufaddr & FEC_ALIGNMENT) {
 		unsigned int index;
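The body elided between this hunk and the next copies the frame into a driver-owned bounce buffer before the descriptor is handed to the hardware. A sketch of the idea, with variable names taken from the surrounding context (the index computation and memcpy() line are inferred, not shown in these hunks):

	index = bdp - fep->tx_bd_base;	/* which ring slot we are filling */
	memcpy(fep->tx_bounce[index], skb->data, skb->len);
	bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);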
@@ -339,8 +330,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]);
 	}
 
-	/* Save skb pointer.
-	 */
+	/* Save skb pointer */
 	fep->tx_skbuff[fep->skb_cur] = skb;
 
 	dev->stats.tx_bytes += skb->len;
@@ -355,7 +345,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Send it on its way. Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
-
 	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
 			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
 	bdp->cbd_sc = status;
@@ -365,13 +354,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Trigger transmission start */
 	writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 
-	/* If this was the last BD in the ring, start at the beginning again.
-	 */
-	if (status & BD_ENET_TX_WRAP) {
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if (status & BD_ENET_TX_WRAP)
 		bdp = fep->tx_bd_base;
-	} else {
+	else
 		bdp++;
-	}
 
 	if (bdp == fep->dirty_tx) {
 		fep->tx_full = 1;
@@ -429,9 +416,6 @@ fec_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-/* The interrupt handler.
- * This is called from the MPC core interrupt.
- */
 static irqreturn_t
 fec_enet_interrupt(int irq, void * dev_id)
 {
@@ -440,12 +424,10 @@ fec_enet_interrupt(int irq, void * dev_id)
 	uint	int_events;
 	irqreturn_t ret = IRQ_NONE;
 
-	/* Get the interrupt events that caused us to be here. */
 	do {
 		int_events = readl(fep->hwp + FEC_IEVENT);
 		writel(int_events, fep->hwp + FEC_IEVENT);
 
-		/* Handle receive event in its own function. */
 		if (int_events & FEC_ENET_RXF) {
 			ret = IRQ_HANDLED;
 			fec_enet_rx(dev);
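Reading FEC_IEVENT and immediately writing the same value back works because the register is, per the FEC documentation, write-1-to-clear: writing back the bits just read acknowledges exactly those events, while anything that arrives in between stays pending for the next loop pass. A minimal sketch of the pattern (the loop condition here is illustrative, not quoted from the hunk):

	do {
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);	/* w1c acknowledge */
		/* ... dispatch on int_events ... */
	} while (int_events);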
@@ -506,31 +488,27 @@ fec_enet_tx(struct net_device *dev)
 			dev->stats.tx_packets++;
 		}
 
-#ifndef final_version
 		if (status & BD_ENET_TX_READY)
 			printk("HEY! Enet xmit interrupt and TX_READY.\n");
-#endif
+
 		/* Deferred means some collisions occurred during transmit,
 		 * but we eventually sent the packet OK.
 		 */
 		if (status & BD_ENET_TX_DEF)
 			dev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit.
-		 */
+		/* Free the sk buffer associated with this last transmit */
 		dev_kfree_skb_any(skb);
 		fep->tx_skbuff[fep->skb_dirty] = NULL;
 		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
 
-		/* Update pointer to next buffer descriptor to be transmitted.
-		 */
+		/* Update pointer to next buffer descriptor to be transmitted */
 		if (status & BD_ENET_TX_WRAP)
 			bdp = fep->tx_bd_base;
 		else
 			bdp++;
 
-		/* Since we have freed up a buffer, the ring is no longer
-		 * full.
+		/* Since we have freed up a buffer, the ring is no longer full
 		 */
 		if (fep->tx_full) {
 			fep->tx_full = 0;
@@ -569,114 +547,93 @@ fec_enet_rx(struct net_device *dev)
 	 */
 	bdp = fep->cur_rx;
 
 	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
 
-#ifndef final_version
-	/* Since we have allocated space to hold a complete frame,
-	 * the last indicator should be set.
-	 */
-	if ((status & BD_ENET_RX_LAST) == 0)
-		printk("FEC ENET: rcv is not +last\n");
-#endif
+		/* Since we have allocated space to hold a complete frame,
+		 * the last indicator should be set.
+		 */
+		if ((status & BD_ENET_RX_LAST) == 0)
+			printk("FEC ENET: rcv is not +last\n");
 
 		if (!fep->opened)
 			goto rx_processing_done;
 
 		/* Check for errors. */
 		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
 			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
 			dev->stats.rx_errors++;
 			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
 				/* Frame too long or too short. */
 				dev->stats.rx_length_errors++;
+			}
+			if (status & BD_ENET_RX_NO)	/* Frame alignment */
+				dev->stats.rx_frame_errors++;
+			if (status & BD_ENET_RX_CR)	/* CRC Error */
+				dev->stats.rx_crc_errors++;
+			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
+				dev->stats.rx_fifo_errors++;
 		}
-		if (status & BD_ENET_RX_NO)	/* Frame alignment */
-			dev->stats.rx_frame_errors++;
-		if (status & BD_ENET_RX_CR)	/* CRC Error */
-			dev->stats.rx_crc_errors++;
-		if (status & BD_ENET_RX_OV)	/* FIFO overrun */
-			dev->stats.rx_fifo_errors++;
-	}
 
 		/* Report late collisions as a frame error.
 		 * On this error, the BD is closed, but we don't know what we
 		 * have in the buffer. So, just drop this frame on the floor.
 		 */
 		if (status & BD_ENET_RX_CL) {
 			dev->stats.rx_errors++;
 			dev->stats.rx_frame_errors++;
 			goto rx_processing_done;
 		}
 
-	/* Process the incoming frame.
-	 */
-	dev->stats.rx_packets++;
-	pkt_len = bdp->cbd_datlen;
-	dev->stats.rx_bytes += pkt_len;
-	data = (__u8*)__va(bdp->cbd_bufaddr);
+		/* Process the incoming frame. */
+		dev->stats.rx_packets++;
+		pkt_len = bdp->cbd_datlen;
+		dev->stats.rx_bytes += pkt_len;
+		data = (__u8*)__va(bdp->cbd_bufaddr);
 
 		dma_sync_single(NULL, (unsigned long)__pa(data),
 			pkt_len - 4, DMA_FROM_DEVICE);
 
 		/* This does 16 byte alignment, exactly what we need.
 		 * The packet length includes FCS, but we don't want to
 		 * include that when passing upstream as it messes up
 		 * bridging applications.
 		 */
-	skb = dev_alloc_skb(pkt_len-4);
-
-	if (skb == NULL) {
-		printk("%s: Memory squeeze, dropping packet.\n", dev->name);
-		dev->stats.rx_dropped++;
-	} else {
-		skb_put(skb,pkt_len-4);	/* Make room */
-		skb_copy_to_linear_data(skb, data, pkt_len-4);
-		skb->protocol=eth_type_trans(skb,dev);
-		netif_rx(skb);
-	}
-	rx_processing_done:
-
-	/* Clear the status flags for this buffer.
-	 */
-	status &= ~BD_ENET_RX_STATS;
+		skb = dev_alloc_skb(pkt_len - 4);
 
-	/* Mark the buffer empty.
-	 */
-	status |= BD_ENET_RX_EMPTY;
-	bdp->cbd_sc = status;
+		if (skb == NULL) {
+			printk("%s: Memory squeeze, dropping packet.\n",
+					dev->name);
+			dev->stats.rx_dropped++;
+		} else {
+			skb_put(skb, pkt_len - 4);	/* Make room */
+			skb_copy_to_linear_data(skb, data, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+			netif_rx(skb);
+		}
+rx_processing_done:
+		/* Clear the status flags for this buffer */
+		status &= ~BD_ENET_RX_STATS;
 
-	/* Update BD pointer to next entry.
-	 */
-	if (status & BD_ENET_RX_WRAP)
-		bdp = fep->rx_bd_base;
-	else
-		bdp++;
+		/* Mark the buffer empty */
+		status |= BD_ENET_RX_EMPTY;
+		bdp->cbd_sc = status;
 
-#if 1
-	/* Doing this here will keep the FEC running while we process
-	 * incoming frames. On a heavily loaded network, we should be
-	 * able to keep up at the expense of system resources.
-	 */
-	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
-#endif
-	} /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
+		/* Update BD pointer to next entry */
+		if (status & BD_ENET_RX_WRAP)
+			bdp = fep->rx_bd_base;
+		else
+			bdp++;
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames. On a heavily loaded network, we should be
+		 * able to keep up at the expense of system resources.
+		 */
+		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
+	}
 	fep->cur_rx = bdp;
 
-#if 0
-	/* Doing this here will allow us to process all frames in the
-	 * ring before the FEC is allowed to put more there. On a heavily
-	 * loaded network, some frames may be lost. Unfortunately, this
-	 * increases the interrupt overhead since we can potentially work
-	 * our way back to the interrupt return only to come right back
-	 * here.
-	 */
-	fecp->fec_r_des_active = 0;
-#endif
-
 	spin_unlock_irq(&fep->hw_lock);
 }
 
-
 /* called from interrupt context */
 static void
 fec_enet_mii(struct net_device *dev)
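Throughout this receive path, pkt_len - 4 drops the 4-byte Ethernet FCS that, as the comment above notes, is included in the length the FEC reports, so the skb handed to netif_rx() carries payload only. Worked through for a minimum-size frame:

	pkt_len = bdp->cbd_datlen;		/* 64: includes the 4-byte FCS */
	skb = dev_alloc_skb(pkt_len - 4);	/* room for the 60 useful bytes */
	skb_put(skb, pkt_len - 4);		/* the FCS never reaches the stack */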
@@ -714,8 +671,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
 	mii_list_t	*mip;
 	int		retval;
 
-	/* Add PHY address to register command.
-	 */
+	/* Add PHY address to register command */
 	fep = netdev_priv(dev);
 	spin_lock_irqsave(&fep->mii_lock, flags);
 
@@ -1358,11 +1314,6 @@ static void mii_relink(struct work_struct *work)
 		fec_restart(dev, duplex);
 	} else
 		fec_stop(dev);
-
-#if 0
-	enable_irq(fep->mii_irq);
-#endif
-
 }
 
 /* mii_queue_relink is called in interrupt context from mii_link_interrupt */
@@ -1371,12 +1322,12 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev)
 	struct fec_enet_private *fep = netdev_priv(dev);
 
 	/*
-	** We cannot queue phy_task twice in the workqueue.  It
-	** would cause an endless loop in the workqueue.
-	** Fortunately, if the last mii_relink entry has not yet been
-	** executed now, it will do the job for the current interrupt,
-	** which is just what we want.
+	 * We cannot queue phy_task twice in the workqueue.  It
+	 * would cause an endless loop in the workqueue.
+	 * Fortunately, if the last mii_relink entry has not yet been
+	 * executed now, it will do the job for the current interrupt,
+	 * which is just what we want.
 	 */
 	if (fep->mii_phy_task_queued)
 		return;
 
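The mii_phy_task_queued flag is the usual "at most one outstanding work item" guard. A hedged sketch of the shape this takes; the schedule_work() call and the point where the flag is cleared are outside this hunk and assumed:

	/* interrupt context */
	if (fep->mii_phy_task_queued)
		return;				/* a relink is already pending */
	fep->mii_phy_task_queued = 1;
	schedule_work(&fep->phy_task);

	/* the work function (mii_relink) is assumed to clear the
	 * flag before acting, so a later interrupt can re-queue it */
	fep->mii_phy_task_queued = 0;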
@@ -1407,8 +1358,7 @@ phy_cmd_t const phy_cmd_config[] = {
 	{ mk_mii_end, }
 	};
 
-/* Read remainder of PHY ID.
- */
+/* Read remainder of PHY ID. */
 static void
 mii_discover_phy3(uint mii_reg, struct net_device *dev)
 {
@@ -1447,8 +1397,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
 	if (fep->phy_addr < 32) {
 		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
 
-			/* Got first part of ID, now get remainder.
-			 */
+			/* Got first part of ID, now get remainder */
 			fep->phy_id = phytype << 16;
 			mii_queue(dev, mk_mii_read(MII_REG_PHYIR2),
 							mii_discover_phy3);
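mii_discover_phy() and mii_discover_phy3() between them assemble the 32-bit PHY ID from two 16-bit register reads: PHYIR1 into the high half here, PHYIR2 into the low half in mii_discover_phy3(), which is outside this hunk. Schematically (the second line is assumed, not shown in the diff):

	fep->phy_id = phytype << 16;		/* PHYIR1: bits 31:16 */
	/* later, in mii_discover_phy3(): */
	fep->phy_id |= (mii_reg & 0xffff);	/* PHYIR2: bits 15:0 (assumed) */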
@@ -1468,8 +1417,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
 	}
 }
 
-/* This interrupt occurs when the PHY detects a link change.
- */
+/* This interrupt occurs when the PHY detects a link change */
 #ifdef HAVE_mii_link_interrupt
 static irqreturn_t
 mii_link_interrupt(int irq, void * dev_id)
@@ -1479,10 +1427,6 @@ mii_link_interrupt(int irq, void * dev_id)
 
 	fec_phy_ack_intr();
 
-#if 0
-	disable_irq(fep->mii_irq);  /* disable now, enable later */
-#endif
-
 	mii_do_cmd(dev, fep->phy->ack_int);
 	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
 
@@ -1533,7 +1477,7 @@ fec_enet_open(struct net_device *dev)
 
 	netif_start_queue(dev);
 	fep->opened = 1;
-	return 0;		/* Success */
+	return 0;
 }
 
 static int
@@ -1541,8 +1485,7 @@ fec_enet_close(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 
-	/* Don't know what to do yet.
-	 */
+	/* Don't know what to do yet. */
 	fep->opened = 0;
 	netif_stop_queue(dev);
 	fec_stop(dev);
@@ -1570,7 +1513,7 @@ static void set_multicast_list(struct net_device *dev)
 	unsigned int i, j, bit, data, crc, tmp;
 	unsigned char hash;
 
-	if (dev->flags&IFF_PROMISC) {
+	if (dev->flags & IFF_PROMISC) {
 		tmp = readl(fep->hwp + FEC_R_CNTRL);
 		tmp |= 0x8;
 		writel(tmp, fep->hwp + FEC_R_CNTRL);
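The magic 0x8 ORed into FEC_R_CNTRL is bit 3 of the receive control register, which the FEC reference manuals describe as the promiscuous (PROM) bit; a named constant would read better, e.g. (hypothetical, not defined in this driver):

	#define FEC_RCNTRL_PROM	0x00000008	/* accept frames regardless of DA */
	tmp |= FEC_RCNTRL_PROM;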
@@ -1581,42 +1524,37 @@ static void set_multicast_list(struct net_device *dev)
 
 	if (dev->flags & IFF_ALLMULTI) {
 		/* Catch all multicast addresses, so set the
-		 * filter to all 1's.
+		 * filter to all 1's
 		 */
 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
 		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 	} else {
-		/* Clear filter and add the addresses in hash register.
+		/* Clear filter and add the addresses in hash register
 		 */
 		writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
 		writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
 
 		dmi = dev->mc_list;
 
-		for (j = 0; j < dev->mc_count; j++, dmi = dmi->next)
-		{
-			/* Only support group multicast for now.
-			 */
+		for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+			/* Only support group multicast for now */
 			if (!(dmi->dmi_addr[0] & 1))
 				continue;
 
-			/* calculate crc32 value of mac address
-			 */
+			/* calculate crc32 value of mac address */
 			crc = 0xffffffff;
 
-			for (i = 0; i < dmi->dmi_addrlen; i++)
-			{
+			for (i = 0; i < dmi->dmi_addrlen; i++) {
 				data = dmi->dmi_addr[i];
-				for (bit = 0; bit < 8; bit++, data >>= 1)
-				{
+				for (bit = 0; bit < 8; bit++, data >>= 1) {
 					crc = (crc >> 1) ^
 					(((crc ^ data) & 1) ? CRC32_POLY : 0);
 				}
 			}
 
 			/* only upper 6 bits (HASH_BITS) are used
-			   which point to specific bit in he hash registers
+			 * which point to specific bit in he hash registers
 			 */
 			hash = (crc >> (32 - HASH_BITS)) & 0x3f;
 
 			if (hash > 31) {
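The nested loops compute a bit-reflected CRC-32 over the 6-byte MAC address and keep only the top HASH_BITS (6) bits, selecting one of 64 positions spread across the two 32-bit group hash registers. An equivalent standalone form, for illustration (CRC32_POLY is assumed here to be the usual reflected polynomial 0xedb88320):

	static unsigned char fec_mc_hash(const unsigned char *addr, int len)
	{
		unsigned int crc = 0xffffffff;
		int i, bit;

		for (i = 0; i < len; i++) {
			unsigned int data = addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1)
				crc = (crc >> 1) ^
					(((crc ^ data) & 1) ? 0xedb88320 : 0);
		}
		return (crc >> 26) & 0x3f;	/* upper 6 bits select the filter bit */
	}

With HASH_BITS == 6, (crc >> (32 - HASH_BITS)) & 0x3f and (crc >> 26) & 0x3f are the same expression; hash values above 31 land in the HIGH register, the rest in LOW, as the following lines show.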
@@ -1633,8 +1571,7 @@ static void set_multicast_list(struct net_device *dev)
 	}
 }
 
-/* Set a MAC change in hardware.
- */
+/* Set a MAC change in hardware. */
 static void
 fec_set_mac_address(struct net_device *dev)
 {
@@ -1675,8 +1612,7 @@ int __init fec_enet_init(struct net_device *dev, int index)
 	fep->hwp = (void __iomem *)dev->base_addr;
 	fep->netdev = dev;
 
-	/* Whack a reset.  We should wait for this.
-	 */
+	/* Whack a reset.  We should wait for this. */
 	writel(1, fep->hwp + FEC_ECNTRL);
 	udelay(10);
 
@@ -1706,18 +1642,15 @@ int __init fec_enet_init(struct net_device *dev, int index)
 
 	fep->skb_cur = fep->skb_dirty = 0;
 
-	/* Initialize the receive buffer descriptors.
-	 */
+	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
 	for (i=0; i<FEC_ENET_RX_PAGES; i++) {
 
-		/* Allocate a page.
-		 */
+		/* Allocate a page */
 		mem_addr = __get_free_page(GFP_KERNEL);
 		/* XXX: missing check for allocation failure */
 
-		/* Initialize the BD for every fragment in the page.
-		 */
+		/* Initialize the BD for every fragment in the page */
 		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 			bdp->cbd_bufaddr = __pa(mem_addr);
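The XXX comment survives the cleanup; the missing check would be the usual pattern, sketched here as a hypothetical fix that is not part of this patch:

	mem_addr = __get_free_page(GFP_KERNEL);
	if (!mem_addr)
		return -ENOMEM;	/* a real fix would also unwind pages already allocated */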
@@ -1726,13 +1659,11 @@ int __init fec_enet_init(struct net_device *dev, int index)
 		}
 	}
 
-	/* Set the last buffer to wrap.
-	 */
+	/* Set the last buffer to wrap */
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	/* ...and the same for transmmit.
-	 */
+	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) {
 		if (j >= FEC_ENET_TX_FRPPG) {
@@ -1744,20 +1675,17 @@ int __init fec_enet_init(struct net_device *dev, int index)
 		}
 		fep->tx_bounce[i] = (unsigned char *) mem_addr;
 
-		/* Initialize the BD for every fragment in the page.
-		 */
+		/* Initialize the BD for every fragment in the page */
 		bdp->cbd_sc = 0;
 		bdp->cbd_bufaddr = 0;
 		bdp++;
 	}
 
-	/* Set the last buffer to wrap.
-	 */
+	/* Set the last buffer to wrap */
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	/* Set receive and transmit descriptor base.
-	 */
+	/* Set receive and transmit descriptor base */
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
 			fep->hwp + FEC_X_DES_START);
@@ -1776,7 +1704,7 @@ int __init fec_enet_init(struct net_device *dev, int index)
 	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
 #endif
 
-	/* The FEC Ethernet specific entries in the device structure. */
+	/* The FEC Ethernet specific entries in the device structure */
 	dev->open = fec_enet_open;
 	dev->hard_start_xmit = fec_enet_start_xmit;
 	dev->tx_timeout = fec_timeout;
@@ -1792,9 +1720,7 @@ int __init fec_enet_init(struct net_device *dev, int index)
 	writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
 	writel(0, fep->hwp + FEC_X_CNTRL);
 
-	/*
-	 * Set MII speed to 2.5 MHz
-	 */
+	/* Set MII speed to 2.5 MHz */
 	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
 					/ 2500000) / 2) & 0x3F) << 1;
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
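Worked through for a hypothetical 66 MHz peripheral clock, the expression evaluates as:

	((((66000000 / 2 + 4999999) / 2500000) / 2) & 0x3F) << 1
	= ((37999999 / 2500000) / 2) << 1
	= (15 / 2) << 1
	= 7 << 1
	= 0x0E

The +4999999 biases the integer division upward so the divisor is never too small; the final shift by one places the value in the register's MII_SPEED field (bits 6:1 on this hardware family, per the FEC manuals), giving an MDC of roughly 66 MHz / (2 * 2 * 7), about 2.36 MHz, just under the 2.5 MHz target.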
@@ -1853,8 +1779,8 @@ fec_restart(struct net_device *dev, int duplex)
 
 	/* Reset SKB transmit buffers. */
 	fep->skb_cur = fep->skb_dirty = 0;
-	for (i=0; i<=TX_RING_MOD_MASK; i++) {
-		if (fep->tx_skbuff[i] != NULL) {
+	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
+		if (fep->tx_skbuff[i]) {
 			dev_kfree_skb_any(fep->tx_skbuff[i]);
 			fep->tx_skbuff[i] = NULL;
 		}
@@ -1862,20 +1788,20 @@ fec_restart(struct net_device *dev, int duplex)
 
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i=0; i<RX_RING_SIZE; i++) {
+	for (i = 0; i < RX_RING_SIZE; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		bdp++;
 	}
 
-	/* Set the last buffer to wrap. */
+	/* Set the last buffer to wrap */
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	/* ...and the same for transmmit. */
+	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
-	for (i=0; i<TX_RING_SIZE; i++) {
+	for (i = 0; i < TX_RING_SIZE; i++) {
 
 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -1883,11 +1809,11 @@ fec_restart(struct net_device *dev, int duplex)
 		bdp++;
 	}
 
-	/* Set the last buffer to wrap. */
+	/* Set the last buffer to wrap */
 	bdp--;
 	bdp->cbd_sc |= BD_SC_WRAP;
 
-	/* Enable MII mode. */
+	/* Enable MII mode */
 	if (duplex) {
 		/* MII enable / FD enable */
 		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
@@ -1899,14 +1825,14 @@ fec_restart(struct net_device *dev, int duplex)
 	}
 	fep->full_duplex = duplex;
 
-	/* Set MII speed. */
+	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
-	/* And last, enable the transmit and receive processing. */
+	/* And last, enable the transmit and receive processing */
 	writel(2, fep->hwp + FEC_ECNTRL);
 	writel(0, fep->hwp + FEC_R_DES_ACTIVE);
 
-	/* Enable interrupts we wish to service. */
+	/* Enable interrupts we wish to service */
 	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
 			fep->hwp + FEC_IMASK);
 }
@@ -1916,9 +1842,7 @@ fec_stop(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
 
-	/*
-	** We cannot expect a graceful transmit stop without link !!!
-	*/
+	/* We cannot expect a graceful transmit stop without link !!! */
 	if (fep->link) {
 		writel(1, fep->hwp + FEC_X_CNTRL);	/* Graceful transmit stop */
 		udelay(10);