author     Greg Ungerer <gerg@snapgear.com>        2006-06-26 23:19:33 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-06-27 21:30:14 -0400
commit     0e702ab38b0eed1ea5968f3b60b352e814172c27 (patch)
tree       98971ffff39b5b8e2bd630bcc81987468fceb455 /drivers/net
parent     83901fc1c786c642c576f51302d79df849ad7d71 (diff)
[PATCH] m68knommu: FEC driver event/irq fixes
Collection of fixes for the ColdFire FEC ethernet driver:

. Reworked the event setting so that it occurs after the MII setup
  (from roucaries bastien <roucaries.bastien@gmail.com>).
. Do not read cbd_sc in memory for each bit we test; one read per buffer
  is enough (a minimal sketch of this pattern follows the sign-offs).
. Overrun errors must increase `rx_fifo_errors', not `rx_crc_errors'.
. No need for a special value to activate rx or tx; only the write access
  matters.
. Simplify the parameter of eth_copy_and_sum: `data' already has the
  right value.
. Some spelling fixes.
Signed-off-by: Philippe De Muyter <phdm@macqel.be>
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
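
The second fix above accounts for the bulk of the diff: latch the volatile
cbd_sc word into a local `status' once per buffer, test the stable copy,
and write it back once. A minimal standalone sketch of that pattern
follows; the cbd_t layout and BD_* values here are simplified stand-ins,
not the real ColdFire definitions.

#include <stdio.h>

#define BD_ENET_TX_READY  0x8000	/* descriptor still owned by the FEC */
#define BD_ENET_TX_UN     0x0002	/* transmit underrun */
#define BD_ENET_TX_STATS  0x03ff	/* all status bits */

typedef struct {
	volatile unsigned short cbd_sc;	/* control/status, shared with hw */
} cbd_t;

static void handle_tx_bd(cbd_t *bdp)
{
	/* One volatile read; every later test uses the local copy, so the
	 * hardware cannot change the value between our bit tests. */
	unsigned short status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY)
		return;			/* hardware is not done yet */
	if (status & BD_ENET_TX_UN)
		printf("tx underrun\n");

	status &= ~BD_ENET_TX_STATS;	/* clear the status flags ... */
	bdp->cbd_sc = status;		/* ... with a single write back */
}

int main(void)
{
	cbd_t bd = { .cbd_sc = BD_ENET_TX_UN };
	handle_tx_bd(&bd);
	return 0;
}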
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/fec.c	91
1 file changed, 52 insertions, 39 deletions
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 6b7641873e7a..db694c832989 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -310,6 +310,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct fec_enet_private *fep;
 	volatile fec_t *fecp;
 	volatile cbd_t *bdp;
+	unsigned short status;
 
 	fep = netdev_priv(dev);
 	fecp = (volatile fec_t*)dev->base_addr;
@@ -322,8 +323,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Fill in a Tx ring entry */
 	bdp = fep->cur_tx;
 
+	status = bdp->cbd_sc;
 #ifndef final_version
-	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+	if (status & BD_ENET_TX_READY) {
 		/* Ooops.  All transmit buffers are full.  Bail out.
 		 * This should not happen, since dev->tbusy should be set.
 		 */
@@ -334,7 +336,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* Clear all of the status flags.
 	 */
-	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+	status &= ~BD_ENET_TX_STATS;
 
 	/* Set buffer length and buffer pointer.
 	 */
@@ -368,21 +370,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irq(&fep->lock);
 
-	/* Send it on its way.  Tell FEC its ready, interrupt when done,
-	 * its the last BD of the frame, and to put the CRC on the end.
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
 	 */
 
-	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
 			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+	bdp->cbd_sc = status;
 
 	dev->trans_start = jiffies;
 
 	/* Trigger transmission start */
-	fecp->fec_x_des_active = 0x01000000;
+	fecp->fec_x_des_active = 0;
 
 	/* If this was the last BD in the ring, start at the beginning again.
 	 */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+	if (status & BD_ENET_TX_WRAP) {
 		bdp = fep->tx_bd_base;
 	} else {
 		bdp++;
@@ -493,43 +496,44 @@ fec_enet_tx(struct net_device *dev)
 {
 	struct fec_enet_private *fep;
 	volatile cbd_t *bdp;
+	unsigned short status;
 	struct sk_buff *skb;
 
 	fep = netdev_priv(dev);
 	spin_lock(&fep->lock);
 	bdp = fep->dirty_tx;
 
-	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
 		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
 
 		skb = fep->tx_skbuff[fep->skb_dirty];
 		/* Check for errors. */
-		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
 				   BD_ENET_TX_RL | BD_ENET_TX_UN |
 				   BD_ENET_TX_CSL)) {
 			fep->stats.tx_errors++;
-			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
+			if (status & BD_ENET_TX_HB)  /* No heartbeat */
 				fep->stats.tx_heartbeat_errors++;
-			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
+			if (status & BD_ENET_TX_LC)  /* Late collision */
 				fep->stats.tx_window_errors++;
-			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
+			if (status & BD_ENET_TX_RL)  /* Retrans limit */
 				fep->stats.tx_aborted_errors++;
-			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
+			if (status & BD_ENET_TX_UN)  /* Underrun */
 				fep->stats.tx_fifo_errors++;
-			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+			if (status & BD_ENET_TX_CSL) /* Carrier lost */
 				fep->stats.tx_carrier_errors++;
 		} else {
 			fep->stats.tx_packets++;
 		}
 
 #ifndef final_version
-		if (bdp->cbd_sc & BD_ENET_TX_READY)
+		if (status & BD_ENET_TX_READY)
 			printk("HEY! Enet xmit interrupt and TX_READY.\n");
 #endif
 		/* Deferred means some collisions occurred during transmit,
 		 * but we eventually sent the packet OK.
 		 */
-		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+		if (status & BD_ENET_TX_DEF)
 			fep->stats.collisions++;
 
 		/* Free the sk buffer associated with this last transmit.
@@ -540,7 +544,7 @@ fec_enet_tx(struct net_device *dev)
 
 		/* Update pointer to next buffer descriptor to be transmitted.
 		 */
-		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		if (status & BD_ENET_TX_WRAP)
 			bdp = fep->tx_bd_base;
 		else
 			bdp++;
@@ -570,9 +574,14 @@ fec_enet_rx(struct net_device *dev)
 	struct fec_enet_private *fep;
 	volatile fec_t *fecp;
 	volatile cbd_t *bdp;
+	unsigned short status;
 	struct sk_buff *skb;
 	ushort pkt_len;
 	__u8 *data;
+
+#ifdef CONFIG_M532x
+	flush_cache_all();
+#endif
 
 	fep = netdev_priv(dev);
 	fecp = (volatile fec_t*)dev->base_addr;
@@ -582,13 +591,13 @@ fec_enet_rx(struct net_device *dev)
 	 */
 	bdp = fep->cur_rx;
 
-while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
 
 #ifndef final_version
 	/* Since we have allocated space to hold a complete frame,
 	 * the last indicator should be set.
 	 */
-	if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+	if ((status & BD_ENET_RX_LAST) == 0)
 		printk("FEC ENET: rcv is not +last\n");
 #endif
 
@@ -596,26 +605,26 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 		goto rx_processing_done;
 
 	/* Check for errors. */
-	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+	if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
 			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
 		fep->stats.rx_errors++;
-		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
 			/* Frame too long or too short. */
 			fep->stats.rx_length_errors++;
 		}
-		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+		if (status & BD_ENET_RX_NO)	/* Frame alignment */
 			fep->stats.rx_frame_errors++;
-		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
-			fep->stats.rx_crc_errors++;
-		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+		if (status & BD_ENET_RX_CR)	/* CRC Error */
 			fep->stats.rx_crc_errors++;
+		if (status & BD_ENET_RX_OV)	/* FIFO overrun */
+			fep->stats.rx_fifo_errors++;
 	}
 
 	/* Report late collisions as a frame error.
 	 * On this error, the BD is closed, but we don't know what we
 	 * have in the buffer.  So, just drop this frame on the floor.
 	 */
-	if (bdp->cbd_sc & BD_ENET_RX_CL) {
+	if (status & BD_ENET_RX_CL) {
 		fep->stats.rx_errors++;
 		fep->stats.rx_frame_errors++;
 		goto rx_processing_done;
@@ -641,9 +650,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 	} else {
 		skb->dev = dev;
 		skb_put(skb,pkt_len-4);	/* Make room */
-		eth_copy_and_sum(skb,
-				 (unsigned char *)__va(bdp->cbd_bufaddr),
-				 pkt_len-4, 0);
+		eth_copy_and_sum(skb, data, pkt_len-4, 0);
 		skb->protocol=eth_type_trans(skb,dev);
 		netif_rx(skb);
 	}
@@ -651,15 +658,16 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 
 	/* Clear the status flags for this buffer.
 	 */
-	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+	status &= ~BD_ENET_RX_STATS;
 
 	/* Mark the buffer empty.
 	 */
-	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+	status |= BD_ENET_RX_EMPTY;
+	bdp->cbd_sc = status;
 
 	/* Update BD pointer to next entry.
 	 */
-	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+	if (status & BD_ENET_RX_WRAP)
 		bdp = fep->rx_bd_base;
 	else
 		bdp++;
@@ -669,9 +677,9 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 	 * incoming frames.  On a heavily loaded network, we should be
 	 * able to keep up at the expense of system resources.
 	 */
-	fecp->fec_r_des_active = 0x01000000;
+	fecp->fec_r_des_active = 0;
 #endif
-    } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+    } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
 	fep->cur_rx = (cbd_t *)bdp;
 
 #if 0
@@ -682,11 +690,12 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
 	 * our way back to the interrupt return only to come right back
 	 * here.
 	 */
-	fecp->fec_r_des_active = 0x01000000;
+	fecp->fec_r_des_active = 0;
 #endif
 }
 
 
+/* called from interrupt context */
 static void
 fec_enet_mii(struct net_device *dev)
 {
@@ -698,10 +707,12 @@ fec_enet_mii(struct net_device *dev)
 	fep = netdev_priv(dev);
 	ep = fep->hwp;
 	mii_reg = ep->fec_mii_data;
+
+	spin_lock(&fep->lock);
 
 	if ((mip = mii_head) == NULL) {
 		printk("MII and no head!\n");
-		return;
+		goto unlock;
 	}
 
 	if (mip->mii_func != NULL)
@@ -713,6 +724,9 @@ fec_enet_mii(struct net_device *dev)
 
 	if ((mip = mii_head) != NULL)
 		ep->fec_mii_data = mip->mii_regval;
+
+unlock:
+	spin_unlock(&fep->lock);
 }
 
 static int
@@ -730,8 +744,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
 
 	retval = 0;
 
-	save_flags(flags);
-	cli();
+	spin_lock_irqsave(&fep->lock,flags);
 
 	if ((mip = mii_free) != NULL) {
 		mii_free = mip->mii_next;
@@ -751,7 +764,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
 		retval = 1;
 	}
 
-	restore_flags(flags);
+	spin_unlock_irqrestore(&fep->lock,flags);
 
 	return(retval);
 }
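
A note on the locking change in the last two hunks: the old
save_flags()/cli() pair is the UP-era interface and offers no protection
on SMP, so the patch replaces it with spin_lock_irqsave() on the driver's
per-device lock; fec_enet_mii() takes the same lock with a plain
spin_lock(), since it already runs with interrupts off in the interrupt
handler. A rough sketch of the conversion, using a hypothetical lock name
rather than the driver's fep->lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stand-in for fep->lock */

static void queue_request(void)
{
	unsigned long flags;

	/* Old pattern removed by this patch (disables all interrupts,
	 * UP-only):
	 *	save_flags(flags);
	 *	cli();
	 *	... touch the shared MII request list ...
	 *	restore_flags(flags);
	 */

	/* New pattern: IRQ-safe, and correct on SMP as well. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... touch the shared MII request list ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}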