author     Lennert Buytenhek <buytenh@wantstofly.org>  2008-05-31 18:48:39 -0400
committer  Lennert Buytenhek <buytenh@wantstofly.org>  2008-06-12 02:40:22 -0400
commit     c9df406f3138265193f5e8d7b7fe85dfbbbd3ac4 (patch)
tree       5adc3cea9fb14b0afb79b42d49895448886a7e2f /drivers/net/mv643xx_eth.c
parent     d4c3c0753594adaafbcb77a086f013f1d847b3f0 (diff)
mv643xx_eth: reverse topological sort of functions
This patch performs a reverse topological sort of all functions in
mv643xx_eth.c, so that we can get rid of all forward declarations,
and end up with a more understandable driver due to related functions
being grouped together.
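
As a minimal sketch of what the reordering buys (hypothetical functions,
not code from this driver): before, a callee defined below its caller
needs a forward declaration; after the sort, callees simply come first.

    /* Before: bar() is called before it is defined, so a
     * forward declaration is unavoidable. */
    static void bar(void);

    static void foo(void)
    {
            bar();
    }

    static void bar(void)
    {
    }

    /* After the reverse topological sort: the callee precedes
     * its caller and the forward declaration disappears. */
    static void bar(void)
    {
    }

    static void foo(void)
    {
            bar();
    }
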
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Dale Farnsworth <dale@farnsworth.org>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	3438
1 files changed, 1646 insertions, 1792 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b7915cdcc6a5..835b85d30bc6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -62,6 +62,9 @@
62 | #include <asm/delay.h> | 62 | #include <asm/delay.h> |
63 | #include <asm/dma-mapping.h> | 63 | #include <asm/dma-mapping.h> |
64 | 64 | ||
65 | static char mv643xx_driver_name[] = "mv643xx_eth"; | ||
66 | static char mv643xx_driver_version[] = "1.0"; | ||
67 | |||
65 | #define MV643XX_CHECKSUM_OFFLOAD_TX | 68 | #define MV643XX_CHECKSUM_OFFLOAD_TX |
66 | #define MV643XX_NAPI | 69 | #define MV643XX_NAPI |
67 | #define MV643XX_TX_FAST_REFILL | 70 | #define MV643XX_TX_FAST_REFILL |
@@ -478,7 +481,21 @@ struct pkt_info {
478 | struct sk_buff *return_info; /* User resource return information */ | 481 | struct sk_buff *return_info; /* User resource return information */ |
479 | }; | 482 | }; |
480 | 483 | ||
481 | /* Ethernet port specific information */ | 484 | |
485 | /* global *******************************************************************/ | ||
486 | struct mv643xx_shared_private { | ||
487 | void __iomem *eth_base; | ||
488 | |||
489 | /* used to protect SMI_REG, which is shared across ports */ | ||
490 | spinlock_t phy_lock; | ||
491 | |||
492 | u32 win_protect; | ||
493 | |||
494 | unsigned int t_clk; | ||
495 | }; | ||
496 | |||
497 | |||
498 | /* per-port *****************************************************************/ | ||
482 | struct mv643xx_mib_counters { | 499 | struct mv643xx_mib_counters { |
483 | u64 good_octets_received; | 500 | u64 good_octets_received; |
484 | u32 bad_octets_received; | 501 | u32 bad_octets_received; |
@@ -512,17 +529,6 @@ struct mv643xx_mib_counters {
512 | u32 late_collision; | 529 | u32 late_collision; |
513 | }; | 530 | }; |
514 | 531 | ||
515 | struct mv643xx_shared_private { | ||
516 | void __iomem *eth_base; | ||
517 | |||
518 | /* used to protect SMI_REG, which is shared across ports */ | ||
519 | spinlock_t phy_lock; | ||
520 | |||
521 | u32 win_protect; | ||
522 | |||
523 | unsigned int t_clk; | ||
524 | }; | ||
525 | |||
526 | struct mv643xx_private { | 532 | struct mv643xx_private { |
527 | struct mv643xx_shared_private *shared; | 533 | struct mv643xx_shared_private *shared; |
528 | int port_num; /* User Ethernet port number */ | 534 | int port_num; /* User Ethernet port number */ |
@@ -585,93 +591,135 @@ struct mv643xx_private {
585 | struct mii_if_info mii; | 591 | struct mii_if_info mii; |
586 | }; | 592 | }; |
587 | 593 | ||
588 | /* Static function declarations */ | ||
589 | static void eth_port_init(struct mv643xx_private *mp); | ||
590 | static void eth_port_reset(struct mv643xx_private *mp); | ||
591 | static void eth_port_start(struct net_device *dev); | ||
592 | 594 | ||
593 | static void ethernet_phy_reset(struct mv643xx_private *mp); | 595 | /* port register accessors **************************************************/ |
596 | static inline u32 rdl(struct mv643xx_private *mp, int offset) | ||
597 | { | ||
598 | return readl(mp->shared->eth_base + offset); | ||
599 | } | ||
594 | 600 | ||
595 | static void eth_port_write_smi_reg(struct mv643xx_private *mp, | 601 | static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) |
596 | unsigned int phy_reg, unsigned int value); | 602 | { |
603 | writel(data, mp->shared->eth_base + offset); | ||
604 | } | ||
597 | 605 | ||
598 | static void eth_port_read_smi_reg(struct mv643xx_private *mp, | ||
599 | unsigned int phy_reg, unsigned int *value); | ||
600 | 606 | ||
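
As a usage sketch of the new accessors (PORT_CONFIG_REG and
UNICAST_PROMISCUOUS_MODE are existing driver macros; the sequence
mirrors what mv643xx_eth_set_rx_mode() does further down):

    /* Read-modify-write a per-port register through the shared
     * register window mapped at mp->shared->eth_base. */
    u32 cfg;

    cfg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
    cfg |= UNICAST_PROMISCUOUS_MODE;
    wrl(mp, PORT_CONFIG_REG(mp->port_num), cfg);
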
601 | static void eth_clear_mib_counters(struct mv643xx_private *mp); | 607 | /* rxq/txq helper functions *************************************************/ |
608 | static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp, | ||
609 | unsigned int queues) | ||
610 | { | ||
611 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues); | ||
612 | } | ||
602 | 613 | ||
603 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | 614 | static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp) |
604 | struct pkt_info *p_pkt_info); | 615 | { |
605 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | 616 | unsigned int port_num = mp->port_num; |
606 | struct pkt_info *p_pkt_info); | 617 | u32 queues; |
607 | 618 | ||
608 | static void eth_port_uc_addr_get(struct mv643xx_private *mp, | 619 | /* Stop Rx port activity. Check port Rx activity. */ |
609 | unsigned char *p_addr); | 620 | queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; |
610 | static void eth_port_uc_addr_set(struct mv643xx_private *mp, | 621 | if (queues) { |
611 | unsigned char *p_addr); | 622 | /* Issue stop command for active queues only */ |
612 | static void eth_port_set_multicast_list(struct net_device *); | 623 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); |
613 | static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp, | ||
614 | unsigned int queues); | ||
615 | static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp, | ||
616 | unsigned int queues); | ||
617 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp); | ||
618 | static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp); | ||
619 | static int mv643xx_eth_open(struct net_device *); | ||
620 | static int mv643xx_eth_stop(struct net_device *); | ||
621 | static void eth_port_init_mac_tables(struct mv643xx_private *mp); | ||
622 | #ifdef MV643XX_NAPI | ||
623 | static int mv643xx_poll(struct napi_struct *napi, int budget); | ||
624 | #endif | ||
625 | static int ethernet_phy_get(struct mv643xx_private *mp); | ||
626 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr); | ||
627 | static int ethernet_phy_detect(struct mv643xx_private *mp); | ||
628 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); | ||
629 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); | ||
630 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
631 | static const struct ethtool_ops mv643xx_ethtool_ops; | ||
632 | 624 | ||
633 | static char mv643xx_driver_name[] = "mv643xx_eth"; | 625 | /* Wait for all Rx activity to terminate. */ |
634 | static char mv643xx_driver_version[] = "1.0"; | 626 | /* Check port cause register that all Rx queues are stopped */ |
627 | while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) | ||
628 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
629 | } | ||
635 | 630 | ||
636 | static inline u32 rdl(struct mv643xx_private *mp, int offset) | 631 | return queues; |
632 | } | ||
633 | |||
634 | static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp, | ||
635 | unsigned int queues) | ||
637 | { | 636 | { |
638 | return readl(mp->shared->eth_base + offset); | 637 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues); |
639 | } | 638 | } |
640 | 639 | ||
641 | static inline void wrl(struct mv643xx_private *mp, int offset, u32 data) | 640 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp) |
642 | { | 641 | { |
643 | writel(data, mp->shared->eth_base + offset); | 642 | unsigned int port_num = mp->port_num; |
643 | u32 queues; | ||
644 | |||
645 | /* Stop Tx port activity. Check port Tx activity. */ | ||
646 | queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; | ||
647 | if (queues) { | ||
648 | /* Issue stop command for active queues only */ | ||
649 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); | ||
650 | |||
651 | /* Wait for all Tx activity to terminate. */ | ||
652 | /* Check port cause register that all Tx queues are stopped */ | ||
653 | while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) | ||
654 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
655 | |||
656 | /* Wait for Tx FIFO to empty */ | ||
657 | while (rdl(mp, PORT_STATUS_REG(port_num)) & | ||
658 | ETH_PORT_TX_FIFO_EMPTY) | ||
659 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
660 | } | ||
661 | |||
662 | return queues; | ||
644 | } | 663 | } |
645 | 664 | ||
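
The enable/disable pairs above rely on the layout of the queue command
registers: bits 0-7 enable queues, writing bits 8-15 requests a stop,
and the low byte reads back as the set of still-active queues. A
hedged sketch of stopping only queue 0 under those assumptions:

    /* Hypothetical: stop queue 0 alone and wait for it to drain.
     * Writing 1 << (8 + n) requests a stop of queue n; bit n of
     * the readback stays set while the queue is still running. */
    wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), 1 << 8);
    while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num)) & 1)
            udelay(PHY_WAIT_MICRO_SECONDS);
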
665 | |||
666 | /* rx ***********************************************************************/ | ||
667 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev); | ||
668 | |||
646 | /* | 669 | /* |
647 | * Changes MTU (maximum transmission unit) of the gigabit ethernet port | 670 | * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring. |
648 | * | 671 | * |
649 | * Input : pointer to ethernet interface network device structure | 672 | * DESCRIPTION: |
650 | * new mtu size | 673 | * This routine returns a Rx buffer back to the Rx ring. It retrieves the |
651 | Output : 0 upon success, -EINVAL upon failure | 674 | * next 'used' descriptor and attaches the returned buffer to it. |
675 | * In case the Rx ring was in "resource error" condition, where there are | ||
676 | * no available Rx resources, the function resets the resource error flag. | ||
677 | * | ||
678 | * INPUT: | ||
679 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
680 | * struct pkt_info *p_pkt_info Information on returned buffer. | ||
681 | * | ||
682 | * OUTPUT: | ||
683 | * New available Rx resource in Rx descriptor ring. | ||
684 | * | ||
685 | * RETURN: | ||
686 | * ETH_ERROR in case the routine can not access Rx desc ring. | ||
687 | * ETH_OK otherwise. | ||
652 | */ | 688 | */ |
653 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | 689 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, |
690 | struct pkt_info *p_pkt_info) | ||
654 | { | 691 | { |
655 | if ((new_mtu > 9500) || (new_mtu < 64)) | 692 | int used_rx_desc; /* Where to return Rx resource */ |
656 | return -EINVAL; | 693 | volatile struct eth_rx_desc *p_used_rx_desc; |
694 | unsigned long flags; | ||
657 | 695 | ||
658 | dev->mtu = new_mtu; | 696 | spin_lock_irqsave(&mp->lock, flags); |
659 | if (!netif_running(dev)) | ||
660 | return 0; | ||
661 | 697 | ||
662 | /* | 698 | /* Get 'used' Rx descriptor */ |
663 | * Stop and then re-open the interface. This will allocate RX | 699 | used_rx_desc = mp->rx_used_desc_q; |
664 | * skbs of the new MTU. | 700 | p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc]; |
665 | * There is a possible danger that the open will not succeed, | ||
666 | * due to memory being full, which might fail the open function. | ||
667 | */ | ||
668 | mv643xx_eth_stop(dev); | ||
669 | if (mv643xx_eth_open(dev)) { | ||
670 | printk(KERN_ERR "%s: Fatal error on opening device\n", | ||
671 | dev->name); | ||
672 | } | ||
673 | 701 | ||
674 | return 0; | 702 | p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr; |
703 | p_used_rx_desc->buf_size = p_pkt_info->byte_cnt; | ||
704 | mp->rx_skb[used_rx_desc] = p_pkt_info->return_info; | ||
705 | |||
706 | /* Flush the write pipe */ | ||
707 | |||
708 | /* Return the descriptor to DMA ownership */ | ||
709 | wmb(); | ||
710 | p_used_rx_desc->cmd_sts = | ||
711 | ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT; | ||
712 | wmb(); | ||
713 | |||
714 | /* Move the used descriptor pointer to the next descriptor */ | ||
715 | mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size; | ||
716 | |||
717 | /* Any Rx return cancels the Rx resource error status */ | ||
718 | mp->rx_resource_err = 0; | ||
719 | |||
720 | spin_unlock_irqrestore(&mp->lock, flags); | ||
721 | |||
722 | return ETH_OK; | ||
675 | } | 723 | } |
676 | 724 | ||
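
The barrier discipline in eth_rx_return_buff() above is the generic
DMA-ring hand-off pattern, condensed here with placeholder values:

    /* Fill every descriptor field first, then flip ownership, with
     * a write barrier in between so the SDMA engine can never see a
     * half-initialised descriptor that it already owns. */
    desc->buf_ptr = mapping;        /* placeholder mapping/size */
    desc->buf_size = size;
    wmb();
    desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT;
    wmb();
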
677 | /* | 725 | /* |
@@ -736,42 +784,841 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
736 | } | 784 | } |
737 | 785 | ||
738 | /* | 786 | /* |
739 | * mv643xx_eth_update_mac_address | 787 | * eth_port_receive - Get received information from Rx ring. |
740 | * | 788 | * |
741 | * Update the MAC address of the port in the address table | 789 | * DESCRIPTION: |
790 | * This routine returns the received data to the caller. There is no | ||
791 | * data copying during routine operation. All information is returned | ||
792 | * using pointer to packet information struct passed from the caller. | ||
793 | * If the routine exhausts Rx ring resources then the resource error flag | ||
794 | * is set. | ||
742 | * | 795 | * |
743 | * Input : pointer to ethernet interface network device structure | 796 | * INPUT: |
744 | Output : N/A | 797 | * struct mv643xx_private *mp Ethernet Port Control struct. |
798 | * struct pkt_info *p_pkt_info User packet buffer. | ||
799 | * | ||
800 | * OUTPUT: | ||
801 | * Rx ring current and used indexes are updated. | ||
802 | * | ||
803 | * RETURN: | ||
804 | * ETH_ERROR in case the routine can not access Rx desc ring. | ||
805 | * ETH_QUEUE_FULL if Rx ring resources are exhausted. | ||
806 | * ETH_END_OF_JOB if there is no received data. | ||
807 | * ETH_OK otherwise. | ||
745 | */ | 808 | */ |
746 | static void mv643xx_eth_update_mac_address(struct net_device *dev) | 809 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |
810 | struct pkt_info *p_pkt_info) | ||
811 | { | ||
812 | int rx_next_curr_desc, rx_curr_desc, rx_used_desc; | ||
813 | volatile struct eth_rx_desc *p_rx_desc; | ||
814 | unsigned int command_status; | ||
815 | unsigned long flags; | ||
816 | |||
817 | /* Do not process Rx ring in case of Rx ring resource error */ | ||
818 | if (mp->rx_resource_err) | ||
819 | return ETH_QUEUE_FULL; | ||
820 | |||
821 | spin_lock_irqsave(&mp->lock, flags); | ||
822 | |||
823 | /* Get the Rx Desc ring 'curr' and 'used' indexes | ||
824 | rx_curr_desc = mp->rx_curr_desc_q; | ||
825 | rx_used_desc = mp->rx_used_desc_q; | ||
826 | |||
827 | p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc]; | ||
828 | |||
829 | /* The following parameters are used to save readings from memory */ | ||
830 | command_status = p_rx_desc->cmd_sts; | ||
831 | rmb(); | ||
832 | |||
833 | /* Nothing to receive... */ | ||
834 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | ||
835 | spin_unlock_irqrestore(&mp->lock, flags); | ||
836 | return ETH_END_OF_JOB; | ||
837 | } | ||
838 | |||
839 | p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; | ||
840 | p_pkt_info->cmd_sts = command_status; | ||
841 | p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET; | ||
842 | p_pkt_info->return_info = mp->rx_skb[rx_curr_desc]; | ||
843 | p_pkt_info->l4i_chk = p_rx_desc->buf_size; | ||
844 | |||
845 | /* | ||
846 | * Clean the return info field to indicate that the | ||
847 | * packet has been moved to the upper layers | ||
848 | */ | ||
849 | mp->rx_skb[rx_curr_desc] = NULL; | ||
850 | |||
851 | /* Update current index in data structure */ | ||
852 | rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size; | ||
853 | mp->rx_curr_desc_q = rx_next_curr_desc; | ||
854 | |||
855 | /* Rx descriptors exhausted. Set the Rx ring resource error flag */ | ||
856 | if (rx_next_curr_desc == rx_used_desc) | ||
857 | mp->rx_resource_err = 1; | ||
858 | |||
859 | spin_unlock_irqrestore(&mp->lock, flags); | ||
860 | |||
861 | return ETH_OK; | ||
862 | } | ||
863 | |||
864 | /* | ||
865 | * mv643xx_eth_receive | ||
866 | * | ||
867 | * This function forwards packets that are received from the port's | ||
868 | * queues toward the kernel core or FastRoutes them to another interface. | ||
869 | * | ||
870 | * Input : dev - a pointer to the required interface | ||
871 | * max - maximum number to receive (0 means unlimited) | ||
872 | * | ||
873 | * Output : number of served packets | ||
874 | */ | ||
875 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | ||
747 | { | 876 | { |
748 | struct mv643xx_private *mp = netdev_priv(dev); | 877 | struct mv643xx_private *mp = netdev_priv(dev); |
878 | struct net_device_stats *stats = &dev->stats; | ||
879 | unsigned int received_packets = 0; | ||
880 | struct sk_buff *skb; | ||
881 | struct pkt_info pkt_info; | ||
749 | 882 | ||
750 | eth_port_init_mac_tables(mp); | 883 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { |
751 | eth_port_uc_addr_set(mp, dev->dev_addr); | 884 | dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE, |
885 | DMA_FROM_DEVICE); | ||
886 | mp->rx_desc_count--; | ||
887 | received_packets++; | ||
888 | |||
889 | /* | ||
890 | * Update statistics. | ||
891 | * Note byte count includes 4 byte CRC count | ||
892 | */ | ||
893 | stats->rx_packets++; | ||
894 | stats->rx_bytes += pkt_info.byte_cnt; | ||
895 | skb = pkt_info.return_info; | ||
896 | /* | ||
897 | * In case we received a packet without the first/last bits on, or | ||
898 | * the error summary bit is on, the packet needs to be dropped. | ||
899 | */ | ||
900 | if (((pkt_info.cmd_sts | ||
901 | & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) != | ||
902 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) | ||
903 | || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) { | ||
904 | stats->rx_dropped++; | ||
905 | if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC | | ||
906 | ETH_RX_LAST_DESC)) != | ||
907 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) { | ||
908 | if (net_ratelimit()) | ||
909 | printk(KERN_ERR | ||
910 | "%s: Received packet spread " | ||
911 | "on multiple descriptors\n", | ||
912 | dev->name); | ||
913 | } | ||
914 | if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) | ||
915 | stats->rx_errors++; | ||
916 | |||
917 | dev_kfree_skb_irq(skb); | ||
918 | } else { | ||
919 | /* | ||
920 | * The -4 is for the CRC in the trailer of the | ||
921 | * received packet | ||
922 | */ | ||
923 | skb_put(skb, pkt_info.byte_cnt - 4); | ||
924 | |||
925 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { | ||
926 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
927 | skb->csum = htons( | ||
928 | (pkt_info.cmd_sts & 0x0007fff8) >> 3); | ||
929 | } | ||
930 | skb->protocol = eth_type_trans(skb, dev); | ||
931 | #ifdef MV643XX_NAPI | ||
932 | netif_receive_skb(skb); | ||
933 | #else | ||
934 | netif_rx(skb); | ||
935 | #endif | ||
936 | } | ||
937 | dev->last_rx = jiffies; | ||
938 | } | ||
939 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | ||
940 | |||
941 | return received_packets; | ||
752 | } | 942 | } |
753 | 943 | ||
944 | #ifdef MV643XX_NAPI | ||
754 | /* | 945 | /* |
755 | * mv643xx_eth_set_rx_mode | 946 | * mv643xx_poll |
756 | * | 947 | * |
757 | * Change from promiscuous to regular rx mode | 948 | * This function is the NAPI poll callback |
949 | */ | ||
950 | static int mv643xx_poll(struct napi_struct *napi, int budget) | ||
951 | { | ||
952 | struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi); | ||
953 | struct net_device *dev = mp->dev; | ||
954 | unsigned int port_num = mp->port_num; | ||
955 | int work_done; | ||
956 | |||
957 | #ifdef MV643XX_TX_FAST_REFILL | ||
958 | if (++mp->tx_clean_threshold > 5) { | ||
959 | mv643xx_eth_free_completed_tx_descs(dev); | ||
960 | mp->tx_clean_threshold = 0; | ||
961 | } | ||
962 | #endif | ||
963 | |||
964 | work_done = 0; | ||
965 | if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | ||
966 | != (u32) mp->rx_used_desc_q) | ||
967 | work_done = mv643xx_eth_receive_queue(dev, budget); | ||
968 | |||
969 | if (work_done < budget) { | ||
970 | netif_rx_complete(dev, napi); | ||
971 | wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0); | ||
972 | wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
973 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | ||
974 | } | ||
975 | |||
976 | return work_done; | ||
977 | } | ||
978 | #endif | ||
979 | |||
980 | |||
981 | /* tx ***********************************************************************/ | ||
982 | /** | ||
983 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | ||
984 | * | ||
985 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | ||
986 | * This helper function detects that case. | ||
987 | */ | ||
988 | |||
989 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | ||
990 | { | ||
991 | unsigned int frag; | ||
992 | skb_frag_t *fragp; | ||
993 | |||
994 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
995 | fragp = &skb_shinfo(skb)->frags[frag]; | ||
996 | if (fragp->size <= 8 && fragp->page_offset & 0x7) | ||
997 | return 1; | ||
998 | } | ||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | /** | ||
1003 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | ||
1004 | */ | ||
1005 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | ||
1006 | { | ||
1007 | int tx_desc_curr; | ||
1008 | |||
1009 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); | ||
1010 | |||
1011 | tx_desc_curr = mp->tx_curr_desc_q; | ||
1012 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
1013 | |||
1014 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); | ||
1015 | |||
1016 | return tx_desc_curr; | ||
1017 | } | ||
1018 | |||
1019 | /** | ||
1020 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | ||
1021 | * | ||
1022 | * Ensure the data for each fragment to be transmitted is mapped properly, | ||
1023 | * then fill in descriptors in the tx hw queue. | ||
1024 | */ | ||
1025 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, | ||
1026 | struct sk_buff *skb) | ||
1027 | { | ||
1028 | int frag; | ||
1029 | int tx_index; | ||
1030 | struct eth_tx_desc *desc; | ||
1031 | |||
1032 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1033 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
1034 | |||
1035 | tx_index = eth_alloc_tx_desc_index(mp); | ||
1036 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1037 | |||
1038 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | ||
1039 | /* Last Frag enables interrupt and frees the skb */ | ||
1040 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1041 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1042 | ETH_TX_LAST_DESC | | ||
1043 | ETH_TX_ENABLE_INTERRUPT; | ||
1044 | mp->tx_skb[tx_index] = skb; | ||
1045 | } else | ||
1046 | mp->tx_skb[tx_index] = NULL; | ||
1047 | |||
1048 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1049 | desc->l4i_chk = 0; | ||
1050 | desc->byte_cnt = this_frag->size; | ||
1051 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1052 | this_frag->page_offset, | ||
1053 | this_frag->size, | ||
1054 | DMA_TO_DEVICE); | ||
1055 | } | ||
1056 | } | ||
1057 | |||
1058 | static inline __be16 sum16_as_be(__sum16 sum) | ||
1059 | { | ||
1060 | return (__force __be16)sum; | ||
1061 | } | ||
1062 | |||
1063 | /** | ||
1064 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw | ||
1065 | * | ||
1066 | * Ensure the data for an skb to be transmitted is mapped properly, | ||
1067 | * then fill in descriptors in the tx hw queue and start the hardware. | ||
1068 | */ | ||
1069 | static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | ||
1070 | struct sk_buff *skb) | ||
1071 | { | ||
1072 | int tx_index; | ||
1073 | struct eth_tx_desc *desc; | ||
1074 | u32 cmd_sts; | ||
1075 | int length; | ||
1076 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1077 | |||
1078 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; | ||
1079 | |||
1080 | tx_index = eth_alloc_tx_desc_index(mp); | ||
1081 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1082 | |||
1083 | if (nr_frags) { | ||
1084 | eth_tx_fill_frag_descs(mp, skb); | ||
1085 | |||
1086 | length = skb_headlen(skb); | ||
1087 | mp->tx_skb[tx_index] = NULL; | ||
1088 | } else { | ||
1089 | cmd_sts |= ETH_ZERO_PADDING | | ||
1090 | ETH_TX_LAST_DESC | | ||
1091 | ETH_TX_ENABLE_INTERRUPT; | ||
1092 | length = skb->len; | ||
1093 | mp->tx_skb[tx_index] = skb; | ||
1094 | } | ||
1095 | |||
1096 | desc->byte_cnt = length; | ||
1097 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); | ||
1098 | |||
1099 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | ||
1100 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | ||
1101 | |||
1102 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | | ||
1103 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1104 | ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT; | ||
1105 | |||
1106 | switch (ip_hdr(skb)->protocol) { | ||
1107 | case IPPROTO_UDP: | ||
1108 | cmd_sts |= ETH_UDP_FRAME; | ||
1109 | desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); | ||
1110 | break; | ||
1111 | case IPPROTO_TCP: | ||
1112 | desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); | ||
1113 | break; | ||
1114 | default: | ||
1115 | BUG(); | ||
1116 | } | ||
1117 | } else { | ||
1118 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1119 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; | ||
1120 | desc->l4i_chk = 0; | ||
1121 | } | ||
1122 | |||
1123 | /* ensure all other descriptors are written before first cmd_sts */ | ||
1124 | wmb(); | ||
1125 | desc->cmd_sts = cmd_sts; | ||
1126 | |||
1127 | /* ensure all descriptors are written before poking hardware */ | ||
1128 | wmb(); | ||
1129 | mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED); | ||
1130 | |||
1131 | mp->tx_desc_count += nr_frags + 1; | ||
1132 | } | ||
1133 | |||
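
For a concrete reading of the checksum block above: a CHECKSUM_PARTIAL
TCP packet with a standard 20-byte IPv4 header (ihl == 5) ends up with
the following descriptor setup (same calls as above, values filled in):

    cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM |
               ETH_GEN_IP_V_4_CHECKSUM |
               5 << ETH_TX_IHL_SHIFT;          /* ihl == 5 */
    desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
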
1134 | /** | ||
1135 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission | ||
1136 | * | ||
1137 | */ | ||
1138 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1139 | { | ||
1140 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1141 | struct net_device_stats *stats = &dev->stats; | ||
1142 | unsigned long flags; | ||
1143 | |||
1144 | BUG_ON(netif_queue_stopped(dev)); | ||
1145 | |||
1146 | if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { | ||
1147 | stats->tx_dropped++; | ||
1148 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1149 | "unaligned fragment\n", dev->name); | ||
1150 | return NETDEV_TX_BUSY; | ||
1151 | } | ||
1152 | |||
1153 | spin_lock_irqsave(&mp->lock, flags); | ||
1154 | |||
1155 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { | ||
1156 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); | ||
1157 | netif_stop_queue(dev); | ||
1158 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1159 | return NETDEV_TX_BUSY; | ||
1160 | } | ||
1161 | |||
1162 | eth_tx_submit_descs_for_skb(mp, skb); | ||
1163 | stats->tx_bytes += skb->len; | ||
1164 | stats->tx_packets++; | ||
1165 | dev->trans_start = jiffies; | ||
1166 | |||
1167 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | ||
1168 | netif_stop_queue(dev); | ||
1169 | |||
1170 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1171 | |||
1172 | return NETDEV_TX_OK; | ||
1173 | } | ||
1174 | |||
1175 | |||
1176 | /* mii management interface *************************************************/ | ||
1177 | static int ethernet_phy_get(struct mv643xx_private *mp); | ||
1178 | |||
1179 | /* | ||
1180 | * eth_port_read_smi_reg - Read PHY registers | ||
1181 | * | ||
1182 | * DESCRIPTION: | ||
1183 | * This routine utilizes the SMI interface to interact with the PHY in | ||
1184 | * order to perform a PHY register read. | ||
1185 | * | ||
1186 | * INPUT: | ||
1187 | * struct mv643xx_private *mp Ethernet Port. | ||
1188 | * unsigned int phy_reg PHY register address offset. | ||
1189 | * unsigned int *value Register value buffer. | ||
1190 | * | ||
1191 | * OUTPUT: | ||
1192 | * Write the value of a specified PHY register into given buffer. | ||
1193 | * | ||
1194 | * RETURN: | ||
1195 | * None; on PHY busy or read-valid timeout the routine gives up and | ||
1196 | * leaves the value buffer unmodified. | ||
1197 | * | ||
1198 | */ | ||
1199 | static void eth_port_read_smi_reg(struct mv643xx_private *mp, | ||
1200 | unsigned int phy_reg, unsigned int *value) | ||
1201 | { | ||
1202 | void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; | ||
1203 | int phy_addr = ethernet_phy_get(mp); | ||
1204 | unsigned long flags; | ||
1205 | int i; | ||
1206 | |||
1207 | /* the SMI register is a shared resource */ | ||
1208 | spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); | ||
1209 | |||
1210 | /* wait for the SMI register to become available */ | ||
1211 | for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { | ||
1212 | if (i == PHY_WAIT_ITERATIONS) { | ||
1213 | printk("%s: PHY busy timeout\n", mp->dev->name); | ||
1214 | goto out; | ||
1215 | } | ||
1216 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
1217 | } | ||
1218 | |||
1219 | writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ, | ||
1220 | smi_reg); | ||
1221 | |||
1222 | /* now wait for the data to be valid */ | ||
1223 | for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) { | ||
1224 | if (i == PHY_WAIT_ITERATIONS) { | ||
1225 | printk("%s: PHY read timeout\n", mp->dev->name); | ||
1226 | goto out; | ||
1227 | } | ||
1228 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
1229 | } | ||
1230 | |||
1231 | *value = readl(smi_reg) & 0xffff; | ||
1232 | out: | ||
1233 | spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); | ||
1234 | } | ||
1235 | |||
1236 | /* | ||
1237 | * eth_port_write_smi_reg - Write to PHY registers | ||
1238 | * | ||
1239 | * DESCRIPTION: | ||
1240 | * This routine utilizes the SMI interface to interact with the PHY in | ||
1241 | * order to perform writes to PHY registers. | ||
1242 | * | ||
1243 | * INPUT: | ||
1244 | * struct mv643xx_private *mp Ethernet Port. | ||
1245 | * unsigned int phy_reg PHY register address offset. | ||
1246 | * unsigned int value Register value. | ||
1247 | * | ||
1248 | * OUTPUT: | ||
1249 | * Write the given value to the specified PHY register. | ||
1250 | * | ||
1251 | * RETURN: | ||
1252 | * None; on PHY busy timeout the routine gives up and the write | ||
1253 | * is silently dropped. | ||
1254 | * | ||
1255 | */ | ||
1256 | static void eth_port_write_smi_reg(struct mv643xx_private *mp, | ||
1257 | unsigned int phy_reg, unsigned int value) | ||
1258 | { | ||
1259 | void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; | ||
1260 | int phy_addr = ethernet_phy_get(mp); | ||
1261 | unsigned long flags; | ||
1262 | int i; | ||
1263 | |||
1264 | /* the SMI register is a shared resource */ | ||
1265 | spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); | ||
1266 | |||
1267 | /* wait for the SMI register to become available */ | ||
1268 | for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { | ||
1269 | if (i == PHY_WAIT_ITERATIONS) { | ||
1270 | printk("%s: PHY busy timeout\n", mp->dev->name); | ||
1271 | goto out; | ||
1272 | } | ||
1273 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
1274 | } | ||
1275 | |||
1276 | writel((phy_addr << 16) | (phy_reg << 21) | | ||
1277 | ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg); | ||
1278 | out: | ||
1279 | spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); | ||
1280 | } | ||
1281 | |||
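
A usage sketch of the two SMI helpers; registers 2 and 3 are the
standard MII PHY identifier registers (timeout handling is inside the
helpers themselves):

    /* Read the 32-bit PHY ID through the shared SMI interface. */
    unsigned int id1, id2;

    eth_port_read_smi_reg(mp, 2, &id1);     /* PHYSID1 */
    eth_port_read_smi_reg(mp, 3, &id2);     /* PHYSID2 */
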
1282 | |||
1283 | /* mib counters *************************************************************/ | ||
1284 | /* | ||
1285 | * eth_clear_mib_counters - Clear all MIB counters | ||
1286 | * | ||
1287 | * DESCRIPTION: | ||
1288 | * This function clears all MIB counters of a specific ethernet port. | ||
1289 | * A read from the MIB counter will reset the counter. | ||
1290 | * | ||
1291 | * INPUT: | ||
1292 | * struct mv643xx_private *mp Ethernet Port. | ||
1293 | * | ||
1294 | * OUTPUT: | ||
1295 | * After reading all MIB counters, the counters reset. | ||
1296 | * | ||
1297 | * RETURN: | ||
1298 | * None. | ||
1299 | * | ||
1300 | */ | ||
1301 | static void eth_clear_mib_counters(struct mv643xx_private *mp) | ||
1302 | { | ||
1303 | unsigned int port_num = mp->port_num; | ||
1304 | int i; | ||
1305 | |||
1306 | /* Perform dummy reads from MIB counters */ | ||
1307 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; | ||
1308 | i += 4) | ||
1309 | rdl(mp, MIB_COUNTERS_BASE(port_num) + i); | ||
1310 | } | ||
1311 | |||
1312 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) | ||
1313 | { | ||
1314 | return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset); | ||
1315 | } | ||
1316 | |||
1317 | static void eth_update_mib_counters(struct mv643xx_private *mp) | ||
1318 | { | ||
1319 | struct mv643xx_mib_counters *p = &mp->mib_counters; | ||
1320 | int offset; | ||
1321 | |||
1322 | p->good_octets_received += | ||
1323 | read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW); | ||
1324 | p->good_octets_received += | ||
1325 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32; | ||
1326 | |||
1327 | for (offset = ETH_MIB_BAD_OCTETS_RECEIVED; | ||
1328 | offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS; | ||
1329 | offset += 4) | ||
1330 | *(u32 *)((char *)p + offset) += read_mib(mp, offset); | ||
1331 | |||
1332 | p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW); | ||
1333 | p->good_octets_sent += | ||
1334 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32; | ||
1335 | |||
1336 | for (offset = ETH_MIB_GOOD_FRAMES_SENT; | ||
1337 | offset <= ETH_MIB_LATE_COLLISION; | ||
1338 | offset += 4) | ||
1339 | *(u32 *)((char *)p + offset) += read_mib(mp, offset); | ||
1340 | } | ||
1341 | |||
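
The accumulation loops above depend on struct mv643xx_mib_counters
laying out its u32 fields in exactly the same order as the hardware
MIB register bank, so one byte offset indexes both sides:

    /* One offset addresses both the register bank and the struct
     * field; this only holds while the struct mirrors the layout. */
    offset = ETH_MIB_GOOD_FRAMES_SENT;
    *(u32 *)((char *)p + offset) += read_mib(mp, offset);
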
1342 | |||
1343 | /* ethtool ******************************************************************/ | ||
1344 | struct mv643xx_stats { | ||
1345 | char stat_string[ETH_GSTRING_LEN]; | ||
1346 | int sizeof_stat; | ||
1347 | int stat_offset; | ||
1348 | }; | ||
1349 | |||
1350 | #define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \ | ||
1351 | offsetof(struct mv643xx_private, m) | ||
1352 | |||
1353 | static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | ||
1354 | { "rx_packets", MV643XX_STAT(stats.rx_packets) }, | ||
1355 | { "tx_packets", MV643XX_STAT(stats.tx_packets) }, | ||
1356 | { "rx_bytes", MV643XX_STAT(stats.rx_bytes) }, | ||
1357 | { "tx_bytes", MV643XX_STAT(stats.tx_bytes) }, | ||
1358 | { "rx_errors", MV643XX_STAT(stats.rx_errors) }, | ||
1359 | { "tx_errors", MV643XX_STAT(stats.tx_errors) }, | ||
1360 | { "rx_dropped", MV643XX_STAT(stats.rx_dropped) }, | ||
1361 | { "tx_dropped", MV643XX_STAT(stats.tx_dropped) }, | ||
1362 | { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) }, | ||
1363 | { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) }, | ||
1364 | { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) }, | ||
1365 | { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) }, | ||
1366 | { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) }, | ||
1367 | { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) }, | ||
1368 | { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) }, | ||
1369 | { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) }, | ||
1370 | { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) }, | ||
1371 | { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) }, | ||
1372 | { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) }, | ||
1373 | { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) }, | ||
1374 | { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) }, | ||
1375 | { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) }, | ||
1376 | { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) }, | ||
1377 | { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) }, | ||
1378 | { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) }, | ||
1379 | { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) }, | ||
1380 | { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) }, | ||
1381 | { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) }, | ||
1382 | { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) }, | ||
1383 | { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) }, | ||
1384 | { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) }, | ||
1385 | { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) }, | ||
1386 | { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) }, | ||
1387 | { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) }, | ||
1388 | { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) }, | ||
1389 | { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) }, | ||
1390 | { "collision", MV643XX_STAT(mib_counters.collision) }, | ||
1391 | { "late_collision", MV643XX_STAT(mib_counters.late_collision) }, | ||
1392 | }; | ||
1393 | |||
1394 | #define MV643XX_STATS_LEN ARRAY_SIZE(mv643xx_gstrings_stats) | ||
1395 | |||
1396 | static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1397 | { | ||
1398 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1399 | int err; | ||
1400 | |||
1401 | spin_lock_irq(&mp->lock); | ||
1402 | err = mii_ethtool_gset(&mp->mii, cmd); | ||
1403 | spin_unlock_irq(&mp->lock); | ||
1404 | |||
1405 | /* The PHY may support 1000baseT_Half, but the mv643xx does not */ | ||
1406 | cmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
1407 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | ||
1408 | |||
1409 | return err; | ||
1410 | } | ||
1411 | |||
1412 | static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1413 | { | ||
1414 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1415 | int err; | ||
1416 | |||
1417 | spin_lock_irq(&mp->lock); | ||
1418 | err = mii_ethtool_sset(&mp->mii, cmd); | ||
1419 | spin_unlock_irq(&mp->lock); | ||
1420 | |||
1421 | return err; | ||
1422 | } | ||
1423 | |||
1424 | static void mv643xx_get_drvinfo(struct net_device *netdev, | ||
1425 | struct ethtool_drvinfo *drvinfo) | ||
1426 | { | ||
1427 | strncpy(drvinfo->driver, mv643xx_driver_name, 32); | ||
1428 | strncpy(drvinfo->version, mv643xx_driver_version, 32); | ||
1429 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
1430 | strncpy(drvinfo->bus_info, "mv643xx", 32); | ||
1431 | drvinfo->n_stats = MV643XX_STATS_LEN; | ||
1432 | } | ||
1433 | |||
1434 | static int mv643xx_eth_nway_restart(struct net_device *dev) | ||
1435 | { | ||
1436 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1437 | |||
1438 | return mii_nway_restart(&mp->mii); | ||
1439 | } | ||
1440 | |||
1441 | static u32 mv643xx_eth_get_link(struct net_device *dev) | ||
1442 | { | ||
1443 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1444 | |||
1445 | return mii_link_ok(&mp->mii); | ||
1446 | } | ||
1447 | |||
1448 | static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, | ||
1449 | uint8_t *data) | ||
1450 | { | ||
1451 | int i; | ||
1452 | |||
1453 | switch(stringset) { | ||
1454 | case ETH_SS_STATS: | ||
1455 | for (i=0; i < MV643XX_STATS_LEN; i++) { | ||
1456 | memcpy(data + i * ETH_GSTRING_LEN, | ||
1457 | mv643xx_gstrings_stats[i].stat_string, | ||
1458 | ETH_GSTRING_LEN); | ||
1459 | } | ||
1460 | break; | ||
1461 | } | ||
1462 | } | ||
1463 | |||
1464 | static void mv643xx_get_ethtool_stats(struct net_device *netdev, | ||
1465 | struct ethtool_stats *stats, uint64_t *data) | ||
1466 | { | ||
1467 | struct mv643xx_private *mp = netdev->priv; | ||
1468 | int i; | ||
1469 | |||
1470 | eth_update_mib_counters(mp); | ||
1471 | |||
1472 | for (i = 0; i < MV643XX_STATS_LEN; i++) { | ||
1473 | char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset; | ||
1474 | data[i] = (mv643xx_gstrings_stats[i].sizeof_stat == | ||
1475 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | ||
1476 | } | ||
1477 | } | ||
1478 | |||
1479 | static int mv643xx_get_sset_count(struct net_device *netdev, int sset) | ||
1480 | { | ||
1481 | switch (sset) { | ||
1482 | case ETH_SS_STATS: | ||
1483 | return MV643XX_STATS_LEN; | ||
1484 | default: | ||
1485 | return -EOPNOTSUPP; | ||
1486 | } | ||
1487 | } | ||
1488 | |||
1489 | static const struct ethtool_ops mv643xx_ethtool_ops = { | ||
1490 | .get_settings = mv643xx_get_settings, | ||
1491 | .set_settings = mv643xx_set_settings, | ||
1492 | .get_drvinfo = mv643xx_get_drvinfo, | ||
1493 | .get_link = mv643xx_eth_get_link, | ||
1494 | .set_sg = ethtool_op_set_sg, | ||
1495 | .get_sset_count = mv643xx_get_sset_count, | ||
1496 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
1497 | .get_strings = mv643xx_get_strings, | ||
1498 | .nway_reset = mv643xx_eth_nway_restart, | ||
1499 | }; | ||
1500 | |||
1501 | |||
1502 | /* address handling *********************************************************/ | ||
1503 | /* | ||
1504 | * eth_port_uc_addr_get - Read the MAC address from the port's hw registers | ||
1505 | */ | ||
1506 | static void eth_port_uc_addr_get(struct mv643xx_private *mp, | ||
1507 | unsigned char *p_addr) | ||
1508 | { | ||
1509 | unsigned int port_num = mp->port_num; | ||
1510 | unsigned int mac_h; | ||
1511 | unsigned int mac_l; | ||
1512 | |||
1513 | mac_h = rdl(mp, MAC_ADDR_HIGH(port_num)); | ||
1514 | mac_l = rdl(mp, MAC_ADDR_LOW(port_num)); | ||
1515 | |||
1516 | p_addr[0] = (mac_h >> 24) & 0xff; | ||
1517 | p_addr[1] = (mac_h >> 16) & 0xff; | ||
1518 | p_addr[2] = (mac_h >> 8) & 0xff; | ||
1519 | p_addr[3] = mac_h & 0xff; | ||
1520 | p_addr[4] = (mac_l >> 8) & 0xff; | ||
1521 | p_addr[5] = mac_l & 0xff; | ||
1522 | } | ||
1523 | |||
1524 | /* | ||
1525 | * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables | ||
1526 | * | ||
1527 | * DESCRIPTION: | ||
1528 | * Go through all the DA filter tables (Unicast, Special Multicast & | ||
1529 | * Other Multicast) and set each entry to 0. | ||
1530 | * | ||
1531 | * INPUT: | ||
1532 | * struct mv643xx_private *mp Ethernet Port. | ||
1533 | * | ||
1534 | * OUTPUT: | ||
1535 | * Multicast and Unicast packets are rejected. | ||
1536 | * | ||
1537 | * RETURN: | ||
1538 | * None. | ||
1539 | */ | ||
1540 | static void eth_port_init_mac_tables(struct mv643xx_private *mp) | ||
1541 | { | ||
1542 | unsigned int port_num = mp->port_num; | ||
1543 | int table_index; | ||
1544 | |||
1545 | /* Clear DA filter unicast table (Ex_dFUT) */ | ||
1546 | for (table_index = 0; table_index <= 0xC; table_index += 4) | ||
1547 | wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) + | ||
1548 | table_index, 0); | ||
1549 | |||
1550 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
1551 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | ||
1552 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) + | ||
1553 | table_index, 0); | ||
1554 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | ||
1555 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) + | ||
1556 | table_index, 0); | ||
1557 | } | ||
1558 | } | ||
1559 | |||
1560 | /* | ||
1561 | * The entries in each table are indexed by a hash of a packet's MAC | ||
1562 | * address. One bit in each entry determines whether the packet is | ||
1563 | * accepted. There are 4 entries (each 8 bits wide) in each register | ||
1564 | * of the table. The bits in each entry are defined as follows: | ||
1565 | * 0 Accept=1, Drop=0 | ||
1566 | * 3-1 Queue (ETH_Q0=0) | ||
1567 | * 7-4 Reserved = 0; | ||
1568 | */ | ||
1569 | static void eth_port_set_filter_table_entry(struct mv643xx_private *mp, | ||
1570 | int table, unsigned char entry) | ||
1571 | { | ||
1572 | unsigned int table_reg; | ||
1573 | unsigned int tbl_offset; | ||
1574 | unsigned int reg_offset; | ||
1575 | |||
1576 | tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */ | ||
1577 | reg_offset = entry % 4; /* Entry offset within the register */ | ||
1578 | |||
1579 | /* Set "accepts frame bit" at specified table entry */ | ||
1580 | table_reg = rdl(mp, table + tbl_offset); | ||
1581 | table_reg |= 0x01 << (8 * reg_offset); | ||
1582 | wrl(mp, table + tbl_offset, table_reg); | ||
1583 | } | ||
1584 | |||
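
A worked example of the indexing above: for entry 0x1a, tbl_offset is
(0x1a / 4) * 4 = 0x18 and reg_offset is 0x1a % 4 = 2, so the accept
bit is bit 8 * 2 = 16 of the register at table + 0x18:

    table_reg = rdl(mp, table + 0x18);
    table_reg |= 0x01 << 16;
    wrl(mp, table + 0x18, table_reg);
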
1585 | /* | ||
1586 | * eth_port_uc_addr_set - Write a MAC address into the port's hw registers | ||
1587 | */ | ||
1588 | static void eth_port_uc_addr_set(struct mv643xx_private *mp, | ||
1589 | unsigned char *p_addr) | ||
1590 | { | ||
1591 | unsigned int port_num = mp->port_num; | ||
1592 | unsigned int mac_h; | ||
1593 | unsigned int mac_l; | ||
1594 | int table; | ||
1595 | |||
1596 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | ||
1597 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | ||
1598 | (p_addr[3] << 0); | ||
1599 | |||
1600 | wrl(mp, MAC_ADDR_LOW(port_num), mac_l); | ||
1601 | wrl(mp, MAC_ADDR_HIGH(port_num), mac_h); | ||
1602 | |||
1603 | /* Accept frames with this address */ | ||
1604 | table = DA_FILTER_UNICAST_TABLE_BASE(port_num); | ||
1605 | eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f); | ||
1606 | } | ||
1607 | |||
1608 | /* | ||
1609 | * mv643xx_eth_update_mac_address | ||
1610 | * | ||
1611 | * Update the MAC address of the port in the address table | ||
758 | * | 1612 | * |
759 | * Input : pointer to ethernet interface network device structure | 1613 | * Input : pointer to ethernet interface network device structure |
760 | * Output : N/A | 1614 | * Output : N/A |
761 | */ | 1615 | */ |
762 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) | 1616 | static void mv643xx_eth_update_mac_address(struct net_device *dev) |
763 | { | 1617 | { |
764 | struct mv643xx_private *mp = netdev_priv(dev); | 1618 | struct mv643xx_private *mp = netdev_priv(dev); |
765 | u32 config_reg; | ||
766 | |||
767 | config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num)); | ||
768 | if (dev->flags & IFF_PROMISC) | ||
769 | config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; | ||
770 | else | ||
771 | config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; | ||
772 | wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg); | ||
773 | 1619 | ||
774 | eth_port_set_multicast_list(dev); | 1620 | eth_port_init_mac_tables(mp); |
1621 | eth_port_uc_addr_set(mp, dev->dev_addr); | ||
775 | } | 1622 | } |
776 | 1623 | ||
777 | /* | 1624 | /* |
@@ -797,44 +1644,306 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
797 | } | 1644 | } |
798 | 1645 | ||
799 | /* | 1646 | /* |
800 | * mv643xx_eth_tx_timeout | 1647 | * eth_port_mc_addr - Multicast address settings. |
801 | * | 1648 | * |
802 | * Called upon a timeout on transmitting a packet | 1649 | * The MV device supports multicast using two tables: |
1650 | * 1) Special Multicast Table for MAC addresses of the form | ||
1651 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF). | ||
1652 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast | ||
1653 | * Table entries in the DA-Filter table. | ||
1654 | * 2) Other Multicast Table for multicast of another type. A CRC-8bit | ||
1655 | * is used as an index to the Other Multicast Table entries in the | ||
1656 | * DA-Filter table. This function calculates the CRC-8bit value. | ||
1657 | * In either case, eth_port_set_filter_table_entry() is then called | ||
1658 | * to set the actual table entry. | ||
1659 | */ | ||
1660 | static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr) | ||
1661 | { | ||
1662 | unsigned int port_num = mp->port_num; | ||
1663 | unsigned int mac_h; | ||
1664 | unsigned int mac_l; | ||
1665 | unsigned char crc_result = 0; | ||
1666 | int table; | ||
1667 | int mac_array[48]; | ||
1668 | int crc[8]; | ||
1669 | int i; | ||
1670 | |||
1671 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && | ||
1672 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { | ||
1673 | table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num); | ||
1674 | eth_port_set_filter_table_entry(mp, table, p_addr[5]); | ||
1675 | return; | ||
1676 | } | ||
1677 | |||
1678 | /* Calculate CRC-8 out of the given address */ | ||
1679 | mac_h = (p_addr[0] << 8) | (p_addr[1]); | ||
1680 | mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) | | ||
1681 | (p_addr[4] << 8) | (p_addr[5] << 0); | ||
1682 | |||
1683 | for (i = 0; i < 32; i++) | ||
1684 | mac_array[i] = (mac_l >> i) & 0x1; | ||
1685 | for (i = 32; i < 48; i++) | ||
1686 | mac_array[i] = (mac_h >> (i - 32)) & 0x1; | ||
1687 | |||
1688 | crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^ | ||
1689 | mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^ | ||
1690 | mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^ | ||
1691 | mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^ | ||
1692 | mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0]; | ||
1693 | |||
1694 | crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ | ||
1695 | mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^ | ||
1696 | mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^ | ||
1697 | mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^ | ||
1698 | mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^ | ||
1699 | mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^ | ||
1700 | mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0]; | ||
1701 | |||
1702 | crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^ | ||
1703 | mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^ | ||
1704 | mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^ | ||
1705 | mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^ | ||
1706 | mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ | ||
1707 | mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0]; | ||
1708 | |||
1709 | crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ | ||
1710 | mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^ | ||
1711 | mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^ | ||
1712 | mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ | ||
1713 | mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^ | ||
1714 | mac_array[3] ^ mac_array[2] ^ mac_array[1]; | ||
1715 | |||
1716 | crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^ | ||
1717 | mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^ | ||
1718 | mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^ | ||
1719 | mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^ | ||
1720 | mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^ | ||
1721 | mac_array[3] ^ mac_array[2]; | ||
1722 | |||
1723 | crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^ | ||
1724 | mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^ | ||
1725 | mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^ | ||
1726 | mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^ | ||
1727 | mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^ | ||
1728 | mac_array[4] ^ mac_array[3]; | ||
1729 | |||
1730 | crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^ | ||
1731 | mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^ | ||
1732 | mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^ | ||
1733 | mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^ | ||
1734 | mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^ | ||
1735 | mac_array[4]; | ||
1736 | |||
1737 | crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^ | ||
1738 | mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^ | ||
1739 | mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^ | ||
1740 | mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^ | ||
1741 | mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5]; | ||
1742 | |||
1743 | for (i = 0; i < 8; i++) | ||
1744 | crc_result = crc_result | (crc[i] << i); | ||
1745 | |||
1746 | table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num); | ||
1747 | eth_port_set_filter_table_entry(mp, table, crc_result); | ||
1748 | } | ||
1749 | |||
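
The unrolled XOR network above is a bit-parallel CRC-8. A loop-based
sketch of the same idea, assuming the conventional polynomial
x^8 + x^2 + x + 1 (0x07); the polynomial and bit order are assumptions
that would need verifying against the equations before substituting it:

    /* Hypothetical loop equivalent of the XOR network above. */
    static unsigned char mc_hash_crc8(const unsigned char *addr)
    {
            unsigned char crc = 0;
            int byte, bit;

            for (byte = 0; byte < 6; byte++) {
                    crc ^= addr[byte];
                    for (bit = 0; bit < 8; bit++)
                            crc = (crc & 0x80) ? (crc << 1) ^ 0x07
                                               : crc << 1;
            }

            return crc;
    }
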
1750 | /* | ||
1751 | * Set the entire multicast list based on dev->mc_list. | ||
1752 | */ | ||
1753 | static void eth_port_set_multicast_list(struct net_device *dev) | ||
1754 | { | ||
1755 | |||
1756 | struct dev_mc_list *mc_list; | ||
1757 | int i; | ||
1758 | int table_index; | ||
1759 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1760 | unsigned int eth_port_num = mp->port_num; | ||
1761 | |||
1762 | /* If the device is in promiscuous mode or in all multicast mode, | ||
1763 | * we will fully populate both multicast tables with accept. | ||
1764 | * This is guaranteed to yield a match on all multicast addresses... | ||
1765 | */ | ||
1766 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) { | ||
1767 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
1768 | /* Set all entries in DA filter special multicast | ||
1769 | * table (Ex_dFSMT) | ||
1770 | * Set for ETH_Q0 for now | ||
1771 | * Bits | ||
1772 | * 0 Accept=1, Drop=0 | ||
1773 | * 3-1 Queue ETH_Q0=0 | ||
1774 | * 7-4 Reserved = 0; | ||
1775 | */ | ||
1776 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | ||
1777 | |||
1778 | /* Set all entries in DA filter other multicast | ||
1779 | * table (Ex_dFOMT) | ||
1780 | * Set for ETH_Q0 for now | ||
1781 | * Bits | ||
1782 | * 0 Accept=1, Drop=0 | ||
1783 | * 3-1 Queue ETH_Q0=0 | ||
1784 | * 7-4 Reserved = 0; | ||
1785 | */ | ||
1786 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | ||
1787 | } | ||
1788 | return; | ||
1789 | } | ||
1790 | |||
1791 | /* We will clear out multicast tables every time we get the list. | ||
1792 | * Then add the entire new list... | ||
1793 | */ | ||
1794 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
1795 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | ||
1796 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | ||
1797 | (eth_port_num) + table_index, 0); | ||
1798 | |||
1799 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | ||
1800 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE | ||
1801 | (eth_port_num) + table_index, 0); | ||
1802 | } | ||
1803 | |||
1804 | /* Get pointer to net_device multicast list and add each one... */ | ||
1805 | for (i = 0, mc_list = dev->mc_list; | ||
1806 | (i < 256) && (mc_list != NULL) && (i < dev->mc_count); | ||
1807 | i++, mc_list = mc_list->next) | ||
1808 | if (mc_list->dmi_addrlen == 6) | ||
1809 | eth_port_mc_addr(mp, mc_list->dmi_addr); | ||
1810 | } | ||
1811 | |||
1812 | /* | ||
1813 | * mv643xx_eth_set_rx_mode | ||
803 | * | 1814 | * |
804 | * Input : pointer to ethernet interface network device structure. | 1815 | * Change from promiscuous to regular rx mode |
1816 | * | ||
1817 | * Input : pointer to ethernet interface network device structure | ||
805 | * Output : N/A | 1818 | * Output : N/A |
806 | */ | 1819 | */ |
807 | static void mv643xx_eth_tx_timeout(struct net_device *dev) | 1820 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) |
808 | { | 1821 | { |
809 | struct mv643xx_private *mp = netdev_priv(dev); | 1822 | struct mv643xx_private *mp = netdev_priv(dev); |
1823 | u32 config_reg; | ||
810 | 1824 | ||
811 | printk(KERN_INFO "%s: TX timeout ", dev->name); | 1825 | config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num)); |
1826 | if (dev->flags & IFF_PROMISC) | ||
1827 | config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; | ||
1828 | else | ||
1829 | config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; | ||
1830 | wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg); | ||
812 | 1831 | ||
813 | /* Do the reset outside of interrupt context */ | 1832 | eth_port_set_multicast_list(dev); |
814 | schedule_work(&mp->tx_timeout_task); | ||
815 | } | 1833 | } |
816 | 1834 | ||
1835 | |||
1836 | /* rx/tx queue initialisation ***********************************************/ | ||
817 | /* | 1837 | /* |
818 | * mv643xx_eth_tx_timeout_task | 1838 | * ether_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory. |
819 | * | 1839 | * |
820 | * Actual routine to reset the adapter when a timeout on Tx has occurred | 1840 | * DESCRIPTION: |
1841 | * This function prepares a Rx chained list of descriptors and packet | ||
1842 | * buffers in a form of a ring. The routine must be called after port | ||
1843 | * initialization routine and before port start routine. | ||
1844 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | ||
1845 | * devices in the system (i.e. DRAM). This function uses the ethernet | ||
1846 | * struct 'virtual to physical' routine (set by the user) to set the ring | ||
1847 | * with physical addresses. | ||
1848 | * | ||
1849 | * INPUT: | ||
1850 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
1851 | * | ||
1852 | * OUTPUT: | ||
1853 | * The routine updates the Ethernet port control struct with information | ||
1854 | * regarding the Rx descriptors and buffers. | ||
1855 | * | ||
1856 | * RETURN: | ||
1857 | * None. | ||
821 | */ | 1858 | */ |
822 | static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) | 1859 | static void ether_init_rx_desc_ring(struct mv643xx_private *mp) |
823 | { | 1860 | { |
824 | struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, | 1861 | volatile struct eth_rx_desc *p_rx_desc; |
825 | tx_timeout_task); | 1862 | int rx_desc_num = mp->rx_ring_size; |
826 | struct net_device *dev = mp->dev; | 1863 | int i; |
827 | 1864 | ||
828 | if (!netif_running(dev)) | 1865 | /* initialize the next_desc_ptr links in the Rx descriptors ring */ |
829 | return; | 1866 | p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area; |
1867 | for (i = 0; i < rx_desc_num; i++) { | ||
1868 | p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma + | ||
1869 | ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc); | ||
1870 | } | ||
830 | 1871 | ||
831 | netif_stop_queue(dev); | 1872 | /* Save Rx desc pointer to driver struct. */ |
1873 | mp->rx_curr_desc_q = 0; | ||
1874 | mp->rx_used_desc_q = 0; | ||
832 | 1875 | ||
833 | eth_port_reset(mp); | 1876 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); |
834 | eth_port_start(dev); | 1877 | } |
835 | 1878 | ||
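
The ((i + 1) % rx_desc_num) arithmetic above is what closes the descriptor list into a ring: each descriptor's next_desc_ptr holds the DMA address of its successor, and the last one wraps back to offset zero. A standalone sketch of that address computation, with a hypothetical DMA base and descriptor size:

    #include <stdio.h>
    #include <stdint.h>

    #define RING_SIZE 8
    #define DESC_SIZE 16 /* stand-in for sizeof(struct eth_rx_desc) */

    int main(void)
    {
            uint32_t dma_base = 0x10000000; /* hypothetical */
            int i;

            for (i = 0; i < RING_SIZE; i++)
                    printf("desc %d -> next 0x%08x\n", i, dma_base +
                           ((i + 1) % RING_SIZE) * DESC_SIZE);
            return 0;
    }
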
836 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | 1879 | static void mv643xx_eth_free_rx_rings(struct net_device *dev) |
837 | netif_wake_queue(dev); | 1880 | { |
1881 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1882 | int curr; | ||
1883 | |||
1884 | /* Stop RX Queues */ | ||
1885 | mv643xx_eth_port_disable_rx(mp); | ||
1886 | |||
1887 | /* Free preallocated skb's on RX rings */ | ||
1888 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { | ||
1889 | if (mp->rx_skb[curr]) { | ||
1890 | dev_kfree_skb(mp->rx_skb[curr]); | ||
1891 | mp->rx_desc_count--; | ||
1892 | } | ||
1893 | } | ||
1894 | |||
1895 | if (mp->rx_desc_count) | ||
1896 | printk(KERN_ERR | ||
1897 | "%s: Error in freeing Rx Ring. %d skb's still" | ||
1898 | " stuck in RX Ring - ignoring them\n", dev->name, | ||
1899 | mp->rx_desc_count); | ||
1900 | /* Free RX ring */ | ||
1901 | if (mp->rx_sram_size) | ||
1902 | iounmap(mp->p_rx_desc_area); | ||
1903 | else | ||
1904 | dma_free_coherent(NULL, mp->rx_desc_area_size, | ||
1905 | mp->p_rx_desc_area, mp->rx_desc_dma); | ||
1906 | } | ||
1907 | |||
1908 | /* | ||
1909 | * ether_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory. | ||
1910 | * | ||
1911 | * DESCRIPTION: | ||
1912 | * This function prepares a Tx chained list of descriptors and packet | ||
1913 | * buffers in a form of a ring. The routine must be called after port | ||
1914 | * initialization routine and before port start routine. | ||
1915 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | ||
1916 | * devices in the system (i.e. DRAM). This function uses the ethernet | ||
1917 | * struct 'virtual to physical' routine (set by the user) to set the ring | ||
1918 | * with physical addresses. | ||
1919 | * | ||
1920 | * INPUT: | ||
1921 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
1922 | * | ||
1923 | * OUTPUT: | ||
1924 | * The routine updates the Ethernet port control struct with information | ||
1925 | * regarding the Tx descriptors and buffers. | ||
1926 | * | ||
1927 | * RETURN: | ||
1928 | * None. | ||
1929 | */ | ||
1930 | static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | ||
1931 | { | ||
1932 | int tx_desc_num = mp->tx_ring_size; | ||
1933 | struct eth_tx_desc *p_tx_desc; | ||
1934 | int i; | ||
1935 | |||
1936 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | ||
1937 | p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area; | ||
1938 | for (i = 0; i < tx_desc_num; i++) { | ||
1939 | p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma + | ||
1940 | ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc); | ||
1941 | } | ||
1942 | |||
1943 | mp->tx_curr_desc_q = 0; | ||
1944 | mp->tx_used_desc_q = 0; | ||
1945 | |||
1946 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | ||
838 | } | 1947 | } |
839 | 1948 | ||
840 | /** | 1949 | /** |
@@ -916,86 +2025,30 @@ static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | |||
916 | mv643xx_eth_free_tx_descs(dev, 1); | 2025 | mv643xx_eth_free_tx_descs(dev, 1); |
917 | } | 2026 | } |
918 | 2027 | ||
919 | /* | 2028 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) |
920 | * mv643xx_eth_receive | ||
921 | * | ||
922 | * This function forwards packets that are received from the port's | ||
923 | * queues toward the kernel core, or FastRoutes them to another interface. | ||
924 | * | ||
925 | * Input : dev - a pointer to the required interface | ||
926 | * max - maximum number to receive (0 means unlimited) | ||
927 | * | ||
928 | * Output : number of served packets | ||
929 | */ | ||
930 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | ||
931 | { | 2029 | { |
932 | struct mv643xx_private *mp = netdev_priv(dev); | 2030 | struct mv643xx_private *mp = netdev_priv(dev); |
933 | struct net_device_stats *stats = &dev->stats; | ||
934 | unsigned int received_packets = 0; | ||
935 | struct sk_buff *skb; | ||
936 | struct pkt_info pkt_info; | ||
937 | 2031 | ||
938 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { | 2032 | /* Stop Tx Queues */ |
939 | dma_unmap_single(NULL, pkt_info.buf_ptr, ETH_RX_SKB_SIZE, | 2033 | mv643xx_eth_port_disable_tx(mp); |
940 | DMA_FROM_DEVICE); | ||
941 | mp->rx_desc_count--; | ||
942 | received_packets++; | ||
943 | |||
944 | /* | ||
945 | * Update statistics. | ||
946 | * Note byte count includes 4 byte CRC count | ||
947 | */ | ||
948 | stats->rx_packets++; | ||
949 | stats->rx_bytes += pkt_info.byte_cnt; | ||
950 | skb = pkt_info.return_info; | ||
951 | /* | ||
952 | * If a packet was received without the first/last bits set, or | ||
953 | * with the error summary bit set, the packet needs to be dropped. | ||
954 | */ | ||
955 | if (((pkt_info.cmd_sts | ||
956 | & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) != | ||
957 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) | ||
958 | || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) { | ||
959 | stats->rx_dropped++; | ||
960 | if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC | | ||
961 | ETH_RX_LAST_DESC)) != | ||
962 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) { | ||
963 | if (net_ratelimit()) | ||
964 | printk(KERN_ERR | ||
965 | "%s: Received packet spread " | ||
966 | "on multiple descriptors\n", | ||
967 | dev->name); | ||
968 | } | ||
969 | if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) | ||
970 | stats->rx_errors++; | ||
971 | 2034 | ||
972 | dev_kfree_skb_irq(skb); | 2035 | /* Free outstanding skb's on TX ring */ |
973 | } else { | 2036 | mv643xx_eth_free_all_tx_descs(dev); |
974 | /* | ||
975 | * The -4 is for the CRC in the trailer of the | ||
976 | * received packet | ||
977 | */ | ||
978 | skb_put(skb, pkt_info.byte_cnt - 4); | ||
979 | 2037 | ||
980 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { | 2038 | BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q); |
981 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
982 | skb->csum = htons( | ||
983 | (pkt_info.cmd_sts & 0x0007fff8) >> 3); | ||
984 | } | ||
985 | skb->protocol = eth_type_trans(skb, dev); | ||
986 | #ifdef MV643XX_NAPI | ||
987 | netif_receive_skb(skb); | ||
988 | #else | ||
989 | netif_rx(skb); | ||
990 | #endif | ||
991 | } | ||
992 | dev->last_rx = jiffies; | ||
993 | } | ||
994 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | ||
995 | 2039 | ||
996 | return received_packets; | 2040 | /* Free TX ring */ |
2041 | if (mp->tx_sram_size) | ||
2042 | iounmap(mp->p_tx_desc_area); | ||
2043 | else | ||
2044 | dma_free_coherent(NULL, mp->tx_desc_area_size, | ||
2045 | mp->p_tx_desc_area, mp->tx_desc_dma); | ||
997 | } | 2046 | } |
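
The drop condition above collapses two cases into one test: a frame that did not fit in a single descriptor (first and last bits not both set) and a frame with the error summary bit set. A self-contained model of that predicate, with made-up bit values standing in for ETH_RX_FIRST_DESC, ETH_RX_LAST_DESC and ETH_ERROR_SUMMARY:

    #include <stdio.h>
    #include <stdint.h>

    #define RX_FIRST_DESC 0x08000000u /* hypothetical values */
    #define RX_LAST_DESC  0x04000000u
    #define ERROR_SUMMARY 0x00000001u

    static int rx_should_drop(uint32_t cmd_sts)
    {
            uint32_t both = RX_FIRST_DESC | RX_LAST_DESC;

            return (cmd_sts & both) != both ||
                   (cmd_sts & ERROR_SUMMARY);
    }

    int main(void)
    {
            printf("%d\n", rx_should_drop(RX_FIRST_DESC | RX_LAST_DESC)); /* 0: keep */
            printf("%d\n", rx_should_drop(RX_FIRST_DESC));                /* 1: drop */
            return 0;
    }
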
998 | 2047 | ||
2048 | |||
2049 | /* netdev ops and related ***************************************************/ | ||
2050 | static void eth_port_reset(struct mv643xx_private *mp); | ||
2051 | |||
999 | /* Set the mv643xx port configuration register for the speed/duplex mode. */ | 2052 | /* Set the mv643xx port configuration register for the speed/duplex mode. */ |
1000 | static void mv643xx_eth_update_pscr(struct net_device *dev, | 2053 | static void mv643xx_eth_update_pscr(struct net_device *dev, |
1001 | struct ethtool_cmd *ecmd) | 2054 | struct ethtool_cmd *ecmd) |
@@ -1118,6 +2171,125 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id) | |||
1118 | return IRQ_HANDLED; | 2171 | return IRQ_HANDLED; |
1119 | } | 2172 | } |
1120 | 2173 | ||
2174 | /* | ||
2175 | * ethernet_phy_reset - Reset Ethernet port PHY. | ||
2176 | * | ||
2177 | * DESCRIPTION: | ||
2178 | * This routine utilizes the SMI interface to reset the ethernet port PHY. | ||
2179 | * | ||
2180 | * INPUT: | ||
2181 | * struct mv643xx_private *mp Ethernet Port. | ||
2182 | * | ||
2183 | * OUTPUT: | ||
2184 | * The PHY is reset. | ||
2185 | * | ||
2186 | * RETURN: | ||
2187 | * None. | ||
2188 | * | ||
2189 | */ | ||
2190 | static void ethernet_phy_reset(struct mv643xx_private *mp) | ||
2191 | { | ||
2192 | unsigned int phy_reg_data; | ||
2193 | |||
2194 | /* Reset the PHY */ | ||
2195 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | ||
2196 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | ||
2197 | eth_port_write_smi_reg(mp, 0, phy_reg_data); | ||
2198 | |||
2199 | /* wait for PHY to come out of reset */ | ||
2200 | do { | ||
2201 | udelay(1); | ||
2202 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | ||
2203 | } while (phy_reg_data & 0x8000); | ||
2204 | } | ||
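
Bit 15 of PHY register 0 (the MII BMCR reset bit) is self-clearing: the loop above simply polls until the PHY drops it. A toy, self-contained model of that poll with a bounded timeout added for safety (the PHY side is simulated; this is a sketch, not the driver's SMI code):

    #include <stdio.h>

    #define BMCR_RESET 0x8000

    /* toy PHY model: the reset bit self-clears after a few polls */
    static unsigned int bmcr = BMCR_RESET;
    static int polls;

    static unsigned int smi_read(void)
    {
            if (++polls >= 3)
                    bmcr &= ~BMCR_RESET;
            return bmcr;
    }

    int main(void)
    {
            int timeout = 1000;

            while ((smi_read() & BMCR_RESET) && --timeout)
                    ; /* udelay(1) would go here */

            printf("PHY out of reset after %d polls\n", polls);
            return timeout ? 0 : 1;
    }
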
2205 | |||
2206 | /* | ||
2207 | * eth_port_start - Start the Ethernet port activity. | ||
2208 | * | ||
2209 | * DESCRIPTION: | ||
2210 | * This routine prepares the Ethernet port for Rx and Tx activity: | ||
2211 | * 1. Initialize the Tx and Rx Current Descriptor Pointers for each queue | ||
2212 | * whose descriptor ring has been initialized (using | ||
2213 | * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx) | ||
2214 | * 2. Initialize and enable the Ethernet configuration port by writing to | ||
2215 | * the port's configuration and command registers. | ||
2216 | * 3. Initialize and enable the SDMA by writing to the SDMA's | ||
2217 | * configuration and command registers. After completing these steps, | ||
2218 | * the ethernet port SDMA can start to perform Rx and Tx activities. | ||
2219 | * | ||
2220 | * Note: Each Rx and Tx queue descriptor's list must be initialized prior | ||
2221 | * to calling this function (use ether_init_tx_desc_ring for Tx queues | ||
2222 | * and ether_init_rx_desc_ring for Rx queues). | ||
2223 | * | ||
2224 | * INPUT: | ||
2225 | * dev - a pointer to the required interface | ||
2226 | * | ||
2227 | * OUTPUT: | ||
2228 | * Ethernet port is ready to receive and transmit. | ||
2229 | * | ||
2230 | * RETURN: | ||
2231 | * None. | ||
2232 | */ | ||
2233 | static void eth_port_start(struct net_device *dev) | ||
2234 | { | ||
2235 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2236 | unsigned int port_num = mp->port_num; | ||
2237 | int tx_curr_desc, rx_curr_desc; | ||
2238 | u32 pscr; | ||
2239 | struct ethtool_cmd ethtool_cmd; | ||
2240 | |||
2241 | /* Assignment of Tx CTRP of given queue */ | ||
2242 | tx_curr_desc = mp->tx_curr_desc_q; | ||
2243 | wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
2244 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | ||
2245 | |||
2246 | /* Assignment of Rx CRDP of given queue */ | ||
2247 | rx_curr_desc = mp->rx_curr_desc_q; | ||
2248 | wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
2249 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | ||
2250 | |||
2251 | /* Add the assigned Ethernet address to the port's address table */ | ||
2252 | eth_port_uc_addr_set(mp, dev->dev_addr); | ||
2253 | |||
2254 | /* Assign port configuration and command. */ | ||
2255 | wrl(mp, PORT_CONFIG_REG(port_num), | ||
2256 | PORT_CONFIG_DEFAULT_VALUE); | ||
2257 | |||
2258 | wrl(mp, PORT_CONFIG_EXTEND_REG(port_num), | ||
2259 | PORT_CONFIG_EXTEND_DEFAULT_VALUE); | ||
2260 | |||
2261 | pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); | ||
2262 | |||
2263 | pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); | ||
2264 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2265 | |||
2266 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | | ||
2267 | DISABLE_AUTO_NEG_SPEED_GMII | | ||
2268 | DISABLE_AUTO_NEG_FOR_DUPLX | | ||
2269 | DO_NOT_FORCE_LINK_FAIL | | ||
2270 | SERIAL_PORT_CONTROL_RESERVED; | ||
2271 | |||
2272 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2273 | |||
2274 | pscr |= SERIAL_PORT_ENABLE; | ||
2275 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2276 | |||
2277 | /* Assign port SDMA configuration */ | ||
2278 | wrl(mp, SDMA_CONFIG_REG(port_num), | ||
2279 | PORT_SDMA_CONFIG_DEFAULT_VALUE); | ||
2280 | |||
2281 | /* Enable port Rx. */ | ||
2282 | mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED); | ||
2283 | |||
2284 | /* Disable port bandwidth limits by clearing MTU register */ | ||
2285 | wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0); | ||
2286 | |||
2287 | /* save phy settings across reset */ | ||
2288 | mv643xx_get_settings(dev, ðtool_cmd); | ||
2289 | ethernet_phy_reset(mp); | ||
2290 | mv643xx_set_settings(dev, ðtool_cmd); | ||
2291 | } | ||
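
Note the ordering of the three PORT_SERIAL_CONTROL writes above: the enable and force-link bits are cleared first, the auto-negotiation overrides are programmed while the port is down, and the enable bit is set last. A compressed standalone sketch of that sequence, with made-up register values:

    #include <stdio.h>
    #include <stdint.h>

    #define SERIAL_PORT_ENABLE 0x00000001u /* hypothetical bits */
    #define FORCE_LINK_PASS    0x00000002u
    #define AUTONEG_OVERRIDES  0x000003c0u

    static void wr_pscr(uint32_t v) { printf("PSCR <- 0x%08x\n", v); }

    int main(void)
    {
            uint32_t pscr = SERIAL_PORT_ENABLE | FORCE_LINK_PASS;

            pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
            wr_pscr(pscr);                  /* 1: take the port down */
            pscr |= AUTONEG_OVERRIDES;
            wr_pscr(pscr);                  /* 2: program the mode bits */
            pscr |= SERIAL_PORT_ENABLE;
            wr_pscr(pscr);                  /* 3: re-enable last */
            return 0;
    }
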
2292 | |||
1121 | #ifdef MV643XX_COAL | 2293 | #ifdef MV643XX_COAL |
1122 | 2294 | ||
1123 | /* | 2295 | /* |
@@ -1192,114 +2364,36 @@ static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp, | |||
1192 | } | 2364 | } |
1193 | 2365 | ||
1194 | /* | 2366 | /* |
1195 | * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory. | 2367 | * eth_port_init - Initialize the Ethernet port driver |
1196 | * | ||
1197 | * DESCRIPTION: | ||
1198 | * This function prepares a Rx chained list of descriptors and packet | ||
1199 | * buffers in a form of a ring. The routine must be called after port | ||
1200 | * initialization routine and before port start routine. | ||
1201 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | ||
1202 | * devices in the system (i.e. DRAM). This function uses the ethernet | ||
1203 | * struct 'virtual to physical' routine (set by the user) to set the ring | ||
1204 | * with physical addresses. | ||
1205 | * | ||
1206 | * INPUT: | ||
1207 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
1208 | * | ||
1209 | * OUTPUT: | ||
1210 | * The routine updates the Ethernet port control struct with information | ||
1211 | * regarding the Rx descriptors and buffers. | ||
1212 | * | ||
1213 | * RETURN: | ||
1214 | * None. | ||
1215 | */ | ||
1216 | static void ether_init_rx_desc_ring(struct mv643xx_private *mp) | ||
1217 | { | ||
1218 | volatile struct eth_rx_desc *p_rx_desc; | ||
1219 | int rx_desc_num = mp->rx_ring_size; | ||
1220 | int i; | ||
1221 | |||
1222 | /* initialize the next_desc_ptr links in the Rx descriptors ring */ | ||
1223 | p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area; | ||
1224 | for (i = 0; i < rx_desc_num; i++) { | ||
1225 | p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma + | ||
1226 | ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc); | ||
1227 | } | ||
1228 | |||
1229 | /* Save Rx desc pointer to driver struct. */ | ||
1230 | mp->rx_curr_desc_q = 0; | ||
1231 | mp->rx_used_desc_q = 0; | ||
1232 | |||
1233 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); | ||
1234 | } | ||
1235 | |||
1236 | /* | ||
1237 | * ether_init_tx_desc_ring - Curve a Tx chain desc list and buffer in memory. | ||
1238 | * | 2368 | * |
1239 | * DESCRIPTION: | 2369 | * DESCRIPTION: |
1240 | * This function prepares a Tx chained list of descriptors and packet | 2370 | * This function prepares the ethernet port to start its activity: |
1241 | * buffers in a form of a ring. The routine must be called after port | 2371 | * 1) Completes the ethernet port driver struct initialization toward port |
1242 | * initialization routine and before port start routine. | 2372 | * start routine. |
1243 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | 2373 | * 2) Resets the device to a quiescent state in case of warm reboot. |
1244 | * devices in the system (i.e. DRAM). This function uses the ethernet | 2374 | * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM. |
1245 | * struct 'virtual to physical' routine (set by the user) to set the ring | 2375 | * 4) Clean MAC tables. The reset status of those tables is unknown. |
1246 | * with physical addresses. | 2376 | * 5) Set PHY address. |
2377 | * Note: Call this routine prior to eth_port_start routine and after | ||
2378 | * setting user values in the user fields of Ethernet port control | ||
2379 | * struct. | ||
1247 | * | 2380 | * |
1248 | * INPUT: | 2381 | * INPUT: |
1249 | * struct mv643xx_private *mp Ethernet Port Control srtuct. | 2382 | * struct mv643xx_private *mp Ethernet port control struct |
1250 | * | 2383 | * |
1251 | * OUTPUT: | 2384 | * OUTPUT: |
1252 | * The routine updates the Ethernet port control struct with information | 2385 | * See description. |
1253 | * regarding the Tx descriptors and buffers. | ||
1254 | * | 2386 | * |
1255 | * RETURN: | 2387 | * RETURN: |
1256 | * None. | 2388 | * None. |
1257 | */ | 2389 | */ |
1258 | static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | 2390 | static void eth_port_init(struct mv643xx_private *mp) |
1259 | { | ||
1260 | int tx_desc_num = mp->tx_ring_size; | ||
1261 | struct eth_tx_desc *p_tx_desc; | ||
1262 | int i; | ||
1263 | |||
1264 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | ||
1265 | p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area; | ||
1266 | for (i = 0; i < tx_desc_num; i++) { | ||
1267 | p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma + | ||
1268 | ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc); | ||
1269 | } | ||
1270 | |||
1271 | mp->tx_curr_desc_q = 0; | ||
1272 | mp->tx_used_desc_q = 0; | ||
1273 | |||
1274 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | ||
1275 | } | ||
1276 | |||
1277 | static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1278 | { | ||
1279 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1280 | int err; | ||
1281 | |||
1282 | spin_lock_irq(&mp->lock); | ||
1283 | err = mii_ethtool_sset(&mp->mii, cmd); | ||
1284 | spin_unlock_irq(&mp->lock); | ||
1285 | |||
1286 | return err; | ||
1287 | } | ||
1288 | |||
1289 | static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
1290 | { | 2391 | { |
1291 | struct mv643xx_private *mp = netdev_priv(dev); | 2392 | mp->rx_resource_err = 0; |
1292 | int err; | ||
1293 | |||
1294 | spin_lock_irq(&mp->lock); | ||
1295 | err = mii_ethtool_gset(&mp->mii, cmd); | ||
1296 | spin_unlock_irq(&mp->lock); | ||
1297 | 2393 | ||
1298 | /* The PHY may support 1000baseT_Half, but the mv643xx does not */ | 2394 | eth_port_reset(mp); |
1299 | cmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
1300 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | ||
1301 | 2395 | ||
1302 | return err; | 2396 | eth_port_init_mac_tables(mp); |
1303 | } | 2397 | } |
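
The masking of 1000baseT_Half above matters because the PHY may advertise half-duplex gigabit even though the mv643xx MAC cannot do it. A small sketch of the same mask, using the standard ethtool capability bit positions (assumed here; see linux/ethtool.h):

    #include <stdio.h>

    #define SUPPORTED_1000baseT_Half (1 << 4) /* ethtool ABI bits */
    #define SUPPORTED_1000baseT_Full (1 << 5)

    int main(void)
    {
            unsigned int supported = SUPPORTED_1000baseT_Half |
                                     SUPPORTED_1000baseT_Full;

            /* the MAC cannot do half-duplex gigabit */
            supported &= ~SUPPORTED_1000baseT_Half;

            printf("supported = 0x%04x\n", supported); /* 0x0020 */
            return 0;
    }
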
1304 | 2398 | ||
1305 | /* | 2399 | /* |
@@ -1449,53 +2543,41 @@ out_free_irq: | |||
1449 | return err; | 2543 | return err; |
1450 | } | 2544 | } |
1451 | 2545 | ||
1452 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | 2546 | /* |
2547 | * eth_port_reset - Reset Ethernet port | ||
2548 | * | ||
2549 | * DESCRIPTION: | ||
2550 | * This routine resets the chip by aborting any SDMA engine activity and | ||
2551 | * clearing the MIB counters. The Receiver and the Transmit unit are in | ||
2552 | * idle state after this command is performed and the port is disabled. | ||
2553 | * | ||
2554 | * INPUT: | ||
2555 | * struct mv643xx_private *mp Ethernet Port. | ||
2556 | * | ||
2557 | * OUTPUT: | ||
2558 | * Channel activity is halted. | ||
2559 | * | ||
2560 | * RETURN: | ||
2561 | * None. | ||
2562 | * | ||
2563 | */ | ||
2564 | static void eth_port_reset(struct mv643xx_private *mp) | ||
1453 | { | 2565 | { |
1454 | struct mv643xx_private *mp = netdev_priv(dev); | 2566 | unsigned int port_num = mp->port_num; |
2567 | unsigned int reg_data; | ||
1455 | 2568 | ||
1456 | /* Stop Tx Queues */ | ||
1457 | mv643xx_eth_port_disable_tx(mp); | 2569 | mv643xx_eth_port_disable_tx(mp); |
1458 | |||
1459 | /* Free outstanding skb's on TX ring */ | ||
1460 | mv643xx_eth_free_all_tx_descs(dev); | ||
1461 | |||
1462 | BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q); | ||
1463 | |||
1464 | /* Free TX ring */ | ||
1465 | if (mp->tx_sram_size) | ||
1466 | iounmap(mp->p_tx_desc_area); | ||
1467 | else | ||
1468 | dma_free_coherent(NULL, mp->tx_desc_area_size, | ||
1469 | mp->p_tx_desc_area, mp->tx_desc_dma); | ||
1470 | } | ||
1471 | |||
1472 | static void mv643xx_eth_free_rx_rings(struct net_device *dev) | ||
1473 | { | ||
1474 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1475 | int curr; | ||
1476 | |||
1477 | /* Stop RX Queues */ | ||
1478 | mv643xx_eth_port_disable_rx(mp); | 2570 | mv643xx_eth_port_disable_rx(mp); |
1479 | 2571 | ||
1480 | /* Free preallocated skb's on RX rings */ | 2572 | /* Clear all MIB counters */ |
1481 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { | 2573 | eth_clear_mib_counters(mp); |
1482 | if (mp->rx_skb[curr]) { | ||
1483 | dev_kfree_skb(mp->rx_skb[curr]); | ||
1484 | mp->rx_desc_count--; | ||
1485 | } | ||
1486 | } | ||
1487 | 2574 | ||
1488 | if (mp->rx_desc_count) | 2575 | /* Reset the Enable bit in the Configuration Register */ |
1489 | printk(KERN_ERR | 2576 | reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); |
1490 | "%s: Error in freeing Rx Ring. %d skb's still" | 2577 | reg_data &= ~(SERIAL_PORT_ENABLE | |
1491 | " stuck in RX Ring - ignoring them\n", dev->name, | 2578 | DO_NOT_FORCE_LINK_FAIL | |
1492 | mp->rx_desc_count); | 2579 | FORCE_LINK_PASS); |
1493 | /* Free RX ring */ | 2580 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
1494 | if (mp->rx_sram_size) | ||
1495 | iounmap(mp->p_rx_desc_area); | ||
1496 | else | ||
1497 | dma_free_coherent(NULL, mp->rx_desc_area_size, | ||
1498 | mp->p_rx_desc_area, mp->rx_desc_dma); | ||
1499 | } | 2581 | } |
1500 | 2582 | ||
1501 | /* | 2583 | /* |
@@ -1534,250 +2616,308 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
1534 | return 0; | 2616 | return 0; |
1535 | } | 2617 | } |
1536 | 2618 | ||
1537 | #ifdef MV643XX_NAPI | 2619 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
1538 | /* | ||
1539 | * mv643xx_poll | ||
1540 | * | ||
1541 | * This function is used in case of NAPI | ||
1542 | */ | ||
1543 | static int mv643xx_poll(struct napi_struct *napi, int budget) | ||
1544 | { | 2620 | { |
1545 | struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi); | 2621 | struct mv643xx_private *mp = netdev_priv(dev); |
1546 | struct net_device *dev = mp->dev; | ||
1547 | unsigned int port_num = mp->port_num; | ||
1548 | int work_done; | ||
1549 | |||
1550 | #ifdef MV643XX_TX_FAST_REFILL | ||
1551 | if (++mp->tx_clean_threshold > 5) { | ||
1552 | mv643xx_eth_free_completed_tx_descs(dev); | ||
1553 | mp->tx_clean_threshold = 0; | ||
1554 | } | ||
1555 | #endif | ||
1556 | |||
1557 | work_done = 0; | ||
1558 | if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | ||
1559 | != (u32) mp->rx_used_desc_q) | ||
1560 | work_done = mv643xx_eth_receive_queue(dev, budget); | ||
1561 | |||
1562 | if (work_done < budget) { | ||
1563 | netif_rx_complete(dev, napi); | ||
1564 | wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0); | ||
1565 | wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
1566 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | ||
1567 | } | ||
1568 | 2622 | ||
1569 | return work_done; | 2623 | return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); |
1570 | } | 2624 | } |
1571 | #endif | ||
1572 | 2625 | ||
1573 | /** | 2626 | /* |
1574 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | 2627 | * Changes MTU (maximum transfer unit) of the gigabit ethernet port |
1575 | * | 2628 | * |
1576 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | 2629 | * Input : pointer to ethernet interface network device structure |
1577 | * This helper function detects that case. | 2630 | * new mtu size |
2631 | * Output : 0 upon success, -EINVAL upon failure | ||
1578 | */ | 2632 | */ |
1579 | 2633 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | |
1580 | static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | ||
1581 | { | 2634 | { |
1582 | unsigned int frag; | 2635 | if ((new_mtu > 9500) || (new_mtu < 64)) |
1583 | skb_frag_t *fragp; | 2636 | return -EINVAL; |
1584 | 2637 | ||
1585 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 2638 | dev->mtu = new_mtu; |
1586 | fragp = &skb_shinfo(skb)->frags[frag]; | 2639 | if (!netif_running(dev)) |
1587 | if (fragp->size <= 8 && fragp->page_offset & 0x7) | 2640 | return 0; |
1588 | return 1; | 2641 | |
2642 | /* | ||
2643 | * Stop and then re-open the interface. This will allocate RX | ||
2644 | * skbs of the new MTU. | ||
2645 | * There is a possible danger that the open will not succeed | ||
2646 | * due to memory being full, in which case the interface is left down. | ||
2647 | */ | ||
2648 | mv643xx_eth_stop(dev); | ||
2649 | if (mv643xx_eth_open(dev)) { | ||
2650 | printk(KERN_ERR "%s: Fatal error on opening device\n", | ||
2651 | dev->name); | ||
1589 | } | 2652 | } |
2653 | |||
1590 | return 0; | 2654 | return 0; |
1591 | } | 2655 | } |
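
A standalone model of the has_tiny_unaligned_frags() test ending above: the SDMA engine mishandles fragments that are both at most 8 bytes long and not 8-byte aligned, so skbs containing one get linearized before transmit:

    #include <stdio.h>

    struct frag { unsigned int size; unsigned int page_offset; };

    static int frag_is_problematic(const struct frag *f)
    {
            /* tiny (<= 8 bytes) and misaligned (offset not on an
             * 8-byte boundary) */
            return f->size <= 8 && (f->page_offset & 0x7);
    }

    int main(void)
    {
            struct frag ok  = { 4, 8 }; /* tiny but aligned */
            struct frag bad = { 4, 3 }; /* tiny and misaligned */

            printf("%d %d\n", frag_is_problematic(&ok),
                   frag_is_problematic(&bad)); /* 0 1 */
            return 0;
    }
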
1592 | 2656 | ||
1593 | /** | 2657 | /* |
1594 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | 2658 | * mv643xx_eth_tx_timeout_task |
2659 | * | ||
2660 | * Actual routine to reset the adapter when a timeout on Tx has occurred | ||
1595 | */ | 2661 | */ |
1596 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | 2662 | static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly) |
1597 | { | 2663 | { |
1598 | int tx_desc_curr; | 2664 | struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, |
2665 | tx_timeout_task); | ||
2666 | struct net_device *dev = mp->dev; | ||
1599 | 2667 | ||
1600 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); | 2668 | if (!netif_running(dev)) |
2669 | return; | ||
1601 | 2670 | ||
1602 | tx_desc_curr = mp->tx_curr_desc_q; | 2671 | netif_stop_queue(dev); |
1603 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
1604 | 2672 | ||
1605 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); | 2673 | eth_port_reset(mp); |
2674 | eth_port_start(dev); | ||
1606 | 2675 | ||
1607 | return tx_desc_curr; | 2676 | if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) |
2677 | netif_wake_queue(dev); | ||
1608 | } | 2678 | } |
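
eth_alloc_tx_desc_index() above hands out ring slots by post-incrementing the current index modulo the ring size; the BUG_ON checks guard against handing out a slot that reclaim has not caught up with yet. A trivial standalone sketch of the index arithmetic:

    #include <stdio.h>

    #define TX_RING_SIZE 4

    int main(void)
    {
            int curr = 0, i;

            for (i = 0; i < 6; i++) {
                    int slot = curr; /* slot returned to the caller */

                    curr = (curr + 1) % TX_RING_SIZE;
                    printf("alloc -> slot %d (next %d)\n", slot, curr);
            }
            return 0;
    }
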
1609 | 2679 | ||
1610 | /** | 2680 | /* |
1611 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | 2681 | * mv643xx_eth_tx_timeout |
1612 | * | 2682 | * |
1613 | * Ensure the data for each fragment to be transmitted is mapped properly, | 2683 | * Called upon a timeout on transmitting a packet |
1614 | * then fill in descriptors in the tx hw queue. | 2684 | * |
2685 | * Input : pointer to ethernet interface network device structure. | ||
2686 | * Output : N/A | ||
1615 | */ | 2687 | */ |
1616 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, | 2688 | static void mv643xx_eth_tx_timeout(struct net_device *dev) |
1617 | struct sk_buff *skb) | ||
1618 | { | 2689 | { |
1619 | int frag; | 2690 | struct mv643xx_private *mp = netdev_priv(dev); |
1620 | int tx_index; | ||
1621 | struct eth_tx_desc *desc; | ||
1622 | |||
1623 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1624 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
1625 | |||
1626 | tx_index = eth_alloc_tx_desc_index(mp); | ||
1627 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1628 | 2691 | ||
1629 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | 2692 | printk(KERN_INFO "%s: TX timeout ", dev->name); |
1630 | /* Last Frag enables interrupt and frees the skb */ | ||
1631 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1632 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1633 | ETH_TX_LAST_DESC | | ||
1634 | ETH_TX_ENABLE_INTERRUPT; | ||
1635 | mp->tx_skb[tx_index] = skb; | ||
1636 | } else | ||
1637 | mp->tx_skb[tx_index] = NULL; | ||
1638 | 2693 | ||
1639 | desc = &mp->p_tx_desc_area[tx_index]; | 2694 | /* Do the reset outside of interrupt context */ |
1640 | desc->l4i_chk = 0; | 2695 | schedule_work(&mp->tx_timeout_task); |
1641 | desc->byte_cnt = this_frag->size; | ||
1642 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1643 | this_frag->page_offset, | ||
1644 | this_frag->size, | ||
1645 | DMA_TO_DEVICE); | ||
1646 | } | ||
1647 | } | 2696 | } |
1648 | 2697 | ||
1649 | static inline __be16 sum16_as_be(__sum16 sum) | 2698 | #ifdef CONFIG_NET_POLL_CONTROLLER |
2699 | static void mv643xx_netpoll(struct net_device *netdev) | ||
1650 | { | 2700 | { |
1651 | return (__force __be16)sum; | 2701 | struct mv643xx_private *mp = netdev_priv(netdev); |
2702 | int port_num = mp->port_num; | ||
2703 | |||
2704 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | ||
2705 | /* wait for previous write to complete */ | ||
2706 | rdl(mp, INTERRUPT_MASK_REG(port_num)); | ||
2707 | |||
2708 | mv643xx_eth_int_handler(netdev->irq, netdev); | ||
2709 | |||
2710 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | ||
1652 | } | 2711 | } |
2712 | #endif | ||
1653 | 2713 | ||
1654 | /** | 2714 | /* |
1655 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw | 2715 | * Wrappers for MII support library. |
1656 | * | ||
1657 | * Ensure the data for an skb to be transmitted is mapped properly, | ||
1658 | * then fill in descriptors in the tx hw queue and start the hardware. | ||
1659 | */ | 2716 | */ |
1660 | static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, | 2717 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) |
1661 | struct sk_buff *skb) | ||
1662 | { | 2718 | { |
1663 | int tx_index; | 2719 | struct mv643xx_private *mp = netdev_priv(dev); |
1664 | struct eth_tx_desc *desc; | 2720 | int val; |
1665 | u32 cmd_sts; | ||
1666 | int length; | ||
1667 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1668 | 2721 | ||
1669 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; | 2722 | eth_port_read_smi_reg(mp, location, &val); |
2723 | return val; | ||
2724 | } | ||
1670 | 2725 | ||
1671 | tx_index = eth_alloc_tx_desc_index(mp); | 2726 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
1672 | desc = &mp->p_tx_desc_area[tx_index]; | 2727 | { |
2728 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2729 | eth_port_write_smi_reg(mp, location, val); | ||
2730 | } | ||
1673 | 2731 | ||
1674 | if (nr_frags) { | ||
1675 | eth_tx_fill_frag_descs(mp, skb); | ||
1676 | 2732 | ||
1677 | length = skb_headlen(skb); | 2733 | /* platform glue ************************************************************/ |
1678 | mp->tx_skb[tx_index] = NULL; | 2734 | static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, |
1679 | } else { | 2735 | struct mbus_dram_target_info *dram) |
1680 | cmd_sts |= ETH_ZERO_PADDING | | 2736 | { |
1681 | ETH_TX_LAST_DESC | | 2737 | void __iomem *base = msp->eth_base; |
1682 | ETH_TX_ENABLE_INTERRUPT; | 2738 | u32 win_enable; |
1683 | length = skb->len; | 2739 | u32 win_protect; |
1684 | mp->tx_skb[tx_index] = skb; | 2740 | int i; |
2741 | |||
2742 | for (i = 0; i < 6; i++) { | ||
2743 | writel(0, base + WINDOW_BASE(i)); | ||
2744 | writel(0, base + WINDOW_SIZE(i)); | ||
2745 | if (i < 4) | ||
2746 | writel(0, base + WINDOW_REMAP_HIGH(i)); | ||
1685 | } | 2747 | } |
1686 | 2748 | ||
1687 | desc->byte_cnt = length; | 2749 | win_enable = 0x3f; |
1688 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); | 2750 | win_protect = 0; |
1689 | 2751 | ||
1690 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2752 | for (i = 0; i < dram->num_cs; i++) { |
1691 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | 2753 | struct mbus_dram_window *cs = dram->cs + i; |
1692 | 2754 | ||
1693 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | | 2755 | writel((cs->base & 0xffff0000) | |
1694 | ETH_GEN_IP_V_4_CHECKSUM | | 2756 | (cs->mbus_attr << 8) | |
1695 | ip_hdr(skb)->ihl << ETH_TX_IHL_SHIFT; | 2757 | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); |
2758 | writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); | ||
1696 | 2759 | ||
1697 | switch (ip_hdr(skb)->protocol) { | 2760 | win_enable &= ~(1 << i); |
1698 | case IPPROTO_UDP: | 2761 | win_protect |= 3 << (2 * i); |
1699 | cmd_sts |= ETH_UDP_FRAME; | ||
1700 | desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check)); | ||
1701 | break; | ||
1702 | case IPPROTO_TCP: | ||
1703 | desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check)); | ||
1704 | break; | ||
1705 | default: | ||
1706 | BUG(); | ||
1707 | } | ||
1708 | } else { | ||
1709 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1710 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; | ||
1711 | desc->l4i_chk = 0; | ||
1712 | } | 2762 | } |
1713 | 2763 | ||
1714 | /* ensure all other descriptors are written before first cmd_sts */ | 2764 | writel(win_enable, base + WINDOW_BAR_ENABLE); |
1715 | wmb(); | 2765 | msp->win_protect = win_protect; |
1716 | desc->cmd_sts = cmd_sts; | 2766 | } |
1717 | 2767 | ||
1718 | /* ensure all descriptors are written before poking hardware */ | 2768 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) |
1719 | wmb(); | 2769 | { |
1720 | mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED); | 2770 | static int mv643xx_version_printed = 0; |
2771 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; | ||
2772 | struct mv643xx_shared_private *msp; | ||
2773 | struct resource *res; | ||
2774 | int ret; | ||
1721 | 2775 | ||
1722 | mp->tx_desc_count += nr_frags + 1; | 2776 | if (!mv643xx_version_printed++) |
2777 | printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); | ||
2778 | |||
2779 | ret = -EINVAL; | ||
2780 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2781 | if (res == NULL) | ||
2782 | goto out; | ||
2783 | |||
2784 | ret = -ENOMEM; | ||
2785 | msp = kmalloc(sizeof(*msp), GFP_KERNEL); | ||
2786 | if (msp == NULL) | ||
2787 | goto out; | ||
2788 | memset(msp, 0, sizeof(*msp)); | ||
2789 | |||
2790 | msp->eth_base = ioremap(res->start, res->end - res->start + 1); | ||
2791 | if (msp->eth_base == NULL) | ||
2792 | goto out_free; | ||
2793 | |||
2794 | spin_lock_init(&msp->phy_lock); | ||
2795 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; | ||
2796 | |||
2797 | platform_set_drvdata(pdev, msp); | ||
2798 | |||
2799 | /* | ||
2800 | * (Re-)program MBUS remapping windows if we are asked to. | ||
2801 | */ | ||
2802 | if (pd != NULL && pd->dram != NULL) | ||
2803 | mv643xx_eth_conf_mbus_windows(msp, pd->dram); | ||
2804 | |||
2805 | return 0; | ||
2806 | |||
2807 | out_free: | ||
2808 | kfree(msp); | ||
2809 | out: | ||
2810 | return ret; | ||
1723 | } | 2811 | } |
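
In mv643xx_eth_conf_mbus_windows() above, the size register takes (cs->size - 1) with the low 16 bits masked off, i.e. windows are 64KB-granular, and each programmed window clears its bit in win_enable and sets a 2-bit protect field. A standalone sketch of the size encoding for a hypothetical 256MB window:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t size = 0x10000000; /* hypothetical 256MB window */

            /* (size - 1) with the low 16 bits dropped: 64KB granules */
            uint32_t reg = (size - 1) & 0xffff0000;

            printf("window size reg = 0x%08x\n", reg); /* 0x0fff0000 */
            return 0;
    }
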
1724 | 2812 | ||
1725 | /** | 2813 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
1726 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission | ||
1727 | * | ||
1728 | */ | ||
1729 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1730 | { | 2814 | { |
1731 | struct mv643xx_private *mp = netdev_priv(dev); | 2815 | struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); |
1732 | struct net_device_stats *stats = &dev->stats; | ||
1733 | unsigned long flags; | ||
1734 | 2816 | ||
1735 | BUG_ON(netif_queue_stopped(dev)); | 2817 | iounmap(msp->eth_base); |
2818 | kfree(msp); | ||
1736 | 2819 | ||
1737 | if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) { | 2820 | return 0; |
1738 | stats->tx_dropped++; | 2821 | } |
1739 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1740 | "unaligned fragment\n", dev->name); | ||
1741 | return NETDEV_TX_BUSY; | ||
1742 | } | ||
1743 | 2822 | ||
1744 | spin_lock_irqsave(&mp->lock, flags); | 2823 | static struct platform_driver mv643xx_eth_shared_driver = { |
2824 | .probe = mv643xx_eth_shared_probe, | ||
2825 | .remove = mv643xx_eth_shared_remove, | ||
2826 | .driver = { | ||
2827 | .name = MV643XX_ETH_SHARED_NAME, | ||
2828 | .owner = THIS_MODULE, | ||
2829 | }, | ||
2830 | }; | ||
1745 | 2831 | ||
1746 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { | 2832 | /* |
1747 | printk(KERN_ERR "%s: transmit with queue full\n", dev->name); | 2833 | * ethernet_phy_set - Set the ethernet port PHY address. |
1748 | netif_stop_queue(dev); | 2834 | * |
1749 | spin_unlock_irqrestore(&mp->lock, flags); | 2835 | * DESCRIPTION: |
1750 | return NETDEV_TX_BUSY; | 2836 | * This routine sets the given ethernet port PHY address. |
1751 | } | 2837 | * |
2838 | * INPUT: | ||
2839 | * struct mv643xx_private *mp Ethernet Port. | ||
2840 | * int phy_addr PHY address. | ||
2841 | * | ||
2842 | * OUTPUT: | ||
2843 | * None. | ||
2844 | * | ||
2845 | * RETURN: | ||
2846 | * None. | ||
2847 | * | ||
2848 | */ | ||
2849 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) | ||
2850 | { | ||
2851 | u32 reg_data; | ||
2852 | int addr_shift = 5 * mp->port_num; | ||
1752 | 2853 | ||
1753 | eth_tx_submit_descs_for_skb(mp, skb); | 2854 | reg_data = rdl(mp, PHY_ADDR_REG); |
1754 | stats->tx_bytes += skb->len; | 2855 | reg_data &= ~(0x1f << addr_shift); |
1755 | stats->tx_packets++; | 2856 | reg_data |= (phy_addr & 0x1f) << addr_shift; |
1756 | dev->trans_start = jiffies; | 2857 | wrl(mp, PHY_ADDR_REG, reg_data); |
2858 | } | ||
1757 | 2859 | ||
1758 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | 2860 | /* |
1759 | netif_stop_queue(dev); | 2861 | * ethernet_phy_get - Get the ethernet port PHY address. |
2862 | * | ||
2863 | * DESCRIPTION: | ||
2864 | * This routine returns the given ethernet port PHY address. | ||
2865 | * | ||
2866 | * INPUT: | ||
2867 | * struct mv643xx_private *mp Ethernet Port. | ||
2868 | * | ||
2869 | * OUTPUT: | ||
2870 | * None. | ||
2871 | * | ||
2872 | * RETURN: | ||
2873 | * PHY address. | ||
2874 | * | ||
2875 | */ | ||
2876 | static int ethernet_phy_get(struct mv643xx_private *mp) | ||
2877 | { | ||
2878 | unsigned int reg_data; | ||
1760 | 2879 | ||
1761 | spin_unlock_irqrestore(&mp->lock, flags); | 2880 | reg_data = rdl(mp, PHY_ADDR_REG); |
1762 | 2881 | ||
1763 | return NETDEV_TX_OK; | 2882 | return ((reg_data >> (5 * mp->port_num)) & 0x1f); |
1764 | } | 2883 | } |
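
ethernet_phy_set/ethernet_phy_get above treat PHY_ADDR_REG as an array of 5-bit fields, one per port, at bit offset 5 * port_num. A standalone model of that packing and unpacking:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reg = 0;
            int port;

            for (port = 0; port < 3; port++) {
                    int shift = 5 * port;

                    reg &= ~(0x1f << shift);          /* clear field */
                    reg |= (0x08 + port) << shift;    /* set PHY addr */
            }

            for (port = 0; port < 3; port++)
                    printf("port %d -> phy %d\n", port,
                           (reg >> (5 * port)) & 0x1f);
            return 0;
    }
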
1765 | 2884 | ||
1766 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2885 | /* |
1767 | static void mv643xx_netpoll(struct net_device *netdev) | 2886 | * ethernet_phy_detect - Detect whether a phy is present |
2887 | * | ||
2888 | * DESCRIPTION: | ||
2889 | * This function tests whether there is a PHY present on | ||
2890 | * the specified port. | ||
2891 | * | ||
2892 | * INPUT: | ||
2893 | * struct mv643xx_private *mp Ethernet Port. | ||
2894 | * | ||
2895 | * OUTPUT: | ||
2896 | * None | ||
2897 | * | ||
2898 | * RETURN: | ||
2899 | * 0 on success | ||
2900 | * -ENODEV on failure | ||
2901 | * | ||
2902 | */ | ||
2903 | static int ethernet_phy_detect(struct mv643xx_private *mp) | ||
1768 | { | 2904 | { |
1769 | struct mv643xx_private *mp = netdev_priv(netdev); | 2905 | unsigned int phy_reg_data0; |
1770 | int port_num = mp->port_num; | 2906 | int auto_neg; |
1771 | 2907 | ||
1772 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); | 2908 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); |
1773 | /* wait for previous write to complete */ | 2909 | auto_neg = phy_reg_data0 & 0x1000; |
1774 | rdl(mp, INTERRUPT_MASK_REG(port_num)); | 2910 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ |
2911 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | ||
1775 | 2912 | ||
1776 | mv643xx_eth_int_handler(netdev->irq, netdev); | 2913 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); |
2914 | if ((phy_reg_data0 & 0x1000) == auto_neg) | ||
2915 | return -ENODEV; /* change didn't take */ | ||
1777 | 2916 | ||
1778 | wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); | 2917 | phy_reg_data0 ^= 0x1000; |
2918 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | ||
2919 | return 0; | ||
1779 | } | 2920 | } |
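
The detection trick in ethernet_phy_detect() above is worth spelling out: flip the auto-negotiation enable bit, read it back, and if the flip did not take there is nothing answering on the SMI bus; on success the original value is restored. A toy, self-contained model (the PHY side is simulated):

    #include <stdio.h>

    #define BMCR_ANENABLE 0x1000

    static int phy_present = 1; /* flip to 0 to simulate a missing PHY */
    static unsigned int bmcr = BMCR_ANENABLE;

    static unsigned int smi_read(void) { return phy_present ? bmcr : 0xffff; }
    static void smi_write(unsigned int v) { if (phy_present) bmcr = v; }

    int main(void)
    {
            unsigned int orig = smi_read();

            smi_write(orig ^ BMCR_ANENABLE);
            if ((smi_read() & BMCR_ANENABLE) == (orig & BMCR_ANENABLE)) {
                    printf("no PHY\n"); /* change didn't take */
                    return 1;
            }
            smi_write(orig); /* restore the original setting */
            printf("PHY present\n");
            return 0;
    }
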
1780 | #endif | ||
1781 | 2921 | ||
1782 | static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | 2922 | static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, |
1783 | int speed, int duplex, | 2923 | int speed, int duplex, |
@@ -2008,95 +3148,6 @@ static int mv643xx_eth_remove(struct platform_device *pdev) | |||
2008 | return 0; | 3148 | return 0; |
2009 | } | 3149 | } |
2010 | 3150 | ||
2011 | static void mv643xx_eth_conf_mbus_windows(struct mv643xx_shared_private *msp, | ||
2012 | struct mbus_dram_target_info *dram) | ||
2013 | { | ||
2014 | void __iomem *base = msp->eth_base; | ||
2015 | u32 win_enable; | ||
2016 | u32 win_protect; | ||
2017 | int i; | ||
2018 | |||
2019 | for (i = 0; i < 6; i++) { | ||
2020 | writel(0, base + WINDOW_BASE(i)); | ||
2021 | writel(0, base + WINDOW_SIZE(i)); | ||
2022 | if (i < 4) | ||
2023 | writel(0, base + WINDOW_REMAP_HIGH(i)); | ||
2024 | } | ||
2025 | |||
2026 | win_enable = 0x3f; | ||
2027 | win_protect = 0; | ||
2028 | |||
2029 | for (i = 0; i < dram->num_cs; i++) { | ||
2030 | struct mbus_dram_window *cs = dram->cs + i; | ||
2031 | |||
2032 | writel((cs->base & 0xffff0000) | | ||
2033 | (cs->mbus_attr << 8) | | ||
2034 | dram->mbus_dram_target_id, base + WINDOW_BASE(i)); | ||
2035 | writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); | ||
2036 | |||
2037 | win_enable &= ~(1 << i); | ||
2038 | win_protect |= 3 << (2 * i); | ||
2039 | } | ||
2040 | |||
2041 | writel(win_enable, base + WINDOW_BAR_ENABLE); | ||
2042 | msp->win_protect = win_protect; | ||
2043 | } | ||
2044 | |||
2045 | static int mv643xx_eth_shared_probe(struct platform_device *pdev) | ||
2046 | { | ||
2047 | static int mv643xx_version_printed = 0; | ||
2048 | struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data; | ||
2049 | struct mv643xx_shared_private *msp; | ||
2050 | struct resource *res; | ||
2051 | int ret; | ||
2052 | |||
2053 | if (!mv643xx_version_printed++) | ||
2054 | printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); | ||
2055 | |||
2056 | ret = -EINVAL; | ||
2057 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
2058 | if (res == NULL) | ||
2059 | goto out; | ||
2060 | |||
2061 | ret = -ENOMEM; | ||
2062 | msp = kmalloc(sizeof(*msp), GFP_KERNEL); | ||
2063 | if (msp == NULL) | ||
2064 | goto out; | ||
2065 | memset(msp, 0, sizeof(*msp)); | ||
2066 | |||
2067 | msp->eth_base = ioremap(res->start, res->end - res->start + 1); | ||
2068 | if (msp->eth_base == NULL) | ||
2069 | goto out_free; | ||
2070 | |||
2071 | spin_lock_init(&msp->phy_lock); | ||
2072 | msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000; | ||
2073 | |||
2074 | platform_set_drvdata(pdev, msp); | ||
2075 | |||
2076 | /* | ||
2077 | * (Re-)program MBUS remapping windows if we are asked to. | ||
2078 | */ | ||
2079 | if (pd != NULL && pd->dram != NULL) | ||
2080 | mv643xx_eth_conf_mbus_windows(msp, pd->dram); | ||
2081 | |||
2082 | return 0; | ||
2083 | |||
2084 | out_free: | ||
2085 | kfree(msp); | ||
2086 | out: | ||
2087 | return ret; | ||
2088 | } | ||
2089 | |||
2090 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | ||
2091 | { | ||
2092 | struct mv643xx_shared_private *msp = platform_get_drvdata(pdev); | ||
2093 | |||
2094 | iounmap(msp->eth_base); | ||
2095 | kfree(msp); | ||
2096 | |||
2097 | return 0; | ||
2098 | } | ||
2099 | |||
2100 | static void mv643xx_eth_shutdown(struct platform_device *pdev) | 3151 | static void mv643xx_eth_shutdown(struct platform_device *pdev) |
2101 | { | 3152 | { |
2102 | struct net_device *dev = platform_get_drvdata(pdev); | 3153 | struct net_device *dev = platform_get_drvdata(pdev); |
@@ -2120,15 +3171,6 @@ static struct platform_driver mv643xx_eth_driver = { | |||
2120 | }, | 3171 | }, |
2121 | }; | 3172 | }; |
2122 | 3173 | ||
2123 | static struct platform_driver mv643xx_eth_shared_driver = { | ||
2124 | .probe = mv643xx_eth_shared_probe, | ||
2125 | .remove = mv643xx_eth_shared_remove, | ||
2126 | .driver = { | ||
2127 | .name = MV643XX_ETH_SHARED_NAME, | ||
2128 | .owner = THIS_MODULE, | ||
2129 | }, | ||
2130 | }; | ||
2131 | |||
2132 | /* | 3174 | /* |
2133 | * mv643xx_init_module | 3175 | * mv643xx_init_module |
2134 | * | 3176 | * |
@@ -2175,1191 +3217,3 @@ MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" | |||
2175 | MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | 3217 | MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); |
2176 | MODULE_ALIAS("platform:" MV643XX_ETH_NAME); | 3218 | MODULE_ALIAS("platform:" MV643XX_ETH_NAME); |
2177 | MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); | 3219 | MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); |
2178 | |||
2179 | /* | ||
2180 | * The second part is the low level driver of the gigE ethernet ports. | ||
2181 | */ | ||
2182 | |||
2183 | /* | ||
2184 | * Marvell's Gigabit Ethernet controller low level driver | ||
2185 | * | ||
2186 | * DESCRIPTION: | ||
2187 | * This file introduces a low level API to Marvell's Gigabit Ethernet | ||
2188 | * controller. This Gigabit Ethernet Controller driver API controls | ||
2189 | * 1) Operations (i.e. port init, start, reset etc'). | ||
2190 | * 2) Data flow (i.e. port send, receive etc'). | ||
2191 | * Each Gigabit Ethernet port is controlled via | ||
2192 | * struct mv643xx_private. | ||
2193 | * This struct includes user configuration information as well as | ||
2194 | * driver internal data needed for its operations. | ||
2195 | * | ||
2196 | * Supported Features: | ||
2197 | * - This low level driver is OS independent. Allocating memory for | ||
2198 | * the descriptor rings and buffers is not within the scope of | ||
2199 | * this driver. | ||
2200 | * - The user is free from Rx/Tx queue management. | ||
2201 | * - This low level driver introduces a functionality API that enables | ||
2202 | * the user to operate Marvell's Gigabit Ethernet Controller in a | ||
2203 | * convenient way. | ||
2204 | * - Simple Gigabit Ethernet port operation API. | ||
2205 | * - Simple Gigabit Ethernet port data flow API. | ||
2206 | * - Data flow and operation API support per queue functionality. | ||
2207 | * - Support cached descriptors for better performance. | ||
2208 | * - Enable access to all four DRAM banks and internal SRAM memory | ||
2209 | * spaces. | ||
2210 | * - PHY access and control API. | ||
2211 | * - Port control register configuration API. | ||
2212 | * - Full control over Unicast and Multicast MAC configurations. | ||
2213 | * | ||
2214 | * Operation flow: | ||
2215 | * | ||
2216 | * Initialization phase | ||
2217 | * This phase completes the initialization of the | ||
2218 | * mv643xx_private struct. | ||
2219 | * User information regarding port configuration has to be set | ||
2220 | * prior to calling the port initialization routine. | ||
2221 | * | ||
2222 | * In this phase any port Tx/Rx activity is halted, MIB counters | ||
2223 | * are cleared, the PHY address is set according to the user parameter, | ||
2224 | * and access to the DRAM and internal SRAM memory spaces is enabled. | ||
2225 | * | ||
2226 | * Driver ring initialization | ||
2227 | * Allocating memory for the descriptor rings and buffers is not | ||
2228 | * within the scope of this driver. Thus, the user is required to | ||
2229 | * allocate memory for the descriptors ring and buffers. Those | ||
2230 | * memory parameters are used by the Rx and Tx ring initialization | ||
2231 | * routines in order to curve the descriptor linked list in a form | ||
2232 | * of a ring. | ||
2233 | * Note: Pay special attention to alignment issues when using | ||
2234 | * cached descriptors/buffers. In this phase the driver stores | ||
2235 | * information in the mv643xx_private struct regarding each queue | ||
2236 | * ring. | ||
2237 | * | ||
2238 | * Driver start | ||
2239 | * This phase prepares the Ethernet port for Rx and Tx activity. | ||
2240 | * It uses the information stored in the mv643xx_private struct to | ||
2241 | * initialize the various port registers. | ||
2242 | * | ||
2243 | * Data flow: | ||
2244 | * All packet references to/from the driver are done using | ||
2245 | * struct pkt_info. | ||
2246 | * This struct is a unified struct used with Rx and Tx operations. | ||
2247 | * This way the user is not required to be familiar with neither | ||
2248 | * Tx nor Rx descriptors structures. | ||
2249 | * The driver's descriptor rings are managed by indexes. | ||
2250 | * Those indexes control the ring resources and are used to indicate | ||
2251 | * a SW resource error: | ||
2252 | * 'current' | ||
2253 | * This index points to the current available resource for use. For | ||
2254 | * example in Rx process this index will point to the descriptor | ||
2255 | * that will be passed to the user upon calling the receive | ||
2256 | * routine. In Tx process, this index will point to the descriptor | ||
2257 | * that will be assigned with the user packet info and transmitted. | ||
2258 | * 'used' | ||
2259 | * This index points to the descriptor that needs to restore its | ||
2260 | * resources. For example in Rx process, using the Rx buffer return | ||
2261 | * API will attach the buffer returned in packet info to the | ||
2262 | * descriptor pointed to by 'used'. In Tx process, using the Tx | ||
2263 | * descriptor return will merely return the user packet info with | ||
2264 | * the command status of the transmitted buffer pointed to by the | ||
2265 | * 'used' index. Nevertheless, it is essential to use this routine | ||
2266 | * to update the 'used' index. | ||
2267 | * 'first' | ||
2268 | * This index supports Tx Scatter-Gather. It points to the first | ||
2269 | * descriptor of a packet assembled from multiple buffers. For | ||
2270 | * example, when a Tx resource error occurs in the middle of such | ||
2271 | * a packet, the 'curr' index gets the value of 'first' to indicate | ||
2272 | * that the ring returned to its state before trying to transmit | ||
2273 | * this packet. | ||
2274 | * | ||
2275 | * Receive operation: | ||
2276 | * The eth_port_receive API sets the packet information struct, | ||
2277 | * passed by the caller, with received information from the | ||
2278 | * 'current' SDMA descriptor. | ||
2279 | * It is the user's responsibility to return this resource back | ||
2280 | * to the Rx descriptor ring to enable the reuse of this resource. | ||
2281 | * Returning an Rx resource is done using the eth_rx_return_buff API. | ||
2282 | * | ||
2283 | * Prior to calling the initialization routine eth_port_init() the user | ||
2284 | * must set the following fields under mv643xx_private struct: | ||
2285 | * port_num User Ethernet port number. | ||
2286 | * port_config User port configuration value. | ||
2287 | * port_config_extend User port config extend value. | ||
2288 | * port_sdma_config User port SDMA config value. | ||
2289 | * port_serial_control User port serial control value. | ||
2290 | * | ||
2291 | * This driver data flow is done using the struct pkt_info which | ||
2292 | * is a unified struct for Rx and Tx operations: | ||
2293 | * | ||
2294 | * byte_cnt Tx/Rx descriptor buffer byte count. | ||
2295 | * l4i_chk CPU provided TCP Checksum. For Tx operation | ||
2296 | * only. | ||
2297 | * cmd_sts Tx/Rx descriptor command status. | ||
2298 | * buf_ptr Tx/Rx descriptor buffer pointer. | ||
2299 | * return_info Tx/Rx user resource return information. | ||
2300 | */ | ||
2301 | |||
2302 | /* Ethernet Port routines */ | ||
2303 | static void eth_port_set_filter_table_entry(struct mv643xx_private *mp, | ||
2304 | int table, unsigned char entry); | ||
2305 | |||
2306 | /* | ||
2307 | * eth_port_init - Initialize the Ethernet port driver | ||
2308 | * | ||
2309 | * DESCRIPTION: | ||
2310 | * This function prepares the ethernet port to start its activity: | ||
2311 | * 1) Completes the ethernet port driver struct initialization toward port | ||
2312 | * start routine. | ||
2313 | * 2) Resets the device to a quiescent state in case of warm reboot. | ||
2314 | * 3) Enable SDMA access to all four DRAM banks as well as internal SRAM. | ||
2315 | * 4) Clean MAC tables. The reset status of those tables is unknown. | ||
2316 | * 5) Set PHY address. | ||
2317 | * Note: Call this routine prior to eth_port_start routine and after | ||
2318 | * setting user values in the user fields of Ethernet port control | ||
2319 | * struct. | ||
2320 | * | ||
2321 | * INPUT: | ||
2322 | * struct mv643xx_private *mp Ethernet port control struct | ||
2323 | * | ||
2324 | * OUTPUT: | ||
2325 | * See description. | ||
2326 | * | ||
2327 | * RETURN: | ||
2328 | * None. | ||
2329 | */ | ||
2330 | static void eth_port_init(struct mv643xx_private *mp) | ||
2331 | { | ||
2332 | mp->rx_resource_err = 0; | ||
2333 | |||
2334 | eth_port_reset(mp); | ||
2335 | |||
2336 | eth_port_init_mac_tables(mp); | ||
2337 | } | ||
2338 | |||
2339 | /* | ||
2340 | * eth_port_start - Start the Ethernet port activity. | ||
2341 | * | ||
2342 | * DESCRIPTION: | ||
2343 | * This routine prepares the Ethernet port for Rx and Tx activity: | ||
2344 | * 1. Initialize the Tx and Rx Current Descriptor Pointers for each queue | ||
2345 | * whose descriptor ring has been initialized (using | ||
2346 | * ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx) | ||
2347 | * 2. Initialize and enable the Ethernet configuration port by writing to | ||
2348 | * the port's configuration and command registers. | ||
2349 | * 3. Initialize and enable the SDMA by writing to the SDMA's | ||
2350 | * configuration and command registers. After completing these steps, | ||
2351 | * the ethernet port SDMA can start to perform Rx and Tx activities. | ||
2352 | * | ||
2353 | * Note: Each Rx and Tx queue descriptor's list must be initialized prior | ||
2354 | * to calling this function (use ether_init_tx_desc_ring for Tx queues | ||
2355 | * and ether_init_rx_desc_ring for Rx queues). | ||
2356 | * | ||
2357 | * INPUT: | ||
2358 | * dev - a pointer to the required interface | ||
2359 | * | ||
2360 | * OUTPUT: | ||
2361 | * Ethernet port is ready to receive and transmit. | ||
2362 | * | ||
2363 | * RETURN: | ||
2364 | * None. | ||
2365 | */ | ||
2366 | static void eth_port_start(struct net_device *dev) | ||
2367 | { | ||
2368 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2369 | unsigned int port_num = mp->port_num; | ||
2370 | int tx_curr_desc, rx_curr_desc; | ||
2371 | u32 pscr; | ||
2372 | struct ethtool_cmd ethtool_cmd; | ||
2373 | |||
2374 | /* Assignment of Tx CTRP of given queue */ | ||
2375 | tx_curr_desc = mp->tx_curr_desc_q; | ||
2376 | wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
2377 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | ||
2378 | |||
2379 | /* Assignment of Rx CRDP of given queue */ | ||
2380 | rx_curr_desc = mp->rx_curr_desc_q; | ||
2381 | wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
2382 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | ||
2383 | |||
2384 | /* Add the assigned Ethernet address to the port's address table */ | ||
2385 | eth_port_uc_addr_set(mp, dev->dev_addr); | ||
2386 | |||
2387 | /* Assign port configuration and command. */ | ||
2388 | wrl(mp, PORT_CONFIG_REG(port_num), | ||
2389 | PORT_CONFIG_DEFAULT_VALUE); | ||
2390 | |||
2391 | wrl(mp, PORT_CONFIG_EXTEND_REG(port_num), | ||
2392 | PORT_CONFIG_EXTEND_DEFAULT_VALUE); | ||
2393 | |||
2394 | pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); | ||
2395 | |||
2396 | pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); | ||
2397 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2398 | |||
2399 | pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | | ||
2400 | DISABLE_AUTO_NEG_SPEED_GMII | | ||
2401 | DISABLE_AUTO_NEG_FOR_DUPLX | | ||
2402 | DO_NOT_FORCE_LINK_FAIL | | ||
2403 | SERIAL_PORT_CONTROL_RESERVED; | ||
2404 | |||
2405 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2406 | |||
2407 | pscr |= SERIAL_PORT_ENABLE; | ||
2408 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
2409 | |||
2410 | /* Assign port SDMA configuration */ | ||
2411 | wrl(mp, SDMA_CONFIG_REG(port_num), | ||
2412 | PORT_SDMA_CONFIG_DEFAULT_VALUE); | ||
2413 | |||
2414 | /* Enable port Rx. */ | ||
2415 | mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED); | ||
2416 | |||
2417 | /* Disable port bandwidth limits by clearing MTU register */ | ||
2418 | wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0); | ||
2419 | |||
2420 | /* save phy settings across reset */ | ||
2421 | mv643xx_get_settings(dev, ðtool_cmd); | ||
2422 | ethernet_phy_reset(mp); | ||
2423 | mv643xx_set_settings(dev, ðtool_cmd); | ||
2424 | } | ||
2425 | |||
2426 | /* | ||
2427 | * eth_port_uc_addr_set - Write a MAC address into the port's hw registers | ||
2428 | */ | ||
2429 | static void eth_port_uc_addr_set(struct mv643xx_private *mp, | ||
2430 | unsigned char *p_addr) | ||
2431 | { | ||
2432 | unsigned int port_num = mp->port_num; | ||
2433 | unsigned int mac_h; | ||
2434 | unsigned int mac_l; | ||
2435 | int table; | ||
2436 | |||
2437 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | ||
2438 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | ||
2439 | (p_addr[3] << 0); | ||
2440 | |||
2441 | wrl(mp, MAC_ADDR_LOW(port_num), mac_l); | ||
2442 | wrl(mp, MAC_ADDR_HIGH(port_num), mac_h); | ||
2443 | |||
2444 | /* Accept frames with this address */ | ||
2445 | table = DA_FILTER_UNICAST_TABLE_BASE(port_num); | ||
2446 | eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f); | ||
2447 | } | ||
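/*
 * Worked example: for the address 00:11:22:33:44:55, the code above
 * writes mac_h = 0x00112233 and mac_l = 0x4455, and then marks unicast
 * filter table entry 0x55 & 0x0f == 5 as "accept".
 */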
2448 | |||
2449 | /* | ||
2450 | * eth_port_uc_addr_get - Read the MAC address from the port's hw registers | ||
2451 | */ | ||
2452 | static void eth_port_uc_addr_get(struct mv643xx_private *mp, | ||
2453 | unsigned char *p_addr) | ||
2454 | { | ||
2455 | unsigned int port_num = mp->port_num; | ||
2456 | unsigned int mac_h; | ||
2457 | unsigned int mac_l; | ||
2458 | |||
2459 | mac_h = rdl(mp, MAC_ADDR_HIGH(port_num)); | ||
2460 | mac_l = rdl(mp, MAC_ADDR_LOW(port_num)); | ||
2461 | |||
2462 | p_addr[0] = (mac_h >> 24) & 0xff; | ||
2463 | p_addr[1] = (mac_h >> 16) & 0xff; | ||
2464 | p_addr[2] = (mac_h >> 8) & 0xff; | ||
2465 | p_addr[3] = mac_h & 0xff; | ||
2466 | p_addr[4] = (mac_l >> 8) & 0xff; | ||
2467 | p_addr[5] = mac_l & 0xff; | ||
2468 | } | ||
2469 | |||
2470 | /* | ||
2471 | * The entries in each table are indexed by a hash of a packet's MAC | ||
2472 | * address. One bit in each entry determines whether the packet is | ||
2473 | * accepted. There are 4 entries (each 8 bits wide) in each register | ||
2474 | * of the table. The bits in each entry are defined as follows: | ||
2475 | * 0 Accept=1, Drop=0 | ||
2476 | * 3-1 Queue (ETH_Q0=0) | ||
2477 | * 7-4 Reserved = 0; | ||
2478 | */ | ||
2479 | static void eth_port_set_filter_table_entry(struct mv643xx_private *mp, | ||
2480 | int table, unsigned char entry) | ||
2481 | { | ||
2482 | unsigned int table_reg; | ||
2483 | unsigned int tbl_offset; | ||
2484 | unsigned int reg_offset; | ||
2485 | |||
2486 | tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */ | ||
2487 | reg_offset = entry % 4; /* Entry offset within the register */ | ||
2488 | |||
2489 | /* Set "accepts frame bit" at specified table entry */ | ||
2490 | table_reg = rdl(mp, table + tbl_offset); | ||
2491 | table_reg |= 0x01 << (8 * reg_offset); | ||
2492 | wrl(mp, table + tbl_offset, table_reg); | ||
2493 | } | ||
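/*
 * Continuing the example above: entry 5 maps to tbl_offset == 4 and
 * reg_offset == 1, so bit 8 (0x00000100) of the register at table + 4
 * is set, accepting the frame on queue 0.
 */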
2494 | |||
2495 | /* | ||
2496 | * eth_port_mc_addr - Multicast address settings. | ||
2497 | * | ||
2498 | * The MV device supports multicast using two tables: | ||
2499 | * 1) Special Multicast Table for MAC addresses of the form | ||
2500 | * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF). | ||
2501 | * The MAC DA[7:0] bits are used as a pointer to the Special Multicast | ||
2502 | * Table entries in the DA-Filter table. | ||
2503 | * 2) Other Multicast Table for multicast addresses of any other form. | ||
2504 | * An 8-bit CRC of the address is used as an index to the Other | ||
2505 | * Multicast Table entries in the DA-Filter table. This function | ||
2506 | * calculates that CRC. In either case, eth_port_set_filter_table_entry() | ||
2507 | * is then called to set the actual table entry. | ||
2508 | */ | ||
2509 | static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr) | ||
2510 | { | ||
2511 | unsigned int port_num = mp->port_num; | ||
2512 | unsigned int mac_h; | ||
2513 | unsigned int mac_l; | ||
2514 | unsigned char crc_result = 0; | ||
2515 | int table; | ||
2516 | int mac_array[48]; | ||
2517 | int crc[8]; | ||
2518 | int i; | ||
2519 | |||
2520 | if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && | ||
2521 | (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { | ||
2522 | table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num); | ||
2523 | eth_port_set_filter_table_entry(mp, table, p_addr[5]); | ||
2524 | return; | ||
2525 | } | ||
2526 | |||
2527 | /* Calculate CRC-8 out of the given address */ | ||
2528 | mac_h = (p_addr[0] << 8) | (p_addr[1]); | ||
2529 | mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) | | ||
2530 | (p_addr[4] << 8) | (p_addr[5] << 0); | ||
2531 | |||
2532 | for (i = 0; i < 32; i++) | ||
2533 | mac_array[i] = (mac_l >> i) & 0x1; | ||
2534 | for (i = 32; i < 48; i++) | ||
2535 | mac_array[i] = (mac_h >> (i - 32)) & 0x1; | ||
2536 | |||
2537 | crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^ | ||
2538 | mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^ | ||
2539 | mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^ | ||
2540 | mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^ | ||
2541 | mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0]; | ||
2542 | |||
2543 | crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ | ||
2544 | mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^ | ||
2545 | mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^ | ||
2546 | mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^ | ||
2547 | mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^ | ||
2548 | mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^ | ||
2549 | mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0]; | ||
2550 | |||
2551 | crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^ | ||
2552 | mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^ | ||
2553 | mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^ | ||
2554 | mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^ | ||
2555 | mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ | ||
2556 | mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0]; | ||
2557 | |||
2558 | crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^ | ||
2559 | mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^ | ||
2560 | mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^ | ||
2561 | mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ | ||
2562 | mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^ | ||
2563 | mac_array[3] ^ mac_array[2] ^ mac_array[1]; | ||
2564 | |||
2565 | crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^ | ||
2566 | mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^ | ||
2567 | mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^ | ||
2568 | mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^ | ||
2569 | mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^ | ||
2570 | mac_array[3] ^ mac_array[2]; | ||
2571 | |||
2572 | crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^ | ||
2573 | mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^ | ||
2574 | mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^ | ||
2575 | mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^ | ||
2576 | mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^ | ||
2577 | mac_array[4] ^ mac_array[3]; | ||
2578 | |||
2579 | crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^ | ||
2580 | mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^ | ||
2581 | mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^ | ||
2582 | mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^ | ||
2583 | mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^ | ||
2584 | mac_array[4]; | ||
2585 | |||
2586 | crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^ | ||
2587 | mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^ | ||
2588 | mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^ | ||
2589 | mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^ | ||
2590 | mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5]; | ||
2591 | |||
2592 | for (i = 0; i < 8; i++) | ||
2593 | crc_result = crc_result | (crc[i] << i); | ||
2594 | |||
2595 | table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num); | ||
2596 | eth_port_set_filter_table_entry(mp, table, crc_result); | ||
2597 | } | ||
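/*
 * For reference, the unrolled XOR network above computes an 8-bit CRC
 * over the 48 address bits.  Below is a compact, loop-based sketch of
 * such a CRC; it ASSUMES the generator polynomial is the common CRC-8
 * polynomial x^8 + x^2 + x + 1 (0x07) and that bits are consumed from
 * mac_array[47] down to mac_array[0].  The unrolled network above is
 * the authoritative form; this helper is illustrative only and is not
 * used by the driver.
 */
static unsigned char eth_port_mc_crc8_sketch(const int *mac_array)
{
	unsigned char crc = 0;
	int i;

	for (i = 47; i >= 0; i--) {
		int msb = ((crc >> 7) ^ mac_array[i]) & 1;

		crc <<= 1;
		if (msb)
			crc ^= 0x07;	/* x^8 + x^2 + x + 1 */
	}

	return crc;
}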
2598 | |||
2599 | /* | ||
2600 | * Set the entire multicast list based on dev->mc_list. | ||
2601 | */ | ||
2602 | static void eth_port_set_multicast_list(struct net_device *dev) | ||
2603 | { | ||
2605 | struct dev_mc_list *mc_list; | ||
2606 | int i; | ||
2607 | int table_index; | ||
2608 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2609 | unsigned int eth_port_num = mp->port_num; | ||
2610 | |||
2611 | /* If the device is in promiscuous mode or in all multicast mode, | ||
2612 | * we will fully populate both multicast tables with accept. | ||
2613 | * This is guaranteed to yield a match on all multicast addresses... | ||
2614 | */ | ||
2615 | if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) { | ||
2616 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
2617 | /* Set all entries in DA filter special multicast | ||
2618 | * table (Ex_dFSMT) | ||
2619 | * Set for ETH_Q0 for now | ||
2620 | * Bits | ||
2621 | * 0 Accept=1, Drop=0 | ||
2622 | * 3-1 Queue ETH_Q0=0 | ||
2623 | * 7-4 Reserved = 0; | ||
2624 | */ | ||
2625 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | ||
2626 | |||
2627 | /* Set all entries in DA filter other multicast | ||
2628 | * table (Ex_dFOMT) | ||
2629 | * Set for ETH_Q0 for now | ||
2630 | * Bits | ||
2631 | * 0 Accept=1, Drop=0 | ||
2632 | * 3-1 Queue ETH_Q0=0 | ||
2633 | * 7-4 Reserved = 0; | ||
2634 | */ | ||
2635 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); | ||
2636 | } | ||
2637 | return; | ||
2638 | } | ||
2639 | |||
2640 | /* We will clear out multicast tables every time we get the list. | ||
2641 | * Then add the entire new list... | ||
2642 | */ | ||
2643 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
2644 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | ||
2645 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | ||
2646 | (eth_port_num) + table_index, 0); | ||
2647 | |||
2648 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | ||
2649 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE | ||
2650 | (eth_port_num) + table_index, 0); | ||
2651 | } | ||
2652 | |||
2653 | /* Get pointer to net_device multicast list and add each one... */ | ||
2654 | for (i = 0, mc_list = dev->mc_list; | ||
2655 | (i < 256) && (mc_list != NULL) && (i < dev->mc_count); | ||
2656 | i++, mc_list = mc_list->next) | ||
2657 | if (mc_list->dmi_addrlen == 6) | ||
2658 | eth_port_mc_addr(mp, mc_list->dmi_addr); | ||
2659 | } | ||
2660 | |||
2661 | /* | ||
2662 | * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables | ||
2663 | * | ||
2664 | * DESCRIPTION: | ||
2665 | * Go through all the DA filter tables (Unicast, Special Multicast & | ||
2666 | * Other Multicast) and set each entry to 0. | ||
2667 | * | ||
2668 | * INPUT: | ||
2669 | * struct mv643xx_private *mp Ethernet Port. | ||
2670 | * | ||
2671 | * OUTPUT: | ||
2672 | * Multicast and Unicast packets are rejected. | ||
2673 | * | ||
2674 | * RETURN: | ||
2675 | * None. | ||
2676 | */ | ||
2677 | static void eth_port_init_mac_tables(struct mv643xx_private *mp) | ||
2678 | { | ||
2679 | unsigned int port_num = mp->port_num; | ||
2680 | int table_index; | ||
2681 | |||
2682 | /* Clear DA filter unicast table (Ex_dFUT) */ | ||
2683 | for (table_index = 0; table_index <= 0xC; table_index += 4) | ||
2684 | wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) + | ||
2685 | table_index, 0); | ||
2686 | |||
2687 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
2688 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | ||
2689 | wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) + | ||
2690 | table_index, 0); | ||
2691 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | ||
2692 | wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) + | ||
2693 | table_index, 0); | ||
2694 | } | ||
2695 | } | ||
2696 | |||
2697 | /* | ||
2698 | * eth_clear_mib_counters - Clear all MIB counters | ||
2699 | * | ||
2700 | * DESCRIPTION: | ||
2701 | * This function clears all MIB counters of a specific ethernet port. | ||
2702 | * A read from a MIB counter register resets that counter. | ||
2703 | * | ||
2704 | * INPUT: | ||
2705 | * struct mv643xx_private *mp Ethernet Port. | ||
2706 | * | ||
2707 | * OUTPUT: | ||
2708 | * After reading all MIB counters, the counters are reset. | ||
2709 | * | ||
2710 | * RETURN: | ||
2711 | * None. | ||
2712 | * | ||
2713 | */ | ||
2714 | static void eth_clear_mib_counters(struct mv643xx_private *mp) | ||
2715 | { | ||
2716 | unsigned int port_num = mp->port_num; | ||
2717 | int i; | ||
2718 | |||
2719 | /* Perform dummy reads from MIB counters */ | ||
2720 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i <= ETH_MIB_LATE_COLLISION; | ||
2721 | i += 4) | ||
2722 | rdl(mp, MIB_COUNTERS_BASE(port_num) + i); | ||
2723 | } | ||
2724 | |||
2725 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) | ||
2726 | { | ||
2727 | return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset); | ||
2728 | } | ||
2729 | |||
2730 | static void eth_update_mib_counters(struct mv643xx_private *mp) | ||
2731 | { | ||
2732 | struct mv643xx_mib_counters *p = &mp->mib_counters; | ||
2733 | int offset; | ||
2734 | |||
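	/*
	 * The loops below rely on struct mv643xx_mib_counters laying out
	 * its u32 fields in exactly the same order as the hardware MIB
	 * registers, so each counter can be accumulated by walking byte
	 * offsets through the struct and the register block in lockstep.
	 */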
2735 | p->good_octets_received += | ||
2736 | read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW); | ||
2737 | p->good_octets_received += | ||
2738 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32; | ||
2739 | |||
2740 | for (offset = ETH_MIB_BAD_OCTETS_RECEIVED; | ||
2741 | offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS; | ||
2742 | offset += 4) | ||
2743 | *(u32 *)((char *)p + offset) += read_mib(mp, offset); | ||
2744 | |||
2745 | p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW); | ||
2746 | p->good_octets_sent += | ||
2747 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32; | ||
2748 | |||
2749 | for (offset = ETH_MIB_GOOD_FRAMES_SENT; | ||
2750 | offset <= ETH_MIB_LATE_COLLISION; | ||
2751 | offset += 4) | ||
2752 | *(u32 *)((char *)p + offset) += read_mib(mp, offset); | ||
2753 | } | ||
2754 | |||
2755 | /* | ||
2756 | * ethernet_phy_detect - Detect whether a phy is present | ||
2757 | * | ||
2758 | * DESCRIPTION: | ||
2759 | * This function tests whether there is a PHY present on | ||
2760 | * the specified port. | ||
2761 | * | ||
2762 | * INPUT: | ||
2763 | * struct mv643xx_private *mp Ethernet Port. | ||
2764 | * | ||
2765 | * OUTPUT: | ||
2766 | * None | ||
2767 | * | ||
2768 | * RETURN: | ||
2769 | * 0 on success | ||
2770 | * -ENODEV on failure | ||
2771 | * | ||
2772 | */ | ||
2773 | static int ethernet_phy_detect(struct mv643xx_private *mp) | ||
2774 | { | ||
2775 | unsigned int phy_reg_data0; | ||
2776 | int auto_neg; | ||
2777 | |||
2778 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); | ||
2779 | auto_neg = phy_reg_data0 & 0x1000; | ||
2780 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ | ||
2781 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | ||
2782 | |||
2783 | eth_port_read_smi_reg(mp, 0, &phy_reg_data0); | ||
2784 | if ((phy_reg_data0 & 0x1000) == auto_neg) | ||
2785 | return -ENODEV; /* change didn't take */ | ||
2786 | |||
2787 | phy_reg_data0 ^= 0x1000; | ||
2788 | eth_port_write_smi_reg(mp, 0, phy_reg_data0); | ||
2789 | return 0; | ||
2790 | } | ||
2791 | |||
2792 | /* | ||
2793 | * ethernet_phy_get - Get the ethernet port PHY address. | ||
2794 | * | ||
2795 | * DESCRIPTION: | ||
2796 | * This routine returns the given ethernet port PHY address. | ||
2797 | * | ||
2798 | * INPUT: | ||
2799 | * struct mv643xx_private *mp Ethernet Port. | ||
2800 | * | ||
2801 | * OUTPUT: | ||
2802 | * None. | ||
2803 | * | ||
2804 | * RETURN: | ||
2805 | * PHY address. | ||
2806 | * | ||
2807 | */ | ||
2808 | static int ethernet_phy_get(struct mv643xx_private *mp) | ||
2809 | { | ||
2810 | unsigned int reg_data; | ||
2811 | |||
2812 | reg_data = rdl(mp, PHY_ADDR_REG); | ||
2813 | |||
2814 | return (reg_data >> (5 * mp->port_num)) & 0x1f; | ||
2815 | } | ||
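/*
 * PHY_ADDR_REG packs one 5-bit PHY address per port: port 0 uses bits
 * 4:0, port 1 bits 9:5, port 2 bits 14:10, and so on, which is what
 * the "5 * mp->port_num" shift above and below selects.
 */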
2816 | |||
2817 | /* | ||
2818 | * ethernet_phy_set - Set the ethernet port PHY address. | ||
2819 | * | ||
2820 | * DESCRIPTION: | ||
2821 | * This routine sets the given ethernet port PHY address. | ||
2822 | * | ||
2823 | * INPUT: | ||
2824 | * struct mv643xx_private *mp Ethernet Port. | ||
2825 | * int phy_addr PHY address. | ||
2826 | * | ||
2827 | * OUTPUT: | ||
2828 | * None. | ||
2829 | * | ||
2830 | * RETURN: | ||
2831 | * None. | ||
2832 | * | ||
2833 | */ | ||
2834 | static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr) | ||
2835 | { | ||
2836 | u32 reg_data; | ||
2837 | int addr_shift = 5 * mp->port_num; | ||
2838 | |||
2839 | reg_data = rdl(mp, PHY_ADDR_REG); | ||
2840 | reg_data &= ~(0x1f << addr_shift); | ||
2841 | reg_data |= (phy_addr & 0x1f) << addr_shift; | ||
2842 | wrl(mp, PHY_ADDR_REG, reg_data); | ||
2843 | } | ||
2844 | |||
2845 | /* | ||
2846 | * ethernet_phy_reset - Reset Ethernet port PHY. | ||
2847 | * | ||
2848 | * DESCRIPTION: | ||
2849 | * This routine utilizes the SMI interface to reset the ethernet port PHY. | ||
2850 | * | ||
2851 | * INPUT: | ||
2852 | * struct mv643xx_private *mp Ethernet Port. | ||
2853 | * | ||
2854 | * OUTPUT: | ||
2855 | * The PHY is reset. | ||
2856 | * | ||
2857 | * RETURN: | ||
2858 | * None. | ||
2859 | * | ||
2860 | */ | ||
2861 | static void ethernet_phy_reset(struct mv643xx_private *mp) | ||
2862 | { | ||
2863 | unsigned int phy_reg_data; | ||
2864 | |||
2865 | /* Reset the PHY */ | ||
2866 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | ||
2867 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | ||
2868 | eth_port_write_smi_reg(mp, 0, phy_reg_data); | ||
2869 | |||
2870 | /* wait for PHY to come out of reset */ | ||
2871 | do { | ||
2872 | udelay(1); | ||
2873 | eth_port_read_smi_reg(mp, 0, &phy_reg_data); | ||
2874 | } while (phy_reg_data & 0x8000); | ||
2875 | } | ||
2876 | |||
2877 | static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp, | ||
2878 | unsigned int queues) | ||
2879 | { | ||
2880 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues); | ||
2881 | } | ||
2882 | |||
2883 | static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp, | ||
2884 | unsigned int queues) | ||
2885 | { | ||
2886 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues); | ||
2887 | } | ||
2888 | |||
2889 | static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp) | ||
2890 | { | ||
2891 | unsigned int port_num = mp->port_num; | ||
2892 | u32 queues; | ||
2893 | |||
2894 | /* Stop Tx port activity. Check port Tx activity. */ | ||
2895 | queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; | ||
2896 | if (queues) { | ||
2897 | /* Issue stop command for active queues only */ | ||
2898 | wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); | ||
2899 | |||
2900 | /* Wait for all Tx activity to terminate: poll the queue command | ||
2901 | * register until all Tx queues report stopped. */ | ||
2902 | while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) | ||
2903 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2904 | |||
2905 | /* Wait for Tx FIFO to empty */ | ||
2906 | while (!(rdl(mp, PORT_STATUS_REG(port_num)) & | ||
2907 | ETH_PORT_TX_FIFO_EMPTY)) | ||
2908 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2909 | } | ||
2910 | |||
2911 | return queues; | ||
2912 | } | ||
2913 | |||
2914 | static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp) | ||
2915 | { | ||
2916 | unsigned int port_num = mp->port_num; | ||
2917 | u32 queues; | ||
2918 | |||
2919 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2920 | queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; | ||
2921 | if (queues) { | ||
2922 | /* Issue stop command for active queues only */ | ||
2923 | wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); | ||
2924 | |||
2925 | /* Wait for all Rx activity to terminate: poll the queue command | ||
2926 | * register until all Rx queues report stopped. */ | ||
2927 | while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) | ||
2928 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2929 | } | ||
2930 | |||
2931 | return queues; | ||
2932 | } | ||
2933 | |||
2934 | /* | ||
2935 | * eth_port_reset - Reset Ethernet port | ||
2936 | * | ||
2937 | * DESCRIPTION: | ||
2938 | * This routine resets the chip by aborting any SDMA engine activity and | ||
2939 | * clearing the MIB counters. The receive and transmit units are left | ||
2940 | * in an idle state after this command completes, and the port is disabled. | ||
2941 | * | ||
2942 | * INPUT: | ||
2943 | * struct mv643xx_private *mp Ethernet Port. | ||
2944 | * | ||
2945 | * OUTPUT: | ||
2946 | * Channel activity is halted. | ||
2947 | * | ||
2948 | * RETURN: | ||
2949 | * None. | ||
2950 | * | ||
2951 | */ | ||
2952 | static void eth_port_reset(struct mv643xx_private *mp) | ||
2953 | { | ||
2954 | unsigned int port_num = mp->port_num; | ||
2955 | unsigned int reg_data; | ||
2956 | |||
2957 | mv643xx_eth_port_disable_tx(mp); | ||
2958 | mv643xx_eth_port_disable_rx(mp); | ||
2959 | |||
2960 | /* Clear all MIB counters */ | ||
2961 | eth_clear_mib_counters(mp); | ||
2962 | |||
2963 | /* Reset the Enable bit in the Configuration Register */ | ||
2964 | reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num)); | ||
2965 | reg_data &= ~(SERIAL_PORT_ENABLE | | ||
2966 | DO_NOT_FORCE_LINK_FAIL | | ||
2967 | FORCE_LINK_PASS); | ||
2968 | wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data); | ||
2969 | } | ||
2970 | |||
2971 | |||
2972 | /* | ||
2973 | * eth_port_read_smi_reg - Read PHY registers | ||
2974 | * | ||
2975 | * DESCRIPTION: | ||
2976 | * This routine utilizes the SMI interface to interact with the PHY in | ||
2977 | * order to perform a PHY register read. | ||
2978 | * | ||
2979 | * INPUT: | ||
2980 | * struct mv643xx_private *mp Ethernet Port. | ||
2981 | * unsigned int phy_reg PHY register address offset. | ||
2982 | * unsigned int *value Register value buffer. | ||
2983 | * | ||
2984 | * OUTPUT: | ||
2985 | * Write the value of the specified PHY register into the given buffer. | ||
2986 | * | ||
2987 | * RETURN: | ||
2989 | * None. On timeout, an error is logged and the value buffer is | ||
2990 | * left unmodified. | ||
2990 | * | ||
2991 | */ | ||
2992 | static void eth_port_read_smi_reg(struct mv643xx_private *mp, | ||
2993 | unsigned int phy_reg, unsigned int *value) | ||
2994 | { | ||
2995 | void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; | ||
2996 | int phy_addr = ethernet_phy_get(mp); | ||
2997 | unsigned long flags; | ||
2998 | int i; | ||
2999 | |||
3000 | /* the SMI register is a shared resource */ | ||
3001 | spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); | ||
3002 | |||
3003 | /* wait for the SMI register to become available */ | ||
3004 | for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { | ||
3005 | if (i == PHY_WAIT_ITERATIONS) { | ||
3006 | printk(KERN_WARNING "%s: PHY busy timeout\n", mp->dev->name); | ||
3007 | goto out; | ||
3008 | } | ||
3009 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
3010 | } | ||
3011 | |||
3012 | writel((phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ, | ||
3013 | smi_reg); | ||
3014 | |||
3015 | /* now wait for the data to be valid */ | ||
3016 | for (i = 0; !(readl(smi_reg) & ETH_SMI_READ_VALID); i++) { | ||
3017 | if (i == PHY_WAIT_ITERATIONS) { | ||
3018 | printk(KERN_WARNING "%s: PHY read timeout\n", mp->dev->name); | ||
3019 | goto out; | ||
3020 | } | ||
3021 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
3022 | } | ||
3023 | |||
3024 | *value = readl(smi_reg) & 0xffff; | ||
3025 | out: | ||
3026 | spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); | ||
3027 | } | ||
3028 | |||
3029 | /* | ||
3030 | * eth_port_write_smi_reg - Write to PHY registers | ||
3031 | * | ||
3032 | * DESCRIPTION: | ||
3033 | * This routine utilizes the SMI interface to interact with the PHY in | ||
3034 | * order to perform writes to PHY registers. | ||
3035 | * | ||
3036 | * INPUT: | ||
3037 | * struct mv643xx_private *mp Ethernet Port. | ||
3038 | * unsigned int phy_reg PHY register address offset. | ||
3039 | * unsigned int value Register value. | ||
3040 | * | ||
3041 | * OUTPUT: | ||
3042 | * Write the given value to the specified PHY register. | ||
3043 | * | ||
3044 | * RETURN: | ||
3045 | * None. On timeout, an error is logged and the write is not | ||
3046 | * performed. | ||
3047 | * | ||
3048 | */ | ||
3049 | static void eth_port_write_smi_reg(struct mv643xx_private *mp, | ||
3050 | unsigned int phy_reg, unsigned int value) | ||
3051 | { | ||
3052 | void __iomem *smi_reg = mp->shared_smi->eth_base + SMI_REG; | ||
3053 | int phy_addr = ethernet_phy_get(mp); | ||
3054 | unsigned long flags; | ||
3055 | int i; | ||
3056 | |||
3057 | /* the SMI register is a shared resource */ | ||
3058 | spin_lock_irqsave(&mp->shared_smi->phy_lock, flags); | ||
3059 | |||
3060 | /* wait for the SMI register to become available */ | ||
3061 | for (i = 0; readl(smi_reg) & ETH_SMI_BUSY; i++) { | ||
3062 | if (i == PHY_WAIT_ITERATIONS) { | ||
3063 | printk(KERN_WARNING "%s: PHY busy timeout\n", mp->dev->name); | ||
3064 | goto out; | ||
3065 | } | ||
3066 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
3067 | } | ||
3068 | |||
3069 | writel((phy_addr << 16) | (phy_reg << 21) | | ||
3070 | ETH_SMI_OPCODE_WRITE | (value & 0xffff), smi_reg); | ||
3071 | out: | ||
3072 | spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags); | ||
3073 | } | ||
3074 | |||
3075 | /* | ||
3076 | * Wrappers for the MII support library (the phy_id argument is unused). | ||
3077 | */ | ||
3078 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) | ||
3079 | { | ||
3080 | struct mv643xx_private *mp = netdev_priv(dev); | ||
3081 | unsigned int val; | ||
3082 | |||
3083 | eth_port_read_smi_reg(mp, location, &val); | ||
3084 | return val; | ||
3085 | } | ||
3086 | |||
3087 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) | ||
3088 | { | ||
3089 | struct mv643xx_private *mp = netdev_priv(dev); | ||
3090 | eth_port_write_smi_reg(mp, location, val); | ||
3091 | } | ||
3092 | |||
3093 | /* | ||
3094 | * eth_port_receive - Get received information from Rx ring. | ||
3095 | * | ||
3096 | * DESCRIPTION: | ||
3097 | * This routine returns the received data to the caller. There is no | ||
3098 | * data copying during routine operation. All information is returned | ||
3099 | * through the packet information struct passed in by the caller. | ||
3100 | * If the routine exhausts the Rx ring's resources, the resource error flag | ||
3101 | * is set. | ||
3102 | * | ||
3103 | * INPUT: | ||
3104 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
3105 | * struct pkt_info *p_pkt_info User packet buffer. | ||
3106 | * | ||
3107 | * OUTPUT: | ||
3108 | * Rx ring current and used indexes are updated. | ||
3109 | * | ||
3110 | * RETURN: | ||
3111 | * ETH_ERROR in case the routine cannot access the Rx desc ring. | ||
3112 | * ETH_QUEUE_FULL if Rx ring resources are exhausted. | ||
3113 | * ETH_END_OF_JOB if there is no received data. | ||
3114 | * ETH_OK otherwise. | ||
3115 | */ | ||
3116 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | ||
3117 | struct pkt_info *p_pkt_info) | ||
3118 | { | ||
3119 | int rx_next_curr_desc, rx_curr_desc, rx_used_desc; | ||
3120 | volatile struct eth_rx_desc *p_rx_desc; | ||
3121 | unsigned int command_status; | ||
3122 | unsigned long flags; | ||
3123 | |||
3124 | /* Do not process Rx ring in case of Rx ring resource error */ | ||
3125 | if (mp->rx_resource_err) | ||
3126 | return ETH_QUEUE_FULL; | ||
3127 | |||
3128 | spin_lock_irqsave(&mp->lock, flags); | ||
3129 | |||
3130 | /* Get the Rx Desc ring 'curr' and 'used' indexes */ | ||
3131 | rx_curr_desc = mp->rx_curr_desc_q; | ||
3132 | rx_used_desc = mp->rx_used_desc_q; | ||
3133 | |||
3134 | p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc]; | ||
3135 | |||
3136 | /* Snapshot the descriptor's command/status field from memory */ | ||
3137 | command_status = p_rx_desc->cmd_sts; | ||
3138 | rmb(); | ||
3139 | |||
3140 | /* Nothing to receive... */ | ||
3141 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | ||
3142 | spin_unlock_irqrestore(&mp->lock, flags); | ||
3143 | return ETH_END_OF_JOB; | ||
3144 | } | ||
3145 | |||
3146 | p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; | ||
3147 | p_pkt_info->cmd_sts = command_status; | ||
3148 | p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET; | ||
3149 | p_pkt_info->return_info = mp->rx_skb[rx_curr_desc]; | ||
3150 | p_pkt_info->l4i_chk = p_rx_desc->buf_size; | ||
3151 | |||
3152 | /* | ||
3153 | * Clean the return info field to indicate that the | ||
3154 | * packet has been moved to the upper layers | ||
3155 | */ | ||
3156 | mp->rx_skb[rx_curr_desc] = NULL; | ||
3157 | |||
3158 | /* Update current index in data structure */ | ||
3159 | rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size; | ||
3160 | mp->rx_curr_desc_q = rx_next_curr_desc; | ||
3161 | |||
3162 | /* Rx descriptors exhausted. Set the Rx ring resource error flag */ | ||
3163 | if (rx_next_curr_desc == rx_used_desc) | ||
3164 | mp->rx_resource_err = 1; | ||
3165 | |||
3166 | spin_unlock_irqrestore(&mp->lock, flags); | ||
3167 | |||
3168 | return ETH_OK; | ||
3169 | } | ||
3170 | |||
3171 | /* | ||
3172 | * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring. | ||
3173 | * | ||
3174 | * DESCRIPTION: | ||
3175 | * This routine returns a Rx buffer back to the Rx ring. It retrieves the | ||
3176 | * next 'used' descriptor and attaches the returned buffer to it. | ||
3177 | * In case the Rx ring was in "resource error" condition, where there are | ||
3178 | * no available Rx resources, the function resets the resource error flag. | ||
3179 | * | ||
3180 | * INPUT: | ||
3181 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
3182 | * struct pkt_info *p_pkt_info Information on returned buffer. | ||
3183 | * | ||
3184 | * OUTPUT: | ||
3185 | * New available Rx resource in Rx descriptor ring. | ||
3186 | * | ||
3187 | * RETURN: | ||
3188 | * ETH_ERROR in case the routine cannot access the Rx desc ring. | ||
3189 | * ETH_OK otherwise. | ||
3190 | */ | ||
3191 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | ||
3192 | struct pkt_info *p_pkt_info) | ||
3193 | { | ||
3194 | int used_rx_desc; /* Where to return Rx resource */ | ||
3195 | volatile struct eth_rx_desc *p_used_rx_desc; | ||
3196 | unsigned long flags; | ||
3197 | |||
3198 | spin_lock_irqsave(&mp->lock, flags); | ||
3199 | |||
3200 | /* Get 'used' Rx descriptor */ | ||
3201 | used_rx_desc = mp->rx_used_desc_q; | ||
3202 | p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc]; | ||
3203 | |||
3204 | p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr; | ||
3205 | p_used_rx_desc->buf_size = p_pkt_info->byte_cnt; | ||
3206 | mp->rx_skb[used_rx_desc] = p_pkt_info->return_info; | ||
3207 | |||
3208 | /* Return the descriptor to DMA ownership; the write barriers | ||
3209 | * ensure the buffer pointer and size written above are visible | ||
3210 | * before ownership is handed to the DMA engine. */ | ||
3211 | wmb(); | ||
3212 | p_used_rx_desc->cmd_sts = | ||
3213 | ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT; | ||
3214 | wmb(); | ||
3215 | |||
3216 | /* Move the used descriptor pointer to the next descriptor */ | ||
3217 | mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size; | ||
3218 | |||
3219 | /* Any Rx return cancels the Rx resource error status */ | ||
3220 | mp->rx_resource_err = 0; | ||
3221 | |||
3222 | spin_unlock_irqrestore(&mp->lock, flags); | ||
3223 | |||
3224 | return ETH_OK; | ||
3225 | } | ||
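/*
 * Illustrative sketch only (not part of the driver): one way a caller
 * might pair eth_port_receive() with eth_rx_return_buff() to drain the
 * Rx ring.  Handing the skb up the stack and mapping a fresh
 * replacement buffer (which would repopulate buf_ptr, byte_cnt and
 * return_info before the descriptor is recycled) are elided here; the
 * driver's real receive path (see mv643xx_eth_receive_queue()) does
 * this elsewhere in this file.
 */
static void eth_rx_drain_sketch(struct mv643xx_private *mp)
{
	struct pkt_info pkt_info;

	while (eth_port_receive(mp, &pkt_info) == ETH_OK) {
		/*
		 * ... process pkt_info.return_info (the skb), then
		 * refill pkt_info with a freshly mapped buffer ...
		 */
		eth_rx_return_buff(mp, &pkt_info);
	}
}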
3226 | |||
3227 | /************* Begin ethtool support *************************/ | ||
3228 | |||
3229 | struct mv643xx_stats { | ||
3230 | char stat_string[ETH_GSTRING_LEN]; | ||
3231 | int sizeof_stat; | ||
3232 | int stat_offset; | ||
3233 | }; | ||
3234 | |||
3235 | #define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \ | ||
3236 | offsetof(struct mv643xx_private, m) | ||
3237 | |||
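/*
 * MV643XX_STAT(m) expands to two comma-separated values -- the field's
 * size and its byte offset within struct mv643xx_private -- filling the
 * sizeof_stat and stat_offset members of each initializer below.
 */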
3238 | static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | ||
3239 | { "rx_packets", MV643XX_STAT(stats.rx_packets) }, | ||
3240 | { "tx_packets", MV643XX_STAT(stats.tx_packets) }, | ||
3241 | { "rx_bytes", MV643XX_STAT(stats.rx_bytes) }, | ||
3242 | { "tx_bytes", MV643XX_STAT(stats.tx_bytes) }, | ||
3243 | { "rx_errors", MV643XX_STAT(stats.rx_errors) }, | ||
3244 | { "tx_errors", MV643XX_STAT(stats.tx_errors) }, | ||
3245 | { "rx_dropped", MV643XX_STAT(stats.rx_dropped) }, | ||
3246 | { "tx_dropped", MV643XX_STAT(stats.tx_dropped) }, | ||
3247 | { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) }, | ||
3248 | { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) }, | ||
3249 | { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) }, | ||
3250 | { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) }, | ||
3251 | { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) }, | ||
3252 | { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) }, | ||
3253 | { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) }, | ||
3254 | { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) }, | ||
3255 | { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) }, | ||
3256 | { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) }, | ||
3257 | { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) }, | ||
3258 | { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) }, | ||
3259 | { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) }, | ||
3260 | { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) }, | ||
3261 | { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) }, | ||
3262 | { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) }, | ||
3263 | { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) }, | ||
3264 | { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) }, | ||
3265 | { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) }, | ||
3266 | { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) }, | ||
3267 | { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) }, | ||
3268 | { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) }, | ||
3269 | { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) }, | ||
3270 | { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) }, | ||
3271 | { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) }, | ||
3272 | { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) }, | ||
3273 | { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) }, | ||
3274 | { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) }, | ||
3275 | { "collision", MV643XX_STAT(mib_counters.collision) }, | ||
3276 | { "late_collision", MV643XX_STAT(mib_counters.late_collision) }, | ||
3277 | }; | ||
3278 | |||
3279 | #define MV643XX_STATS_LEN ARRAY_SIZE(mv643xx_gstrings_stats) | ||
3280 | |||
3281 | static void mv643xx_get_drvinfo(struct net_device *netdev, | ||
3282 | struct ethtool_drvinfo *drvinfo) | ||
3283 | { | ||
3284 | strncpy(drvinfo->driver, mv643xx_driver_name, 32); | ||
3285 | strncpy(drvinfo->version, mv643xx_driver_version, 32); | ||
3286 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
3287 | strncpy(drvinfo->bus_info, "mv643xx", 32); | ||
3288 | drvinfo->n_stats = MV643XX_STATS_LEN; | ||
3289 | } | ||
3290 | |||
3291 | static int mv643xx_get_sset_count(struct net_device *netdev, int sset) | ||
3292 | { | ||
3293 | switch (sset) { | ||
3294 | case ETH_SS_STATS: | ||
3295 | return MV643XX_STATS_LEN; | ||
3296 | default: | ||
3297 | return -EOPNOTSUPP; | ||
3298 | } | ||
3299 | } | ||
3300 | |||
3301 | static void mv643xx_get_ethtool_stats(struct net_device *netdev, | ||
3302 | struct ethtool_stats *stats, uint64_t *data) | ||
3303 | { | ||
3304 | struct mv643xx_private *mp = netdev_priv(netdev); | ||
3305 | int i; | ||
3306 | |||
3307 | eth_update_mib_counters(mp); | ||
3308 | |||
3309 | for (i = 0; i < MV643XX_STATS_LEN; i++) { | ||
3310 | char *p = (char *)mp+mv643xx_gstrings_stats[i].stat_offset; | ||
3311 | data[i] = (mv643xx_gstrings_stats[i].sizeof_stat == | ||
3312 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | ||
3313 | } | ||
3314 | } | ||
3315 | |||
3316 | static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, | ||
3317 | uint8_t *data) | ||
3318 | { | ||
3319 | int i; | ||
3320 | |||
3321 | switch (stringset) { | ||
3322 | case ETH_SS_STATS: | ||
3323 | for (i = 0; i < MV643XX_STATS_LEN; i++) { | ||
3324 | memcpy(data + i * ETH_GSTRING_LEN, | ||
3325 | mv643xx_gstrings_stats[i].stat_string, | ||
3326 | ETH_GSTRING_LEN); | ||
3327 | } | ||
3328 | break; | ||
3329 | } | ||
3330 | } | ||
3331 | |||
3332 | static u32 mv643xx_eth_get_link(struct net_device *dev) | ||
3333 | { | ||
3334 | struct mv643xx_private *mp = netdev_priv(dev); | ||
3335 | |||
3336 | return mii_link_ok(&mp->mii); | ||
3337 | } | ||
3338 | |||
3339 | static int mv643xx_eth_nway_restart(struct net_device *dev) | ||
3340 | { | ||
3341 | struct mv643xx_private *mp = netdev_priv(dev); | ||
3342 | |||
3343 | return mii_nway_restart(&mp->mii); | ||
3344 | } | ||
3345 | |||
3346 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
3347 | { | ||
3348 | struct mv643xx_private *mp = netdev_priv(dev); | ||
3349 | |||
3350 | return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); | ||
3351 | } | ||
3352 | |||
3353 | static const struct ethtool_ops mv643xx_ethtool_ops = { | ||
3354 | .get_settings = mv643xx_get_settings, | ||
3355 | .set_settings = mv643xx_set_settings, | ||
3356 | .get_drvinfo = mv643xx_get_drvinfo, | ||
3357 | .get_link = mv643xx_eth_get_link, | ||
3358 | .set_sg = ethtool_op_set_sg, | ||
3359 | .get_sset_count = mv643xx_get_sset_count, | ||
3360 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
3361 | .get_strings = mv643xx_get_strings, | ||
3362 | .nway_reset = mv643xx_eth_nway_restart, | ||
3363 | }; | ||
3364 | |||
3365 | /************* End ethtool support *************************/ | ||