 drivers/net/mv643xx_eth.c   | 949 +++++++++++++++++++++++---------------------
 include/linux/mv643xx_eth.h |  59 ++--
 2 files changed, 479 insertions(+), 529 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index ff6460124307..cf18419f96ab 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -67,8 +67,6 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define MAX_DESCS_PER_SKB	1
 #endif
 
-#define ETH_HW_IP_ALIGN		2
-
 /*
  * Registers shared between all ports.
  */
@@ -158,12 +156,6 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define DEFAULT_RX_QUEUE_SIZE	400
 #define DEFAULT_TX_QUEUE_SIZE	800
 
-/* SMI reg */
-#define SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read */
-#define SMI_READ_VALID		0x08000000	/* 0 - Write, 1 - Read */
-#define SMI_OPCODE_WRITE	0		/* Completion of Read */
-#define SMI_OPCODE_READ		0x04000000	/* Operation is in progress */
-
 
 /*
  * RX/TX descriptors.
@@ -231,13 +223,24 @@ struct tx_desc {
 
 /* global *******************************************************************/
 struct mv643xx_eth_shared_private {
+	/*
+	 * Ethernet controller base address.
+	 */
 	void __iomem *base;
 
-	/* used to protect SMI_REG, which is shared across ports */
+	/*
+	 * Protects access to SMI_REG, which is shared between ports.
+	 */
 	spinlock_t phy_lock;
 
+	/*
+	 * Per-port MBUS window access register value.
+	 */
 	u32 win_protect;
 
+	/*
+	 * Hardware-specific parameters.
+	 */
 	unsigned int t_clk;
 };
 
@@ -306,16 +309,17 @@ struct tx_queue {
 
 struct mv643xx_eth_private {
 	struct mv643xx_eth_shared_private *shared;
-	int port_num;			/* User Ethernet port number */
+	int port_num;
 
-	struct mv643xx_eth_shared_private *shared_smi;
+	struct net_device *dev;
 
-	struct work_struct tx_timeout_task;
+	struct mv643xx_eth_shared_private *shared_smi;
+	int phy_addr;
 
-	struct net_device *dev;
-	struct mib_counters mib_counters;
 	spinlock_t lock;
 
+	struct mib_counters mib_counters;
+	struct work_struct tx_timeout_task;
 	struct mii_if_info mii;
 
 	/*
@@ -450,7 +454,12 @@ static void rxq_refill(struct rx_queue *rxq)
 			RX_ENABLE_INTERRUPT;
 		wmb();
 
-		skb_reserve(skb, ETH_HW_IP_ALIGN);
+		/*
+		 * The hardware automatically prepends 2 bytes of
+		 * dummy data to each received packet, so that the
+		 * IP header ends up 16-byte aligned.
+		 */
+		skb_reserve(skb, 2);
 	}
 
 	if (rxq->rx_desc_count == 0) {
@@ -474,9 +483,9 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 
 	rx = 0;
 	while (rx < budget) {
-		struct sk_buff *skb;
-		volatile struct rx_desc *rx_desc;
+		struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
+		struct sk_buff *skb;
 		unsigned long flags;
 
 		spin_lock_irqsave(&mp->lock, flags);
@@ -497,34 +506,40 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 
 		spin_unlock_irqrestore(&mp->lock, flags);
 
-		dma_unmap_single(NULL, rx_desc->buf_ptr + ETH_HW_IP_ALIGN,
+		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
 				 mp->dev->mtu + 24, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
 
 		/*
 		 * Update statistics.
-		 * Note byte count includes 4 byte CRC count
+		 *
+		 * Note that the descriptor byte count includes 2 dummy
+		 * bytes automatically inserted by the hardware at the
+		 * start of the packet (which we don't count), and a 4
+		 * byte CRC at the end of the packet (which we do count).
 		 */
 		stats->rx_packets++;
-		stats->rx_bytes += rx_desc->byte_cnt - ETH_HW_IP_ALIGN;
+		stats->rx_bytes += rx_desc->byte_cnt - 2;
 
 		/*
-		 * In case received a packet without first / last bits on OR
-		 * the error summary bit is on, the packets needs to be dropeed.
+		 * In case we received a packet without first / last bits
+		 * on, or the error summary bit is set, the packet needs
+		 * to be dropped.
 		 */
 		if (((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
 					(RX_FIRST_DESC | RX_LAST_DESC))
 				|| (cmd_sts & ERROR_SUMMARY)) {
 			stats->rx_dropped++;
+
 			if ((cmd_sts & (RX_FIRST_DESC | RX_LAST_DESC)) !=
 				(RX_FIRST_DESC | RX_LAST_DESC)) {
 				if (net_ratelimit())
-					printk(KERN_ERR
-						"%s: Received packet spread "
-						"on multiple descriptors\n",
-						mp->dev->name);
+					dev_printk(KERN_ERR, &mp->dev->dev,
+						   "received packet spanning "
+						   "multiple descriptors\n");
 			}
+
 			if (cmd_sts & ERROR_SUMMARY)
 				stats->rx_errors++;
 
@@ -534,7 +549,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 			 * The -4 is for the CRC in the trailer of the
 			 * received packet
 			 */
-			skb_put(skb, rx_desc->byte_cnt - ETH_HW_IP_ALIGN - 4);
+			skb_put(skb, rx_desc->byte_cnt - 2 - 4);
 
 			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -548,8 +563,10 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 			netif_rx(skb);
 #endif
 		}
+
 		mp->dev->last_rx = jiffies;
 	}
+
 	rxq_refill(rxq);
 
 	return rx;
@@ -716,7 +733,7 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	txq->tx_desc_count += nr_frags + 1;
 }
 
-static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
@@ -727,8 +744,9 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
-		printk(KERN_DEBUG "%s: failed to linearize tiny "
-				"unaligned fragment\n", dev->name);
+		dev_printk(KERN_DEBUG, &dev->dev,
+			   "failed to linearize skb with tiny "
+			   "unaligned fragment\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -758,13 +776,15 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 
 /* mii management interface *************************************************/
-static int phy_addr_get(struct mv643xx_eth_private *mp);
+#define SMI_BUSY		0x10000000
+#define SMI_READ_VALID		0x08000000
+#define SMI_OPCODE_READ		0x04000000
+#define SMI_OPCODE_WRITE	0x00000000
 
-static void read_smi_reg(struct mv643xx_eth_private *mp,
-				unsigned int phy_reg, unsigned int *value)
+static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
+			 unsigned int reg, unsigned int *value)
 {
 	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	int phy_addr = phy_addr_get(mp);
 	unsigned long flags;
 	int i;
 
@@ -780,7 +800,7 @@ static void read_smi_reg(struct mv643xx_eth_private *mp,
 		udelay(10);
 	}
 
-	writel((phy_addr << 16) | (phy_reg << 21) | SMI_OPCODE_READ, smi_reg);
+	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
 
 	/* now wait for the data to be valid */
 	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
@@ -796,11 +816,11 @@ out:
 	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
 }
 
-static void write_smi_reg(struct mv643xx_eth_private *mp,
-				unsigned int phy_reg, unsigned int value)
+static void smi_reg_write(struct mv643xx_eth_private *mp,
+			  unsigned int addr,
+			  unsigned int reg, unsigned int value)
 {
 	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	int phy_addr = phy_addr_get(mp);
 	unsigned long flags;
 	int i;
 
@@ -816,65 +836,63 @@ static void write_smi_reg(struct mv643xx_eth_private *mp,
 		udelay(10);
 	}
 
-	writel((phy_addr << 16) | (phy_reg << 21) |
-		SMI_OPCODE_WRITE | (value & 0xffff), smi_reg);
+	writel(SMI_OPCODE_WRITE | (reg << 21) |
+		(addr << 16) | (value & 0xffff), smi_reg);
 out:
 	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
 }
 
 
 /* mib counters *************************************************************/
-static void clear_mib_counters(struct mv643xx_eth_private *mp)
+static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
-	unsigned int port_num = mp->port_num;
-	int i;
-
-	/* Perform dummy reads from MIB counters */
-	for (i = 0; i < 0x80; i += 4)
-		rdl(mp, MIB_COUNTERS(port_num) + i);
+	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
 }
 
-static inline u32 read_mib(struct mv643xx_eth_private *mp, int offset)
+static void mib_counters_clear(struct mv643xx_eth_private *mp)
 {
-	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
+	int i;
+
+	for (i = 0; i < 0x80; i += 4)
+		mib_read(mp, i);
 }
 
-static void update_mib_counters(struct mv643xx_eth_private *mp)
+static void mib_counters_update(struct mv643xx_eth_private *mp)
 {
 	struct mib_counters *p = &mp->mib_counters;
 
-	p->good_octets_received += read_mib(mp, 0x00);
-	p->good_octets_received += (u64)read_mib(mp, 0x04) << 32;
-	p->bad_octets_received += read_mib(mp, 0x08);
-	p->internal_mac_transmit_err += read_mib(mp, 0x0c);
-	p->good_frames_received += read_mib(mp, 0x10);
-	p->bad_frames_received += read_mib(mp, 0x14);
-	p->broadcast_frames_received += read_mib(mp, 0x18);
-	p->multicast_frames_received += read_mib(mp, 0x1c);
-	p->frames_64_octets += read_mib(mp, 0x20);
-	p->frames_65_to_127_octets += read_mib(mp, 0x24);
-	p->frames_128_to_255_octets += read_mib(mp, 0x28);
-	p->frames_256_to_511_octets += read_mib(mp, 0x2c);
-	p->frames_512_to_1023_octets += read_mib(mp, 0x30);
-	p->frames_1024_to_max_octets += read_mib(mp, 0x34);
-	p->good_octets_sent += read_mib(mp, 0x38);
-	p->good_octets_sent += (u64)read_mib(mp, 0x3c) << 32;
-	p->good_frames_sent += read_mib(mp, 0x40);
-	p->excessive_collision += read_mib(mp, 0x44);
-	p->multicast_frames_sent += read_mib(mp, 0x48);
-	p->broadcast_frames_sent += read_mib(mp, 0x4c);
-	p->unrec_mac_control_received += read_mib(mp, 0x50);
-	p->fc_sent += read_mib(mp, 0x54);
-	p->good_fc_received += read_mib(mp, 0x58);
-	p->bad_fc_received += read_mib(mp, 0x5c);
-	p->undersize_received += read_mib(mp, 0x60);
-	p->fragments_received += read_mib(mp, 0x64);
-	p->oversize_received += read_mib(mp, 0x68);
-	p->jabber_received += read_mib(mp, 0x6c);
-	p->mac_receive_error += read_mib(mp, 0x70);
-	p->bad_crc_event += read_mib(mp, 0x74);
-	p->collision += read_mib(mp, 0x78);
-	p->late_collision += read_mib(mp, 0x7c);
+	p->good_octets_received += mib_read(mp, 0x00);
+	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
+	p->bad_octets_received += mib_read(mp, 0x08);
+	p->internal_mac_transmit_err += mib_read(mp, 0x0c);
+	p->good_frames_received += mib_read(mp, 0x10);
+	p->bad_frames_received += mib_read(mp, 0x14);
+	p->broadcast_frames_received += mib_read(mp, 0x18);
+	p->multicast_frames_received += mib_read(mp, 0x1c);
+	p->frames_64_octets += mib_read(mp, 0x20);
+	p->frames_65_to_127_octets += mib_read(mp, 0x24);
+	p->frames_128_to_255_octets += mib_read(mp, 0x28);
+	p->frames_256_to_511_octets += mib_read(mp, 0x2c);
+	p->frames_512_to_1023_octets += mib_read(mp, 0x30);
+	p->frames_1024_to_max_octets += mib_read(mp, 0x34);
+	p->good_octets_sent += mib_read(mp, 0x38);
+	p->good_octets_sent += (u64)mib_read(mp, 0x3c) << 32;
+	p->good_frames_sent += mib_read(mp, 0x40);
+	p->excessive_collision += mib_read(mp, 0x44);
+	p->multicast_frames_sent += mib_read(mp, 0x48);
+	p->broadcast_frames_sent += mib_read(mp, 0x4c);
+	p->unrec_mac_control_received += mib_read(mp, 0x50);
+	p->fc_sent += mib_read(mp, 0x54);
+	p->good_fc_received += mib_read(mp, 0x58);
+	p->bad_fc_received += mib_read(mp, 0x5c);
+	p->undersize_received += mib_read(mp, 0x60);
+	p->fragments_received += mib_read(mp, 0x64);
+	p->oversize_received += mib_read(mp, 0x68);
+	p->jabber_received += mib_read(mp, 0x6c);
+	p->mac_receive_error += mib_read(mp, 0x70);
+	p->bad_crc_event += mib_read(mp, 0x74);
+	p->collision += mib_read(mp, 0x78);
+	p->late_collision += mib_read(mp, 0x7c);
 }
 
 
@@ -944,7 +962,9 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
 	err = mii_ethtool_gset(&mp->mii, cmd);
 	spin_unlock_irq(&mp->lock);
 
-	/* The PHY may support 1000baseT_Half, but the mv643xx does not */
+	/*
+	 * The MAC does not support 1000baseT_Half.
+	 */
 	cmd->supported &= ~SUPPORTED_1000baseT_Half;
 	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
 
@@ -956,6 +976,11 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
 
+	/*
+	 * The MAC does not support 1000baseT_Half.
+	 */
+	cmd->advertising &= ~ADVERTISED_1000baseT_Half;
+
 	spin_lock_irq(&mp->lock);
 	err = mii_ethtool_sset(&mp->mii, cmd);
 	spin_unlock_irq(&mp->lock);
@@ -963,17 +988,17 @@ static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *
 	return err;
 }
 
-static void mv643xx_eth_get_drvinfo(struct net_device *netdev,
+static void mv643xx_eth_get_drvinfo(struct net_device *dev,
 				    struct ethtool_drvinfo *drvinfo)
 {
 	strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
 	strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
 	strncpy(drvinfo->fw_version, "N/A", 32);
-	strncpy(drvinfo->bus_info, "mv643xx", 32);
+	strncpy(drvinfo->bus_info, "platform", 32);
 	drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
 }
 
-static int mv643xx_eth_nway_restart(struct net_device *dev)
+static int mv643xx_eth_nway_reset(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
@@ -987,29 +1012,28 @@ static u32 mv643xx_eth_get_link(struct net_device *dev)
 	return mii_link_ok(&mp->mii);
 }
 
-static void mv643xx_eth_get_strings(struct net_device *netdev, uint32_t stringset,
-				    uint8_t *data)
+static void mv643xx_eth_get_strings(struct net_device *dev,
+				    uint32_t stringset, uint8_t *data)
 {
 	int i;
 
-	switch(stringset) {
-	case ETH_SS_STATS:
-		for (i=0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
+	if (stringset == ETH_SS_STATS) {
+		for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
 			memcpy(data + i * ETH_GSTRING_LEN,
 				mv643xx_eth_stats[i].stat_string,
 				ETH_GSTRING_LEN);
 		}
-		break;
 	}
 }
 
-static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
-		struct ethtool_stats *stats, uint64_t *data)
+static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
+					  struct ethtool_stats *stats,
+					  uint64_t *data)
 {
-	struct mv643xx_eth_private *mp = netdev->priv;
+	struct mv643xx_eth_private *mp = dev->priv;
 	int i;
 
-	update_mib_counters(mp);
+	mib_counters_update(mp);
 
 	for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
 		const struct mv643xx_eth_stats *stat;
@@ -1027,38 +1051,35 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *netdev,
 	}
 }
 
-static int mv643xx_eth_get_sset_count(struct net_device *netdev, int sset)
+static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
 {
-	switch (sset) {
-	case ETH_SS_STATS:
+	if (sset == ETH_SS_STATS)
 		return ARRAY_SIZE(mv643xx_eth_stats);
-	default:
-		return -EOPNOTSUPP;
-	}
+
+	return -EOPNOTSUPP;
 }
 
 static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
 	.get_settings		= mv643xx_eth_get_settings,
 	.set_settings		= mv643xx_eth_set_settings,
 	.get_drvinfo		= mv643xx_eth_get_drvinfo,
-	.get_link		= mv643xx_eth_get_link,
+	.nway_reset		= mv643xx_eth_nway_reset,
+	.get_link		= mv643xx_eth_get_link,
 	.set_sg			= ethtool_op_set_sg,
+	.get_strings		= mv643xx_eth_get_strings,
+	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
 	.get_sset_count		= mv643xx_eth_get_sset_count,
-	.get_ethtool_stats	= mv643xx_eth_get_ethtool_stats,
-	.get_strings		= mv643xx_eth_get_strings,
-	.nway_reset		= mv643xx_eth_nway_restart,
 };
 
 
 /* address handling *********************************************************/
 static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
 {
-	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
-	mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
+	mac_h = rdl(mp, MAC_ADDR_HIGH(mp->port_num));
+	mac_l = rdl(mp, MAC_ADDR_LOW(mp->port_num));
 
 	addr[0] = (mac_h >> 24) & 0xff;
 	addr[1] = (mac_h >> 16) & 0xff;
@@ -1070,72 +1091,54 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
 
 static void init_mac_tables(struct mv643xx_eth_private *mp)
 {
-	unsigned int port_num = mp->port_num;
-	int table_index;
-
-	/* Clear DA filter unicast table (Ex_dFUT) */
-	for (table_index = 0; table_index <= 0xC; table_index += 4)
-		wrl(mp, UNICAST_TABLE(port_num) + table_index, 0);
+	int i;
 
-	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
-		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
-		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
+	for (i = 0; i < 0x100; i += 4) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
 	}
+
+	for (i = 0; i < 0x10; i += 4)
+		wrl(mp, UNICAST_TABLE(mp->port_num) + i, 0);
 }
 
 static void set_filter_table_entry(struct mv643xx_eth_private *mp,
 				   int table, unsigned char entry)
 {
 	unsigned int table_reg;
-	unsigned int tbl_offset;
-	unsigned int reg_offset;
-
-	tbl_offset = (entry / 4) * 4;	/* Register offset of DA table entry */
-	reg_offset = entry % 4;		/* Entry offset within the register */
 
 	/* Set "accepts frame bit" at specified table entry */
-	table_reg = rdl(mp, table + tbl_offset);
-	table_reg |= 0x01 << (8 * reg_offset);
-	wrl(mp, table + tbl_offset, table_reg);
+	table_reg = rdl(mp, table + (entry & 0xfc));
+	table_reg |= 0x01 << (8 * (entry & 3));
+	wrl(mp, table + (entry & 0xfc), table_reg);
 }
 
 static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
 {
-	unsigned int port_num = mp->port_num;
 	unsigned int mac_h;
 	unsigned int mac_l;
 	int table;
 
-	mac_l = (addr[4] << 8) | (addr[5]);
-	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
-							(addr[3] << 0);
+	mac_l = (addr[4] << 8) | addr[5];
+	mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
 
-	wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
-	wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
+	wrl(mp, MAC_ADDR_LOW(mp->port_num), mac_l);
+	wrl(mp, MAC_ADDR_HIGH(mp->port_num), mac_h);
 
-	/* Accept frames with this address */
-	table = UNICAST_TABLE(port_num);
+	table = UNICAST_TABLE(mp->port_num);
 	set_filter_table_entry(mp, table, addr[5] & 0x0f);
 }
 
-static void mv643xx_eth_update_mac_address(struct net_device *dev)
+static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
+	/* +2 is for the offset of the HW addr type */
+	memcpy(dev->dev_addr, addr + 2, 6);
+
 	init_mac_tables(mp);
 	uc_addr_set(mp, dev->dev_addr);
-}
-
-static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
-{
-	int i;
 
-	for (i = 0; i < 6; i++)
-		/* +2 is for the offset of the HW addr type */
-		dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
-	mv643xx_eth_update_mac_address(dev);
 	return 0;
 }
 
@@ -1157,95 +1160,53 @@ static int addr_crc(unsigned char *addr)
 	return crc;
 }
 
-static void mc_addr(struct mv643xx_eth_private *mp, unsigned char *addr)
-{
-	unsigned int port_num = mp->port_num;
-	int table;
-	int crc;
-
-	if ((addr[0] == 0x01) && (addr[1] == 0x00) &&
-	    (addr[2] == 0x5E) && (addr[3] == 0x00) && (addr[4] == 0x00)) {
-		table = SPECIAL_MCAST_TABLE(port_num);
-		set_filter_table_entry(mp, table, addr[5]);
-		return;
-	}
-
-	crc = addr_crc(addr);
-
-	table = OTHER_MCAST_TABLE(port_num);
-	set_filter_table_entry(mp, table, crc);
-}
-
-static void set_multicast_list(struct net_device *dev)
-{
-
-	struct dev_mc_list *mc_list;
-	int i;
-	int table_index;
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
-
-	/* If the device is in promiscuous mode or in all multicast mode,
-	 * we will fully populate both multicast tables with accept.
-	 * This is guaranteed to yield a match on all multicast addresses...
-	 */
-	if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
-		for (table_index = 0; table_index <= 0xFC; table_index += 4) {
-			/* Set all entries in DA filter special multicast
-			 * table (Ex_dFSMT)
-			 * Set for ETH_Q0 for now
-			 * Bits
-			 * 0	  Accept=1, Drop=0
-			 * 3-1  Queue ETH_Q0=0
-			 * 7-4  Reserved = 0;
-			 */
-			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0x01010101);
-
-			/* Set all entries in DA filter other multicast
-			 * table (Ex_dFOMT)
-			 * Set for ETH_Q0 for now
-			 * Bits
-			 * 0	  Accept=1, Drop=0
-			 * 3-1  Queue ETH_Q0=0
-			 * 7-4  Reserved = 0;
-			 */
-			wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0x01010101);
-		}
-		return;
-	}
-
-	/* We will clear out multicast tables every time we get the list.
-	 * Then add the entire new list...
-	 */
-	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
-		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		wrl(mp, SPECIAL_MCAST_TABLE(port_num) + table_index, 0);
-
-		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		wrl(mp, OTHER_MCAST_TABLE(port_num) + table_index, 0);
-	}
-
-	/* Get pointer to net_device multicast list and add each one... */
-	for (i = 0, mc_list = dev->mc_list;
-	     (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
-	     i++, mc_list = mc_list->next)
-		if (mc_list->dmi_addrlen == 6)
-			mc_addr(mp, mc_list->dmi_addr);
-}
-
 static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	u32 config_reg;
-
-	config_reg = rdl(mp, PORT_CONFIG(mp->port_num));
-	if (dev->flags & IFF_PROMISC)
-		config_reg |= UNICAST_PROMISCUOUS_MODE;
-	else
-		config_reg &= ~UNICAST_PROMISCUOUS_MODE;
-	wrl(mp, PORT_CONFIG(mp->port_num), config_reg);
-
-	set_multicast_list(dev);
+	u32 port_config;
+	struct dev_addr_list *addr;
+	int i;
+
+	port_config = rdl(mp, PORT_CONFIG(mp->port_num));
+	if (dev->flags & IFF_PROMISC)
+		port_config |= UNICAST_PROMISCUOUS_MODE;
+	else
+		port_config &= ~UNICAST_PROMISCUOUS_MODE;
+	wrl(mp, PORT_CONFIG(mp->port_num), port_config);
+
+	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+		int port_num = mp->port_num;
+		u32 accept = 0x01010101;
+
+		for (i = 0; i < 0x100; i += 4) {
+			wrl(mp, SPECIAL_MCAST_TABLE(port_num) + i, accept);
+			wrl(mp, OTHER_MCAST_TABLE(port_num) + i, accept);
+		}
+		return;
+	}
+
+	for (i = 0; i < 0x100; i += 4) {
+		wrl(mp, SPECIAL_MCAST_TABLE(mp->port_num) + i, 0);
+		wrl(mp, OTHER_MCAST_TABLE(mp->port_num) + i, 0);
+	}
+
+	for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
+		u8 *a = addr->da_addr;
+		int table;
+
+		if (addr->da_addrlen != 6)
+			continue;
+
+		if (memcmp(a, "\x01\x00\x5e\x00\x00", 5) == 0) {
+			table = SPECIAL_MCAST_TABLE(mp->port_num);
+			set_filter_table_entry(mp, table, a[5]);
+		} else {
+			int crc = addr_crc(a);
+
+			table = OTHER_MCAST_TABLE(mp->port_num);
+			set_filter_table_entry(mp, table, crc);
+		}
+	}
 }
 
 
@@ -1483,10 +1444,7 @@ static void txq_deinit(struct tx_queue *txq)
 
 
 /* netdev ops and related ***************************************************/
-static void port_reset(struct mv643xx_eth_private *mp);
-
-static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp,
-				    struct ethtool_cmd *ecmd)
+static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
 {
 	u32 pscr_o;
 	u32 pscr_n;
@@ -1499,15 +1457,15 @@ static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp,
 		   SET_FULL_DUPLEX_MODE |
 		   MAX_RX_PACKET_MASK);
 
-	if (ecmd->speed == SPEED_1000) {
+	if (speed == SPEED_1000) {
 		pscr_n |= SET_GMII_SPEED_TO_1000 | MAX_RX_PACKET_9700BYTE;
 	} else {
-		if (ecmd->speed == SPEED_100)
+		if (speed == SPEED_100)
 			pscr_n |= SET_MII_SPEED_TO_100;
 		pscr_n |= MAX_RX_PACKET_1522BYTE;
 	}
 
-	if (ecmd->duplex == DUPLEX_FULL)
+	if (duplex == DUPLEX_FULL)
 		pscr_n |= SET_FULL_DUPLEX_MODE;
 
 	if (pscr_n != pscr_o) {
@@ -1524,27 +1482,30 @@ static void mv643xx_eth_update_pscr(struct mv643xx_eth_private *mp,
 	}
 }
 
-static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
+static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	u32 int_cause, int_cause_ext = 0;
+	u32 int_cause;
+	u32 int_cause_ext;
 
-	/* Read interrupt cause registers */
 	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) & (INT_RX | INT_EXT);
+	if (int_cause == 0)
+		return IRQ_NONE;
+
+	int_cause_ext = 0;
 	if (int_cause & INT_EXT) {
 		int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
 				& (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
 		wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
 	}
 
-	/* PHY status changed */
-	if (int_cause_ext & (INT_EXT_LINK | INT_EXT_PHY)) {
+	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
 		if (mii_link_ok(&mp->mii)) {
 			struct ethtool_cmd cmd;
 
 			mii_ethtool_gset(&mp->mii, &cmd);
-			mv643xx_eth_update_pscr(mp, &cmd);
+			update_pscr(mp, cmd.speed, cmd.duplex);
 			txq_enable(mp->txq);
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
@@ -1558,10 +1519,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
-		/* schedule the NAPI poll routine to maintain port */
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
-
-		/* wait for previous write to complete */
 		rdl(mp, INT_MASK(mp->port_num));
 
 		netif_rx_schedule(dev, &mp->napi);
@@ -1570,40 +1528,31 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	if (int_cause & INT_RX)
 		rxq_process(mp->rxq, INT_MAX);
 #endif
+
 	if (int_cause_ext & INT_EXT_TX) {
 		txq_reclaim(mp->txq, 0);
 		__txq_maybe_wake(mp->txq);
 	}
 
-	/*
-	 * If no real interrupt occured, exit.
-	 * This can happen when using gigE interrupt coalescing mechanism.
-	 */
-	if ((int_cause == 0x0) && (int_cause_ext == 0x0))
-		return IRQ_NONE;
-
 	return IRQ_HANDLED;
 }
 
 static void phy_reset(struct mv643xx_eth_private *mp)
 {
-	unsigned int phy_reg_data;
+	unsigned int data;
 
-	/* Reset the PHY */
-	read_smi_reg(mp, 0, &phy_reg_data);
-	phy_reg_data |= 0x8000;	/* Set bit 15 to reset the PHY */
-	write_smi_reg(mp, 0, phy_reg_data);
+	smi_reg_read(mp, mp->phy_addr, 0, &data);
+	data |= 0x8000;
+	smi_reg_write(mp, mp->phy_addr, 0, data);
 
-	/* wait for PHY to come out of reset */
 	do {
 		udelay(1);
-		read_smi_reg(mp, 0, &phy_reg_data);
-	} while (phy_reg_data & 0x8000);
+		smi_reg_read(mp, mp->phy_addr, 0, &data);
+	} while (data & 0x8000);
 }
 
-static void port_start(struct net_device *dev)
+static void port_start(struct mv643xx_eth_private *mp)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	u32 pscr;
 	struct ethtool_cmd ethtool_cmd;
 	int i;
@@ -1625,9 +1574,9 @@ static void port_start(struct net_device *dev)
 
 	wrl(mp, SDMA_CONFIG(mp->port_num), PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
-	mv643xx_eth_get_settings(dev, &ethtool_cmd);
+	mv643xx_eth_get_settings(mp->dev, &ethtool_cmd);
 	phy_reset(mp);
-	mv643xx_eth_set_settings(dev, &ethtool_cmd);
+	mv643xx_eth_set_settings(mp->dev, &ethtool_cmd);
 
 	/*
 	 * Configure TX path and queues.
@@ -1643,8 +1592,10 @@ static void port_start(struct net_device *dev)
 		wrl(mp, off, addr);
 	}
 
-	/* Add the assigned Ethernet address to the port's address table */
-	uc_addr_set(mp, dev->dev_addr);
+	/*
+	 * Add configured unicast address to address filter table.
+	 */
+	uc_addr_set(mp, mp->dev->dev_addr);
 
 	/*
 	 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
@@ -1675,13 +1626,14 @@ static void port_start(struct net_device *dev)
 
 static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 {
-	unsigned int port_num = mp->port_num;
 	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
-	/* Set RX Coalescing mechanism */
-	wrl(mp, SDMA_CONFIG(port_num),
+	if (coal > 0x3fff)
+		coal = 0x3fff;
+
+	wrl(mp, SDMA_CONFIG(mp->port_num),
 		((coal & 0x3fff) << 8) |
-		(rdl(mp, SDMA_CONFIG(port_num))
+		(rdl(mp, SDMA_CONFIG(mp->port_num))
 			& 0xffc000ff));
 }
 
@@ -1689,68 +1641,59 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 {
 	unsigned int coal = ((mp->shared->t_clk / 1000000) * delay) / 64;
 
-	/* Set TX Coalescing mechanism */
-	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), coal << 4);
-}
-
-static void port_init(struct mv643xx_eth_private *mp)
-{
-	port_reset(mp);
-
-	init_mac_tables(mp);
+	if (coal > 0x3fff)
+		coal = 0x3fff;
+	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
 }
 
 static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
 	int err;
 
-	/* Clear any pending ethernet port interrupts */
-	wrl(mp, INT_CAUSE(port_num), 0);
-	wrl(mp, INT_CAUSE_EXT(port_num), 0);
-	/* wait for previous write to complete */
-	rdl(mp, INT_CAUSE_EXT(port_num));
+	wrl(mp, INT_CAUSE(mp->port_num), 0);
+	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
+	rdl(mp, INT_CAUSE_EXT(mp->port_num));
 
-	err = request_irq(dev->irq, mv643xx_eth_int_handler,
-			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
+	err = request_irq(dev->irq, mv643xx_eth_irq,
+			  IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+			  dev->name, dev);
 	if (err) {
-		printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
+		dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
 		return -EAGAIN;
 	}
 
-	port_init(mp);
+	init_mac_tables(mp);
 
 	err = rxq_init(mp);
 	if (err)
-		goto out_free_irq;
+		goto out;
 	rxq_refill(mp->rxq);
 
 	err = txq_init(mp);
 	if (err)
-		goto out_free_rx_skb;
+		goto out_free;
 
 #ifdef MV643XX_ETH_NAPI
 	napi_enable(&mp->napi);
 #endif
 
-	port_start(dev);
+	port_start(mp);
 
 	set_rx_coal(mp, 0);
 	set_tx_coal(mp, 0);
 
-	/* Unmask phy and link status changes interrupts */
-	wrl(mp, INT_MASK_EXT(port_num), INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
+	wrl(mp, INT_MASK_EXT(mp->port_num),
+	    INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
 
-	/* Unmask RX buffer and TX end interrupt */
-	wrl(mp, INT_MASK(port_num), INT_RX | INT_EXT);
+	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_EXT);
 
 	return 0;
 
 
-out_free_rx_skb:
+out_free:
 	rxq_deinit(mp->rxq);
-out_free_irq:
+out:
 	free_irq(dev->irq, dev);
 
 	return err;
@@ -1758,34 +1701,27 @@ out_free_irq:
 
 static void port_reset(struct mv643xx_eth_private *mp)
 {
-	unsigned int port_num = mp->port_num;
-	unsigned int reg_data;
+	unsigned int data;
 
 	txq_disable(mp->txq);
 	rxq_disable(mp->rxq);
 	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
 		udelay(10);
 
-	/* Clear all MIB counters */
-	clear_mib_counters(mp);
-
 	/* Reset the Enable bit in the Configuration Register */
-	reg_data = rdl(mp, PORT_SERIAL_CONTROL(port_num));
-	reg_data &= ~(SERIAL_PORT_ENABLE |
-			DO_NOT_FORCE_LINK_FAIL |
-			FORCE_LINK_PASS);
-	wrl(mp, PORT_SERIAL_CONTROL(port_num), reg_data);
+	data = rdl(mp, PORT_SERIAL_CONTROL(mp->port_num));
+	data &= ~(SERIAL_PORT_ENABLE |
+		  DO_NOT_FORCE_LINK_FAIL |
+		  FORCE_LINK_PASS);
+	wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), data);
 }
 
 static int mv643xx_eth_stop(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	unsigned int port_num = mp->port_num;
 
-	/* Mask all interrupts on ethernet port */
-	wrl(mp, INT_MASK(port_num), 0x00000000);
-	/* wait for previous write to complete */
-	rdl(mp, INT_MASK(port_num));
+	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
+	rdl(mp, INT_MASK(mp->port_num));
 
 #ifdef MV643XX_ETH_NAPI
 	napi_disable(&mp->napi);
@@ -1793,17 +1729,18 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	netif_carrier_off(dev);
 	netif_stop_queue(dev);
 
+	free_irq(dev->irq, dev);
+
 	port_reset(mp);
+	mib_counters_update(mp);
 
 	txq_deinit(mp->txq);
 	rxq_deinit(mp->rxq);
 
-	free_irq(dev->irq, dev);
-
 	return 0;
 }
 
-static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
@@ -1812,7 +1749,7 @@ static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int c
 
 static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if ((new_mtu > 9500) || (new_mtu < 64))
+	if (new_mtu < 64 || new_mtu > 9500)
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
@@ -1823,73 +1760,70 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 	 * Stop and then re-open the interface. This will allocate RX
 	 * skbs of the new MTU.
 	 * There is a possible danger that the open will not succeed,
-	 * due to memory being full, which might fail the open function.
+	 * due to memory being full.
 	 */
 	mv643xx_eth_stop(dev);
 	if (mv643xx_eth_open(dev)) {
-		printk(KERN_ERR "%s: Fatal error on opening device\n",
-			dev->name);
+		dev_printk(KERN_ERR, &dev->dev,
+			   "fatal error on re-opening device after "
+			   "MTU change\n");
 	}
 
 	return 0;
 }
 
-static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
+static void tx_timeout_task(struct work_struct *ugly)
 {
-	struct mv643xx_eth_private *mp = container_of(ugly, struct mv643xx_eth_private,
-						      tx_timeout_task);
-	struct net_device *dev = mp->dev;
-
-	if (!netif_running(dev))
-		return;
+	struct mv643xx_eth_private *mp;
 
-	netif_stop_queue(dev);
+	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
+	if (netif_running(mp->dev)) {
+		netif_stop_queue(mp->dev);
 
-	port_reset(mp);
-	port_start(dev);
+		port_reset(mp);
+		port_start(mp);
 
-	__txq_maybe_wake(mp->txq);
+		__txq_maybe_wake(mp->txq);
+	}
 }
 
 static void mv643xx_eth_tx_timeout(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	printk(KERN_INFO "%s: TX timeout ", dev->name);
+	dev_printk(KERN_INFO, &dev->dev, "tx timeout\n");
 
-	/* Do the reset outside of interrupt context */
 	schedule_work(&mp->tx_timeout_task);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void mv643xx_eth_netpoll(struct net_device *netdev)
+static void mv643xx_eth_netpoll(struct net_device *dev)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(netdev);
-	int port_num = mp->port_num;
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
 
-	wrl(mp, INT_MASK(port_num), 0x00000000);
-	/* wait for previous write to complete */
-	rdl(mp, INT_MASK(port_num));
+	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
+	rdl(mp, INT_MASK(mp->port_num));
 
-	mv643xx_eth_int_handler(netdev->irq, netdev);
+	mv643xx_eth_irq(dev->irq, dev);
 
-	wrl(mp, INT_MASK(port_num), INT_RX | INT_CAUSE_EXT);
+	wrl(mp, INT_MASK(mp->port_num), INT_RX | INT_CAUSE_EXT);
 }
 #endif
 
-static int mv643xx_eth_mdio_read(struct net_device *dev, int phy_id, int location)
+static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int val;
 
-	read_smi_reg(mp, location, &val);
+	smi_reg_read(mp, addr, reg, &val);
+
 	return val;
 }
 
-static void mv643xx_eth_mdio_write(struct net_device *dev, int phy_id, int location, int val)
+static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	write_smi_reg(mp, location, val);
+	smi_reg_write(mp, addr, reg, val);
 }
 
 
@@ -1956,9 +1890,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 		goto out_free;
 
 	spin_lock_init(&msp->phy_lock);
-	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
-
-	platform_set_drvdata(pdev, msp);
 
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
@@ -1966,6 +1897,13 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 	if (pd != NULL && pd->dram != NULL)
 		mv643xx_eth_conf_mbus_windows(msp, pd->dram);
 
+	/*
+	 * Detect hardware parameters.
+	 */
+	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+
+	platform_set_drvdata(pdev, msp);
+
 	return 0;
 
 out_free:
@@ -1985,104 +1923,158 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver mv643xx_eth_shared_driver = {
 	.probe = mv643xx_eth_shared_probe,
 	.remove = mv643xx_eth_shared_remove,
 	.driver = {
 		.name = MV643XX_ETH_SHARED_NAME,
 		.owner	= THIS_MODULE,
 	},
 };
 
 static void phy_addr_set(struct mv643xx_eth_private *mp, int phy_addr)
 {
-	u32 reg_data;
 	int addr_shift = 5 * mp->port_num;
+	u32 data;
 
-	reg_data = rdl(mp, PHY_ADDR);
-	reg_data &= ~(0x1f << addr_shift);
-	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	wrl(mp, PHY_ADDR, reg_data);
+	data = rdl(mp, PHY_ADDR);
+	data &= ~(0x1f << addr_shift);
+	data |= (phy_addr & 0x1f) << addr_shift;
+	wrl(mp, PHY_ADDR, data);
 }
 
 static int phy_addr_get(struct mv643xx_eth_private *mp)
 {
-	unsigned int reg_data;
+	unsigned int data;
+
+	data = rdl(mp, PHY_ADDR);
+
+	return (data >> (5 * mp->port_num)) & 0x1f;
+}
+
+static void set_params(struct mv643xx_eth_private *mp,
+		       struct mv643xx_eth_platform_data *pd)
+{
+	struct net_device *dev = mp->dev;
+
+	if (is_valid_ether_addr(pd->mac_addr))
+		memcpy(dev->dev_addr, pd->mac_addr, 6);
+	else
+		uc_addr_get(mp, dev->dev_addr);
+
+	if (pd->phy_addr == -1) {
+		mp->shared_smi = NULL;
+		mp->phy_addr = -1;
+	} else {
+		mp->shared_smi = mp->shared;
+		if (pd->shared_smi != NULL)
+			mp->shared_smi = platform_get_drvdata(pd->shared_smi);
+
+		if (pd->force_phy_addr || pd->phy_addr) {
+			mp->phy_addr = pd->phy_addr & 0x3f;
+			phy_addr_set(mp, mp->phy_addr);
+		} else {
+			mp->phy_addr = phy_addr_get(mp);
+		}
+	}
 
-	reg_data = rdl(mp, PHY_ADDR);
+	mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
+	if (pd->rx_queue_size)
+		mp->default_rx_ring_size = pd->rx_queue_size;
+	mp->rx_desc_sram_addr = pd->rx_sram_addr;
+	mp->rx_desc_sram_size = pd->rx_sram_size;
 
-	return ((reg_data >> (5 * mp->port_num)) & 0x1f);
+	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+	if (pd->tx_queue_size)
+		mp->default_tx_ring_size = pd->tx_queue_size;
+	mp->tx_desc_sram_addr = pd->tx_sram_addr;
+	mp->tx_desc_sram_size = pd->tx_sram_size;
 }
 
 static int phy_detect(struct mv643xx_eth_private *mp)
 {
-	unsigned int phy_reg_data0;
-	int auto_neg;
+	unsigned int data;
+	unsigned int data2;
+
+	smi_reg_read(mp, mp->phy_addr, 0, &data);
+	smi_reg_write(mp, mp->phy_addr, 0, data ^ 0x1000);
 
-	read_smi_reg(mp, 0, &phy_reg_data0);
-	auto_neg = phy_reg_data0 & 0x1000;
-	phy_reg_data0 ^= 0x1000;	/* invert auto_neg */
-	write_smi_reg(mp, 0, phy_reg_data0);
+	smi_reg_read(mp, mp->phy_addr, 0, &data2);
+	if (((data ^ data2) & 0x1000) == 0)
+		return -ENODEV;
 
-	read_smi_reg(mp, 0, &phy_reg_data0);
-	if ((phy_reg_data0 & 0x1000) == auto_neg)
-		return -ENODEV;	/* change didn't take */
+	smi_reg_write(mp, mp->phy_addr, 0, data);
 
-	phy_reg_data0 ^= 0x1000;
-	write_smi_reg(mp, 0, phy_reg_data0);
 	return 0;
 }
 
-static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address,
-				     int speed, int duplex,
-				     struct ethtool_cmd *cmd)
+static int phy_init(struct mv643xx_eth_private *mp,
+		    struct mv643xx_eth_platform_data *pd)
 {
-	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct ethtool_cmd cmd;
+	int err;
 
-	memset(cmd, 0, sizeof(*cmd));
+	err = phy_detect(mp);
+	if (err) {
+		dev_printk(KERN_INFO, &mp->dev->dev,
+			   "no PHY detected at addr %d\n", mp->phy_addr);
+		return err;
+	}
+	phy_reset(mp);
+
+	mp->mii.phy_id = mp->phy_addr;
+	mp->mii.phy_id_mask = 0x3f;
+	mp->mii.reg_num_mask = 0x1f;
+	mp->mii.dev = mp->dev;
+	mp->mii.mdio_read = mv643xx_eth_mdio_read;
+	mp->mii.mdio_write = mv643xx_eth_mdio_write;
 
-	cmd->port = PORT_MII;
-	cmd->transceiver = XCVR_INTERNAL;
-	cmd->phy_address = phy_address;
+	mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
 
-	if (speed == 0) {
-		cmd->autoneg = AUTONEG_ENABLE;
-		/* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */
-		cmd->speed = SPEED_100;
-		cmd->advertising = ADVERTISED_10baseT_Half |
-				   ADVERTISED_10baseT_Full |
-				   ADVERTISED_100baseT_Half |
-				   ADVERTISED_100baseT_Full;
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.port = PORT_MII;
+	cmd.transceiver = XCVR_INTERNAL;
+	cmd.phy_address = mp->phy_addr;
+	if (pd->speed == 0) {
+		cmd.autoneg = AUTONEG_ENABLE;
+		cmd.speed = SPEED_100;
+		cmd.advertising = ADVERTISED_10baseT_Half |
+				  ADVERTISED_10baseT_Full |
+				  ADVERTISED_100baseT_Half |
+				  ADVERTISED_100baseT_Full;
 		if (mp->mii.supports_gmii)
-			cmd->advertising |= ADVERTISED_1000baseT_Full;
+			cmd.advertising |= ADVERTISED_1000baseT_Full;
 	} else {
-		cmd->autoneg = AUTONEG_DISABLE;
-		cmd->speed = speed;
-		cmd->duplex = duplex;
+		cmd.autoneg = AUTONEG_DISABLE;
+		cmd.speed = pd->speed;
+		cmd.duplex = pd->duplex;
 	}
+
+	update_pscr(mp, cmd.speed, cmd.duplex);
+	mv643xx_eth_set_settings(mp->dev, &cmd);
+
+	return 0;
 }
 
 static int mv643xx_eth_probe(struct platform_device *pdev)
 {
 	struct mv643xx_eth_platform_data *pd;
-	int port_num;
 	struct mv643xx_eth_private *mp;
 	struct net_device *dev;
-	u8 *p;
 	struct resource *res;
-	int err;
-	struct ethtool_cmd cmd;
-	int duplex = DUPLEX_HALF;
-	int speed = 0;			/* default to auto-negotiation */
 	DECLARE_MAC_BUF(mac);
+	int err;
 
 	pd = pdev->dev.platform_data;
 	if (pd == NULL) {
-		printk(KERN_ERR "No mv643xx_eth_platform_data\n");
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "no mv643xx_eth_platform_data\n");
 		return -ENODEV;
 	}
 
 	if (pd->shared == NULL) {
-		printk(KERN_ERR "No mv643xx_eth_platform_data->shared\n");
+		dev_printk(KERN_ERR, &pdev->dev,
+			   "no mv643xx_eth_platform_data->shared\n");
 		return -ENODEV;
 	}
 
@@ -2090,145 +2082,80 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2090 if (!dev) 2082 if (!dev)
2091 return -ENOMEM; 2083 return -ENOMEM;
2092 2084
2093 platform_set_drvdata(pdev, dev);
2094
2095 mp = netdev_priv(dev); 2085 mp = netdev_priv(dev);
2086 platform_set_drvdata(pdev, mp);
2087
2088 mp->shared = platform_get_drvdata(pd->shared);
2089 mp->port_num = pd->port_number;
2090
2096 mp->dev = dev; 2091 mp->dev = dev;
2097#ifdef MV643XX_ETH_NAPI 2092#ifdef MV643XX_ETH_NAPI
2098 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64); 2093 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2099#endif 2094#endif
2100 2095
2096 set_params(mp, pd);
2097
2098 spin_lock_init(&mp->lock);
2099
2100 mib_counters_clear(mp);
2101 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2102
2103 err = phy_init(mp, pd);
2104 if (err)
2105 goto out;
2106 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2107
2108
2101 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2109 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2102 BUG_ON(!res); 2110 BUG_ON(!res);
2103 dev->irq = res->start; 2111 dev->irq = res->start;
2104 2112
2113 dev->hard_start_xmit = mv643xx_eth_xmit;
2105 dev->open = mv643xx_eth_open; 2114 dev->open = mv643xx_eth_open;
2106 dev->stop = mv643xx_eth_stop; 2115 dev->stop = mv643xx_eth_stop;
2107 dev->hard_start_xmit = mv643xx_eth_start_xmit;
2108 dev->set_mac_address = mv643xx_eth_set_mac_address;
2109 dev->set_multicast_list = mv643xx_eth_set_rx_mode; 2116 dev->set_multicast_list = mv643xx_eth_set_rx_mode;
2110 2117 dev->set_mac_address = mv643xx_eth_set_mac_address;
2111 /* No need to Tx Timeout */ 2118 dev->do_ioctl = mv643xx_eth_ioctl;
2119 dev->change_mtu = mv643xx_eth_change_mtu;
2112 dev->tx_timeout = mv643xx_eth_tx_timeout; 2120 dev->tx_timeout = mv643xx_eth_tx_timeout;
2113
2114#ifdef CONFIG_NET_POLL_CONTROLLER 2121#ifdef CONFIG_NET_POLL_CONTROLLER
2115 dev->poll_controller = mv643xx_eth_netpoll; 2122 dev->poll_controller = mv643xx_eth_netpoll;
2116#endif 2123#endif
2117
2118 dev->watchdog_timeo = 2 * HZ; 2124 dev->watchdog_timeo = 2 * HZ;
2119 dev->base_addr = 0; 2125 dev->base_addr = 0;
2120 dev->change_mtu = mv643xx_eth_change_mtu;
2121 dev->do_ioctl = mv643xx_eth_do_ioctl;
2122 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2123 2126
2124#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX 2127#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2125#ifdef MAX_SKB_FRAGS
2126 /* 2128 /*
2127 * Zero copy can only work if we use Discovery II memory. Otherwise, we 2129 * Zero copy can only work if we use Discovery II memory. Otherwise, we
2128 * would have to map the buffers to ISA memory, which is only 16 MB. 2130 * would have to map the buffers to ISA memory, which is only 16 MB.
2129 */ 2131 */
2130 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2132 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2131#endif 2133#endif
2132#endif
2133 2134
2134 /* Configure the timeout task */ 2135 SET_NETDEV_DEV(dev, &pdev->dev);
2135 INIT_WORK(&mp->tx_timeout_task, mv643xx_eth_tx_timeout_task);
2136
2137 spin_lock_init(&mp->lock);
2138
2139 mp->shared = platform_get_drvdata(pd->shared);
2140 port_num = mp->port_num = pd->port_number;
2141 2136
2142 if (mp->shared->win_protect) 2137 if (mp->shared->win_protect)
2143 wrl(mp, WINDOW_PROTECT(port_num), mp->shared->win_protect); 2138 wrl(mp, WINDOW_PROTECT(mp->port_num), mp->shared->win_protect);
2144
2145 mp->shared_smi = mp->shared;
2146 if (pd->shared_smi != NULL)
2147 mp->shared_smi = platform_get_drvdata(pd->shared_smi);
2148
2149 /* set default config values */
2150 uc_addr_get(mp, dev->dev_addr);
2151
2152 if (is_valid_ether_addr(pd->mac_addr))
2153 memcpy(dev->dev_addr, pd->mac_addr, 6);
2154
2155 if (pd->phy_addr || pd->force_phy_addr)
2156 phy_addr_set(mp, pd->phy_addr);
2157
2158 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2159 if (pd->rx_queue_size)
2160 mp->default_rx_ring_size = pd->rx_queue_size;
2161
2162 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2163 if (pd->tx_queue_size)
2164 mp->default_tx_ring_size = pd->tx_queue_size;
2165
2166 if (pd->tx_sram_size) {
2167 mp->tx_desc_sram_size = pd->tx_sram_size;
2168 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2169 }
2170
2171 if (pd->rx_sram_size) {
2172 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2173 mp->rx_desc_sram_size = pd->rx_sram_size;
2174 }
2175
2176 duplex = pd->duplex;
2177 speed = pd->speed;
2178 2139
2179 /* Hook up MII support for ethtool */
2180 mp->mii.dev = dev;
2181 mp->mii.mdio_read = mv643xx_eth_mdio_read;
2182 mp->mii.mdio_write = mv643xx_eth_mdio_write;
2183 mp->mii.phy_id = phy_addr_get(mp);
2184 mp->mii.phy_id_mask = 0x3f;
2185 mp->mii.reg_num_mask = 0x1f;
2186
2187 err = phy_detect(mp);
2188 if (err) {
2189 pr_debug("%s: No PHY detected at addr %d\n",
2190 dev->name, phy_addr_get(mp));
2191 goto out;
2192 }
2193
2194 phy_reset(mp);
2195 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2196 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
2197 mv643xx_eth_update_pscr(mp, &cmd);
2198 mv643xx_eth_set_settings(dev, &cmd);
2199
2200 SET_NETDEV_DEV(dev, &pdev->dev);
2201 err = register_netdev(dev); 2140 err = register_netdev(dev);
2202 if (err) 2141 if (err)
2203 goto out; 2142 goto out;
2204 2143
2205 p = dev->dev_addr; 2144 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
2206 printk(KERN_NOTICE 2145 mp->port_num, print_mac(mac, dev->dev_addr));
2207 "%s: port %d with MAC address %s\n",
2208 dev->name, port_num, print_mac(mac, p));
2209 2146
2210 if (dev->features & NETIF_F_SG) 2147 if (dev->features & NETIF_F_SG)
2211 printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name); 2148 dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
2212 2149
2213 if (dev->features & NETIF_F_IP_CSUM) 2150 if (dev->features & NETIF_F_IP_CSUM)
2214 printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n", 2151 dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
2215 dev->name);
2216
2217#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2218 printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name);
2219#endif
2220
2221#ifdef MV643XX_ETH_COAL
2222 printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON \n",
2223 dev->name);
2224#endif
2225 2152
2226#ifdef MV643XX_ETH_NAPI 2153#ifdef MV643XX_ETH_NAPI
2227 printk(KERN_NOTICE "%s: RX NAPI Enabled \n", dev->name); 2154 dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
2228#endif 2155#endif
2229 2156
2230 if (mp->tx_desc_sram_size > 0) 2157 if (mp->tx_desc_sram_size > 0)
2231 printk(KERN_NOTICE "%s: Using SRAM\n", dev->name); 2158 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
2232 2159
2233 return 0; 2160 return 0;
2234 2161
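
The probe routine above takes its interrupt line from the platform device's
IORESOURCE_IRQ resource and everything else from dev.platform_data. As a
rough sketch (not part of this patch), a board file might register the
per-port device as follows; the IRQ number and the eth_port0_pd structure
are hypothetical, board-specific values:

	#include <linux/mv643xx_eth.h>
	#include <linux/platform_device.h>

	/* Filled in as in the platform data example after the header diff. */
	static struct mv643xx_eth_platform_data eth_port0_pd;

	static struct resource eth_port0_resources[] = {
		{
			.start	= 32,	/* hypothetical board IRQ */
			.end	= 32,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device eth_port0_device = {
		.name		= MV643XX_ETH_NAME,	/* "mv643xx_eth" */
		.id		= 0,
		.num_resources	= ARRAY_SIZE(eth_port0_resources),
		.resource	= eth_port0_resources,
		.dev		= {
			.platform_data	= &eth_port0_pd,
		},
	};
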
@@ -2240,35 +2167,35 @@ out:
2240 2167
2241static int mv643xx_eth_remove(struct platform_device *pdev) 2168static int mv643xx_eth_remove(struct platform_device *pdev)
2242{ 2169{
2243 struct net_device *dev = platform_get_drvdata(pdev); 2170 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2244 2171
2245 unregister_netdev(dev); 2172 unregister_netdev(mp->dev);
2246 flush_scheduled_work(); 2173 flush_scheduled_work();
2174 free_netdev(mp->dev);
2247 2175
2248 free_netdev(dev);
2249 platform_set_drvdata(pdev, NULL); 2176 platform_set_drvdata(pdev, NULL);
2177
2250 return 0; 2178 return 0;
2251} 2179}
2252 2180
2253static void mv643xx_eth_shutdown(struct platform_device *pdev) 2181static void mv643xx_eth_shutdown(struct platform_device *pdev)
2254{ 2182{
2255 struct net_device *dev = platform_get_drvdata(pdev); 2183 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2256 struct mv643xx_eth_private *mp = netdev_priv(dev);
2257 unsigned int port_num = mp->port_num;
2258 2184
2259 /* Mask all interrupts on the ethernet port */ 2185 /* Mask all interrupts on the ethernet port */
2260 wrl(mp, INT_MASK(port_num), 0); 2186 wrl(mp, INT_MASK(mp->port_num), 0);
2261 rdl(mp, INT_MASK(port_num)); 2187 rdl(mp, INT_MASK(mp->port_num));
2262 2188
2263 port_reset(mp); 2189 if (netif_running(mp->dev))
2190 port_reset(mp);
2264} 2191}
2265 2192
2266static struct platform_driver mv643xx_eth_driver = { 2193static struct platform_driver mv643xx_eth_driver = {
2267 .probe = mv643xx_eth_probe, 2194 .probe = mv643xx_eth_probe,
2268 .remove = mv643xx_eth_remove, 2195 .remove = mv643xx_eth_remove,
2269 .shutdown = mv643xx_eth_shutdown, 2196 .shutdown = mv643xx_eth_shutdown,
2270 .driver = { 2197 .driver = {
2271 .name = MV643XX_ETH_NAME, 2198 .name = MV643XX_ETH_NAME,
2272 .owner = THIS_MODULE, 2199 .owner = THIS_MODULE,
2273 }, 2200 },
2274}; 2201};
@@ -2283,21 +2210,21 @@ static int __init mv643xx_eth_init_module(void)
2283 if (rc) 2210 if (rc)
2284 platform_driver_unregister(&mv643xx_eth_shared_driver); 2211 platform_driver_unregister(&mv643xx_eth_shared_driver);
2285 } 2212 }
2213
2286 return rc; 2214 return rc;
2287} 2215}
2216module_init(mv643xx_eth_init_module);
2288 2217
2289static void __exit mv643xx_eth_cleanup_module(void) 2218static void __exit mv643xx_eth_cleanup_module(void)
2290{ 2219{
2291 platform_driver_unregister(&mv643xx_eth_driver); 2220 platform_driver_unregister(&mv643xx_eth_driver);
2292 platform_driver_unregister(&mv643xx_eth_shared_driver); 2221 platform_driver_unregister(&mv643xx_eth_shared_driver);
2293} 2222}
2294
2295module_init(mv643xx_eth_init_module);
2296module_exit(mv643xx_eth_cleanup_module); 2223module_exit(mv643xx_eth_cleanup_module);
2297 2224
2298MODULE_LICENSE("GPL"); 2225MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani "
2299MODULE_AUTHOR( "Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" 2226 "and Dale Farnsworth");
2300 " and Dale Farnsworth");
2301MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); 2227MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX");
2302MODULE_ALIAS("platform:" MV643XX_ETH_NAME); 2228MODULE_LICENSE("GPL");
2303MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME); 2229MODULE_ALIAS("platform:" MV643XX_ETH_SHARED_NAME);
2230MODULE_ALIAS("platform:" MV643XX_ETH_NAME);
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index a15cdd4a8e58..646177660495 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,30 +17,53 @@
17 17
18struct mv643xx_eth_shared_platform_data { 18struct mv643xx_eth_shared_platform_data {
19 struct mbus_dram_target_info *dram; 19 struct mbus_dram_target_info *dram;
20 unsigned int t_clk; 20 unsigned int t_clk;
21}; 21};
22 22
23struct mv643xx_eth_platform_data { 23struct mv643xx_eth_platform_data {
24 /*
25 * Pointer back to our parent instance, and our port number.
26 */
24 struct platform_device *shared; 27 struct platform_device *shared;
25 int port_number; 28 int port_number;
26 29
30 /*
 31 * Whether a PHY is present, and if so, at which address.
32 */
27 struct platform_device *shared_smi; 33 struct platform_device *shared_smi;
34 int force_phy_addr;
35 int phy_addr;
28 36
29 u16 force_phy_addr; /* force override if phy_addr == 0 */ 37 /*
30 u16 phy_addr; 38 * Use this MAC address if it is valid, overriding the
31 39 * address that is already in the hardware.
32 /* If speed is 0, then speed and duplex are autonegotiated. */ 40 */
33 int speed; /* 0, SPEED_10, SPEED_100, SPEED_1000 */ 41 u8 mac_addr[6];
34 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ 42
35 43 /*
36 /* non-zero values of the following fields override defaults */ 44 * If speed is 0, autonegotiation is enabled.
37 u32 tx_queue_size; 45 * Valid values for speed: 0, SPEED_10, SPEED_100, SPEED_1000.
38 u32 rx_queue_size; 46 * Valid values for duplex: DUPLEX_HALF, DUPLEX_FULL.
39 u32 tx_sram_addr; 47 */
40 u32 tx_sram_size; 48 int speed;
41 u32 rx_sram_addr; 49 int duplex;
42 u32 rx_sram_size; 50
43 u8 mac_addr[6]; /* mac address if non-zero*/ 51 /*
52 * Override default RX/TX queue sizes if nonzero.
53 */
54 int rx_queue_size;
55 int tx_queue_size;
56
57 /*
58 * Use on-chip SRAM for RX/TX descriptors if size is nonzero
59 * and sufficient to contain all descriptors for the requested
60 * ring sizes.
61 */
62 unsigned long rx_sram_addr;
63 int rx_sram_size;
64 unsigned long tx_sram_addr;
65 int tx_sram_size;
44}; 66};
45 67
46#endif /* __LINUX_MV643XX_ETH_H */ 68
69#endif
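
To illustrate the reworked layout, a board file might fill in the per-port
platform data as below. This is a sketch only: the shared device pointer,
PHY address and ring sizes are assumed board-specific values, and fields
left at zero keep the driver defaults described in the comments above:

	#include <linux/mv643xx_eth.h>

	static struct mv643xx_eth_platform_data eth_port0_pd = {
		.shared		= &mv643xx_eth_shared_device,	/* registered separately */
		.port_number	= 0,

		.phy_addr	= 8,		/* PHY at SMI address 8 (assumed) */

		.speed		= 0,		/* 0 => autonegotiate speed/duplex */

		.rx_queue_size	= 400,		/* nonzero overrides the default */
		.tx_queue_size	= 800,

		/* rx/tx_sram_* left zero: descriptors stay in main memory. */
	};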