author		David Daney <david.daney@cavium.com>	2012-08-21 14:45:06 -0400
committer	David Daney <david.daney@cavium.com>	2012-08-31 14:49:00 -0400
commit		eeae05aa21695703e1979999a9a4a861447045c9 (patch)
tree		88aaf61bba731e9085b7cc2c35190fbcb288286b /drivers/net/ethernet
parent		70a26a219cc0eedae4529c27fe1abfb2a02e373b (diff)
netdev: octeon_mgmt: Add support for 1Gig ports.
The original hardware only supported 10M and 100M. Later versions added
1G support. Here we update the driver to make use of this. Also minor
logic clean-ups for testing PHY registration error codes and TX complete
high water marks.

Signed-off-by: David Daney <david.daney@cavium.com>
Acked-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/octeon/octeon_mgmt.c	| 328
1 file changed, 255 insertions(+), 73 deletions(-)
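
The heart of the patch is the new quiesce/program/re-enable sequence that
octeon_mgmt_adjust_link() now runs whenever phylib reports a change in link
state, speed or duplex, instead of poking the duplex bit directly. A condensed
sketch of that flow (not a verbatim excerpt; function and field names as in
the patch below, with locking and the carrier/pr_info reporting omitted):

	/* Sketch only -- condensed from octeon_mgmt_adjust_link() below. */
	if (p->phydev->link
	    && (p->last_duplex != p->phydev->duplex
		|| p->last_link != p->phydev->link
		|| p->last_speed != p->phydev->speed)) {
		octeon_mgmt_disable_link(p);	/* stop GMX TX/RX, wait for idle */
		octeon_mgmt_update_link(p);	/* program speed/duplex and TX clock */
		octeon_mgmt_enable_link(p);	/* re-enable the port */
	}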
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index c42bbb16cdae..c4df1ab13b69 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -3,7 +3,7 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2009 Cavium Networks
+ * Copyright (C) 2009-2012 Cavium, Inc
  */
 
 #include <linux/platform_device.h>
@@ -93,6 +93,7 @@ union mgmt_port_ring_entry {
 #define AGL_GMX_RX_ADR_CAM4	0x1a0
 #define AGL_GMX_RX_ADR_CAM5	0x1a8
 
+#define AGL_GMX_TX_CLK		0x208
 #define AGL_GMX_TX_STATS_CTL	0x268
 #define AGL_GMX_TX_CTL		0x270
 #define AGL_GMX_TX_STAT0	0x280
@@ -110,6 +111,7 @@ struct octeon_mgmt {
 	struct net_device *netdev;
 	u64 mix;
 	u64 agl;
+	u64 agl_prt_ctl;
 	int port;
 	int irq;
 	u64 *tx_ring;
@@ -131,6 +133,7 @@ struct octeon_mgmt {
 	spinlock_t lock;
 	unsigned int last_duplex;
 	unsigned int last_link;
+	unsigned int last_speed;
 	struct device *dev;
 	struct napi_struct napi;
 	struct tasklet_struct tx_clean_tasklet;
@@ -140,6 +143,8 @@ struct octeon_mgmt {
 	resource_size_t mix_size;
 	resource_size_t agl_phys;
 	resource_size_t agl_size;
+	resource_size_t agl_prt_ctl_phys;
+	resource_size_t agl_prt_ctl_size;
 };
 
 static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
@@ -488,7 +493,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
 	mix_ctl.s.reset = 1;
 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 	cvmx_read_csr(p->mix + MIX_CTL);
-	cvmx_wait(64);
+	octeon_io_clk_delay(64);
 
 	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
 	if (mix_bist.u64)
@@ -670,39 +675,148 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
 	return phy_mii_ioctl(p->phydev, rq, cmd);
 }
 
+static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+	/* Disable GMX before we make any changes. */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+	prtx_cfg.s.en = 0;
+	prtx_cfg.s.tx_en = 0;
+	prtx_cfg.s.rx_en = 0;
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		int i;
+		for (i = 0; i < 10; i++) {
+			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
+				break;
+			mdelay(1);
+			i++;
+		}
+	}
+}
+
+static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+	/* Restore the GMX enable state only if link is set */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+	prtx_cfg.s.tx_en = 1;
+	prtx_cfg.s.rx_en = 1;
+	prtx_cfg.s.en = 1;
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+}
+
+static void octeon_mgmt_update_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+	if (!p->phydev->link)
+		prtx_cfg.s.duplex = 1;
+	else
+		prtx_cfg.s.duplex = p->phydev->duplex;
+
+	switch (p->phydev->speed) {
+	case 10:
+		prtx_cfg.s.speed = 0;
+		prtx_cfg.s.slottime = 0;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.burst = 1;
+			prtx_cfg.s.speed_msb = 1;
+		}
+		break;
+	case 100:
+		prtx_cfg.s.speed = 0;
+		prtx_cfg.s.slottime = 0;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.burst = 1;
+			prtx_cfg.s.speed_msb = 0;
+		}
+		break;
+	case 1000:
+		/* 1000 MBits is only supported on 6XXX chips */
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.speed = 1;
+			prtx_cfg.s.speed_msb = 0;
+			/* Only matters for half-duplex */
+			prtx_cfg.s.slottime = 1;
+			prtx_cfg.s.burst = p->phydev->duplex;
+		}
+		break;
+	case 0: /* No link */
+	default:
+		break;
+	}
+
+	/* Write the new GMX setting with the port still disabled. */
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+	/* Read GMX CFG again to make sure the config is completed. */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		union cvmx_agl_gmx_txx_clk agl_clk;
+		union cvmx_agl_prtx_ctl prtx_ctl;
+
+		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
+		/* MII (both speeds) and RGMII 1000 speed. */
+		agl_clk.s.clk_cnt = 1;
+		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
+			if (p->phydev->speed == 10)
+				agl_clk.s.clk_cnt = 50;
+			else if (p->phydev->speed == 100)
+				agl_clk.s.clk_cnt = 5;
+		}
+		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
+	}
+}
+
 static void octeon_mgmt_adjust_link(struct net_device *netdev)
 {
 	struct octeon_mgmt *p = netdev_priv(netdev);
-	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 	unsigned long flags;
 	int link_changed = 0;
 
+	if (!p->phydev)
+		return;
+
 	spin_lock_irqsave(&p->lock, flags);
-	if (p->phydev->link) {
-		if (!p->last_link)
-			link_changed = 1;
-		if (p->last_duplex != p->phydev->duplex) {
-			p->last_duplex = p->phydev->duplex;
-			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-			prtx_cfg.s.duplex = p->phydev->duplex;
-			cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-		}
-	} else {
-		if (p->last_link)
-			link_changed = -1;
+
+
+	if (!p->phydev->link && p->last_link)
+		link_changed = -1;
+
+	if (p->phydev->link
+	    && (p->last_duplex != p->phydev->duplex
+		|| p->last_link != p->phydev->link
+		|| p->last_speed != p->phydev->speed)) {
+		octeon_mgmt_disable_link(p);
+		link_changed = 1;
+		octeon_mgmt_update_link(p);
+		octeon_mgmt_enable_link(p);
 	}
+
 	p->last_link = p->phydev->link;
+	p->last_speed = p->phydev->speed;
+	p->last_duplex = p->phydev->duplex;
+
 	spin_unlock_irqrestore(&p->lock, flags);
 
 	if (link_changed != 0) {
 		if (link_changed > 0) {
-			netif_carrier_on(netdev);
 			pr_info("%s: Link is up - %d/%s\n", netdev->name,
 				p->phydev->speed,
 				DUPLEX_FULL == p->phydev->duplex ?
 				"Full" : "Half");
 		} else {
-			netif_carrier_off(netdev);
 			pr_info("%s: Link is down\n", netdev->name);
 		}
 	}
@@ -722,12 +836,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 				   octeon_mgmt_adjust_link, 0,
 				   PHY_INTERFACE_MODE_MII);
 
-	if (IS_ERR(p->phydev)) {
-		p->phydev = NULL;
-		return -1;
-	}
-
-	phy_start_aneg(p->phydev);
+	if (p->phydev == NULL)
+		return -ENODEV;
 
 	return 0;
 }
@@ -735,12 +845,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 static int octeon_mgmt_open(struct net_device *netdev)
 {
 	struct octeon_mgmt *p = netdev_priv(netdev);
-	int port = p->port;
 	union cvmx_mixx_ctl mix_ctl;
 	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
 	union cvmx_mixx_oring1 oring1;
 	union cvmx_mixx_iring1 iring1;
-	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 	union cvmx_mixx_irhwm mix_irhwm;
 	union cvmx_mixx_orhwm mix_orhwm;
@@ -787,9 +895,31 @@ static int octeon_mgmt_open(struct net_device *netdev)
 		} while (mix_ctl.s.reset);
 	}
 
-	agl_gmx_inf_mode.u64 = 0;
-	agl_gmx_inf_mode.s.en = 1;
-	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+		agl_gmx_inf_mode.u64 = 0;
+		agl_gmx_inf_mode.s.en = 1;
+		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+		/*
+		 * Force compensation values, as they are not
+		 * determined properly by HW
+		 */
+		union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+		if (p->port) {
+			drv_ctl.s.byp_en1 = 1;
+			drv_ctl.s.nctl1 = 6;
+			drv_ctl.s.pctl1 = 6;
+		} else {
+			drv_ctl.s.byp_en = 1;
+			drv_ctl.s.nctl = 6;
+			drv_ctl.s.pctl = 6;
+		}
+		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+	}
 
 	oring1.u64 = 0;
 	oring1.s.obase = p->tx_ring_handle >> 3;
@@ -801,11 +931,6 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
 	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
 
-	/* Disable packet I/O. */
-	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-	prtx_cfg.s.en = 0;
-	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-
 	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
 	octeon_mgmt_set_mac_address(netdev, &sa);
 
@@ -821,27 +946,70 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	mix_ctl.s.nbtarb = 0;	/* Arbitration mode */
 	/* MII CB-request FIFO programmable high watermark */
 	mix_ctl.s.mrq_hwm = 1;
+#ifdef __LITTLE_ENDIAN
+	mix_ctl.s.lendian = 1;
+#endif
 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 
-	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
-	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
-		/*
-		 * Force compensation values, as they are not
-		 * determined properly by HW
-		 */
-		union cvmx_agl_gmx_drv_ctl drv_ctl;
+	/* Read the PHY to find the mode of the interface. */
+	if (octeon_mgmt_init_phy(netdev)) {
+		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
+		goto err_noirq;
+	}
 
-		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
-		if (port) {
-			drv_ctl.s.byp_en1 = 1;
-			drv_ctl.s.nctl1 = 6;
-			drv_ctl.s.pctl1 = 6;
-		} else {
-			drv_ctl.s.byp_en = 1;
-			drv_ctl.s.nctl = 6;
-			drv_ctl.s.pctl = 6;
+	/* Set the mode of the interface, RGMII/MII. */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+		union cvmx_agl_prtx_ctl agl_prtx_ctl;
+		int rgmii_mode = (p->phydev->supported &
+				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* MII clocks counts are based on the 125Mhz
+		 * reference, which has an 8nS period. So our delays
+		 * need to be multiplied by this factor.
+		 */
+#define NS_PER_PHY_CLK 8
+
+		/* Take the DLL and clock tree out of reset */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.clkrst = 0;
+		if (rgmii_mode) {
+			agl_prtx_ctl.s.dllrst = 0;
+			agl_prtx_ctl.s.clktx_byp = 0;
 		}
-		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
+
+		/* Wait for the DLL to lock. External 125 MHz
+		 * reference clock must be stable at this point.
+		 */
+		ndelay(256 * NS_PER_PHY_CLK);
+
+		/* Enable the interface */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.enable = 1;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* Read the value back to force the previous write */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+
+		/* Enable the compensation controller */
+		agl_prtx_ctl.s.comp = 1;
+		agl_prtx_ctl.s.drv_byp = 0;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+		/* Force write out before wait. */
+		cvmx_read_csr(p->agl_prt_ctl);
+
+		/* For compensation state to lock. */
+		ndelay(1040 * NS_PER_PHY_CLK);
+
+		/* Some Ethernet switches cannot handle standard
+		 * Interframe Gap, increase to 16 bytes.
+		 */
+		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
 	}
 
 	octeon_mgmt_rx_fill_ring(netdev);
@@ -872,7 +1040,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 
 	/* Interrupt when we have 1 or more packets to clean. */
 	mix_orhwm.u64 = 0;
-	mix_orhwm.s.orhwm = 1;
+	mix_orhwm.s.orhwm = 0;
 	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
 
 	/* Enable receive and transmit interrupts */
@@ -881,7 +1049,6 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	mix_intena.s.othena = 1;
 	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
 
-
 	/* Enable packet I/O. */
 
 	rxx_frm_ctl.u64 = 0;
@@ -912,26 +1079,20 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	rxx_frm_ctl.s.pre_chk = 1;
 	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 
-	/* Enable the AGL block */
-	agl_gmx_inf_mode.u64 = 0;
-	agl_gmx_inf_mode.s.en = 1;
-	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
-
-	/* Configure the port duplex and enables */
-	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-	prtx_cfg.s.tx_en = 1;
-	prtx_cfg.s.rx_en = 1;
-	prtx_cfg.s.en = 1;
-	p->last_duplex = 1;
-	prtx_cfg.s.duplex = p->last_duplex;
-	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+	/* Configure the port duplex, speed and enables */
+	octeon_mgmt_disable_link(p);
+	if (p->phydev)
+		octeon_mgmt_update_link(p);
+	octeon_mgmt_enable_link(p);
 
 	p->last_link = 0;
-	netif_carrier_off(netdev);
-
-	if (octeon_mgmt_init_phy(netdev)) {
-		dev_err(p->dev, "Cannot initialize PHY.\n");
-		goto err_noirq;
+	p->last_speed = 0;
+	/* PHY is not present in simulator. The carrier is enabled
+	 * while initializing the phy for simulator, leave it enabled.
+	 */
+	if (p->phydev) {
+		netif_carrier_off(netdev);
+		phy_start_aneg(p->phydev);
 	}
 
 	netif_wake_queue(netdev);
@@ -961,6 +1122,7 @@ static int octeon_mgmt_stop(struct net_device *netdev)
 
 	if (p->phydev)
 		phy_disconnect(p->phydev);
+	p->phydev = NULL;
 
 	netif_carrier_off(netdev);
 
@@ -1033,6 +1195,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Ring the bell. */
 	cvmx_write_csr(p->mix + MIX_ORING2, 1);
 
+	netdev->trans_start = jiffies;
 	rv = NETDEV_TX_OK;
 out:
 	octeon_mgmt_update_tx_stats(netdev);
@@ -1098,9 +1261,9 @@ static const struct net_device_ops octeon_mgmt_ops = {
 	.ndo_open =			octeon_mgmt_open,
 	.ndo_stop =			octeon_mgmt_stop,
 	.ndo_start_xmit =		octeon_mgmt_xmit,
-	.ndo_set_rx_mode = 		octeon_mgmt_set_rx_filtering,
-	.ndo_set_mac_address = 	octeon_mgmt_set_mac_address,
-	.ndo_do_ioctl = 		octeon_mgmt_ioctl,
+	.ndo_set_rx_mode =		octeon_mgmt_set_rx_filtering,
+	.ndo_set_mac_address =		octeon_mgmt_set_mac_address,
+	.ndo_do_ioctl =			octeon_mgmt_ioctl,
 	.ndo_change_mtu =		octeon_mgmt_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller =		octeon_mgmt_poll_controller,
@@ -1115,6 +1278,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 	const u8 *mac;
 	struct resource *res_mix;
 	struct resource *res_agl;
+	struct resource *res_agl_prt_ctl;
 	int len;
 	int result;
 
@@ -1161,10 +1325,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (res_agl_prt_ctl == NULL) {
+		dev_err(&pdev->dev, "no 'reg' resource\n");
+		result = -ENXIO;
+		goto err;
+	}
+
 	p->mix_phys = res_mix->start;
 	p->mix_size = resource_size(res_mix);
 	p->agl_phys = res_agl->start;
 	p->agl_size = resource_size(res_agl);
+	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
+	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
 
 
 	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
@@ -1183,10 +1356,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
+				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
+		result = -ENXIO;
+		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+			res_agl_prt_ctl->name);
+		goto err;
+	}
 
 	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
 	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
-
+	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
+					   p->agl_prt_ctl_size);
 	spin_lock_init(&p->lock);
 
 	skb_queue_head_init(&p->tx_list);
@@ -1209,6 +1390,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
 	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
 
+	netif_carrier_off(netdev);
 	result = register_netdev(netdev);
 	if (result)
 		goto err;
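
As a sanity check on the AGL_GMX_TX_CLK values programmed by
octeon_mgmt_update_link(): per the comment in the patch, the divider counts
the 125 MHz (8 ns) reference clock, so the chosen clk_cnt values land exactly
on the standard RGMII TX clock rates. A hypothetical helper (not part of the
patch) showing the arithmetic:

/* Illustration only -- not from the driver. The TX clock is the
 * 125 MHz (8 ns) reference divided by clk_cnt:
 *   1000 Mbps: clk_cnt = 1  ->   8 ns period -> 125 MHz
 *    100 Mbps: clk_cnt = 5  ->  40 ns period ->  25 MHz
 *     10 Mbps: clk_cnt = 50 -> 400 ns period -> 2.5 MHz
 */
static int rgmii_tx_clk_cnt(int speed_mbps)
{
	switch (speed_mbps) {
	case 10:
		return 50;
	case 100:
		return 5;
	default:	/* 1000 Mbps; also MII mode at any speed */
		return 1;
	}
}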