Diffstat (limited to 'drivers/net/skge.c')
 drivers/net/skge.c | 85
 1 file changed, 54 insertions(+), 31 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bf55a4cfb3d2..25e028b7ce48 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
 	int i;
 
 	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
-	xm_read16(hw, port, XM_PHY_DATA);
+	*val = xm_read16(hw, port, XM_PHY_DATA);
 
-	/* Need to wait for external PHY */
 	for (i = 0; i < PHY_RETRIES; i++) {
-		udelay(1);
 		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
 			goto ready;
+		udelay(1);
 	}
 
 	return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
 
  ready:
 	xm_write16(hw, port, XM_PHY_DATA, val);
-	return 0;
+	for (i = 0; i < PHY_RETRIES; i++) {
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
 }
 
 static void genesis_init(struct skge_hw *hw)
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	u32 r;
 	const u8 zero[6] = { 0 };
 
-	/* Clear MIB counters */
-	xm_write16(hw, port, XM_STAT_CMD,
-		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
-	/* Clear two times according to Errata #3 */
-	xm_write16(hw, port, XM_STAT_CMD,
-		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+	for (i = 0; i < 10; i++) {
+		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+			     MFF_SET_MAC_RST);
+		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+			goto reset_ok;
+		udelay(1);
+	}
 
+	printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
+
+ reset_ok:
 	/* Unreset the XMAC. */
 	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
 
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 		r |= GP_DIR_2|GP_IO_2;
 
 	skge_write32(hw, B2_GP_IO, r);
-	skge_read32(hw, B2_GP_IO);
+
 
 	/* Enable GMII interface */
 	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	for (i = 1; i < 16; i++)
 		xm_outaddr(hw, port, XM_EXM(i), zero);
 
+	/* Clear MIB counters */
+	xm_write16(hw, port, XM_STAT_CMD,
+		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+	/* Clear two times according to Errata #3 */
+	xm_write16(hw, port, XM_STAT_CMD,
+		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
 	/* configure Rx High Water Mark (XM_RX_HI_WM) */
 	xm_write16(hw, port, XM_RX_HI_WM, 1450);
 
@@ -1697,6 +1712,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
 	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
 	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
+
 	if (skge->autoneg == AUTONEG_DISABLE) {
 		reg = GM_GPCR_AU_ALL_DIS;
 		gma_write16(hw, port, GM_GP_CTRL,
@@ -1704,16 +1720,23 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 
 		switch (skge->speed) {
 		case SPEED_1000:
+			reg &= ~GM_GPCR_SPEED_100;
 			reg |= GM_GPCR_SPEED_1000;
-			/* fallthru */
+			break;
 		case SPEED_100:
+			reg &= ~GM_GPCR_SPEED_1000;
 			reg |= GM_GPCR_SPEED_100;
+			break;
+		case SPEED_10:
+			reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
+			break;
 		}
 
 		if (skge->duplex == DUPLEX_FULL)
 			reg |= GM_GPCR_DUP_FULL;
 	} else
 		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
 	switch (skge->flow_control) {
 	case FLOW_MODE_NONE:
 		skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -2162,8 +2185,10 @@ static int skge_up(struct net_device *dev)
 	skge->tx_avail = skge->tx_ring.count - 1;
 
 	/* Enable IRQ from port */
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= portirqmask[port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
@@ -2221,8 +2246,10 @@ static int skge_down(struct net_device *dev)
 	else
 		yukon_stop(skge);
 
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask &= ~portirqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	/* Stop transmitter */
 	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2670,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 
 	/* restart receiver */
 	wmb();
-	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
-		    CSR_START | CSR_IRQ_CL_F);
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
 	*budget -= work_done;
 	dev->quota -= work_done;
@@ -2679,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	netif_rx_complete(dev);
-	hw->intr_mask |= portirqmask[skge->port];
-	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	skge_read32(hw, B0_IMSK);
+	spin_lock_irq(&hw->hw_lock);
+	__netif_rx_complete(dev);
+	hw->intr_mask |= portirqmask[skge->port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
@@ -2842,18 +2869,10 @@ static void skge_extirq(unsigned long data)
 	}
 	spin_unlock(&hw->phy_lock);
 
-	local_irq_disable();
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	local_irq_enable();
-}
-
-static inline void skge_wakeup(struct net_device *dev)
-{
-	struct skge_port *skge = netdev_priv(dev);
-
-	prefetch(skge->rx_ring.to_clean);
-	netif_rx_schedule(dev);
+	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2864,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	if (status == 0 || status == ~0) /* hotplug or shared irq */
 		return IRQ_NONE;
 
-	status &= hw->intr_mask;
+	spin_lock(&hw->hw_lock);
 	if (status & IS_R1_F) {
+		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
 		hw->intr_mask &= ~IS_R1_F;
-		skge_wakeup(hw->dev[0]);
+		netif_rx_schedule(hw->dev[0]);
 	}
 
 	if (status & IS_R2_F) {
+		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
 		hw->intr_mask &= ~IS_R2_F;
-		skge_wakeup(hw->dev[1]);
+		netif_rx_schedule(hw->dev[1]);
 	}
 
 	if (status & IS_XA1_F)
@@ -2914,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	}
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
@@ -3282,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	hw->pdev = pdev;
 	spin_lock_init(&hw->phy_lock);
+	spin_lock_init(&hw->hw_lock);
 	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);