Diffstat (limited to 'drivers/net/skge.c')

 -rw-r--r--  drivers/net/skge.c | 75
 1 file changed, 45 insertions, 30 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 67fb19b8fde9..25e028b7ce48 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
 	int i;
 
 	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
-	xm_read16(hw, port, XM_PHY_DATA);
+	*val = xm_read16(hw, port, XM_PHY_DATA);
 
-	/* Need to wait for external PHY */
 	for (i = 0; i < PHY_RETRIES; i++) {
-		udelay(1);
 		if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
			goto ready;
+		udelay(1);
 	}
 
 	return -ETIMEDOUT;
@@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
 
 ready:
 	xm_write16(hw, port, XM_PHY_DATA, val);
-	return 0;
+	for (i = 0; i < PHY_RETRIES; i++) {
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+			return 0;
+		udelay(1);
+	}
+	return -ETIMEDOUT;
 }
 
 static void genesis_init(struct skge_hw *hw)
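
The two hunks above switch the XMAC PHY helpers from fixed delays to status polling: the initial dummy read of XM_PHY_DATA in __xm_phy_read() now stores its result into *val and the loop delays after checking XM_MMU_PHY_RDY, while xm_phy_write() no longer returns immediately but polls XM_MMU_CMD until XM_MMU_PHY_BUSY clears, returning -ETIMEDOUT otherwise. A minimal sketch of the same poll-with-timeout pattern; the helper name and callback are illustrative, not part of the patch:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative helper: poll a 16-bit status word until (status & mask)
 * equals 'want', pausing ~1us between reads, the way the patched
 * __xm_phy_read()/xm_phy_write() treat XM_MMU_PHY_RDY/XM_MMU_PHY_BUSY. */
static int poll_status16(u16 (*read_status)(void *ctx), void *ctx,
			 u16 mask, u16 want, int retries)
{
	int i;

	for (i = 0; i < retries; i++) {
		if ((read_status(ctx) & mask) == want)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
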
@@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	u32 r;
 	const u8 zero[6] = { 0 };
 
-	/* Clear MIB counters */
-	xm_write16(hw, port, XM_STAT_CMD,
-		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
-	/* Clear two times according to Errata #3 */
-	xm_write16(hw, port, XM_STAT_CMD,
-		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+	for (i = 0; i < 10; i++) {
+		skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
+			     MFF_SET_MAC_RST);
+		if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
+			goto reset_ok;
+		udelay(1);
+	}
+
+	printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
 
+reset_ok:
 	/* Unreset the XMAC. */
 	skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
 
@@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	r |= GP_DIR_2|GP_IO_2;
 
 	skge_write32(hw, B2_GP_IO, r);
-	skge_read32(hw, B2_GP_IO);
+
 
 	/* Enable GMII interface */
 	xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
@@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
 	for (i = 1; i < 16; i++)
 		xm_outaddr(hw, port, XM_EXM(i), zero);
 
+	/* Clear MIB counters */
+	xm_write16(hw, port, XM_STAT_CMD,
+		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+	/* Clear two times according to Errata #3 */
+	xm_write16(hw, port, XM_STAT_CMD,
+		   XM_SC_CLR_RXC | XM_SC_CLR_TXC);
+
 	/* configure Rx High Water Mark (XM_RX_HI_WM) */
 	xm_write16(hw, port, XM_RX_HI_WM, 1450);
 
@@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev)
 	skge->tx_avail = skge->tx_ring.count - 1;
 
 	/* Enable IRQ from port */
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= portirqmask[port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	/* Initialize MAC */
 	spin_lock_bh(&hw->phy_lock);
@@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev)
 	else
 		yukon_stop(skge);
 
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask &= ~portirqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	/* Stop transmitter */
 	skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
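
The skge_up() and skge_down() hunks wrap the read-modify-write of hw->intr_mask plus the B0_IMSK register write in the new hw->hw_lock, so process-context enable/disable cannot race with the interrupt handler, which takes the same lock further down. A hedged sketch of that rule; the structure and field names here are placeholders, not the driver's:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

/* Placeholder mirror of the driver's pattern, not the driver itself. */
struct demo_hw {
	spinlock_t hw_lock;	/* guards intr_mask and the IMSK register */
	u32 intr_mask;
	void __iomem *imsk_reg;	/* stands in for hw->regs + B0_IMSK */
};

/* Process context (as in skge_up()): irqs must be disabled while the
 * lock is held because the IRQ handler takes the same lock. */
static void demo_enable_port_irq(struct demo_hw *hw, u32 bits)
{
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= bits;
	writel(hw->intr_mask, hw->imsk_reg);
	spin_unlock_irq(&hw->hw_lock);
}

/* Process context (as in skge_down()). */
static void demo_disable_port_irq(struct demo_hw *hw, u32 bits)
{
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask &= ~bits;
	writel(hw->intr_mask, hw->imsk_reg);
	spin_unlock_irq(&hw->hw_lock);
}
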
@@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 
 	/* restart receiver */
 	wmb();
-	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
-		    CSR_START | CSR_IRQ_CL_F);
+	skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
 
 	*budget -= work_done;
 	dev->quota -= work_done;
@@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget)
 	if (work_done >= to_do)
 		return 1; /* not done */
 
-	netif_rx_complete(dev);
-	hw->intr_mask |= portirqmask[skge->port];
-	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	skge_read32(hw, B0_IMSK);
+	spin_lock_irq(&hw->hw_lock);
+	__netif_rx_complete(dev);
+	hw->intr_mask |= portirqmask[skge->port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
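
In skge_poll(), NAPI completion and re-arming the port's IMSK bit now form one critical section under hw_lock; otherwise the handler on another CPU could see the re-armed mask and call netif_rx_schedule() against a device still on the poll list. __netif_rx_complete() is the variant for callers that already run with interrupts disabled, which spin_lock_irq() guarantees here. A condensed sketch of that ordering (pre-2.6.24 NAPI API, as used throughout this driver; names other than the API calls are placeholders):

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

/* Condensed form of the tail of the patched skge_poll(): everything
 * between taking and releasing the lock must stay together. */
static inline void demo_napi_done(struct net_device *dev, spinlock_t *hw_lock,
				  u32 *intr_mask, u32 port_bit,
				  void __iomem *imsk_reg)
{
	spin_lock_irq(hw_lock);
	__netif_rx_complete(dev);	/* irqs are off: use the __ variant */
	*intr_mask |= port_bit;		/* re-arm this port's rx interrupt */
	writel(*intr_mask, imsk_reg);
	spin_unlock_irq(hw_lock);
}
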
@@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data)
 	}
 	spin_unlock(&hw->phy_lock);
 
-	local_irq_disable();
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
-	local_irq_enable();
-}
-
-static inline void skge_wakeup(struct net_device *dev)
-{
-	struct skge_port *skge = netdev_priv(dev);
-
-	prefetch(skge->rx_ring.to_clean);
-	netif_rx_schedule(dev);
+	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	if (status == 0 || status == ~0) /* hotplug or shared irq */
 		return IRQ_NONE;
 
-	status &= hw->intr_mask;
+	spin_lock(&hw->hw_lock);
 	if (status & IS_R1_F) {
+		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
 		hw->intr_mask &= ~IS_R1_F;
-		skge_wakeup(hw->dev[0]);
+		netif_rx_schedule(hw->dev[0]);
 	}
 
 	if (status & IS_R2_F) {
+		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
 		hw->intr_mask &= ~IS_R2_F;
-		skge_wakeup(hw->dev[1]);
+		netif_rx_schedule(hw->dev[1]);
 	}
 
 	if (status & IS_XA1_F)
@@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	}
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
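
On the interrupt side, the handler now takes hw_lock with plain spin_lock() (it already runs in hardirq context), acks the receive-frame interrupt with CSR_IRQ_CL_F itself instead of leaving that to the CSR_START write in skge_poll(), masks the port's bit, and schedules polling. A short sketch of why the lock variants differ between the two sides; the lock here is a stand-in for hw->hw_lock:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_hw_lock);	/* stands in for hw->hw_lock */

/* Hardirq context (the skge_intr() side): interrupts are already off
 * on this CPU, so plain spin_lock() is enough. */
static void demo_irq_side(void)
{
	spin_lock(&demo_hw_lock);
	/* ack CSR_IRQ_CL_F, clear the port bit in intr_mask, schedule NAPI */
	spin_unlock(&demo_hw_lock);
}

/* Process/softirq context (skge_up/skge_down/skge_poll/skge_extirq):
 * must disable interrupts so the handler above cannot be entered on the
 * same CPU while the lock is held. */
static void demo_task_side(void)
{
	spin_lock_irq(&demo_hw_lock);
	/* update intr_mask and write it back to B0_IMSK */
	spin_unlock_irq(&demo_hw_lock);
}
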
@@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 	hw->pdev = pdev;
 	spin_lock_init(&hw->phy_lock);
+	spin_lock_init(&hw->hw_lock);
 	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
