Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--  drivers/net/skge.c  215
1 file changed, 131 insertions(+), 84 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index eea75a401b0..d476a3cc2e9 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -105,7 +105,8 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
-static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
+static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -162,27 +163,46 @@ static void skge_wol_init(struct skge_port *skge)
 {
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
-	enum pause_control save_mode;
-	u32 ctrl;
+	u16 ctrl;
 
-	/* Bring hardware out of reset */
 	skge_write16(hw, B0_CTST, CS_RST_CLR);
 	skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR);
 
-	skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
-	skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+	/* Turn on Vaux */
+	skge_write8(hw, B0_POWER_CTRL,
+		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
 
-	/* Force to 10/100 skge_reset will re-enable on resume */
-	save_mode = skge->flow_control;
-	skge->flow_control = FLOW_MODE_SYMMETRIC;
+	/* WA code for COMA mode -- clear PHY reset */
+	if (hw->chip_id == CHIP_ID_YUKON_LITE &&
+	    hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
+		u32 reg = skge_read32(hw, B2_GP_IO);
+		reg |= GP_DIR_9;
+		reg &= ~GP_IO_9;
+		skge_write32(hw, B2_GP_IO, reg);
+	}
 
-	ctrl = skge->advertising;
-	skge->advertising &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
+	skge_write32(hw, SK_REG(port, GPHY_CTRL),
+		     GPC_DIS_SLEEP |
+		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+		     GPC_ANEG_1 | GPC_RST_SET);
 
-	skge_phy_reset(skge);
+	skge_write32(hw, SK_REG(port, GPHY_CTRL),
+		     GPC_DIS_SLEEP |
+		     GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 |
+		     GPC_ANEG_1 | GPC_RST_CLR);
+
+	skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	/* Force to 10/100 skge_reset will re-enable on resume */
+	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+		     PHY_AN_100FULL | PHY_AN_100HALF |
+		     PHY_AN_10FULL | PHY_AN_10HALF| PHY_AN_CSMA);
+	/* no 1000 HD/FD */
+	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
+	gm_phy_write(hw, port, PHY_MARV_CTRL,
+		     PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE |
+		     PHY_CT_RE_CFG | PHY_CT_DUP_MD);
 
-	skge->flow_control = save_mode;
-	skge->advertising = ctrl;
 
 	/* Set GMAC to no flow control and auto update for speed/duplex */
 	gma_write16(hw, port, GM_GP_CTRL,
@@ -226,12 +246,10 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
 
-	if (wol->wolopts & wol_supported(hw))
+	if (wol->wolopts & ~wol_supported(hw))
 		return -EOPNOTSUPP;
 
 	skge->wol = wol->wolopts;
-	if (!netif_running(dev))
-		skge_wol_init(skge);
 	return 0;
 }
 
@@ -671,7 +689,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		switch (mode) {
 		case LED_MODE_OFF:
@@ -742,7 +760,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 				     PHY_M_LED_MO_RX(MO_LED_ON));
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 }
 
 /* blink LED's for finding board */
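The phy_mutex to phy_lock changes throughout this patch follow from the work-queue to timer/tasklet conversion: timers and tasklets run in softirq context, where sleeping is not allowed, so a mutex cannot be taken there and a bottom-half-disabling spinlock is used instead. A rough sketch of the pattern, with hypothetical names rather than the driver's own structures:

#include <linux/spinlock.h>

struct my_hw {
	spinlock_t phy_lock;	/* protects PHY register access */
};

/* process context: disable bottom halves so the softirq users cannot deadlock us */
static void my_process_ctx_access(struct my_hw *hw)
{
	spin_lock_bh(&hw->phy_lock);
	/* ... touch PHY registers ... */
	spin_unlock_bh(&hw->phy_lock);
}

/* softirq context (timer/tasklet): bottom halves already disabled, plain lock */
static void my_softirq_ctx_access(struct my_hw *hw)
{
	spin_lock(&hw->phy_lock);
	/* ... touch PHY registers ... */
	spin_unlock(&hw->phy_lock);
}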
@@ -1316,7 +1334,7 @@ static void xm_phy_init(struct skge_port *skge)
 	xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
 
 	/* Poll PHY for status changes */
-	schedule_delayed_work(&skge->link_thread, LINK_HZ);
+	mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void xm_check_link(struct net_device *dev)
@@ -1391,10 +1409,9 @@ static void xm_check_link(struct net_device *dev)
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(struct work_struct *work)
+static void xm_link_timer(unsigned long arg)
 {
-	struct skge_port *skge =
-		container_of(work, struct skge_port, link_thread.work);
+	struct skge_port *skge = (struct skge_port *) arg;
 	struct net_device *dev = skge->netdev;
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
@@ -1414,13 +1431,13 @@ static void xm_link_timer(struct work_struct *work)
 		goto nochange;
 	}
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock(&hw->phy_lock);
 	xm_check_link(dev);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock(&hw->phy_lock);
 
 nochange:
 	if (netif_running(dev))
-		schedule_delayed_work(&skge->link_thread, LINK_HZ);
+		mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void genesis_mac_init(struct skge_hw *hw, int port)
@@ -2323,7 +2340,7 @@ static void skge_phy_reset(struct skge_port *skge)
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
 
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		genesis_reset(hw, port);
 		genesis_mac_init(hw, port);
@@ -2331,7 +2348,7 @@ static void skge_phy_reset(struct skge_port *skge)
 		yukon_reset(hw, port);
 		yukon_init(hw, port);
 	}
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	dev->set_multicast_list(dev);
 }
@@ -2354,12 +2371,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		/* fallthru */
 	case SIOCGMIIREG: {
 		u16 val = 0;
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
 		else
 			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		data->val_out = val;
 		break;
 	}
@@ -2368,14 +2385,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		mutex_lock(&hw->phy_mutex);
+		spin_lock_bh(&hw->phy_lock);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
 		else
 			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
-		mutex_unlock(&hw->phy_mutex);
+		spin_unlock_bh(&hw->phy_lock);
 		break;
 	}
 	return err;
@@ -2481,12 +2498,12 @@ static int skge_up(struct net_device *dev)
 		goto free_rx_ring;
 
 	/* Initialize MAC */
-	mutex_lock(&hw->phy_mutex);
+	spin_lock_bh(&hw->phy_lock);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
 	else
 		yukon_mac_init(hw, port);
-	mutex_unlock(&hw->phy_mutex);
+	spin_unlock_bh(&hw->phy_lock);
 
 	/* Configure RAMbuffers */
 	chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2504,6 +2521,11 @@ static int skge_up(struct net_device *dev)
 	skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
 	skge_led(skge, LED_MODE_ON);
 
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
+
 	netif_poll_enable(dev);
 	return 0;
 
@@ -2530,8 +2552,17 @@ static int skge_down(struct net_device *dev)
 	printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
 
 	netif_stop_queue(dev);
+
 	if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
-		cancel_delayed_work(&skge->link_thread);
+		del_timer_sync(&skge->link_timer);
+
+	netif_poll_disable(dev);
+	netif_carrier_off(dev);
+
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask &= ~portmask[port];
+	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 
 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
 	if (hw->chip_id == CHIP_ID_GENESIS)
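With the new portmask[] table, skge_up() and skge_down() enable and disable each port's interrupt sources explicitly under hw_lock, instead of relying on the global per-port bits set once at probe time. The shape of that pattern, sketched with hypothetical names and bit values:

#include <linux/spinlock.h>
#include <linux/types.h>

/* hypothetical per-port IRQ source bits */
#define MY_PORT0_IRQS 0x0000000fu
#define MY_PORT1_IRQS 0x000000f0u
static const u32 my_portmask[] = { MY_PORT0_IRQS, MY_PORT1_IRQS };

struct my_hw {
	spinlock_t hw_lock;	/* serializes intr_mask against the IRQ handler */
	u32 intr_mask;		/* software copy of the interrupt mask register */
};

static void my_write_imask(struct my_hw *hw)
{
	/* write hw->intr_mask to the B0_IMSK-like hardware register */
}

static void my_port_irq_enable(struct my_hw *hw, int port)
{
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask |= my_portmask[port];
	my_write_imask(hw);
	spin_unlock_irq(&hw->hw_lock);
}

static void my_port_irq_disable(struct my_hw *hw, int port)
{
	spin_lock_irq(&hw->hw_lock);
	hw->intr_mask &= ~my_portmask[port];
	my_write_imask(hw);
	spin_unlock_irq(&hw->hw_lock);
}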
@@ -2575,8 +2606,10 @@ static int skge_down(struct net_device *dev)
 
 	skge_led(skge, LED_MODE_OFF);
 
-	netif_poll_disable(dev);
+	netif_tx_lock_bh(dev);
 	skge_tx_clean(dev);
+	netif_tx_unlock_bh(dev);
+
 	skge_rx_clean(skge);
 
 	kfree(skge->rx_ring.start);
@@ -2721,7 +2754,6 @@ static void skge_tx_clean(struct net_device *dev)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_element *e;
 
-	netif_tx_lock_bh(dev);
 	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 		skge_tx_free(skge, e, td->control);
@@ -2730,7 +2762,6 @@ static void skge_tx_clean(struct net_device *dev)
 
 	skge->tx_ring.to_clean = e;
 	netif_wake_queue(dev);
-	netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -3049,7 +3080,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 
 	spin_lock_irqsave(&hw->hw_lock, flags);
 	__netif_rx_complete(dev);
-	hw->intr_mask |= irqmask[skge->port];
+	hw->intr_mask |= napimask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 	skge_read32(hw, B0_IMSK);
 	spin_unlock_irqrestore(&hw->hw_lock, flags);
@@ -3160,28 +3191,29 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrupt from PHY are handled in work queue
+ * Interrupt from PHY are handled in tasklet (softirq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(struct work_struct *work)
+static void skge_extirq(unsigned long arg)
 {
-	struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
+	struct skge_hw *hw = (struct skge_hw *) arg;
 	int port;
 
-	mutex_lock(&hw->phy_mutex);
 	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
-		struct skge_port *skge = netdev_priv(dev);
 
 		if (netif_running(dev)) {
+			struct skge_port *skge = netdev_priv(dev);
+
+			spin_lock(&hw->phy_lock);
 			if (hw->chip_id != CHIP_ID_GENESIS)
 				yukon_phy_intr(skge);
 			else if (hw->phy_type == SK_PHY_BCOM)
 				bcom_phy_intr(skge);
+			spin_unlock(&hw->phy_lock);
 		}
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
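The skge_extirq() change follows the usual work-queue to tasklet recipe: the handler takes an unsigned long argument instead of a work_struct, the hard-IRQ handler calls tasklet_schedule() instead of schedule_work(), and teardown uses tasklet_disable(). A minimal sketch with hypothetical names (not the driver's own):

#include <linux/interrupt.h>

struct my_hw {
	struct tasklet_struct phy_task;
};

/* runs in softirq context; must not sleep */
static void my_phy_tasklet(unsigned long arg)
{
	struct my_hw *hw = (struct my_hw *) arg;

	/* ... service the PHY interrupt, then re-enable the masked IRQ source ... */
	(void) hw;
}

static void my_hw_init(struct my_hw *hw)
{
	tasklet_init(&hw->phy_task, my_phy_tasklet, (unsigned long) hw);
}

static irqreturn_t my_irq(int irq, void *dev_id)
{
	struct my_hw *hw = dev_id;

	/* defer the slow PHY access out of hard-IRQ context */
	tasklet_schedule(&hw->phy_task);
	return IRQ_HANDLED;
}

static void my_hw_remove(struct my_hw *hw)
{
	tasklet_disable(&hw->phy_task);	/* no new runs after this point */
}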
@@ -3206,7 +3238,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
 	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		schedule_work(&hw->phy_work);
+		tasklet_schedule(&hw->phy_task);
 	}
 
 	if (status & (IS_XA1_F|IS_R1_F)) {
@@ -3275,24 +3307,35 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
 	struct skge_hw *hw = skge->hw;
 	unsigned port = skge->port;
 	const struct sockaddr *addr = p;
+	u16 ctrl;
 
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	mutex_lock(&hw->phy_mutex);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
-	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
-		    dev->dev_addr, ETH_ALEN);
-	memcpy_toio(hw->regs + B2_MAC_2 + port*8,
-		    dev->dev_addr, ETH_ALEN);
 
-	if (hw->chip_id == CHIP_ID_GENESIS)
-		xm_outaddr(hw, port, XM_SA, dev->dev_addr);
-	else {
-		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
-		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+	if (!netif_running(dev)) {
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+	} else {
+		/* disable Rx */
+		spin_lock_bh(&hw->phy_lock);
+		ctrl = gma_read16(hw, port, GM_GP_CTRL);
+		gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+
+		memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+		memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+
+		if (hw->chip_id == CHIP_ID_GENESIS)
+			xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+		else {
+			gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+			gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+		}
+
+		gma_write16(hw, port, GM_GP_CTRL, ctrl);
+		spin_unlock_bh(&hw->phy_lock);
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3407,10 +3450,9 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
-	hw->intr_mask = IS_HW_ERR | IS_PORT_1;
-	if (hw->ports > 1)
-		hw->intr_mask |= IS_PORT_2;
+	hw->intr_mask = IS_HW_ERR;
 
+	/* Use PHY IRQ for all but fiber based Genesis board */
 	if (!(hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC))
 		hw->intr_mask |= IS_EXT_REG;
 
@@ -3478,14 +3520,12 @@ static int skge_reset(struct skge_hw *hw)
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	mutex_lock(&hw->phy_mutex);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			genesis_reset(hw, i);
 		else
 			yukon_reset(hw, i);
 	}
-	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3533,6 +3573,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->netdev = dev;
 	skge->hw = hw;
 	skge->msg_enable = netif_msg_init(debug, default_msg);
+
 	skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
 	skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
 
@@ -3549,7 +3590,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	skge->port = port;
 
 	/* Only used for Genesis XMAC */
-	INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
+	setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
 
 	if (hw->chip_id != CHIP_ID_GENESIS) {
 		dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3631,9 +3672,9 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 	hw->pdev = pdev;
-	mutex_init(&hw->phy_mutex);
-	INIT_WORK(&hw->phy_work, skge_extirq);
 	spin_lock_init(&hw->hw_lock);
+	spin_lock_init(&hw->phy_lock);
+	tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3719,6 +3760,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	tasklet_disable(&hw->phy_task);
+
 	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
@@ -3741,21 +3784,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 }
 
 #ifdef CONFIG_PM
-static int vaux_avail(struct pci_dev *pdev)
-{
-	int pm_cap;
-
-	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
-	if (pm_cap) {
-		u16 ctl;
-		pci_read_config_word(pdev, pm_cap + PCI_PM_PMC, &ctl);
-		if (ctl & PCI_PM_CAP_AUX_POWER)
-			return 1;
-	}
-	return 0;
-}
-
-
 static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct skge_hw *hw = pci_get_drvdata(pdev);
@@ -3777,10 +3805,6 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 		wol |= skge->wol;
 	}
 
-	if (wol && vaux_avail(pdev))
-		skge_write8(hw, B0_POWER_CTRL,
-			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF);
-
 	skge_write32(hw, B0_IMSK, 0);
 	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
@@ -3826,6 +3850,28 @@ out:
 }
 #endif
 
+static void skge_shutdown(struct pci_dev *pdev)
+{
+	struct skge_hw *hw = pci_get_drvdata(pdev);
+	int i, wol = 0;
+
+	for (i = 0; i < hw->ports; i++) {
+		struct net_device *dev = hw->dev[i];
+		struct skge_port *skge = netdev_priv(dev);
+
+		if (skge->wol)
+			skge_wol_init(skge);
+		wol |= skge->wol;
+	}
+
+	pci_enable_wake(pdev, PCI_D3hot, wol);
+	pci_enable_wake(pdev, PCI_D3cold, wol);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+}
+
 static struct pci_driver skge_driver = {
 	.name = DRV_NAME,
 	.id_table = skge_id_table,
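The new skge_shutdown() hook is wired into the pci_driver in the next hunk. The general shape of a shutdown handler that arms wake-on-LAN is sketched below with hypothetical names; pci_enable_wake(), pci_disable_device() and pci_set_power_state() are the real PCI helpers used above, while the driver body is only indicated by a comment:

#include <linux/pci.h>

static void my_shutdown(struct pci_dev *pdev)
{
	int wol = 0;

	/* ... program the MAC/PHY for wake-up if the user enabled WoL,
	 *     and accumulate whether any port wants to wake the system ... */

	/* tell the PCI core whether this device may wake the machine */
	pci_enable_wake(pdev, PCI_D3hot, wol);
	pci_enable_wake(pdev, PCI_D3cold, wol);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}

static struct pci_driver my_driver = {
	.name     = "my_driver",
	/* .id_table, .probe, .remove, ... */
	.shutdown = my_shutdown,
};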
@@ -3835,6 +3881,7 @@ static struct pci_driver skge_driver = {
 	.suspend = skge_suspend,
 	.resume = skge_resume,
 #endif
+	.shutdown = skge_shutdown,
 };
 
 static int __init skge_init_module(void)