path: root/drivers/net/skge.c
author    Stephen Hemminger <shemminger@linux-foundation.org>    2007-03-16 17:01:28 -0400
committer Jeff Garzik <jeff@garzik.org>    2007-03-23 01:48:33 -0400
commit    9cbe330f1fbbc8de15a5914aa6e91d89eb9daac4 (patch)
tree      42cf338b904d2d2e538416359c1b96fe80c62301 /drivers/net/skge.c
parent    4ebabfcb1d6af5191ef5c8305717ccbc24979f6c (diff)
skge: use per-port phy locking
Rather than a workqueue and a per-board mutex to control the PHY, use a tasklet and spinlock. A tasklet is lower overhead and works just as well for this.

Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
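For readers less familiar with the deferral scheme this patch switches to, here is a minimal, self-contained sketch of the tasklet-plus-spinlock pattern (illustrative only, not the driver's code; the my_* names are made up). The hard-IRQ handler only schedules the tasklet; the slow PHY register access runs in softirq context under a spinlock, and process-context callers take the same lock with the _bh variants so the tasklet cannot run underneath them.

/*
 * Illustrative sketch only -- not skge code.  The my_* names are
 * hypothetical; the kernel APIs (tasklet_init, tasklet_schedule,
 * spin_lock_bh) are the ones the patch below uses.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_hw {
        spinlock_t phy_lock;                    /* guards PHY register access */
        struct tasklet_struct phy_task;         /* deferred PHY interrupt work */
};

/* Runs in softirq context, so a plain spin_lock() is enough here. */
static void my_phy_task(unsigned long arg)
{
        struct my_hw *hw = (struct my_hw *) arg;

        spin_lock(&hw->phy_lock);
        /* ... slow PHY register polling/acknowledging goes here ... */
        spin_unlock(&hw->phy_lock);
}

/* Hard-IRQ handler: defer the slow PHY access instead of doing it here. */
static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct my_hw *hw = dev_id;

        tasklet_schedule(&hw->phy_task);
        return IRQ_HANDLED;
}

/* Probe-time setup, analogous to what skge_probe() does below. */
static void my_init(struct my_hw *hw)
{
        spin_lock_init(&hw->phy_lock);
        tasklet_init(&hw->phy_task, my_phy_task, (unsigned long) hw);
}

/* Process-context callers (ioctl, ethtool, open) use the _bh variant so
 * the tasklet cannot run on this CPU while the PHY is being touched. */
static void my_process_context_phy_op(struct my_hw *hw)
{
        spin_lock_bh(&hw->phy_lock);
        /* ... read/write PHY registers ... */
        spin_unlock_bh(&hw->phy_lock);
}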
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--  drivers/net/skge.c  82
1 file changed, 44 insertions(+), 38 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 6e1eb23d93c4..39c6677dff5e 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -672,7 +672,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
         struct skge_hw *hw = skge->hw;
         int port = skge->port;
 
-        mutex_lock(&hw->phy_mutex);
+        spin_lock_bh(&hw->phy_lock);
         if (hw->chip_id == CHIP_ID_GENESIS) {
                 switch (mode) {
                 case LED_MODE_OFF:
@@ -743,7 +743,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
                                   PHY_M_LED_MO_RX(MO_LED_ON));
                 }
         }
-        mutex_unlock(&hw->phy_mutex);
+        spin_unlock_bh(&hw->phy_lock);
 }
 
 /* blink LED's for finding board */
@@ -1317,7 +1317,7 @@ static void xm_phy_init(struct skge_port *skge)
         xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl);
 
         /* Poll PHY for status changes */
-        schedule_delayed_work(&skge->link_thread, LINK_HZ);
+        mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void xm_check_link(struct net_device *dev)
@@ -1392,10 +1392,9 @@ static void xm_check_link(struct net_device *dev)
  * Since internal PHY is wired to a level triggered pin, can't
  * get an interrupt when carrier is detected.
  */
-static void xm_link_timer(struct work_struct *work)
+static void xm_link_timer(unsigned long arg)
 {
-        struct skge_port *skge =
-                container_of(work, struct skge_port, link_thread.work);
+        struct skge_port *skge = (struct skge_port *) arg;
         struct net_device *dev = skge->netdev;
         struct skge_hw *hw = skge->hw;
         int port = skge->port;
@@ -1415,13 +1414,13 @@ static void xm_link_timer(struct work_struct *work)
                 goto nochange;
         }
 
-        mutex_lock(&hw->phy_mutex);
+        spin_lock(&hw->phy_lock);
         xm_check_link(dev);
-        mutex_unlock(&hw->phy_mutex);
+        spin_unlock(&hw->phy_lock);
 
 nochange:
         if (netif_running(dev))
-                schedule_delayed_work(&skge->link_thread, LINK_HZ);
+                mod_timer(&skge->link_timer, jiffies + LINK_HZ);
 }
 
 static void genesis_mac_init(struct skge_hw *hw, int port)
1426 1425
1427static void genesis_mac_init(struct skge_hw *hw, int port) 1426static void genesis_mac_init(struct skge_hw *hw, int port)
@@ -2324,7 +2323,7 @@ static void skge_phy_reset(struct skge_port *skge)
         netif_stop_queue(skge->netdev);
         netif_carrier_off(skge->netdev);
 
-        mutex_lock(&hw->phy_mutex);
+        spin_lock_bh(&hw->phy_lock);
         if (hw->chip_id == CHIP_ID_GENESIS) {
                 genesis_reset(hw, port);
                 genesis_mac_init(hw, port);
@@ -2332,7 +2331,7 @@ static void skge_phy_reset(struct skge_port *skge)
                 yukon_reset(hw, port);
                 yukon_init(hw, port);
         }
-        mutex_unlock(&hw->phy_mutex);
+        spin_unlock_bh(&hw->phy_lock);
 
         dev->set_multicast_list(dev);
 }
@@ -2355,12 +2354,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 /* fallthru */
         case SIOCGMIIREG: {
                 u16 val = 0;
-                mutex_lock(&hw->phy_mutex);
+                spin_lock_bh(&hw->phy_lock);
                 if (hw->chip_id == CHIP_ID_GENESIS)
                         err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
                 else
                         err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-                mutex_unlock(&hw->phy_mutex);
+                spin_unlock_bh(&hw->phy_lock);
                 data->val_out = val;
                 break;
         }
@@ -2369,14 +2368,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                 if (!capable(CAP_NET_ADMIN))
                         return -EPERM;
 
-                mutex_lock(&hw->phy_mutex);
+                spin_lock_bh(&hw->phy_lock);
                 if (hw->chip_id == CHIP_ID_GENESIS)
                         err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
                                            data->val_in);
                 else
                         err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
                                            data->val_in);
-                mutex_unlock(&hw->phy_mutex);
+                spin_unlock_bh(&hw->phy_lock);
                 break;
         }
         return err;
@@ -2482,12 +2481,12 @@ static int skge_up(struct net_device *dev)
                 goto free_rx_ring;
 
         /* Initialize MAC */
-        mutex_lock(&hw->phy_mutex);
+        spin_lock_bh(&hw->phy_lock);
         if (hw->chip_id == CHIP_ID_GENESIS)
                 genesis_mac_init(hw, port);
         else
                 yukon_mac_init(hw, port);
-        mutex_unlock(&hw->phy_mutex);
+        spin_unlock_bh(&hw->phy_lock);
 
         /* Configure RAMbuffers */
         chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2537,7 +2536,7 @@ static int skge_down(struct net_device *dev)
 
         netif_stop_queue(dev);
         if (hw->chip_id == CHIP_ID_GENESIS && hw->phy_type == SK_PHY_XMAC)
-                cancel_delayed_work(&skge->link_thread);
+                del_timer_sync(&skge->link_timer);
 
         netif_poll_disable(dev);
 
@@ -3173,28 +3172,29 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrupt from PHY are handled in work queue
+ * Interrupt from PHY are handled in tasklet (softirq)
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(struct work_struct *work)
+static void skge_extirq(unsigned long arg)
 {
-        struct skge_hw *hw = container_of(work, struct skge_hw, phy_work);
+        struct skge_hw *hw = (struct skge_hw *) arg;
         int port;
 
-        mutex_lock(&hw->phy_mutex);
         for (port = 0; port < hw->ports; port++) {
                 struct net_device *dev = hw->dev[port];
-                struct skge_port *skge = netdev_priv(dev);
 
                 if (netif_running(dev)) {
+                        struct skge_port *skge = netdev_priv(dev);
+
+                        spin_lock(&hw->phy_lock);
                         if (hw->chip_id != CHIP_ID_GENESIS)
                                 yukon_phy_intr(skge);
                         else if (hw->phy_type == SK_PHY_BCOM)
                                 bcom_phy_intr(skge);
+                        spin_unlock(&hw->phy_lock);
                 }
         }
-        mutex_unlock(&hw->phy_mutex);
 
         spin_lock_irq(&hw->hw_lock);
         hw->intr_mask |= IS_EXT_REG;
@@ -3219,7 +3219,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id)
         status &= hw->intr_mask;
         if (status & IS_EXT_REG) {
                 hw->intr_mask &= ~IS_EXT_REG;
-                schedule_work(&hw->phy_work);
+                tasklet_schedule(&hw->phy_task);
         }
 
         if (status & (IS_XA1_F|IS_R1_F)) {
@@ -3295,23 +3295,28 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
 
         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 
-        /* disable Rx */
-        ctrl = gma_read16(hw, port, GM_GP_CTRL);
-        gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
+        if (!netif_running(dev)) {
+                memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+                memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+        } else {
+                /* disable Rx */
+                spin_lock_bh(&hw->phy_lock);
+                ctrl = gma_read16(hw, port, GM_GP_CTRL);
+                gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA);
 
-        memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
-        memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
+                memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
+                memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN);
 
-        if (netif_running(dev)) {
-                if (hw->chip_id == CHIP_ID_GENESIS)
-                        xm_outaddr(hw, port, XM_SA, dev->dev_addr);
-                else {
-                        gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
-                        gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
-                }
-        }
+                if (hw->chip_id == CHIP_ID_GENESIS)
+                        xm_outaddr(hw, port, XM_SA, dev->dev_addr);
+                else {
+                        gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
+                        gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
+                }
 
-        gma_write16(hw, port, GM_GP_CTRL, ctrl);
+                gma_write16(hw, port, GM_GP_CTRL, ctrl);
+                spin_unlock_bh(&hw->phy_lock);
+        }
 
         return 0;
 }
@@ -3496,14 +3501,12 @@ static int skge_reset(struct skge_hw *hw)
 
         skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-        mutex_lock(&hw->phy_mutex);
         for (i = 0; i < hw->ports; i++) {
                 if (hw->chip_id == CHIP_ID_GENESIS)
                         genesis_reset(hw, i);
                 else
                         yukon_reset(hw, i);
         }
-        mutex_unlock(&hw->phy_mutex);
 
         return 0;
 }
@@ -3551,6 +3554,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
         skge->netdev = dev;
         skge->hw = hw;
         skge->msg_enable = netif_msg_init(debug, default_msg);
+
         skge->tx_ring.count = DEFAULT_TX_RING_SIZE;
         skge->rx_ring.count = DEFAULT_RX_RING_SIZE;
 
@@ -3567,7 +3571,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
         skge->port = port;
 
         /* Only used for Genesis XMAC */
-        INIT_DELAYED_WORK(&skge->link_thread, xm_link_timer);
+        setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
 
         if (hw->chip_id != CHIP_ID_GENESIS) {
                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
@@ -3649,9 +3653,9 @@ static int __devinit skge_probe(struct pci_dev *pdev,
         }
 
         hw->pdev = pdev;
-        mutex_init(&hw->phy_mutex);
-        INIT_WORK(&hw->phy_work, skge_extirq);
         spin_lock_init(&hw->hw_lock);
+        spin_lock_init(&hw->phy_lock);
+        tasklet_init(&hw->phy_task, &skge_extirq, (unsigned long) hw);
 
         hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
         if (!hw->regs) {
@@ -3737,6 +3741,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
         dev0 = hw->dev[0];
         unregister_netdev(dev0);
 
+        tasklet_disable(&hw->phy_task);
+
         spin_lock_irq(&hw->hw_lock);
         hw->intr_mask = 0;
         skge_write32(hw, B0_IMSK, 0);
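A note on the second conversion in this diff: the XMAC link poll moves from a delayed work item to a kernel timer (setup_timer/mod_timer/del_timer_sync), since the internal PHY cannot raise a carrier interrupt and has to be polled. A hedged sketch of that pattern follows, using the timer API of this kernel generation and made-up my_* names; it is illustrative only, not the driver's code.

/*
 * Illustrative sketch only -- not skge code.  It uses the timer API of
 * that era (callback takes an unsigned long); the my_* names and
 * MY_POLL_INTERVAL are hypothetical.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

#define MY_POLL_INTERVAL        (HZ / 10)       /* poll ten times a second */

struct my_port {
        struct timer_list link_timer;
        int up;                                 /* set while the port is open */
};

/* Timer callback: check the link, then re-arm while the port stays up. */
static void my_link_poll(unsigned long arg)
{
        struct my_port *p = (struct my_port *) arg;

        /* ... read PHY/MAC status and update carrier state here ... */

        if (p->up)
                mod_timer(&p->link_timer, jiffies + MY_POLL_INTERVAL);
}

static void my_port_init(struct my_port *p)
{
        setup_timer(&p->link_timer, my_link_poll, (unsigned long) p);
}

static void my_port_open(struct my_port *p)
{
        p->up = 1;
        mod_timer(&p->link_timer, jiffies + MY_POLL_INTERVAL);  /* start polling */
}

static void my_port_close(struct my_port *p)
{
        p->up = 0;
        del_timer_sync(&p->link_timer);         /* wait out a running callback */
}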