Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--	drivers/net/skge.c	232
1 files changed, 136 insertions(+), 96 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5ca5a1b546a1..536dd1cf7f79 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,12 +44,13 @@
 #include "skge.h"
 
 #define DRV_NAME	"skge"
-#define DRV_VERSION	"1.5"
+#define DRV_VERSION	"1.6"
 #define PFX		DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
 #define DEFAULT_RX_RING_SIZE	512
 #define MAX_TX_RING_SIZE	1024
+#define TX_LOW_WATER		(MAX_SKB_FRAGS + 1)
 #define MAX_RX_RING_SIZE	4096
 #define RX_COPY_THRESHOLD	128
 #define RX_BUF_SIZE		1536
@@ -401,7 +402,7 @@ static int skge_set_ring_param(struct net_device *dev,
 	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
-	    p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
+	    p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
 		return -EINVAL;
 
 	skge->rx_ring.count = p->rx_pending;
@@ -603,7 +604,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		switch (mode) {
 		case LED_MODE_OFF:
@@ -663,7 +664,7 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
 					  PHY_M_LED_MO_RX(MO_LED_ON));
 		}
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 }
 
 /* blink LED's for finding board */
@@ -2038,7 +2039,7 @@ static void skge_phy_reset(struct skge_port *skge)
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		genesis_reset(hw, port);
 		genesis_mac_init(hw, port);
@@ -2046,7 +2047,7 @@ static void skge_phy_reset(struct skge_port *skge)
 		yukon_reset(hw, port);
 		yukon_init(hw, port);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 }
 
 /* Basic MII support */
@@ -2067,12 +2068,12 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		/* fallthru */
 	case SIOCGMIIREG: {
 		u16 val = 0;
-		spin_lock_bh(&hw->phy_lock);
+		mutex_lock(&hw->phy_mutex);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
 		else
 			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-		spin_unlock_bh(&hw->phy_lock);
+		mutex_unlock(&hw->phy_mutex);
 		data->val_out = val;
 		break;
 	}
@@ -2081,14 +2082,14 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&hw->phy_lock);
+		mutex_lock(&hw->phy_mutex);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 					   data->val_in);
 		else
 			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 					   data->val_in);
-		spin_unlock_bh(&hw->phy_lock);
+		mutex_unlock(&hw->phy_mutex);
 		break;
 	}
 	return err;
@@ -2191,12 +2192,12 @@ static int skge_up(struct net_device *dev)
 		goto free_rx_ring;
 
 	/* Initialize MAC */
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
 	else
 		yukon_mac_init(hw, port);
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	/* Configure RAMbuffers */
 	chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2302,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
-	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
 	u64 map;
+	unsigned long flags;
 
 	skb = skb_padto(skb, ETH_ZLEN);
 	if (!skb)
 		return NETDEV_TX_OK;
 
-	if (!spin_trylock(&skge->tx_lock)) {
+	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
 		/* Collision - tell upper layer to requeue */
 		return NETDEV_TX_LOCKED;
-	}
 
 	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
@@ -2325,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
 			       dev->name);
 		}
-		spin_unlock(&skge->tx_lock);
+		spin_unlock_irqrestore(&skge->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
-	e = ring->to_use;
+	e = skge->tx_ring.to_use;
 	td = e->desc;
+	BUG_ON(td->control & BMU_OWN);
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2371,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 					   frag->size, PCI_DMA_TODEVICE);
 
 			e = e->next;
-			e->skb = NULL;
+			e->skb = skb;
 			tf = e->desc;
+			BUG_ON(tf->control & BMU_OWN);
+
 			tf->dma_lo = map;
 			tf->dma_hi = (u64) map >> 32;
 			pci_unmap_addr_set(e, mapaddr, map);
@@ -2389,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
-	if (netif_msg_tx_queued(skge))
+	if (unlikely(netif_msg_tx_queued(skge)))
 		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
-		       dev->name, e - ring->start, skb->len);
+		       dev->name, e - skge->tx_ring.start, skb->len);
 
-	ring->to_use = e->next;
-	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
+	skge->tx_ring.to_use = e->next;
+	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
 
-	mmiowb();
-	spin_unlock(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 
 	dev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
 }
 
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this reing element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+			 u32 control)
 {
 	struct pci_dev *pdev = skge->hw->pdev;
-	struct skge_element *e;
 
-	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
-		struct sk_buff *skb = e->skb;
-		int i;
+	BUG_ON(!e->skb);
 
-		e->skb = NULL;
+	/* skb header vs. fragment */
+	if (control & BMU_STF)
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+				 pci_unmap_len(e, maplen),
+				 PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+			       pci_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
 
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			e = e->next;
-			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-		}
+	if (control & BMU_EOF) {
+		if (unlikely(netif_msg_tx_done(skge)))
+			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+			       skge->netdev->name, e - skge->tx_ring.start);
 
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(e->skb);
 	}
-	skge->tx_ring.to_clean = e;
+	e->skb = NULL;
 }
 
+/* Free all buffers in transmit ring */
 static void skge_tx_clean(struct skge_port *skge)
 {
+	struct skge_element *e;
+	unsigned long flags;
+
+	spin_lock_irqsave(&skge->tx_lock, flags);
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+		skge_tx_free(skge, e, td->control);
+		td->control = 0;
+	}
 
-	spin_lock_bh(&skge->tx_lock);
-	skge_tx_complete(skge, skge->tx_ring.to_use);
+	skge->tx_ring.to_clean = e;
 	netif_wake_queue(skge->netdev);
-	spin_unlock_bh(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -2664,32 +2679,28 @@ resubmit:
 	return NULL;
 }
 
-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e, *last;
+	struct skge_element *e;
+
+	rmb();
 
 	spin_lock(&skge->tx_lock);
-	last = ring->to_clean;
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 
 		if (td->control & BMU_OWN)
 			break;
 
-		if (td->control & BMU_EOF) {
-			last = e->next;
-			if (unlikely(netif_msg_tx_done(skge)))
-				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
-				       skge->netdev->name, e - ring->start);
-		}
+		skge_tx_free(skge, e, td->control);
 	}
+	skge->tx_ring.to_clean = e;
 
-	skge_tx_complete(skge, last);
-
-	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
+	if (netif_queue_stopped(skge->netdev)
+	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
 		netif_wake_queue(skge->netdev);
 
 	spin_unlock(&skge->tx_lock);
@@ -2704,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
-	skge_tx_done(skge);
-
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2737,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
 		return 1; /* not done */
 
 	netif_rx_complete(dev);
-	mmiowb();
 
-	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= rxirqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	mmiowb();
+	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
@@ -2847,16 +2858,16 @@ static void skge_error_irq(struct skge_hw *hw)
 }
 
 /*
- * Interrupt from PHY are handled in tasklet (soft irq)
+ * Interrupt from PHY are handled in work queue
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(unsigned long data)
+static void skge_extirq(void *arg)
 {
-	struct skge_hw *hw = (struct skge_hw *) data;
+	struct skge_hw *hw = arg;
 	int port;
 
-	spin_lock(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
 		struct skge_port *skge = netdev_priv(dev);
@@ -2868,10 +2879,12 @@ static void skge_extirq(unsigned long data)
 			bcom_phy_intr(skge);
 		}
 	}
-	spin_unlock(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2884,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	if (status == 0)
 		return IRQ_NONE;
 
+	spin_lock(&hw->hw_lock);
+	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		tasklet_schedule(&hw->ext_tasklet);
+		schedule_work(&hw->phy_work);
 	}
 
-	if (status & (IS_R1_F|IS_XA1_F)) {
-		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
-		netif_rx_schedule(hw->dev[0]);
+	if (status & IS_XA1_F) {
+		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+		skge_txirq(hw->dev[0]);
 	}
 
-	if (status & (IS_R2_F|IS_XA2_F)) {
-		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
-		netif_rx_schedule(hw->dev[1]);
+	if (status & IS_R1_F) {
+		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
+		hw->intr_mask &= ~IS_R1_F;
+		netif_rx_schedule(hw->dev[0]);
 	}
 
-	if (likely((status & hw->intr_mask) == 0))
-		return IRQ_HANDLED;
+	if (status & IS_PA_TO_TX1)
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
 
 	if (status & IS_PA_TO_RX1) {
 		struct skge_port *skge = netdev_priv(hw->dev[0]);
-		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
-	}
 
-	if (status & IS_PA_TO_RX2) {
-		struct skge_port *skge = netdev_priv(hw->dev[1]);
 		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
 	}
 
-	if (status & IS_PA_TO_TX1)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
-
-	if (status & IS_PA_TO_TX2)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
 
 	if (status & IS_MAC1)
 		skge_mac_intr(hw, 0);
 
-	if (status & IS_MAC2)
-		skge_mac_intr(hw, 1);
+	if (hw->dev[1]) {
+		if (status & IS_XA2_F) {
+			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+			skge_txirq(hw->dev[1]);
+		}
+
+		if (status & IS_R2_F) {
+			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+			hw->intr_mask &= ~IS_R2_F;
+			netif_rx_schedule(hw->dev[1]);
+		}
+
+		if (status & IS_PA_TO_RX2) {
+			struct skge_port *skge = netdev_priv(hw->dev[1]);
+			++skge->net_stats.rx_over_errors;
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		}
+
+		if (status & IS_PA_TO_TX2)
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+		if (status & IS_MAC2)
+			skge_mac_intr(hw, 1);
+	}
 
 	if (status & IS_HW_ERR)
 		skge_error_irq(hw);
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
@@ -2957,7 +2984,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
 		    dev->dev_addr, ETH_ALEN);
@@ -2970,7 +2997,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
 		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
 		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3082,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
+	spin_lock_init(&hw->hw_lock);
 	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
 	if (hw->ports > 1)
 		hw->intr_mask |= IS_PORT_2;
@@ -3150,14 +3178,14 @@ static int skge_reset(struct skge_hw *hw)
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			genesis_reset(hw, i);
 		else
 			yukon_reset(hw, i);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3305,8 +3333,8 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	}
 
 	hw->pdev = pdev;
-	spin_lock_init(&hw->phy_lock);
-	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
+	mutex_init(&hw->phy_mutex);
+	INIT_WORK(&hw->phy_work, skge_extirq, hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3334,6 +3362,14 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
 		goto err_out_led_off;
 
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
+		       pci_name(pdev));
+		err = -EIO;
+		goto err_out_free_netdev;
+	}
+
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3388,11 +3424,15 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
+	spin_unlock_irq(&hw->hw_lock);
+
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
 	skge_write8(hw, B0_CTST, CS_RST_SET);
 
-	tasklet_kill(&hw->ext_tasklet);
+	flush_scheduled_work();
 
 	free_irq(pdev->irq, hw);
 	pci_release_regions(pdev);