-rw-r--r--  drivers/net/skge.c  171
-rw-r--r--  drivers/net/skge.h    1
2 files changed, 102 insertions, 70 deletions

diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 7d8e2ab80604..f377c259a398 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2303,21 +2303,20 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
-	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
 	u64 map;
+	unsigned long flags;
 
 	skb = skb_padto(skb, ETH_ZLEN);
 	if (!skb)
 		return NETDEV_TX_OK;
 
-	if (!spin_trylock(&skge->tx_lock)) {
+	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
 		/* Collision - tell upper layer to requeue */
 		return NETDEV_TX_LOCKED;
-	}
 
 	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
@@ -2326,12 +2325,13 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
 			       dev->name);
 		}
-		spin_unlock(&skge->tx_lock);
+		spin_unlock_irqrestore(&skge->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
-	e = ring->to_use;
+	e = skge->tx_ring.to_use;
 	td = e->desc;
+	BUG_ON(td->control & BMU_OWN);
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2372,8 +2372,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 					   frag->size, PCI_DMA_TODEVICE);
 
 			e = e->next;
-			e->skb = NULL;
+			e->skb = skb;
 			tf = e->desc;
+			BUG_ON(tf->control & BMU_OWN);
+
 			tf->dma_lo = map;
 			tf->dma_hi = (u64) map >> 32;
 			pci_unmap_addr_set(e, mapaddr, map);
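The three hunks above make skge_xmit_frame() safe to call with interrupts disabled: spin_trylock()/spin_unlock() become spin_trylock_irqsave()/spin_unlock_irqrestore(), while the behaviour on contention stays the same — return NETDEV_TX_LOCKED and let the stack requeue the packet. A minimal user-space sketch of that trylock-or-requeue pattern; pthread_mutex_trylock() stands in for the kernel spinlock API, which is an illustrative assumption only.

/* Trylock-or-requeue pattern, user-space model (not the kernel API). */
#include <pthread.h>
#include <stdio.h>

enum tx_status { TX_OK, TX_LOCKED };

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static enum tx_status xmit_frame(const char *pkt)
{
	/* pthread_mutex_trylock() stands in for spin_trylock_irqsave(). */
	if (pthread_mutex_trylock(&tx_lock) != 0)
		return TX_LOCKED;   /* collision: the caller requeues the packet */

	printf("queued %s\n", pkt);
	pthread_mutex_unlock(&tx_lock);
	return TX_OK;
}

int main(void)
{
	return xmit_frame("skb0") == TX_OK ? 0 : 1;
}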
@@ -2390,56 +2392,68 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
 
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
-	if (netif_msg_tx_queued(skge))
+	if (unlikely(netif_msg_tx_queued(skge)))
 		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
-		       dev->name, e - ring->start, skb->len);
+		       dev->name, e - skge->tx_ring.start, skb->len);
 
-	ring->to_use = e->next;
+	skge->tx_ring.to_use = e->next;
 	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
 
-	mmiowb();
-	spin_unlock(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 
 	dev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
 }
 
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+			 u32 control)
 {
 	struct pci_dev *pdev = skge->hw->pdev;
-	struct skge_element *e;
 
-	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
-		struct sk_buff *skb = e->skb;
-		int i;
+	BUG_ON(!e->skb);
 
-		e->skb = NULL;
+	/* skb header vs. fragment */
+	if (control & BMU_STF)
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+				 pci_unmap_len(e, maplen),
+				 PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+			       pci_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
 
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			e = e->next;
-			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-		}
+	if (control & BMU_EOF) {
+		if (unlikely(netif_msg_tx_done(skge)))
+			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+			       skge->netdev->name, e - skge->tx_ring.start);
 
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(e->skb);
 	}
-	skge->tx_ring.to_clean = e;
+	e->skb = NULL;
 }
 
+/* Free all buffers in transmit ring */
 static void skge_tx_clean(struct skge_port *skge)
 {
+	struct skge_element *e;
+	unsigned long flags;
 
-	spin_lock_bh(&skge->tx_lock);
-	skge_tx_complete(skge, skge->tx_ring.to_use);
+	spin_lock_irqsave(&skge->tx_lock, flags);
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+		skge_tx_free(skge, e, td->control);
+		td->control = 0;
+	}
+
+	skge->tx_ring.to_clean = e;
 	netif_wake_queue(skge->netdev);
-	spin_unlock_bh(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
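The new skge_tx_free() above keys its cleanup off the descriptor control word: the BMU_STF element maps the skb header (undone with pci_unmap_single()), the other elements are fragments (pci_unmap_page()), and only the BMU_EOF element frees the skb — which is why every element of a frame now stores the skb pointer. A simplified user-space model of that flag-driven cleanup; the flag values, struct layout, and printf stand-ins are illustrative assumptions, not the driver's definitions.

/* Per-element cleanup driven by start/end-of-frame flags (simplified model). */
#include <stdio.h>
#include <stdlib.h>

#define STF      0x1   /* start of frame: element maps the packet header */
#define EOF_FLAG 0x2   /* end of frame: last element, frees the packet   */

struct element {
	void *packet;      /* same packet pointer stored in every element */
	unsigned control;  /* flags copied from the descriptor            */
};

static void tx_free(struct element *e)
{
	if (e->control & STF)
		printf("unmap header mapping\n");   /* pci_unmap_single() in the driver */
	else
		printf("unmap fragment mapping\n"); /* pci_unmap_page() in the driver   */

	if (e->control & EOF_FLAG) {
		free(e->packet);                    /* dev_kfree_skb_any() in the driver */
		printf("packet freed\n");
	}
	e->packet = NULL;
}

int main(void)
{
	void *pkt = malloc(64);
	struct element ring[3] = {
		{ pkt, STF }, { pkt, 0 }, { pkt, EOF_FLAG },
	};

	for (int i = 0; i < 3; i++)
		tx_free(&ring[i]);
	return 0;
}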
@@ -2665,32 +2679,28 @@ resubmit:
 	return NULL;
 }
 
-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e, *last;
+	struct skge_element *e;
+
+	rmb();
 
 	spin_lock(&skge->tx_lock);
-	last = ring->to_clean;
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 
 		if (td->control & BMU_OWN)
 			break;
 
-		if (td->control & BMU_EOF) {
-			last = e->next;
-			if (unlikely(netif_msg_tx_done(skge)))
-				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
-				       skge->netdev->name, e - ring->start);
-		}
+		skge_tx_free(skge, e, td->control);
 	}
+	skge->tx_ring.to_clean = e;
 
-	skge_tx_complete(skge, last);
-
-	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-	if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+	if (netif_queue_stopped(skge->netdev)
+	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
 		netif_wake_queue(skge->netdev);
 
 	spin_unlock(&skge->tx_lock);
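skge_txirq() above reclaims transmit slots by walking from to_clean toward to_use and stopping at the first descriptor the adapter still owns (BMU_OWN set), then waking the queue only if it was stopped and enough slots are free. A rough user-space model of that walk and of a skge_avail()-style free-slot count, under simplified assumptions about the ring layout.

/* Ownership-bit completion walk over a descriptor ring (simplified model). */
#include <stdio.h>

#define OWN 0x80000000u   /* stands in for BMU_OWN */
#define RING_SIZE 8

static unsigned desc[RING_SIZE];   /* one control word per slot */
static unsigned to_clean, to_use;

static unsigned avail(void)
{
	/* free slots between consumer and producer, modulo ring size */
	return (to_clean + RING_SIZE - to_use - 1) % RING_SIZE;
}

static void tx_complete(void)
{
	while (to_clean != to_use) {
		if (desc[to_clean] & OWN)
			break;                    /* device not done with it yet */
		printf("reclaim slot %u\n", to_clean);
		to_clean = (to_clean + 1) % RING_SIZE;
	}
}

int main(void)
{
	/* three frames queued; the hardware has completed the first two */
	desc[0] = 0; desc[1] = 0; desc[2] = OWN;
	to_use = 3;

	tx_complete();
	printf("slots available: %u\n", avail());
	return 0;
}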
@@ -2705,8 +2715,6 @@ static int skge_poll(struct net_device *dev, int *budget)
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
-	skge_tx_done(skge);
-
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2738,10 +2746,12 @@ static int skge_poll(struct net_device *dev, int *budget)
 		return 1; /* not done */
 
 	netif_rx_complete(dev);
-	mmiowb();
 
-	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= rxirqmask[skge->port];
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	mmiowb();
+	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
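The poll routine now re-enables its receive interrupt source under the new hw_lock, because hw->intr_mask is a shared read-modify-write target for the NAPI poll, the hard interrupt handler, and the PHY work item. A minimal user-space sketch of that idea; the pthread mutex stands in for the spinlock and the printf for the B0_IMSK register write, both assumptions for illustration.

/* Serializing read-modify-write of a shared interrupt mask (model). */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t intr_mask;

static void write_imsk(uint32_t v)   /* models skge_write32(hw, B0_IMSK, v) */
{
	printf("B0_IMSK <- 0x%08x\n", v);
}

static void enable_source(uint32_t bit)
{
	pthread_mutex_lock(&hw_lock);
	intr_mask |= bit;            /* read-modify-write must not interleave */
	write_imsk(intr_mask);
	pthread_mutex_unlock(&hw_lock);
}

int main(void)
{
	enable_source(0x2);   /* e.g. re-enable a receive source after NAPI poll */
	enable_source(0x40);  /* e.g. re-enable the PHY source after the work item */
	return 0;
}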
@@ -2871,8 +2881,10 @@ static void skge_extirq(void *arg)
 	}
 	mutex_unlock(&hw->phy_mutex);
 
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2885,54 +2897,68 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 	if (status == 0)
 		return IRQ_NONE;
 
+	spin_lock(&hw->hw_lock);
+	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
 		schedule_work(&hw->phy_work);
 	}
 
-	if (status & (IS_R1_F|IS_XA1_F)) {
-		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
-		netif_rx_schedule(hw->dev[0]);
+	if (status & IS_XA1_F) {
+		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+		skge_txirq(hw->dev[0]);
 	}
 
-	if (status & (IS_R2_F|IS_XA2_F)) {
-		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
-		netif_rx_schedule(hw->dev[1]);
+	if (status & IS_R1_F) {
+		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
+		hw->intr_mask &= ~IS_R1_F;
+		netif_rx_schedule(hw->dev[0]);
 	}
 
-	if (likely((status & hw->intr_mask) == 0))
-		return IRQ_HANDLED;
+	if (status & IS_PA_TO_TX1)
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
 
 	if (status & IS_PA_TO_RX1) {
 		struct skge_port *skge = netdev_priv(hw->dev[0]);
-		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
-	}
 
-	if (status & IS_PA_TO_RX2) {
-		struct skge_port *skge = netdev_priv(hw->dev[1]);
 		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
 	}
 
-	if (status & IS_PA_TO_TX1)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
-
-	if (status & IS_PA_TO_TX2)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
 
 	if (status & IS_MAC1)
 		skge_mac_intr(hw, 0);
 
-	if (status & IS_MAC2)
-		skge_mac_intr(hw, 1);
+	if (hw->dev[1]) {
+		if (status & IS_XA2_F) {
+			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+			skge_txirq(hw->dev[1]);
+		}
+
+		if (status & IS_R2_F) {
+			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+			hw->intr_mask &= ~IS_R2_F;
+			netif_rx_schedule(hw->dev[1]);
+		}
+
+		if (status & IS_PA_TO_RX2) {
+			struct skge_port *skge = netdev_priv(hw->dev[1]);
+			++skge->net_stats.rx_over_errors;
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		}
+
+		if (status & IS_PA_TO_TX2)
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+		if (status & IS_MAC2)
+			skge_mac_intr(hw, 1);
+	}
 
 	if (status & IS_HW_ERR)
 		skge_error_irq(hw);
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
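The reworked skge_intr() above first masks the raw status with the software copy of the interrupt mask, handles port 1 unconditionally, and only touches the second port's queues when hw->dev[1] exists. A compact user-space sketch of that structure; the bit names and values here are invented for the example and are not the driver's IS_* constants.

/* Interrupt dispatch: mask against software copy, guard the second port (model). */
#include <stdint.h>
#include <stdio.h>

#define IRQ_TX1 0x01
#define IRQ_RX1 0x02
#define IRQ_TX2 0x04
#define IRQ_RX2 0x08

struct hw {
	uint32_t intr_mask;
	const char *dev[2];   /* second entry NULL on single-port boards */
};

static void service(struct hw *hw, uint32_t status)
{
	status &= hw->intr_mask;              /* ignore sources we masked off */

	if (status & IRQ_TX1)
		printf("%s: tx done\n", hw->dev[0]);
	if (status & IRQ_RX1)
		printf("%s: rx ready\n", hw->dev[0]);

	if (hw->dev[1]) {                     /* only touch port 2 if present */
		if (status & IRQ_TX2)
			printf("%s: tx done\n", hw->dev[1]);
		if (status & IRQ_RX2)
			printf("%s: rx ready\n", hw->dev[1]);
	}
}

int main(void)
{
	struct hw hw = { IRQ_TX1 | IRQ_RX1, { "eth0", NULL } };
	service(&hw, IRQ_TX1 | IRQ_RX2);      /* RX2 is ignored: masked off, no dev */
	return 0;
}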
@@ -3083,6 +3109,7 @@ static int skge_reset(struct skge_hw *hw)
 	else
 		hw->ram_size = t8 * 4096;
 
+	spin_lock_init(&hw->hw_lock);
 	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
 	if (hw->ports > 1)
 		hw->intr_mask |= IS_PORT_2;
@@ -3389,7 +3416,11 @@ static void __devexit skge_remove(struct pci_dev *pdev)
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
+	spin_unlock_irq(&hw->hw_lock);
+
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
 	skge_write8(hw, B0_CTST, CS_RST_SET);
 
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 46bd950612e5..ed19ff47ce11 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2388,6 +2388,7 @@ struct skge_ring {
 struct skge_hw {
 	void __iomem *regs;
 	struct pci_dev *pdev;
+	spinlock_t hw_lock;
 	u32 intr_mask;
 	struct net_device *dev[2];
 