Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r--    drivers/net/skge.c    211
1 file changed, 100 insertions(+), 111 deletions(-)
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 82200bfaa8ed..fba8b7455d8b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME                "skge"
-#define DRV_VERSION             "1.6"
+#define DRV_VERSION             "1.8"
 #define PFX                     DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE    128
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
 static void skge_phy_reset(struct skge_port *skge);
-static void skge_tx_clean(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
 static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F };
 
 static int skge_get_regs_len(struct net_device *dev)
 {
@@ -516,10 +517,7 @@ static int skge_set_pauseparam(struct net_device *dev,
 /* Chip internal frequency for clock calculations */
 static inline u32 hwkhz(const struct skge_hw *hw)
 {
-        if (hw->chip_id == CHIP_ID_GENESIS)
-                return 53215; /* or:  53.125 MHz */
-        else
-                return 78215; /* or:  78.125 MHz */
+        return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
 }
 
 /* Chip HZ to microseconds */
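The old constants transposed the digits of the frequencies named in their own comments (53.125 MHz and 78.125 MHz), skewing every interval computed from them. For reference, a minimal sketch of the conversion that consumes this value, modeled on the skge_usecs2clk() helper that follows in skge.c (illustrative, not a verbatim quote of the file):

        /* Microseconds to chip clock ticks. With the corrected constants,
         * a 100 us interval is 53125*100/1000 = 5312 ticks on Genesis,
         * not 5321 as the old transposed 53215 produced.
         */
        static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
        {
                return hwkhz(hw) * usec / 1000;
        }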
@@ -821,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge)
 /* Allocate buffers for receive ring
  * For receive: to_clean is next received frame.
  */
-static int skge_rx_fill(struct skge_port *skge)
+static int skge_rx_fill(struct net_device *dev)
 {
+        struct skge_port *skge = netdev_priv(dev);
         struct skge_ring *ring = &skge->rx_ring;
         struct skge_element *e;
 
@@ -830,7 +829,8 @@ static int skge_rx_fill(struct skge_port *skge)
         do {
                 struct sk_buff *skb;
 
-                skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL);
+                skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN,
+                                         GFP_KERNEL);
                 if (!skb)
                         return -ENOMEM;
 
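Switching to __netdev_alloc_skb()/netdev_alloc_skb() lets the core reserve its standard headroom and set skb->dev, which is why the explicit "skb->dev = skge->netdev;" assignment disappears in a later hunk. Roughly what the helper does in this kernel generation, as a sketch (the real implementation is in net/core/skbuff.c; the _sketch name is hypothetical):

        static struct sk_buff *netdev_alloc_skb_sketch(struct net_device *dev,
                                                       unsigned int length)
        {
                struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, GFP_ATOMIC);

                if (likely(skb)) {
                        skb_reserve(skb, NET_SKB_PAD);  /* standard headroom */
                        skb->dev = dev;                 /* done by the core now */
                }
                return skb;
        }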
@@ -2181,7 +2181,7 @@ static int skge_up(struct net_device *dev)
         if (err)
                 goto free_pci_mem;
 
-        err = skge_rx_fill(skge);
+        err = skge_rx_fill(dev);
         if (err)
                 goto free_rx_ring;
 
@@ -2214,6 +2214,7 @@ static int skge_up(struct net_device *dev)
         skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
         skge_led(skge, LED_MODE_ON);
 
+        netif_poll_enable(dev);
         return 0;
 
  free_rx_ring:
@@ -2282,7 +2283,8 @@ static int skge_down(struct net_device *dev)
 
         skge_led(skge, LED_MODE_OFF);
 
-        skge_tx_clean(skge);
+        netif_poll_disable(dev);
+        skge_tx_clean(dev);
         skge_rx_clean(skge);
 
         kfree(skge->rx_ring.start);
@@ -2307,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
         int i;
         u32 control, len;
         u64 map;
-        unsigned long flags;
 
         if (skb_padto(skb, ETH_ZLEN))
                 return NETDEV_TX_OK;
 
-        if (!spin_trylock_irqsave(&skge->tx_lock, flags))
-                /* Collision - tell upper layer to requeue */
-                return NETDEV_TX_LOCKED;
-
-        if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
-                if (!netif_queue_stopped(dev)) {
-                        netif_stop_queue(dev);
-
-                        printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
-                               dev->name);
-                }
-                spin_unlock_irqrestore(&skge->tx_lock, flags);
+        if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1))
                 return NETDEV_TX_BUSY;
-        }
 
         e = skge->tx_ring.to_use;
         td = e->desc;
@@ -2400,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
                 netif_stop_queue(dev);
         }
 
-        spin_unlock_irqrestore(&skge->tx_lock, flags);
-
         dev->trans_start = jiffies;
 
         return NETDEV_TX_OK;
@@ -2431,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
                         printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
                                skge->netdev->name, e - skge->tx_ring.start);
 
-                dev_kfree_skb_any(e->skb);
+                dev_kfree_skb(e->skb);
         }
         e->skb = NULL;
 }
 
 /* Free all buffers in transmit ring */
-static void skge_tx_clean(struct skge_port *skge)
+static void skge_tx_clean(struct net_device *dev)
 {
+        struct skge_port *skge = netdev_priv(dev);
         struct skge_element *e;
-        unsigned long flags;
 
-        spin_lock_irqsave(&skge->tx_lock, flags);
+        netif_tx_lock_bh(dev);
         for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
                 struct skge_tx_desc *td = e->desc;
                 skge_tx_free(skge, e, td->control);
@@ -2450,8 +2437,8 @@ static void skge_tx_clean(struct skge_port *skge)
         }
 
         skge->tx_ring.to_clean = e;
-        netif_wake_queue(skge->netdev);
-        spin_unlock_irqrestore(&skge->tx_lock, flags);
+        netif_wake_queue(dev);
+        netif_tx_unlock_bh(dev);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
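With NETIF_F_LLTX dropped (see the skge_devinit() hunk below), dev_queue_xmit() serializes calls into skge_xmit_frame() under the device's own transmit lock, so the driver-private skge->tx_lock and the NETDEV_TX_LOCKED collision path go away; anything else that touches the tx ring, like skge_tx_clean() here, takes the same lock through netif_tx_lock_bh(). For context, the core helper is approximately (a sketch of the era's netif_tx_lock(); the real definition lives in include/linux/netdevice.h):

        static inline void netif_tx_lock(struct net_device *dev)
        {
                spin_lock(&dev->_xmit_lock);    /* lock dev_queue_xmit() uses */
                dev->xmit_lock_owner = smp_processor_id();
        }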
@@ -2462,7 +2449,7 @@ static void skge_tx_timeout(struct net_device *dev)
         printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
 
         skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
-        skge_tx_clean(skge);
+        skge_tx_clean(dev);
 }
 
 static int skge_change_mtu(struct net_device *dev, int new_mtu)
@@ -2585,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
 /* Get receive buffer from descriptor.
  * Handles copy of small buffers and reallocation failures
  */
-static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
-                                          struct skge_element *e,
-                                          u32 control, u32 status, u16 csum)
+static struct sk_buff *skge_rx_get(struct net_device *dev,
+                                   struct skge_element *e,
+                                   u32 control, u32 status, u16 csum)
 {
+        struct skge_port *skge = netdev_priv(dev);
         struct sk_buff *skb;
         u16 len = control & BMU_BBC;
 
         if (unlikely(netif_msg_rx_status(skge)))
                 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
-                       skge->netdev->name, e - skge->rx_ring.start,
+                       dev->name, e - skge->rx_ring.start,
                        status, len);
 
         if (len > skge->rx_buf_size)
@@ -2610,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
                 goto error;
 
         if (len < RX_COPY_THRESHOLD) {
-                skb = alloc_skb(len + 2, GFP_ATOMIC);
+                skb = netdev_alloc_skb(dev, len + 2);
                 if (!skb)
                         goto resubmit;
 
@@ -2625,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
                 skge_rx_reuse(e, skge->rx_buf_size);
         } else {
                 struct sk_buff *nskb;
-                nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC);
+                nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN);
                 if (!nskb)
                         goto resubmit;
 
@@ -2640,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
         }
 
         skb_put(skb, len);
-        skb->dev = skge->netdev;
         if (skge->rx_csum) {
                 skb->csum = csum;
                 skb->ip_summed = CHECKSUM_HW;
         }
 
-        skb->protocol = eth_type_trans(skb, skge->netdev);
+        skb->protocol = eth_type_trans(skb, dev);
 
         return skb;
 error:
 
         if (netif_msg_rx_err(skge))
                 printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
-                       skge->netdev->name, e - skge->rx_ring.start,
+                       dev->name, e - skge->rx_ring.start,
                        control, status);
 
         if (skge->hw->chip_id == CHIP_ID_GENESIS) {
@@ -2678,15 +2665,15 @@ resubmit:
 }
 
 /* Free all buffers in Tx ring which are no longer owned by device */
-static void skge_txirq(struct net_device *dev)
+static void skge_tx_done(struct net_device *dev)
 {
         struct skge_port *skge = netdev_priv(dev);
         struct skge_ring *ring = &skge->tx_ring;
         struct skge_element *e;
 
-        rmb();
+        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
-        spin_lock(&skge->tx_lock);
+        netif_tx_lock(dev);
         for (e = ring->to_clean; e != ring->to_use; e = e->next) {
                 struct skge_tx_desc *td = e->desc;
 
@@ -2697,11 +2684,10 @@ static void skge_txirq(struct net_device *dev)
         }
         skge->tx_ring.to_clean = e;
 
-        if (netif_queue_stopped(skge->netdev)
-            && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
-                netif_wake_queue(skge->netdev);
+        if (skge_avail(&skge->tx_ring) > TX_LOW_WATER)
+                netif_wake_queue(dev);
 
-        spin_unlock(&skge->tx_lock);
+        netif_tx_unlock(dev);
 }
 
 static int skge_poll(struct net_device *dev, int *budget)
@@ -2713,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget)
         int to_do = min(dev->quota, *budget);
         int work_done = 0;
 
+        skge_tx_done(dev);
+
+        skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
+
         for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
                 struct skge_rx_desc *rd = e->desc;
                 struct sk_buff *skb;
@@ -2723,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget)
                 if (control & BMU_OWN)
                         break;
 
-                skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
+                skb = skge_rx_get(dev, e, control, rd->status, rd->csum2);
                 if (likely(skb)) {
                         dev->last_rx = jiffies;
                         netif_receive_skb(skb);
@@ -2743,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget)
         if (work_done >= to_do)
                 return 1; /* not done */
 
-        netif_rx_complete(dev);
-
         spin_lock_irq(&hw->hw_lock);
-        hw->intr_mask |= rxirqmask[skge->port];
+        __netif_rx_complete(dev);
+        hw->intr_mask |= irqmask[skge->port];
         skge_write32(hw, B0_IMSK, hw->intr_mask);
-        mmiowb();
+        skge_read32(hw, B0_IMSK);
         spin_unlock_irq(&hw->hw_lock);
 
         return 0;
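Transmit completion now runs from the NAPI poll routine instead of a hard-IRQ path: the interrupt handler only masks the port's combined rx/tx sources and schedules polling, and one softirq pass reaps both rings. A condensed outline of the new flow (hypothetical pseudocode summarizing the functions above, not driver text):

        static int skge_poll_outline(struct net_device *dev, int *budget)
        {
                skge_tx_done(dev);      /* reap completed tx descriptors first */

                /* receive up to min(dev->quota, *budget) frames; if the
                 * quota runs out, return 1 to stay on the poll list
                 */

                /* otherwise __netif_rx_complete(dev) and restore
                 * irqmask[port] while holding hw->hw_lock, so re-enabling
                 * cannot race the interrupt handler
                 */
                return 0;
        }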
@@ -2882,6 +2871,7 @@ static void skge_extirq(void *arg)
         spin_lock_irq(&hw->hw_lock);
         hw->intr_mask |= IS_EXT_REG;
         skge_write32(hw, B0_IMSK, hw->intr_mask);
+        skge_read32(hw, B0_IMSK);
         spin_unlock_irq(&hw->hw_lock);
 }
 
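The skge_read32(hw, B0_IMSK) added after each interrupt-mask write is the standard PCI posted-write flush: the MMIO write may linger in host-bridge buffers, and a read from the same device forces it to reach the chip before the lock is dropped (it also supersedes the weaker mmiowb() removed from skge_poll). Stated as a hypothetical helper, which the patch open-codes at each site:

        static inline void skge_write_imask_flush(struct skge_hw *hw, u32 mask)
        {
                skge_write32(hw, B0_IMSK, mask);        /* may be posted */
                skge_read32(hw, B0_IMSK);               /* force it to the chip */
        }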
@@ -2889,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
 {
         struct skge_hw *hw = dev_id;
         u32 status;
+        int handled = 0;
 
+        spin_lock(&hw->hw_lock);
         /* Reading this register masks IRQ */
         status = skge_read32(hw, B0_SP_ISRC);
-        if (status == 0)
-                return IRQ_NONE;
+        if (status == 0 || status == ~0)
+                goto out;
 
-        spin_lock(&hw->hw_lock);
+        handled = 1;
         status &= hw->intr_mask;
         if (status & IS_EXT_REG) {
                 hw->intr_mask &= ~IS_EXT_REG;
                 schedule_work(&hw->phy_work);
         }
 
-        if (status & IS_XA1_F) {
-                skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
-                skge_txirq(hw->dev[0]);
-        }
-
-        if (status & IS_R1_F) {
-                skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-                hw->intr_mask &= ~IS_R1_F;
+        if (status & (IS_XA1_F|IS_R1_F)) {
+                hw->intr_mask &= ~(IS_XA1_F|IS_R1_F);
                 netif_rx_schedule(hw->dev[0]);
         }
 
@@ -2928,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
                 skge_mac_intr(hw, 0);
 
         if (hw->dev[1]) {
-                if (status & IS_XA2_F) {
-                        skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
-                        skge_txirq(hw->dev[1]);
-                }
-
-                if (status & IS_R2_F) {
-                        skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-                        hw->intr_mask &= ~IS_R2_F;
+                if (status & (IS_XA2_F|IS_R2_F)) {
+                        hw->intr_mask &= ~(IS_XA2_F|IS_R2_F);
                         netif_rx_schedule(hw->dev[1]);
                 }
 
@@ -2956,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
                 skge_error_irq(hw);
 
         skge_write32(hw, B0_IMSK, hw->intr_mask);
+        skge_read32(hw, B0_IMSK);
+out:
         spin_unlock(&hw->hw_lock);
 
-        return IRQ_HANDLED;
+        return IRQ_RETVAL(handled);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
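Holding hw_lock across the whole handler and returning IRQ_RETVAL(handled) makes the ISR well-behaved on a shared line: status == 0 means another device raised the interrupt, and status == ~0 usually means the adapter is gone (hot-unplug or a dead bus read), so neither case is claimed as handled. The bare pattern, as a sketch (hypothetical skeleton using the era's pt_regs signature):

        static irqreturn_t shared_isr_skeleton(int irq, void *dev_id,
                                               struct pt_regs *regs)
        {
                struct skge_hw *hw = dev_id;
                u32 status = skge_read32(hw, B0_SP_ISRC);

                if (status == 0 || status == ~0)
                        return IRQ_NONE;        /* not ours, or device gone */

                /* ... service the sources, rewrite B0_IMSK, flush ... */
                return IRQ_HANDLED;
        }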
@@ -3107,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw)
         else
                 hw->ram_size = t8 * 4096;
 
-        spin_lock_init(&hw->hw_lock);
         hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
         if (hw->ports > 1)
                 hw->intr_mask |= IS_PORT_2;
@@ -3223,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
         dev->poll_controller = skge_netpoll;
 #endif
         dev->irq = hw->pdev->irq;
-        dev->features = NETIF_F_LLTX;
+
         if (highmem)
                 dev->features |= NETIF_F_HIGHDMA;
 
@@ -3245,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 
         skge->port = port;
 
-        spin_lock_init(&skge->tx_lock);
-
         if (hw->chip_id != CHIP_ID_GENESIS) {
                 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
                 skge->rx_csum = 1;
@@ -3333,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
         hw->pdev = pdev;
         mutex_init(&hw->phy_mutex);
         INIT_WORK(&hw->phy_work, skge_extirq, hw);
+        spin_lock_init(&hw->hw_lock);
 
         hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
         if (!hw->regs) {
@@ -3341,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                 goto err_out_free_hw;
         }
 
-        err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw);
-        if (err) {
-                printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
-                       pci_name(pdev), pdev->irq);
-                goto err_out_iounmap;
-        }
-        pci_set_drvdata(pdev, hw);
-
         err = skge_reset(hw);
         if (err)
-                goto err_out_free_irq;
+                goto err_out_iounmap;
 
         printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
                (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
                skge_board_name(hw), hw->chip_rev);
 
-        if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
+        dev = skge_devinit(hw, 0, using_dac);
+        if (!dev)
                 goto err_out_led_off;
 
         if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -3367,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                 goto err_out_free_netdev;
         }
 
-
         err = register_netdev(dev);
         if (err) {
                 printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3375,6 +3347,12 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                 goto err_out_free_netdev;
         }
 
+        err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw);
+        if (err) {
+                printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+                       dev->name, pdev->irq);
+                goto err_out_unregister;
+        }
         skge_show_addr(dev);
 
         if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
@@ -3387,15 +3365,16 @@ static int __devinit skge_probe(struct pci_dev *pdev,
                         free_netdev(dev1);
                 }
         }
+        pci_set_drvdata(pdev, hw);
 
         return 0;
 
+err_out_unregister:
+        unregister_netdev(dev);
 err_out_free_netdev:
         free_netdev(dev);
 err_out_led_off:
         skge_write16(hw, B0_LED, LED_STAT_OFF);
-err_out_free_irq:
-        free_irq(pdev->irq, hw);
 err_out_iounmap:
         iounmap(hw->regs);
 err_out_free_hw:
@@ -3425,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
         spin_lock_irq(&hw->hw_lock);
         hw->intr_mask = 0;
         skge_write32(hw, B0_IMSK, 0);
+        skge_read32(hw, B0_IMSK);
         spin_unlock_irq(&hw->hw_lock);
 
         skge_write16(hw, B0_LED, LED_STAT_OFF);
@@ -3450,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
         struct skge_hw *hw = pci_get_drvdata(pdev);
         int i, wol = 0;
 
-        for (i = 0; i < 2; i++) {
+        pci_save_state(pdev);
+        for (i = 0; i < hw->ports; i++) {
                 struct net_device *dev = hw->dev[i];
 
-                if (dev) {
+                if (netif_running(dev)) {
                         struct skge_port *skge = netdev_priv(dev);
-                        if (netif_running(dev)) {
-                                netif_carrier_off(dev);
-                                if (skge->wol)
-                                        netif_stop_queue(dev);
-                                else
-                                        skge_down(dev);
-                        }
-                        netif_device_detach(dev);
+
+                        netif_carrier_off(dev);
+                        if (skge->wol)
+                                netif_stop_queue(dev);
+                        else
+                                skge_down(dev);
                         wol |= skge->wol;
                 }
+                netif_device_detach(dev);
         }
 
-        pci_save_state(pdev);
+        skge_write32(hw, B0_IMSK, 0);
         pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
-        pci_disable_device(pdev);
         pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
         return 0;
@@ -3478,23 +3457,33 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
 static int skge_resume(struct pci_dev *pdev)
 {
         struct skge_hw *hw = pci_get_drvdata(pdev);
-        int i;
+        int i, err;
 
         pci_set_power_state(pdev, PCI_D0);
         pci_restore_state(pdev);
         pci_enable_wake(pdev, PCI_D0, 0);
 
-        skge_reset(hw);
+        err = skge_reset(hw);
+        if (err)
+                goto out;
 
-        for (i = 0; i < 2; i++) {
+        for (i = 0; i < hw->ports; i++) {
                 struct net_device *dev = hw->dev[i];
-                if (dev) {
-                        netif_device_attach(dev);
-                        if (netif_running(dev) && skge_up(dev))
+
+                netif_device_attach(dev);
+                if (netif_running(dev)) {
+                        err = skge_up(dev);
+
+                        if (err) {
+                                printk(KERN_ERR PFX "%s: could not up: %d\n",
+                                       dev->name, err);
                                 dev_close(dev);
+                                goto out;
+                        }
                 }
         }
-        return 0;
+out:
+        return err;
 }
 #endif
 
@@ -3511,7 +3500,7 @@ static struct pci_driver skge_driver = {
 
 static int __init skge_init_module(void)
 {
-        return pci_module_init(&skge_driver);
+        return pci_register_driver(&skge_driver);
 }
 
 static void __exit skge_cleanup_module(void)