diff options
author | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-24 13:15:13 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-24 13:15:13 -0400 |
commit | a319a2773a13bab56a0d0b3744ba8703324313b5 (patch) | |
tree | f02c86acabd1031439fd422a167784007e84ebb1 /drivers/net/skge.c | |
parent | e18fa700c9a31360bc8f193aa543b7ef7b39a06b (diff) | |
parent | 183798799216fad36c7219fe8d4d6dee6b8fa755 (diff) |
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (217 commits)
net/ieee80211: fix more crypto-related build breakage
[PATCH] Spidernet: add ethtool -S (show statistics)
[NET] GT96100: Delete bitrotting ethernet driver
[PATCH] mv643xx_eth: restrict to 32-bit PPC_MULTIPLATFORM
[PATCH] Cirrus Logic ep93xx ethernet driver
r8169: the MMIO region of the 8167 stands behind BAR#1
e1000, ixgb: Remove pointless wrappers
[PATCH] Remove powerpc specific parts of 3c509 driver
[PATCH] s2io: Switch to pci_get_device
[PATCH] gt96100: move to pci_get_device API
[PATCH] ehea: bugfix for register access functions
[PATCH] e1000 disable device on PCI error
drivers/net/phy/fixed: #if 0 some incomplete code
drivers/net: const-ify ethtool_ops declarations
[PATCH] ethtool: allow const ethtool_ops
[PATCH] sky2: big endian
[PATCH] sky2: fiber support
[PATCH] sky2: tx pause bug fix
drivers/net: Trim trailing whitespace
[PATCH] ehea: IBM eHEA Ethernet Device Driver
...
Manually resolved conflicts in drivers/net/ixgb/ixgb_main.c and
drivers/net/sky2.c related to CHECKSUM_HW/CHECKSUM_PARTIAL changes by
commit 84fa7933a33f806bbbaae6775e87459b1ec584c0 that just happened to be
next to unrelated changes in this update.
Diffstat (limited to 'drivers/net/skge.c')
-rw-r--r-- | drivers/net/skge.c | 206 |
1 files changed, 98 insertions, 108 deletions
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index b3d6fa3d6df4..9142d91355bc 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -43,7 +43,7 @@ | |||
43 | #include "skge.h" | 43 | #include "skge.h" |
44 | 44 | ||
45 | #define DRV_NAME "skge" | 45 | #define DRV_NAME "skge" |
46 | #define DRV_VERSION "1.6" | 46 | #define DRV_VERSION "1.8" |
47 | #define PFX DRV_NAME " " | 47 | #define PFX DRV_NAME " " |
48 | 48 | ||
49 | #define DEFAULT_TX_RING_SIZE 128 | 49 | #define DEFAULT_TX_RING_SIZE 128 |
@@ -91,7 +91,7 @@ MODULE_DEVICE_TABLE(pci, skge_id_table); | |||
91 | static int skge_up(struct net_device *dev); | 91 | static int skge_up(struct net_device *dev); |
92 | static int skge_down(struct net_device *dev); | 92 | static int skge_down(struct net_device *dev); |
93 | static void skge_phy_reset(struct skge_port *skge); | 93 | static void skge_phy_reset(struct skge_port *skge); |
94 | static void skge_tx_clean(struct skge_port *skge); | 94 | static void skge_tx_clean(struct net_device *dev); |
95 | static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); | 95 | static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); |
96 | static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); | 96 | static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); |
97 | static void genesis_get_stats(struct skge_port *skge, u64 *data); | 97 | static void genesis_get_stats(struct skge_port *skge, u64 *data); |
@@ -105,6 +105,7 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 }; | |||
105 | static const int rxqaddr[] = { Q_R1, Q_R2 }; | 105 | static const int rxqaddr[] = { Q_R1, Q_R2 }; |
106 | static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; | 106 | static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; |
107 | static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; | 107 | static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; |
108 | static const u32 irqmask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; | ||
108 | 109 | ||
109 | static int skge_get_regs_len(struct net_device *dev) | 110 | static int skge_get_regs_len(struct net_device *dev) |
110 | { | 111 | { |
@@ -690,7 +691,7 @@ static int skge_phys_id(struct net_device *dev, u32 data) | |||
690 | return 0; | 691 | return 0; |
691 | } | 692 | } |
692 | 693 | ||
693 | static struct ethtool_ops skge_ethtool_ops = { | 694 | static const struct ethtool_ops skge_ethtool_ops = { |
694 | .get_settings = skge_get_settings, | 695 | .get_settings = skge_get_settings, |
695 | .set_settings = skge_set_settings, | 696 | .set_settings = skge_set_settings, |
696 | .get_drvinfo = skge_get_drvinfo, | 697 | .get_drvinfo = skge_get_drvinfo, |
@@ -818,8 +819,9 @@ static void skge_rx_clean(struct skge_port *skge) | |||
818 | /* Allocate buffers for receive ring | 819 | /* Allocate buffers for receive ring |
819 | * For receive: to_clean is next received frame. | 820 | * For receive: to_clean is next received frame. |
820 | */ | 821 | */ |
821 | static int skge_rx_fill(struct skge_port *skge) | 822 | static int skge_rx_fill(struct net_device *dev) |
822 | { | 823 | { |
824 | struct skge_port *skge = netdev_priv(dev); | ||
823 | struct skge_ring *ring = &skge->rx_ring; | 825 | struct skge_ring *ring = &skge->rx_ring; |
824 | struct skge_element *e; | 826 | struct skge_element *e; |
825 | 827 | ||
@@ -827,7 +829,8 @@ static int skge_rx_fill(struct skge_port *skge) | |||
827 | do { | 829 | do { |
828 | struct sk_buff *skb; | 830 | struct sk_buff *skb; |
829 | 831 | ||
830 | skb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_KERNEL); | 832 | skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, |
833 | GFP_KERNEL); | ||
831 | if (!skb) | 834 | if (!skb) |
832 | return -ENOMEM; | 835 | return -ENOMEM; |
833 | 836 | ||
@@ -2178,7 +2181,7 @@ static int skge_up(struct net_device *dev) | |||
2178 | if (err) | 2181 | if (err) |
2179 | goto free_pci_mem; | 2182 | goto free_pci_mem; |
2180 | 2183 | ||
2181 | err = skge_rx_fill(skge); | 2184 | err = skge_rx_fill(dev); |
2182 | if (err) | 2185 | if (err) |
2183 | goto free_rx_ring; | 2186 | goto free_rx_ring; |
2184 | 2187 | ||
@@ -2281,7 +2284,7 @@ static int skge_down(struct net_device *dev) | |||
2281 | skge_led(skge, LED_MODE_OFF); | 2284 | skge_led(skge, LED_MODE_OFF); |
2282 | 2285 | ||
2283 | netif_poll_disable(dev); | 2286 | netif_poll_disable(dev); |
2284 | skge_tx_clean(skge); | 2287 | skge_tx_clean(dev); |
2285 | skge_rx_clean(skge); | 2288 | skge_rx_clean(skge); |
2286 | 2289 | ||
2287 | kfree(skge->rx_ring.start); | 2290 | kfree(skge->rx_ring.start); |
@@ -2306,25 +2309,12 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2306 | int i; | 2309 | int i; |
2307 | u32 control, len; | 2310 | u32 control, len; |
2308 | u64 map; | 2311 | u64 map; |
2309 | unsigned long flags; | ||
2310 | 2312 | ||
2311 | if (skb_padto(skb, ETH_ZLEN)) | 2313 | if (skb_padto(skb, ETH_ZLEN)) |
2312 | return NETDEV_TX_OK; | 2314 | return NETDEV_TX_OK; |
2313 | 2315 | ||
2314 | if (!spin_trylock_irqsave(&skge->tx_lock, flags)) | 2316 | if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) |
2315 | /* Collision - tell upper layer to requeue */ | ||
2316 | return NETDEV_TX_LOCKED; | ||
2317 | |||
2318 | if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) { | ||
2319 | if (!netif_queue_stopped(dev)) { | ||
2320 | netif_stop_queue(dev); | ||
2321 | |||
2322 | printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", | ||
2323 | dev->name); | ||
2324 | } | ||
2325 | spin_unlock_irqrestore(&skge->tx_lock, flags); | ||
2326 | return NETDEV_TX_BUSY; | 2317 | return NETDEV_TX_BUSY; |
2327 | } | ||
2328 | 2318 | ||
2329 | e = skge->tx_ring.to_use; | 2319 | e = skge->tx_ring.to_use; |
2330 | td = e->desc; | 2320 | td = e->desc; |
@@ -2399,8 +2389,6 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2399 | netif_stop_queue(dev); | 2389 | netif_stop_queue(dev); |
2400 | } | 2390 | } |
2401 | 2391 | ||
2402 | spin_unlock_irqrestore(&skge->tx_lock, flags); | ||
2403 | |||
2404 | dev->trans_start = jiffies; | 2392 | dev->trans_start = jiffies; |
2405 | 2393 | ||
2406 | return NETDEV_TX_OK; | 2394 | return NETDEV_TX_OK; |
@@ -2430,18 +2418,18 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e, | |||
2430 | printk(KERN_DEBUG PFX "%s: tx done slot %td\n", | 2418 | printk(KERN_DEBUG PFX "%s: tx done slot %td\n", |
2431 | skge->netdev->name, e - skge->tx_ring.start); | 2419 | skge->netdev->name, e - skge->tx_ring.start); |
2432 | 2420 | ||
2433 | dev_kfree_skb_any(e->skb); | 2421 | dev_kfree_skb(e->skb); |
2434 | } | 2422 | } |
2435 | e->skb = NULL; | 2423 | e->skb = NULL; |
2436 | } | 2424 | } |
2437 | 2425 | ||
2438 | /* Free all buffers in transmit ring */ | 2426 | /* Free all buffers in transmit ring */ |
2439 | static void skge_tx_clean(struct skge_port *skge) | 2427 | static void skge_tx_clean(struct net_device *dev) |
2440 | { | 2428 | { |
2429 | struct skge_port *skge = netdev_priv(dev); | ||
2441 | struct skge_element *e; | 2430 | struct skge_element *e; |
2442 | unsigned long flags; | ||
2443 | 2431 | ||
2444 | spin_lock_irqsave(&skge->tx_lock, flags); | 2432 | netif_tx_lock_bh(dev); |
2445 | for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { | 2433 | for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { |
2446 | struct skge_tx_desc *td = e->desc; | 2434 | struct skge_tx_desc *td = e->desc; |
2447 | skge_tx_free(skge, e, td->control); | 2435 | skge_tx_free(skge, e, td->control); |
@@ -2449,8 +2437,8 @@ static void skge_tx_clean(struct skge_port *skge) | |||
2449 | } | 2437 | } |
2450 | 2438 | ||
2451 | skge->tx_ring.to_clean = e; | 2439 | skge->tx_ring.to_clean = e; |
2452 | netif_wake_queue(skge->netdev); | 2440 | netif_wake_queue(dev); |
2453 | spin_unlock_irqrestore(&skge->tx_lock, flags); | 2441 | netif_tx_unlock_bh(dev); |
2454 | } | 2442 | } |
2455 | 2443 | ||
2456 | static void skge_tx_timeout(struct net_device *dev) | 2444 | static void skge_tx_timeout(struct net_device *dev) |
@@ -2461,7 +2449,7 @@ static void skge_tx_timeout(struct net_device *dev) | |||
2461 | printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name); | 2449 | printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name); |
2462 | 2450 | ||
2463 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); | 2451 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); |
2464 | skge_tx_clean(skge); | 2452 | skge_tx_clean(dev); |
2465 | } | 2453 | } |
2466 | 2454 | ||
2467 | static int skge_change_mtu(struct net_device *dev, int new_mtu) | 2455 | static int skge_change_mtu(struct net_device *dev, int new_mtu) |
@@ -2584,16 +2572,17 @@ static inline int bad_phy_status(const struct skge_hw *hw, u32 status) | |||
2584 | /* Get receive buffer from descriptor. | 2572 | /* Get receive buffer from descriptor. |
2585 | * Handles copy of small buffers and reallocation failures | 2573 | * Handles copy of small buffers and reallocation failures |
2586 | */ | 2574 | */ |
2587 | static inline struct sk_buff *skge_rx_get(struct skge_port *skge, | 2575 | static struct sk_buff *skge_rx_get(struct net_device *dev, |
2588 | struct skge_element *e, | 2576 | struct skge_element *e, |
2589 | u32 control, u32 status, u16 csum) | 2577 | u32 control, u32 status, u16 csum) |
2590 | { | 2578 | { |
2579 | struct skge_port *skge = netdev_priv(dev); | ||
2591 | struct sk_buff *skb; | 2580 | struct sk_buff *skb; |
2592 | u16 len = control & BMU_BBC; | 2581 | u16 len = control & BMU_BBC; |
2593 | 2582 | ||
2594 | if (unlikely(netif_msg_rx_status(skge))) | 2583 | if (unlikely(netif_msg_rx_status(skge))) |
2595 | printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", | 2584 | printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", |
2596 | skge->netdev->name, e - skge->rx_ring.start, | 2585 | dev->name, e - skge->rx_ring.start, |
2597 | status, len); | 2586 | status, len); |
2598 | 2587 | ||
2599 | if (len > skge->rx_buf_size) | 2588 | if (len > skge->rx_buf_size) |
@@ -2609,7 +2598,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, | |||
2609 | goto error; | 2598 | goto error; |
2610 | 2599 | ||
2611 | if (len < RX_COPY_THRESHOLD) { | 2600 | if (len < RX_COPY_THRESHOLD) { |
2612 | skb = alloc_skb(len + 2, GFP_ATOMIC); | 2601 | skb = netdev_alloc_skb(dev, len + 2); |
2613 | if (!skb) | 2602 | if (!skb) |
2614 | goto resubmit; | 2603 | goto resubmit; |
2615 | 2604 | ||
@@ -2624,7 +2613,7 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, | |||
2624 | skge_rx_reuse(e, skge->rx_buf_size); | 2613 | skge_rx_reuse(e, skge->rx_buf_size); |
2625 | } else { | 2614 | } else { |
2626 | struct sk_buff *nskb; | 2615 | struct sk_buff *nskb; |
2627 | nskb = alloc_skb(skge->rx_buf_size + NET_IP_ALIGN, GFP_ATOMIC); | 2616 | nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); |
2628 | if (!nskb) | 2617 | if (!nskb) |
2629 | goto resubmit; | 2618 | goto resubmit; |
2630 | 2619 | ||
@@ -2639,20 +2628,19 @@ static inline struct sk_buff *skge_rx_get(struct skge_port *skge, | |||
2639 | } | 2628 | } |
2640 | 2629 | ||
2641 | skb_put(skb, len); | 2630 | skb_put(skb, len); |
2642 | skb->dev = skge->netdev; | ||
2643 | if (skge->rx_csum) { | 2631 | if (skge->rx_csum) { |
2644 | skb->csum = csum; | 2632 | skb->csum = csum; |
2645 | skb->ip_summed = CHECKSUM_COMPLETE; | 2633 | skb->ip_summed = CHECKSUM_COMPLETE; |
2646 | } | 2634 | } |
2647 | 2635 | ||
2648 | skb->protocol = eth_type_trans(skb, skge->netdev); | 2636 | skb->protocol = eth_type_trans(skb, dev); |
2649 | 2637 | ||
2650 | return skb; | 2638 | return skb; |
2651 | error: | 2639 | error: |
2652 | 2640 | ||
2653 | if (netif_msg_rx_err(skge)) | 2641 | if (netif_msg_rx_err(skge)) |
2654 | printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", | 2642 | printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n", |
2655 | skge->netdev->name, e - skge->rx_ring.start, | 2643 | dev->name, e - skge->rx_ring.start, |
2656 | control, status); | 2644 | control, status); |
2657 | 2645 | ||
2658 | if (skge->hw->chip_id == CHIP_ID_GENESIS) { | 2646 | if (skge->hw->chip_id == CHIP_ID_GENESIS) { |
@@ -2677,15 +2665,15 @@ resubmit: | |||
2677 | } | 2665 | } |
2678 | 2666 | ||
2679 | /* Free all buffers in Tx ring which are no longer owned by device */ | 2667 | /* Free all buffers in Tx ring which are no longer owned by device */ |
2680 | static void skge_txirq(struct net_device *dev) | 2668 | static void skge_tx_done(struct net_device *dev) |
2681 | { | 2669 | { |
2682 | struct skge_port *skge = netdev_priv(dev); | 2670 | struct skge_port *skge = netdev_priv(dev); |
2683 | struct skge_ring *ring = &skge->tx_ring; | 2671 | struct skge_ring *ring = &skge->tx_ring; |
2684 | struct skge_element *e; | 2672 | struct skge_element *e; |
2685 | 2673 | ||
2686 | rmb(); | 2674 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); |
2687 | 2675 | ||
2688 | spin_lock(&skge->tx_lock); | 2676 | netif_tx_lock(dev); |
2689 | for (e = ring->to_clean; e != ring->to_use; e = e->next) { | 2677 | for (e = ring->to_clean; e != ring->to_use; e = e->next) { |
2690 | struct skge_tx_desc *td = e->desc; | 2678 | struct skge_tx_desc *td = e->desc; |
2691 | 2679 | ||
@@ -2696,11 +2684,10 @@ static void skge_txirq(struct net_device *dev) | |||
2696 | } | 2684 | } |
2697 | skge->tx_ring.to_clean = e; | 2685 | skge->tx_ring.to_clean = e; |
2698 | 2686 | ||
2699 | if (netif_queue_stopped(skge->netdev) | 2687 | if (skge_avail(&skge->tx_ring) > TX_LOW_WATER) |
2700 | && skge_avail(&skge->tx_ring) > TX_LOW_WATER) | 2688 | netif_wake_queue(dev); |
2701 | netif_wake_queue(skge->netdev); | ||
2702 | 2689 | ||
2703 | spin_unlock(&skge->tx_lock); | 2690 | netif_tx_unlock(dev); |
2704 | } | 2691 | } |
2705 | 2692 | ||
2706 | static int skge_poll(struct net_device *dev, int *budget) | 2693 | static int skge_poll(struct net_device *dev, int *budget) |
@@ -2712,6 +2699,10 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2712 | int to_do = min(dev->quota, *budget); | 2699 | int to_do = min(dev->quota, *budget); |
2713 | int work_done = 0; | 2700 | int work_done = 0; |
2714 | 2701 | ||
2702 | skge_tx_done(dev); | ||
2703 | |||
2704 | skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); | ||
2705 | |||
2715 | for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { | 2706 | for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { |
2716 | struct skge_rx_desc *rd = e->desc; | 2707 | struct skge_rx_desc *rd = e->desc; |
2717 | struct sk_buff *skb; | 2708 | struct sk_buff *skb; |
@@ -2722,7 +2713,7 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2722 | if (control & BMU_OWN) | 2713 | if (control & BMU_OWN) |
2723 | break; | 2714 | break; |
2724 | 2715 | ||
2725 | skb = skge_rx_get(skge, e, control, rd->status, rd->csum2); | 2716 | skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); |
2726 | if (likely(skb)) { | 2717 | if (likely(skb)) { |
2727 | dev->last_rx = jiffies; | 2718 | dev->last_rx = jiffies; |
2728 | netif_receive_skb(skb); | 2719 | netif_receive_skb(skb); |
@@ -2742,12 +2733,11 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2742 | if (work_done >= to_do) | 2733 | if (work_done >= to_do) |
2743 | return 1; /* not done */ | 2734 | return 1; /* not done */ |
2744 | 2735 | ||
2745 | netif_rx_complete(dev); | ||
2746 | |||
2747 | spin_lock_irq(&hw->hw_lock); | 2736 | spin_lock_irq(&hw->hw_lock); |
2748 | hw->intr_mask |= rxirqmask[skge->port]; | 2737 | __netif_rx_complete(dev); |
2738 | hw->intr_mask |= irqmask[skge->port]; | ||
2749 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2739 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2750 | mmiowb(); | 2740 | skge_read32(hw, B0_IMSK); |
2751 | spin_unlock_irq(&hw->hw_lock); | 2741 | spin_unlock_irq(&hw->hw_lock); |
2752 | 2742 | ||
2753 | return 0; | 2743 | return 0; |
@@ -2881,6 +2871,7 @@ static void skge_extirq(void *arg) | |||
2881 | spin_lock_irq(&hw->hw_lock); | 2871 | spin_lock_irq(&hw->hw_lock); |
2882 | hw->intr_mask |= IS_EXT_REG; | 2872 | hw->intr_mask |= IS_EXT_REG; |
2883 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2873 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2874 | skge_read32(hw, B0_IMSK); | ||
2884 | spin_unlock_irq(&hw->hw_lock); | 2875 | spin_unlock_irq(&hw->hw_lock); |
2885 | } | 2876 | } |
2886 | 2877 | ||
@@ -2888,27 +2879,23 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2888 | { | 2879 | { |
2889 | struct skge_hw *hw = dev_id; | 2880 | struct skge_hw *hw = dev_id; |
2890 | u32 status; | 2881 | u32 status; |
2882 | int handled = 0; | ||
2891 | 2883 | ||
2884 | spin_lock(&hw->hw_lock); | ||
2892 | /* Reading this register masks IRQ */ | 2885 | /* Reading this register masks IRQ */ |
2893 | status = skge_read32(hw, B0_SP_ISRC); | 2886 | status = skge_read32(hw, B0_SP_ISRC); |
2894 | if (status == 0) | 2887 | if (status == 0 || status == ~0) |
2895 | return IRQ_NONE; | 2888 | goto out; |
2896 | 2889 | ||
2897 | spin_lock(&hw->hw_lock); | 2890 | handled = 1; |
2898 | status &= hw->intr_mask; | 2891 | status &= hw->intr_mask; |
2899 | if (status & IS_EXT_REG) { | 2892 | if (status & IS_EXT_REG) { |
2900 | hw->intr_mask &= ~IS_EXT_REG; | 2893 | hw->intr_mask &= ~IS_EXT_REG; |
2901 | schedule_work(&hw->phy_work); | 2894 | schedule_work(&hw->phy_work); |
2902 | } | 2895 | } |
2903 | 2896 | ||
2904 | if (status & IS_XA1_F) { | 2897 | if (status & (IS_XA1_F|IS_R1_F)) { |
2905 | skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F); | 2898 | hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); |
2906 | skge_txirq(hw->dev[0]); | ||
2907 | } | ||
2908 | |||
2909 | if (status & IS_R1_F) { | ||
2910 | skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); | ||
2911 | hw->intr_mask &= ~IS_R1_F; | ||
2912 | netif_rx_schedule(hw->dev[0]); | 2899 | netif_rx_schedule(hw->dev[0]); |
2913 | } | 2900 | } |
2914 | 2901 | ||
@@ -2927,14 +2914,8 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2927 | skge_mac_intr(hw, 0); | 2914 | skge_mac_intr(hw, 0); |
2928 | 2915 | ||
2929 | if (hw->dev[1]) { | 2916 | if (hw->dev[1]) { |
2930 | if (status & IS_XA2_F) { | 2917 | if (status & (IS_XA2_F|IS_R2_F)) { |
2931 | skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F); | 2918 | hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); |
2932 | skge_txirq(hw->dev[1]); | ||
2933 | } | ||
2934 | |||
2935 | if (status & IS_R2_F) { | ||
2936 | skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); | ||
2937 | hw->intr_mask &= ~IS_R2_F; | ||
2938 | netif_rx_schedule(hw->dev[1]); | 2919 | netif_rx_schedule(hw->dev[1]); |
2939 | } | 2920 | } |
2940 | 2921 | ||
@@ -2955,9 +2936,11 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2955 | skge_error_irq(hw); | 2936 | skge_error_irq(hw); |
2956 | 2937 | ||
2957 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2938 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2939 | skge_read32(hw, B0_IMSK); | ||
2940 | out: | ||
2958 | spin_unlock(&hw->hw_lock); | 2941 | spin_unlock(&hw->hw_lock); |
2959 | 2942 | ||
2960 | return IRQ_HANDLED; | 2943 | return IRQ_RETVAL(handled); |
2961 | } | 2944 | } |
2962 | 2945 | ||
2963 | #ifdef CONFIG_NET_POLL_CONTROLLER | 2946 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -3106,7 +3089,6 @@ static int skge_reset(struct skge_hw *hw) | |||
3106 | else | 3089 | else |
3107 | hw->ram_size = t8 * 4096; | 3090 | hw->ram_size = t8 * 4096; |
3108 | 3091 | ||
3109 | spin_lock_init(&hw->hw_lock); | ||
3110 | hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; | 3092 | hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; |
3111 | if (hw->ports > 1) | 3093 | if (hw->ports > 1) |
3112 | hw->intr_mask |= IS_PORT_2; | 3094 | hw->intr_mask |= IS_PORT_2; |
@@ -3222,7 +3204,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3222 | dev->poll_controller = skge_netpoll; | 3204 | dev->poll_controller = skge_netpoll; |
3223 | #endif | 3205 | #endif |
3224 | dev->irq = hw->pdev->irq; | 3206 | dev->irq = hw->pdev->irq; |
3225 | dev->features = NETIF_F_LLTX; | 3207 | |
3226 | if (highmem) | 3208 | if (highmem) |
3227 | dev->features |= NETIF_F_HIGHDMA; | 3209 | dev->features |= NETIF_F_HIGHDMA; |
3228 | 3210 | ||
@@ -3244,8 +3226,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3244 | 3226 | ||
3245 | skge->port = port; | 3227 | skge->port = port; |
3246 | 3228 | ||
3247 | spin_lock_init(&skge->tx_lock); | ||
3248 | |||
3249 | if (hw->chip_id != CHIP_ID_GENESIS) { | 3229 | if (hw->chip_id != CHIP_ID_GENESIS) { |
3250 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; | 3230 | dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; |
3251 | skge->rx_csum = 1; | 3231 | skge->rx_csum = 1; |
@@ -3332,6 +3312,7 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3332 | hw->pdev = pdev; | 3312 | hw->pdev = pdev; |
3333 | mutex_init(&hw->phy_mutex); | 3313 | mutex_init(&hw->phy_mutex); |
3334 | INIT_WORK(&hw->phy_work, skge_extirq, hw); | 3314 | INIT_WORK(&hw->phy_work, skge_extirq, hw); |
3315 | spin_lock_init(&hw->hw_lock); | ||
3335 | 3316 | ||
3336 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | 3317 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); |
3337 | if (!hw->regs) { | 3318 | if (!hw->regs) { |
@@ -3340,23 +3321,16 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3340 | goto err_out_free_hw; | 3321 | goto err_out_free_hw; |
3341 | } | 3322 | } |
3342 | 3323 | ||
3343 | err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, DRV_NAME, hw); | ||
3344 | if (err) { | ||
3345 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | ||
3346 | pci_name(pdev), pdev->irq); | ||
3347 | goto err_out_iounmap; | ||
3348 | } | ||
3349 | pci_set_drvdata(pdev, hw); | ||
3350 | |||
3351 | err = skge_reset(hw); | 3324 | err = skge_reset(hw); |
3352 | if (err) | 3325 | if (err) |
3353 | goto err_out_free_irq; | 3326 | goto err_out_iounmap; |
3354 | 3327 | ||
3355 | printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", | 3328 | printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n", |
3356 | (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, | 3329 | (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, |
3357 | skge_board_name(hw), hw->chip_rev); | 3330 | skge_board_name(hw), hw->chip_rev); |
3358 | 3331 | ||
3359 | if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) | 3332 | dev = skge_devinit(hw, 0, using_dac); |
3333 | if (!dev) | ||
3360 | goto err_out_led_off; | 3334 | goto err_out_led_off; |
3361 | 3335 | ||
3362 | if (!is_valid_ether_addr(dev->dev_addr)) { | 3336 | if (!is_valid_ether_addr(dev->dev_addr)) { |
@@ -3366,7 +3340,6 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3366 | goto err_out_free_netdev; | 3340 | goto err_out_free_netdev; |
3367 | } | 3341 | } |
3368 | 3342 | ||
3369 | |||
3370 | err = register_netdev(dev); | 3343 | err = register_netdev(dev); |
3371 | if (err) { | 3344 | if (err) { |
3372 | printk(KERN_ERR PFX "%s: cannot register net device\n", | 3345 | printk(KERN_ERR PFX "%s: cannot register net device\n", |
@@ -3374,6 +3347,12 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3374 | goto err_out_free_netdev; | 3347 | goto err_out_free_netdev; |
3375 | } | 3348 | } |
3376 | 3349 | ||
3350 | err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, dev->name, hw); | ||
3351 | if (err) { | ||
3352 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | ||
3353 | dev->name, pdev->irq); | ||
3354 | goto err_out_unregister; | ||
3355 | } | ||
3377 | skge_show_addr(dev); | 3356 | skge_show_addr(dev); |
3378 | 3357 | ||
3379 | if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { | 3358 | if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) { |
@@ -3386,15 +3365,16 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3386 | free_netdev(dev1); | 3365 | free_netdev(dev1); |
3387 | } | 3366 | } |
3388 | } | 3367 | } |
3368 | pci_set_drvdata(pdev, hw); | ||
3389 | 3369 | ||
3390 | return 0; | 3370 | return 0; |
3391 | 3371 | ||
3372 | err_out_unregister: | ||
3373 | unregister_netdev(dev); | ||
3392 | err_out_free_netdev: | 3374 | err_out_free_netdev: |
3393 | free_netdev(dev); | 3375 | free_netdev(dev); |
3394 | err_out_led_off: | 3376 | err_out_led_off: |
3395 | skge_write16(hw, B0_LED, LED_STAT_OFF); | 3377 | skge_write16(hw, B0_LED, LED_STAT_OFF); |
3396 | err_out_free_irq: | ||
3397 | free_irq(pdev->irq, hw); | ||
3398 | err_out_iounmap: | 3378 | err_out_iounmap: |
3399 | iounmap(hw->regs); | 3379 | iounmap(hw->regs); |
3400 | err_out_free_hw: | 3380 | err_out_free_hw: |
@@ -3424,6 +3404,7 @@ static void __devexit skge_remove(struct pci_dev *pdev) | |||
3424 | spin_lock_irq(&hw->hw_lock); | 3404 | spin_lock_irq(&hw->hw_lock); |
3425 | hw->intr_mask = 0; | 3405 | hw->intr_mask = 0; |
3426 | skge_write32(hw, B0_IMSK, 0); | 3406 | skge_write32(hw, B0_IMSK, 0); |
3407 | skge_read32(hw, B0_IMSK); | ||
3427 | spin_unlock_irq(&hw->hw_lock); | 3408 | spin_unlock_irq(&hw->hw_lock); |
3428 | 3409 | ||
3429 | skge_write16(hw, B0_LED, LED_STAT_OFF); | 3410 | skge_write16(hw, B0_LED, LED_STAT_OFF); |
@@ -3449,26 +3430,25 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3449 | struct skge_hw *hw = pci_get_drvdata(pdev); | 3430 | struct skge_hw *hw = pci_get_drvdata(pdev); |
3450 | int i, wol = 0; | 3431 | int i, wol = 0; |
3451 | 3432 | ||
3452 | for (i = 0; i < 2; i++) { | 3433 | pci_save_state(pdev); |
3434 | for (i = 0; i < hw->ports; i++) { | ||
3453 | struct net_device *dev = hw->dev[i]; | 3435 | struct net_device *dev = hw->dev[i]; |
3454 | 3436 | ||
3455 | if (dev) { | 3437 | if (netif_running(dev)) { |
3456 | struct skge_port *skge = netdev_priv(dev); | 3438 | struct skge_port *skge = netdev_priv(dev); |
3457 | if (netif_running(dev)) { | 3439 | |
3458 | netif_carrier_off(dev); | 3440 | netif_carrier_off(dev); |
3459 | if (skge->wol) | 3441 | if (skge->wol) |
3460 | netif_stop_queue(dev); | 3442 | netif_stop_queue(dev); |
3461 | else | 3443 | else |
3462 | skge_down(dev); | 3444 | skge_down(dev); |
3463 | } | ||
3464 | netif_device_detach(dev); | ||
3465 | wol |= skge->wol; | 3445 | wol |= skge->wol; |
3466 | } | 3446 | } |
3447 | netif_device_detach(dev); | ||
3467 | } | 3448 | } |
3468 | 3449 | ||
3469 | pci_save_state(pdev); | 3450 | skge_write32(hw, B0_IMSK, 0); |
3470 | pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); | 3451 | pci_enable_wake(pdev, pci_choose_state(pdev, state), wol); |
3471 | pci_disable_device(pdev); | ||
3472 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); | 3452 | pci_set_power_state(pdev, pci_choose_state(pdev, state)); |
3473 | 3453 | ||
3474 | return 0; | 3454 | return 0; |
@@ -3477,23 +3457,33 @@ static int skge_suspend(struct pci_dev *pdev, pm_message_t state) | |||
3477 | static int skge_resume(struct pci_dev *pdev) | 3457 | static int skge_resume(struct pci_dev *pdev) |
3478 | { | 3458 | { |
3479 | struct skge_hw *hw = pci_get_drvdata(pdev); | 3459 | struct skge_hw *hw = pci_get_drvdata(pdev); |
3480 | int i; | 3460 | int i, err; |
3481 | 3461 | ||
3482 | pci_set_power_state(pdev, PCI_D0); | 3462 | pci_set_power_state(pdev, PCI_D0); |
3483 | pci_restore_state(pdev); | 3463 | pci_restore_state(pdev); |
3484 | pci_enable_wake(pdev, PCI_D0, 0); | 3464 | pci_enable_wake(pdev, PCI_D0, 0); |
3485 | 3465 | ||
3486 | skge_reset(hw); | 3466 | err = skge_reset(hw); |
3467 | if (err) | ||
3468 | goto out; | ||
3487 | 3469 | ||
3488 | for (i = 0; i < 2; i++) { | 3470 | for (i = 0; i < hw->ports; i++) { |
3489 | struct net_device *dev = hw->dev[i]; | 3471 | struct net_device *dev = hw->dev[i]; |
3490 | if (dev) { | 3472 | |
3491 | netif_device_attach(dev); | 3473 | netif_device_attach(dev); |
3492 | if (netif_running(dev) && skge_up(dev)) | 3474 | if (netif_running(dev)) { |
3475 | err = skge_up(dev); | ||
3476 | |||
3477 | if (err) { | ||
3478 | printk(KERN_ERR PFX "%s: could not up: %d\n", | ||
3479 | dev->name, err); | ||
3493 | dev_close(dev); | 3480 | dev_close(dev); |
3481 | goto out; | ||
3482 | } | ||
3494 | } | 3483 | } |
3495 | } | 3484 | } |
3496 | return 0; | 3485 | out: |
3486 | return err; | ||
3497 | } | 3487 | } |
3498 | #endif | 3488 | #endif |
3499 | 3489 | ||
@@ -3510,7 +3500,7 @@ static struct pci_driver skge_driver = { | |||
3510 | 3500 | ||
3511 | static int __init skge_init_module(void) | 3501 | static int __init skge_init_module(void) |
3512 | { | 3502 | { |
3513 | return pci_module_init(&skge_driver); | 3503 | return pci_register_driver(&skge_driver); |
3514 | } | 3504 | } |
3515 | 3505 | ||
3516 | static void __exit skge_cleanup_module(void) | 3506 | static void __exit skge_cleanup_module(void) |