Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/forcedeth.c         312
-rw-r--r--  drivers/net/ixp2000/enp2611.c    13
-rw-r--r--  drivers/net/ixp2000/pm3386.c     30
-rw-r--r--  drivers/net/ixp2000/pm3386.h      1
-rw-r--r--  drivers/net/skge.c                3
-rw-r--r--  drivers/net/sky2.c               49
-rw-r--r--  drivers/net/sky2.h                2
-rw-r--r--  drivers/net/via-rhine.c          34
8 files changed, 159 insertions(+), 285 deletions(-)
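
Note: the condition that recurs throughout the forcedeth.c hunks below is the
open-coded body of the using_multi_irqs() helper that this patch removes. As a
minimal sketch (not part of the patch), the predicate it computes is shown here
with the redundant clause dropped, since !A || (A && B) reduces to !A || B; the
msi_flags bits are the driver's own:

static int using_multi_irqs(struct fe_priv *np)
{
	/* A single interrupt line serves the NIC when MSI-X is disabled,
	 * or when MSI-X was granted only one vector. */
	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    (np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)
		return 0;	/* single irq: use dev->irq */
	return 1;		/* separate RX/TX/OTHER vectors */
}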
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f7235c9bc421..7e078b4cca7c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,7 +106,6 @@
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.54"
+#define FORCEDETH_VERSION		"0.53"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-		return 0;
-	else
-		return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-
-	writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(mask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
-}
-
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1086,25 +1019,24 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	nv_disable_irq(dev);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		nv_enable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 					dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2303,11 +2251,10 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2330,12 +2277,11 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!using_multi_irqs(dev)) {
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+		enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	writel(0, base + NvRegIrqMask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
-		goto out_drain;
-	}
-
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 			np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@ static int nv_open(struct net_device *dev)
 	}
 
 	/* ask for interrupts */
-	nv_enable_hw_interrupts(dev, np->irqmask);
+	writel(np->irqmask, base + NvRegIrqMask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	nv_free_irq(dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-		}
-		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-			       pci_name(pci_dev));
+			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+				       pci_name(pci_dev));
+				goto out_relreg;
+			} else {
+				dev->features |= NETIF_F_HIGHDMA;
+				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			}
 		}
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c
index 6f7dce8eba51..b67f586d7392 100644
--- a/drivers/net/ixp2000/enp2611.c
+++ b/drivers/net/ixp2000/enp2611.c
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy)
 		int status;
 
 		dev = nds[i];
+		if (dev == NULL)
+			continue;
 
 		status = pm3386_is_link_up(i);
 		if (status && !netif_carrier_ok(dev)) {
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up)
 
 static int __init enp2611_init_module(void)
 {
+	int ports;
 	int i;
 
 	if (!machine_is_enp2611())
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void)
 	caleb_reset();
 	pm3386_reset();
 
-	for (i = 0; i < 3; i++) {
+	ports = pm3386_port_count();
+	for (i = 0; i < ports; i++) {
 		nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
 		if (nds[i] == NULL) {
 			while (--i >= 0)
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void)
 
 	ixp2400_msf_init(&enp2611_msf_parameters);
 
-	if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) {
-		for (i = 0; i < 3; i++)
-			free_netdev(nds[i]);
+	if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
+		for (i = 0; i < ports; i++)
+			if (nds[i])
+				free_netdev(nds[i]);
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c
index 5c7ab7564053..5224651c9aac 100644
--- a/drivers/net/ixp2000/pm3386.c
+++ b/drivers/net/ixp2000/pm3386.c
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
 	pm3386_reg_write(port >> 1, reg, value);
 }
 
+int pm3386_secondary_present(void)
+{
+	return pm3386_reg_read(1, 0) == 0x3386;
+}
 
 void pm3386_reset(void)
 {
 	u8 mac[3][6];
+	int secondary;
+
+	secondary = pm3386_secondary_present();
 
 	/* Save programmed MAC addresses. */
 	pm3386_get_mac(0, mac[0]);
 	pm3386_get_mac(1, mac[1]);
-	pm3386_get_mac(2, mac[2]);
+	if (secondary)
+		pm3386_get_mac(2, mac[2]);
 
 	/* Assert analog and digital reset. */
 	pm3386_reg_write(0, 0x002, 0x0060);
-	pm3386_reg_write(1, 0x002, 0x0060);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0060);
 	mdelay(1);
 
 	/* Deassert analog reset. */
 	pm3386_reg_write(0, 0x002, 0x0062);
-	pm3386_reg_write(1, 0x002, 0x0062);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0062);
 	mdelay(10);
 
 	/* Deassert digital reset. */
 	pm3386_reg_write(0, 0x002, 0x0063);
-	pm3386_reg_write(1, 0x002, 0x0063);
+	if (secondary)
+		pm3386_reg_write(1, 0x002, 0x0063);
 	mdelay(10);
 
 	/* Restore programmed MAC addresses. */
 	pm3386_set_mac(0, mac[0]);
 	pm3386_set_mac(1, mac[1]);
-	pm3386_set_mac(2, mac[2]);
+	if (secondary)
+		pm3386_set_mac(2, mac[2]);
 
 	/* Disable carrier on all ports. */
 	pm3386_set_carrier(0, 0);
 	pm3386_set_carrier(1, 0);
-	pm3386_set_carrier(2, 0);
+	if (secondary)
+		pm3386_set_carrier(2, 0);
 }
 
 static u16 swaph(u16 x)
@@ -127,6 +140,11 @@ static u16 swaph(u16 x)
 	return ((x << 8) | (x >> 8)) & 0xffff;
 }
 
+int pm3386_port_count(void)
+{
+	return 2 + pm3386_secondary_present();
+}
+
 void pm3386_init_port(int port)
 {
 	int pm = port >> 1;
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h
index fe92bb056ac4..cc4183dca911 100644
--- a/drivers/net/ixp2000/pm3386.h
+++ b/drivers/net/ixp2000/pm3386.h
@@ -13,6 +13,7 @@
 #define __PM3386_H
 
 void pm3386_reset(void);
+int pm3386_port_count(void);
 void pm3386_init_port(int port);
 void pm3386_get_mac(int port, u8 *mac);
 void pm3386_set_mac(int port, u8 *mac);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0cc104..63a6b3dfc095 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
 	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index ffd267fab21d..60779ebf2ff6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.3"
+#define DRV_VERSION		"1.4"
 #define PFX			DRV_NAME " "
 
 /*
@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 		}
 
 		if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+			sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 			sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 			reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 			reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
 
 	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,7 +1022,25 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
 	u32 ramsize, rxspace, imask;
-	int err = -ENOMEM;
+	int cap, err = -ENOMEM;
+	struct net_device *otherdev = hw->dev[sky2->port^1];
+
+	/*
+	 * On dual port PCI-X card, there is a problem where status
+	 * can be received out of order due to split transactions
+	 */
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		struct sky2_port *osky2 = netdev_priv(otherdev);
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+		sky2->rx_csum = 0;
+		osky2->rx_csum = 0;
+	}
 
 	if (netif_msg_ifup(sky2))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
@@ -1899,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 	}
 }
 
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+	return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {
@@ -2171,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (status & Y2_IS_CHK_TXA2)
 		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
-	if (status & Y2_IS_STAT_BMU)
-		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
 	work_done = sky2_status_intr(hw, work_limit);
 	*budget -= work_done;
 	dev0->quota -= work_done;
 
-	if (work_done >= work_limit)
+	if (status & Y2_IS_STAT_BMU)
+		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+	if (sky2_more_work(hw))
 		return 1;
 
 	netif_rx_complete(dev0);
 
-	status = sky2_read32(hw, B0_Y2_SP_LISR);
+	sky2_read32(hw, B0_Y2_SP_LISR);
 	return 0;
 }
 
@@ -3067,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->duplex = -1;
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
-
-	/* Receive checksum disabled for Yukon XL
-	 * because of observed problems with incorrect
-	 * values when multiple packets are received in one interrupt
-	 */
-	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+	sky2->rx_csum = 1;
 
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8012994c9b93..8a0bc5525f0a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@ enum csr_regs {
 enum {
 	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
 	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */
 	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
 	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
 	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a6dc53b4250d..fdc21037f6dc 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -491,8 +491,6 @@ struct rhine_private {
 	u8 tx_thresh, rx_thresh;
 
 	struct mii_if_info mii_if;
-	struct work_struct tx_timeout_task;
-	struct work_struct check_media_task;
 	void __iomem *base;
 };
 
@@ -500,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -856,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	INIT_WORK(&rp->tx_timeout_task,
-		  (void (*)(void *))rhine_tx_timeout_task, dev);
-
-	INIT_WORK(&rp->check_media_task,
-		  (void (*)(void *))rhine_check_media_task, dev);
-
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1108,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		   netif_carrier_ok(mii->dev));
 }
 
-static void rhine_check_media_task(struct net_device *dev)
-{
-	rhine_check_media(dev, 0);
-}
-
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1166,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
-		/* Do not call from ISR! */
-		msleep(1);
+		/* Can be called from ISR. Evil. */
+		mdelay(1);
 
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
@@ -1257,16 +1242,6 @@ static int rhine_open(struct net_device *dev)
 static void rhine_tx_timeout(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-
-	/*
-	 * Move bulk of work outside of interrupt context
-	 */
-	schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
 	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1677,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	spin_lock(&rp->lock);
 
 	if (intr_status & IntrLinkChange)
-		schedule_work(&rp->check_media_task);
+		rhine_check_media(dev, 0);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1927,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
 	spin_unlock_irq(&rp->lock);
 
 	free_irq(rp->pdev->irq, dev);
-
-	flush_scheduled_work();
-
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);