 drivers/net/forcedeth.c                     | 312
 drivers/net/pcmcia/axnet_cs.c               |  13
 drivers/net/skge.c                          |   8
 drivers/net/sky2.c                          |  54
 drivers/net/sky2.h                          |   2
 drivers/net/tulip/winbond-840.c             |   4
 drivers/net/via-rhine.c                     |  34
 drivers/net/wireless/bcm43xx/bcm43xx_main.c |   6
 8 files changed, 131 insertions(+), 302 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index f7235c9bc421..7e078b4cca7c 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,7 +106,6 @@
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
- * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +117,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION	"0.54"
+#define FORCEDETH_VERSION	"0.53"
 #define DRV_NAME	"forcedeth"
 
 #include <linux/module.h>
@@ -711,72 +710,6 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
-static int using_multi_irqs(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
-		return 0;
-	else
-		return 1;
-}
-
-static void nv_enable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
-	} else {
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-static void nv_disable_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
-}
-
-/* In MSIX mode, a write to irqmask behaves as XOR */
-static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-
-	writel(mask, base + NvRegIrqMask);
-}
-
-static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(mask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
-}
-
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1086,25 +1019,24 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 	}
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1736,7 +1668,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	nv_disable_irq(dev);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1769,7 +1709,15 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		nv_enable_irq(dev);
+		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+			enable_irq(dev->irq);
+		} else {
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+		}
 	}
 	return 0;
 }
@@ -2160,16 +2108,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2179,7 +2127,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2209,14 +2157,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2226,7 +2174,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2255,14 +2203,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2270,7 +2218,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2280,7 +2228,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock(&np->lock);
 			break;
 		}
 
@@ -2303,11 +2251,10 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!using_multi_irqs(dev)) {
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			disable_irq(dev->irq);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2330,12 +2277,11 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!using_multi_irqs(dev)) {
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		if (np->msi_flags & NV_MSI_X_ENABLED)
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
-		else
-			enable_irq(dev->irq);
+		enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2682,113 +2628,6 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
-static int nv_request_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
-	int ret = 1;
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_rx;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_free_tx;
-				}
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_err;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_err;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_err;
-	}
-
-	return 0;
-out_free_tx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
-out_free_rx:
-	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
-out_err:
-	return 1;
-}
-
-static void nv_free_irq(struct net_device *dev)
-{
-	struct fe_priv *np = get_nvpriv(dev);
-	int i;
-
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2881,16 +2720,12 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	writel(0, base + NvRegIrqMask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev)) {
-		goto out_drain;
-	}
-
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
 		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
 			np->msi_x_entry[i].entry = i;
@@ -2964,7 +2799,7 @@
 	}
 
 	/* ask for interrupts */
-	nv_enable_hw_interrupts(dev, np->irqmask);
+	writel(np->irqmask, base + NvRegIrqMask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -3008,6 +2843,7 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
+	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -3025,13 +2861,31 @@
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	nv_disable_hw_interrupts(dev, np->irqmask);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(np->irqmask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	nv_free_irq(dev);
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
 
 	drain_ring(dev);
 
@@ -3120,18 +2974,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			dev->features |= NETIF_F_HIGHDMA;
-			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-		}
-		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-			       pci_name(pci_dev));
+			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+				       pci_name(pci_dev));
+				goto out_relreg;
+			} else {
+				dev->features |= NETIF_F_HIGHDMA;
+				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+			}
 		}
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
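Most of the forcedeth churn above comes from open-coding a single predicate: is this NIC serviced by one interrupt line, or by separate MSI-X vectors? The 0.54 code being reverted expressed it once as a helper. A condensed sketch of that pattern, using the driver's own identifiers (the redundant second NV_MSI_X_ENABLED test in the original is dropped here for readability):

/* One IRQ line is in use unless MSI-X is enabled with more than one
 * vector granted; only then do RX/TX/OTHER have their own handlers. */
static int using_multi_irqs(struct net_device *dev)
{
	struct fe_priv *np = get_nvpriv(dev);

	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
	    (np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)
		return 0;	/* legacy INTx, MSI, or single-vector MSI-X */
	return 1;		/* separate RX/TX/OTHER MSI-X vectors */
}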
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 448a09488529..2ea66aca648b 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1691,17 +1691,6 @@ static void do_set_multicast_list(struct net_device *dev)
 		memset(ei_local->mcfilter, 0xFF, 8);
 	}
 
-	/*
-	 * DP8390 manuals don't specify any magic sequence for altering
-	 * the multicast regs on an already running card. To be safe, we
-	 * ensure multicast mode is off prior to loading up the new hash
-	 * table. If this proves to be not enough, we can always resort
-	 * to stopping the NIC, loading the table and then restarting.
-	 */
-
-	if (netif_running(dev))
-		outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
-
 	outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
 	for(i = 0; i < 8; i++)
 	{
@@ -1715,6 +1704,8 @@
 		outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
 	else
 		outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
+
+	outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD);
 }
 
 /*
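The two axnet_cs hunks reorder the DP8390 multicast sequence: rather than switching multicast off before loading the hash table, the new code loads the table on register page 1 and then explicitly returns to page 0 and restarts the chip before the RX configuration is reprogrammed. A sketch of the resulting sequence (the filter-write loop body is not visible in the hunk; EN1_MULT_SHIFT(i) is the usual 8390 page-1 filter offset macro, assumed here):

/* select page 1 with DMA off, so the filter registers are addressable */
outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD);
/* load the 8-byte multicast hash, one filter register per byte */
for (i = 0; i < 8; i++)
	outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i));	/* assumed offset macro */
/* back to page 0 and restart before touching EN0_RXCR */
outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD);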
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index a70c2b0cc104..5ca5a1b546a1 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
-	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },	/* DGE-530T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
 	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
@@ -402,7 +401,7 @@ static int skge_set_ring_param(struct net_device *dev,
 	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
-	    p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
+	    p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
 		return -EINVAL;
 
 	skge->rx_ring.count = p->rx_pending;
@@ -2717,8 +2716,7 @@ static int skge_poll(struct net_device *dev, int *budget)
 		if (control & BMU_OWN)
 			break;
 
-		skb = skge_rx_get(skge, e, control, rd->status,
-				  le16_to_cpu(rd->csum2));
+		skb = skge_rx_get(skge, e, control, rd->status, rd->csum2);
 		if (likely(skb)) {
 			dev->last_rx = jiffies;
 			netif_receive_skb(skb);
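The skge_set_ring_param change is more than tidying: a TX ring must be able to hold at least one worst-case frame, which occupies one descriptor for the linear header plus one per page fragment, i.e. MAX_SKB_FRAGS+1 descriptors in total. A ring sized below that could never accept even a single fully fragmented skb, so the bound is validated up front. A minimal sketch of the check, as in the hunk:

/* reject rings too small to hold one maximally-fragmented frame */
if (p->tx_pending < MAX_SKB_FRAGS + 1 || p->tx_pending > MAX_TX_RING_SIZE)
	return -EINVAL;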
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 62be6d99d05c..60779ebf2ff6 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -51,7 +51,7 @@
 #include "sky2.h"
 
 #define DRV_NAME		"sky2"
-#define DRV_VERSION		"1.3"
+#define DRV_VERSION		"1.4"
 #define PFX			DRV_NAME " "
 
 /*
@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms)
 static const struct pci_device_id sky2_id_table[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },	/* DGE-560T */
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 	}
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
+		sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
 		sky2_pci_write32(hw, PCI_DEV_REG3, 0);
 		reg1 = sky2_pci_read32(hw, PCI_DEV_REG4);
 		reg1 &= P_ASPM_CONTROL_MSK;
@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
 	u16 ctrl, ct1000, adv, pg, ledctrl, ledover;
 
 	if (sky2->autoneg == AUTONEG_ENABLE &&
-	    (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
+	    !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) {
 		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
 
 		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
@@ -1020,19 +1022,26 @@ static int sky2_up(struct net_device *dev)
 	struct sky2_hw *hw = sky2->hw;
 	unsigned port = sky2->port;
 	u32 ramsize, rxspace, imask;
-	int err;
+	int cap, err = -ENOMEM;
 	struct net_device *otherdev = hw->dev[sky2->port^1];
 
-	/* Block bringing up both ports at the same time on a dual port card.
-	 * There is an unfixed bug where receiver gets confused and picks up
-	 * packets out of order. Until this is fixed, prevent data corruption.
+	/*
+	 * On dual port PCI-X card, there is an problem where status
+	 * can be received out of order due to split transactions
 	 */
-	if (otherdev && netif_running(otherdev)) {
-		printk(KERN_INFO PFX "dual port support is disabled.\n");
-		return -EBUSY;
-	}
+	if (otherdev && netif_running(otherdev) &&
+	    (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) {
+		struct sky2_port *osky2 = netdev_priv(otherdev);
+		u16 cmd;
+
+		cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
+		cmd &= ~PCI_X_CMD_MAX_SPLIT;
+		sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
+
+		sky2->rx_csum = 0;
+		osky2->rx_csum = 0;
+	}
 
-	err = -ENOMEM;
 	if (netif_msg_ifup(sky2))
 		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 
@@ -1910,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
 	}
 }
 
+/* Is status ring empty or is there more to do? */
+static inline int sky2_more_work(const struct sky2_hw *hw)
+{
+	return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX));
+}
+
 /* Process status response ring */
 static int sky2_status_intr(struct sky2_hw *hw, int to_do)
 {
@@ -2182,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (status & Y2_IS_CHK_TXA2)
 		sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2);
 
-	if (status & Y2_IS_STAT_BMU)
-		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
-
 	work_done = sky2_status_intr(hw, work_limit);
 	*budget -= work_done;
 	dev0->quota -= work_done;
 
-	if (work_done >= work_limit)
+	if (status & Y2_IS_STAT_BMU)
+		sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+
+	if (sky2_more_work(hw))
 		return 1;
 
 	netif_rx_complete(dev0);
 
-	status = sky2_read32(hw, B0_Y2_SP_LISR);
+	sky2_read32(hw, B0_Y2_SP_LISR);
 	return 0;
 }
 
@@ -3078,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
 	sky2->duplex = -1;
 	sky2->speed = -1;
 	sky2->advertising = sky2_supported_modes(hw);
-
-	/* Receive checksum disabled for Yukon XL
-	 * because of observed problems with incorrect
-	 * values when multiple packets are received in one interrupt
-	 */
-	sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+	sky2->rx_csum = 1;
 
 	spin_lock_init(&sky2->phy_lock);
 	sky2->tx_pending = TX_DEF_PENDING;
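The sky2_up hunk replaces the blanket -EBUSY on dual-port bring-up with a targeted workaround: on PCI-X boards, status entries can arrive out of order because of split transactions, so when the second port starts, split transactions are capped and receive checksum offload is turned off on both ports. The core of the workaround as a standalone sketch (identifiers as in the hunk):

int cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX);
if (cap && otherdev && netif_running(otherdev)) {
	struct sky2_port *osky2 = netdev_priv(otherdev);
	u16 cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);

	cmd &= ~PCI_X_CMD_MAX_SPLIT;	/* limit outstanding split transactions */
	sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
	sky2->rx_csum = 0;		/* fall back to software checksums... */
	osky2->rx_csum = 0;		/* ...on both ports */
}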
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 8012994c9b93..8a0bc5525f0a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -214,6 +214,8 @@ enum csr_regs {
 enum {
 	Y2_VMAIN_AVAIL	= 1<<17,/* VMAIN available (YUKON-2 only) */
 	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_HW_WOL_ON	= 1<<15,/* HW WOL On  (Yukon-EC Ultra A1 only) */
+	Y2_HW_WOL_OFF	= 1<<14,/* HW WOL On  (Yukon-EC Ultra A1 only) */
 	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
 	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
 	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable (YUKON-2 only) */
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index ba05dedf29d3..136a70c4d5e4 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -850,7 +850,7 @@ static void init_rxtx_rings(struct net_device *dev)
 			break;
 		skb->dev = dev;			/* Mark as being used by this device. */
 		np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
-					skb->len,PCI_DMA_FROMDEVICE);
+					np->rx_buf_sz,PCI_DMA_FROMDEVICE);
 
 		np->rx_ring[i].buffer1 = np->rx_addr[i];
 		np->rx_ring[i].status = DescOwn;
@@ -1316,7 +1316,7 @@ static int netdev_rx(struct net_device *dev)
 			skb->dev = dev;		/* Mark as being used by this device. */
 			np->rx_addr[entry] = pci_map_single(np->pci_dev,
 							skb->data,
-							skb->len, PCI_DMA_FROMDEVICE);
+							np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
 		}
 		wmb();
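Both winbond-840 hunks fix the same DMA-mapping bug: a freshly allocated receive skb has skb->len == 0 (nothing has been skb_put into it yet), so mapping skb->len bytes covers none of the memory the NIC is about to write. The mapping must span the full buffer the hardware may DMA into, which is np->rx_buf_sz. The corrected call, as in init_rxtx_rings:

/* map what the device can write, not what the CPU has filled so far */
np->rx_addr[i] = pci_map_single(np->pci_dev, skb->data,
				np->rx_buf_sz, PCI_DMA_FROMDEVICE);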
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index a6dc53b4250d..fdc21037f6dc 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -491,8 +491,6 @@ struct rhine_private {
 	u8 tx_thresh, rx_thresh;
 
 	struct mii_if_info mii_if;
-	struct work_struct tx_timeout_task;
-	struct work_struct check_media_task;
 	void __iomem *base;
 };
 
@@ -500,8 +498,6 @@
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int rhine_open(struct net_device *dev);
 static void rhine_tx_timeout(struct net_device *dev);
-static void rhine_tx_timeout_task(struct net_device *dev);
-static void rhine_check_media_task(struct net_device *dev);
 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static void rhine_tx(struct net_device *dev);
@@ -856,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
-	INIT_WORK(&rp->tx_timeout_task,
-		  (void (*)(void *))rhine_tx_timeout_task, dev);
-
-	INIT_WORK(&rp->check_media_task,
-		  (void (*)(void *))rhine_check_media_task, dev);
-
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1108,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii)
 		       netif_carrier_ok(mii->dev));
 }
 
-static void rhine_check_media_task(struct net_device *dev)
-{
-	rhine_check_media(dev, 0);
-}
-
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1166,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
 	if (quirks & rqRhineI) {
 		iowrite8(0x01, ioaddr + MIIRegAddr);	// MII_BMSR
 
-		/* Do not call from ISR! */
-		msleep(1);
+		/* Can be called from ISR. Evil. */
+		mdelay(1);
 
 		/* 0x80 must be set immediately before turning it off */
 		iowrite8(0x80, ioaddr + MIICmd);
@@ -1257,16 +1242,6 @@ static int rhine_open(struct net_device *dev)
 static void rhine_tx_timeout(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
-
-	/*
-	 * Move bulk of work outside of interrupt context
-	 */
-	schedule_work(&rp->tx_timeout_task);
-}
-
-static void rhine_tx_timeout_task(struct net_device *dev)
-{
-	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 
 	printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
@@ -1677,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
 	spin_lock(&rp->lock);
 
 	if (intr_status & IntrLinkChange)
-		schedule_work(&rp->check_media_task);
+		rhine_check_media(dev, 0);
 	if (intr_status & IntrStatsMax) {
 		rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
 		rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
@@ -1927,9 +1902,6 @@ static int rhine_close(struct net_device *dev)
 	spin_unlock_irq(&rp->lock);
 
 	free_irq(rp->pdev->irq, dev);
-
-	flush_scheduled_work();
-
 	free_rbufs(dev);
 	free_tbufs(dev);
 	free_ring(dev);
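The via-rhine revert pulls tx-timeout and link-change handling back inline and deletes the two work_structs. For contrast, a sketch of the deferral pattern being removed (this tree uses the old three-argument INIT_WORK workqueue API, which bound handler and argument at init time):

/* registered once during probe:
 *   INIT_WORK(&rp->tx_timeout_task,
 *             (void (*)(void *))rhine_tx_timeout_task, dev);
 */
static void rhine_tx_timeout(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	/* defer the heavy chip reset out of interrupt context */
	schedule_work(&rp->tx_timeout_task);
}

With the revert, rhine_tx_timeout performs the reset directly and rhine_error calls rhine_check_media(dev, 0) straight from the interrupt handler, which is why rhine_disable_linkmon goes back to mdelay(1) and its comment changes to "Can be called from ISR. Evil."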
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index 8d0f61816c19..2adf02ac400e 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -3271,6 +3271,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
 	bcm43xx_sysfs_register(bcm);
 	//FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though...
 
+	/*FIXME: This should be handled by softmac instead. */
+	schedule_work(&bcm->softmac->associnfo.work);
+
 	assert(err == 0);
 out:
 	return err;
@@ -3931,9 +3934,6 @@ static int bcm43xx_resume(struct pci_dev *pdev)
 
 	netif_device_attach(net_dev);
 
-	/*FIXME: This should be handled by softmac instead. */
-	schedule_work(&bcm->softmac->associnfo.work);
-
 	dprintk(KERN_INFO PFX "Device resumed.\n");
 
 	return 0;
