author:    Ayaz Abdulla <aabdulla@nvidia.com>     2006-05-20 17:59:48 -0400
committer: Linus Torvalds <torvalds@g5.osdl.org>  2006-05-21 15:59:16 -0400
commit:    84b3932bf0fd8cdc8c75a5be77e1dded1e6479c6 (patch)
tree:      731b287b0635ef97c1621c49d0be555d6f351bbe
parent:    6566a3f8f3281497a81815dfe2b64eb54dafe05d (diff)
[PATCH] forcedeth: fix multi irq issues
With Manfred Spraul <manfred@colorfullife.com> and
Andrew Morton <akpm@osdl.org>
Bring back this recently-reverted patch, this time with the bug fixed.
Original changelog:
From: Ayaz Abdulla <aabdulla@nvidia.com>
This patch fixes the issues with multiple irqs.
I am resending based on feedback. I decoupled the dma mask for
consistent memory and fixed a leak with multiple irqs in the error path.
Thanks to Manfred for catching the spin lock problem.
Fix it:
From: Manfred Spraul <manfred@colorfullife.com>
Fix bug introduced by ebf34c9b6fcd22338ef764b039b3ac55ed0e297b, covered in
http://bugzilla.kernel.org/show_bug.cgi?id=6568.
Remove the second instance of the request_irq() calls: they were moved
from nv_open() into nv_request_irq().
Thanks to Alistair Strachan <alistair@devzero.co.uk> for reporting and
persisting.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
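
A note on the locking change in the handler hunks below: the per-vector MSI-X handlers (nv_nic_irq_tx, nv_nic_irq_rx, nv_nic_irq_other) switch from spin_lock() to spin_lock_irq(). With a single interrupt source, np->lock is only ever taken by the one running handler; with three vectors, a second handler can fire on the same CPU while the first still holds the lock, and the newcomer then spins forever. A minimal sketch of the pattern, with invented names — illustrative only, not part of the patch:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);	/* stands in for np->lock */

/* BUGGY: plain spin_lock() leaves local interrupts enabled. */
static irqreturn_t demo_irq_tx(int irq, void *data, struct pt_regs *regs)
{
	spin_lock(&demo_lock);
	/* If the rx vector fires on this CPU right here, its handler
	 * below spins on demo_lock and we never run again: deadlock. */
	spin_unlock(&demo_lock);
	return IRQ_HANDLED;
}

/* FIXED: spin_lock_irq() masks local interrupts around the section. */
static irqreturn_t demo_irq_rx(int irq, void *data, struct pt_regs *regs)
{
	spin_lock_irq(&demo_lock);
	spin_unlock_irq(&demo_lock);
	return IRQ_HANDLED;
}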
-rw-r--r--	drivers/net/forcedeth.c | 380
1 file changed, 226 insertions(+), 154 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7e078b4cca7c..705e1229d89d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -106,6 +106,7 @@
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
  * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -117,7 +118,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.53"
+#define FORCEDETH_VERSION		"0.54"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -710,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
+static int using_multi_irqs(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+		return 0;
+	else
+		return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+
+	writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(mask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -1019,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 	}
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		enable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1668,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
+	nv_disable_irq(dev);
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1709,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			enable_irq(dev->irq);
-		} else {
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_enable_irq(dev);
 	}
 	return 0;
 }
@@ -2108,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2127,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2157,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2174,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2203,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2218,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 					dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2228,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2251,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2277,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+	if (!using_multi_irqs(dev)) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		enable_irq(dev->irq);
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2628,6 +2682,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
+static int nv_request_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_rx;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_tx;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_err;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_err;
+	}
+
+	return 0;
+out_free_tx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+	return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2720,86 +2881,18 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	writel(0, base + NvRegIrqMask);
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_drain;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_drain;
+	if (nv_request_irq(dev)) {
+		goto out_drain;
 	}
 
 	/* ask for interrupts */
-	writel(np->irqmask, base + NvRegIrqMask);
+	nv_enable_hw_interrupts(dev, np->irqmask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2843,7 +2936,6 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
-	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2861,31 +2953,13 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(np->irqmask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
+	nv_free_irq(dev);
 
 	drain_ring(dev);
 
@@ -2974,20 +3048,18 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-					pci_name(pci_dev));
-				goto out_relreg;
-			} else {
-				dev->features |= NETIF_F_HIGHDMA;
-				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-			}
+			dev->features |= NETIF_F_HIGHDMA;
+			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+		}
+		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+					pci_name(pci_dev));
 		}
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
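
One subtlety in the helpers added above: as the comment carried into the code says, a write to NvRegIrqMask behaves as XOR when MSI-X is enabled, so nv_disable_hw_interrupts() writes the same mask that nv_enable_hw_interrupts() wrote in order to toggle those bits back off (the pre-patch nv_close() did the same thing inline, writing np->irqmask rather than 0 in the MSI-X case). A small standalone sketch of that toggle semantics — illustrative only, not driver code:

#include <stdint.h>
#include <stdio.h>

static uint32_t irqmask_reg;		/* stands in for NvRegIrqMask */

static void write_irqmask(uint32_t val)
{
	irqmask_reg ^= val;		/* MSI-X mode: a write toggles bits */
}

int main(void)
{
	write_irqmask(0x3);		/* enable two irq sources */
	printf("after enable:  0x%x\n", irqmask_reg);	/* prints 0x3 */
	write_irqmask(0x3);		/* writing the same value disables them */
	printf("after disable: 0x%x\n", irqmask_reg);	/* prints 0x0 */
	return 0;
}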
