Diffstat (limited to 'drivers/net/forcedeth.c')
 drivers/net/forcedeth.c | 473 +-
 1 file changed, 308 insertions(+), 165 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7627a75f4f7c..feb5b223cd60 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -105,6 +105,8 @@
  * 0.50: 20 Jan 2006: Add 8021pq tagging support.
  * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  * 0.52: 20 Jan 2006: Add MSI/MSIX support.
+ * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
+ * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -116,7 +118,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.52"
+#define FORCEDETH_VERSION		"0.54"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -160,6 +162,7 @@
 #define DEV_HAS_VLAN		0x0020	/* device supports vlan tagging and striping */
 #define DEV_HAS_MSI		0x0040	/* device supports MSI */
 #define DEV_HAS_MSI_X		0x0080	/* device supports MSI-X */
+#define DEV_HAS_POWER_CNTRL	0x0100	/* device supports power savings */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -203,6 +206,8 @@ enum {
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
 
+	NvRegMacReset = 0x3c,
+#define NVREG_MAC_RESET_ASSERT	0x0F3
 	NvRegTransmitterControl = 0x084,
 #define NVREG_XMITCTL_START	0x01
 	NvRegTransmitterStatus = 0x088,
@@ -326,6 +331,10 @@ enum {
 	NvRegMSIXMap0 = 0x3e0,
 	NvRegMSIXMap1 = 0x3e4,
 	NvRegMSIXIrqStatus = 0x3f0,
+
+	NvRegPowerState2 = 0x600,
+#define NVREG_POWERSTATE2_POWERUP_MASK		0x0F11
+#define NVREG_POWERSTATE2_POWERUP_REV_A3	0x0001
 };
 
 /* Big endian: should work, but is untested */
@@ -414,7 +423,8 @@ typedef union _ring_type {
 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ		0x270
+#define NV_PCI_REGSZ_VER1	0x270
+#define NV_PCI_REGSZ_VER2	0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY	4
@@ -431,6 +441,7 @@ typedef union _ring_type {
 #define NV_MIIBUSY_DELAY	50
 #define NV_MIIPHY_DELAY	10
 #define NV_MIIPHY_DELAYMAX	10000
+#define NV_MAC_RESET_DELAY	64
 
 #define NV_WAKEUPPATTERNS	5
 #define NV_WAKEUPMASKENTRIES	4
@@ -552,6 +563,8 @@ struct fe_priv {
 	u32 desc_ver;
 	u32 txrxctl_bits;
 	u32 vlanctl_bits;
+	u32 driver_data;
+	u32 register_size;
 
 	void __iomem *base;
 
@@ -698,6 +711,72 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
 	}
 }
 
+static int using_multi_irqs(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1)))
+		return 0;
+	else
+		return 1;
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+/* In MSIX mode, a write to irqmask behaves as XOR */
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+
+	writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(mask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -919,6 +998,24 @@ static void nv_txrx_reset(struct net_device *dev)
 	pci_push(base);
 }
 
+static void nv_mac_reset(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
+	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+	writel(NVREG_MAC_RESET_ASSERT, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(0, base + NvRegMacReset);
+	pci_push(base);
+	udelay(NV_MAC_RESET_DELAY);
+	writel(NVREG_TXRXCTL_BIT2 | np->txrxctl_bits, base + NvRegTxRxControl);
+	pci_push(base);
+}
+
 /*
  * nv_get_stats: dev->get_stats function
  * Get latest stats value from the nic.
@@ -989,24 +1086,25 @@ static void nv_do_rx_refill(unsigned long data)
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 
-
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 	} else {
 		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
 	if (nv_alloc_rx(dev)) {
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		if (!np->in_shutdown)
 			mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 	}
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		enable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
 	}
@@ -1331,7 +1429,7 @@ static void nv_tx_timeout(struct net_device *dev)
 			dev->name, (unsigned long)np->ring_addr,
 			np->next_tx, np->nic_tx);
 	printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-	for (i=0;i<0x400;i+= 32) {
+	for (i=0;i<=np->register_size;i+= 32) {
 		printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 				i,
 				readl(base + i + 0), readl(base + i + 4),
@@ -1638,15 +1736,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 	 * guessed, there is probably a simpler approach.
 	 * Changing the MTU is a rare event, it shouldn't matter.
 	 */
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
-	} else {
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-	}
+	nv_disable_irq(dev);
 	spin_lock_bh(&dev->xmit_lock);
 	spin_lock(&np->lock);
 	/* stop engines */
@@ -1679,15 +1769,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			enable_irq(dev->irq);
-		} else {
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_enable_irq(dev);
 	}
 	return 0;
 }
@@ -2078,16 +2160,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock(&np->lock);
+		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
-		spin_unlock(&np->lock);
+		spin_unlock_irq(&np->lock);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2097,7 +2179,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2127,14 +2209,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2144,7 +2226,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2173,14 +2255,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			nv_linkchange(dev);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2188,7 +2270,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 					dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock(&np->lock);
+			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2198,7 +2280,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data, struct pt_regs *regs)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock(&np->lock);
+			spin_unlock_irq(&np->lock);
 			break;
 		}
 
@@ -2221,10 +2303,11 @@ static void nv_do_nic_poll(unsigned long data)
 	 * nv_nic_irq because that may decide to do otherwise
 	 */
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
+	if (!using_multi_irqs(dev)) {
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			disable_irq(dev->irq);
 		mask = np->irqmask;
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
@@ -2247,11 +2330,12 @@ static void nv_do_nic_poll(unsigned long data)
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+	if (!using_multi_irqs(dev)) {
 		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		enable_irq(dev->irq);
+		if (np->msi_flags & NV_MSI_X_ENABLED)
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
+		else
+			enable_irq(dev->irq);
 	} else {
 		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
 			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
@@ -2488,11 +2572,11 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 }
 
 #define FORCEDETH_REGS_VER	1
-#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
 
 static int nv_get_regs_len(struct net_device *dev)
 {
-	return FORCEDETH_REGS_SIZE;
+	struct fe_priv *np = netdev_priv(dev);
+	return np->register_size;
 }
 
 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
@@ -2504,7 +2588,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+	for (i = 0;i <= np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -2531,6 +2615,18 @@ static int nv_nway_reset(struct net_device *dev)
 	return ret;
 }
 
+#ifdef NETIF_F_TSO
+static int nv_set_tso(struct net_device *dev, u32 value)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if ((np->driver_data & DEV_HAS_CHECKSUM))
+		return ethtool_op_set_tso(dev, value);
+	else
+		return value ? -EOPNOTSUPP : 0;
+}
+#endif
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
@@ -2542,6 +2638,10 @@ static struct ethtool_ops ops = {
 	.get_regs = nv_get_regs,
 	.nway_reset = nv_nway_reset,
 	.get_perm_addr = ethtool_op_get_perm_addr,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = nv_set_tso
+#endif
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -2598,6 +2698,113 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
 
+static int nv_request_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_rx;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_free_tx;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					goto out_err;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				goto out_err;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
+			goto out_err;
+	}
+
+	return 0;
+out_free_tx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, dev);
+out_free_rx:
+	free_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, dev);
+out_err:
+	return 1;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2608,6 +2815,8 @@ static int nv_open(struct net_device *dev)
 	dprintk(KERN_DEBUG "nv_open: begin\n");
 
 	/* 1) erase previous misconfiguration */
+	if (np->driver_data & DEV_HAS_POWER_CNTRL)
+		nv_mac_reset(dev);
 	/* 4.1-1: stop adapter: ignored, 4.3 seems to be overkill */
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
 	writel(0, base + NvRegMulticastAddrB);
@@ -2688,86 +2897,18 @@ static int nv_open(struct net_device *dev)
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	writel(0, base + NvRegIrqMask);
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_drain;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_drain;
+	if (nv_request_irq(dev)) {
+		goto out_drain;
 	}
 
 	/* ask for interrupts */
-	writel(np->irqmask, base + NvRegIrqMask);
+	nv_enable_hw_interrupts(dev, np->irqmask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2811,7 +2952,6 @@ static int nv_close(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
-	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2829,31 +2969,13 @@ static int nv_close(struct net_device *dev)
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(np->irqmask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
+	nv_free_irq(dev);
 
 	drain_ring(dev);
 
@@ -2878,6 +3000,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	unsigned long addr;
 	u8 __iomem *base;
 	int err, i;
+	u32 powerstate;
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -2910,6 +3033,11 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	if (err < 0)
 		goto out_disable;
 
+	if (id->driver_data & (DEV_HAS_VLAN|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL))
+		np->register_size = NV_PCI_REGSZ_VER2;
+	else
+		np->register_size = NV_PCI_REGSZ_VER1;
+
 	err = -EINVAL;
 	addr = 0;
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2918,7 +3046,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 				pci_resource_len(pci_dev, i),
 				pci_resource_flags(pci_dev, i));
 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
-				pci_resource_len(pci_dev, i) >= NV_PCI_REGSZ) {
+				pci_resource_len(pci_dev, i) >= np->register_size) {
 			addr = pci_resource_start(pci_dev, i);
 			break;
 		}
@@ -2929,24 +3057,25 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		goto out_relreg;
 	}
 
+	/* copy of driver data */
+	np->driver_data = id->driver_data;
+
 	/* handle different descriptor versions */
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
+		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
 			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
 					pci_name(pci_dev));
 		} else {
-			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-					pci_name(pci_dev));
-				goto out_relreg;
-			} else {
-				dev->features |= NETIF_F_HIGHDMA;
-				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
-			}
+			dev->features |= NETIF_F_HIGHDMA;
+			printk(KERN_INFO "forcedeth: using HIGHDMA\n");
+		}
+		if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+				pci_name(pci_dev));
 		}
-		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
 		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
@@ -2986,7 +3115,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	}
 
 	err = -ENOMEM;
-	np->base = ioremap(addr, NV_PCI_REGSZ);
+	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
 		goto out_relreg;
 	dev->base_addr = (unsigned long)np->base;
@@ -3062,6 +3191,20 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 	writel(0, base + NvRegWakeUpFlags);
 	np->wolenabled = 0;
 
+	if (id->driver_data & DEV_HAS_POWER_CNTRL) {
+		u8 revision_id;
+		pci_read_config_byte(pci_dev, PCI_REVISION_ID, &revision_id);
+
+		/* take phy and nic out of low power mode */
+		powerstate = readl(base + NvRegPowerState2);
+		powerstate &= ~NVREG_POWERSTATE2_POWERUP_MASK;
+		if ((id->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
+		     id->device == PCI_DEVICE_ID_NVIDIA_NVENET_13) &&
+		    revision_id >= 0xA3)
+			powerstate |= NVREG_POWERSTATE2_POWERUP_REV_A3;
+		writel(powerstate, base + NvRegPowerState2);
+	}
+
 	if (np->desc_ver == DESC_VER_1) {
 		np->tx_flags = NV_TX_VALID;
 	} else {
@@ -3223,19 +3366,19 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP51 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 	},
 	{ /* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
 	},
 	{0,},
 };
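
For readers skimming the patch: its structural core is nv_request_irq(), which factors the interrupt setup out of nv_open() into a three-tier fallback (MSI-X with per-event rx/tx/other vectors, then a single MSI vector, then the shared legacy INTx line), with each failed tier releasing its own resources before the next is tried. Below is a minimal, self-contained sketch of that shape in ordinary userspace C; the names irq_state, try_msix, try_msi, and try_intx are hypothetical stand-ins for illustration, not forcedeth or kernel APIs.

/* Sketch only: models the fallback chain of nv_request_irq(). Each tier
 * returns 0 on success and leaves no state behind on failure, so a failed
 * probe never leaves the device half-configured. The two message-signalled
 * tiers are hard-wired to fail here so the fallback path actually runs. */
#include <stdbool.h>
#include <stdio.h>

struct irq_state {
	bool msix_enabled;	/* plays the role of NV_MSI_X_ENABLED */
	bool msi_enabled;	/* plays the role of NV_MSI_ENABLED */
};

static int try_msix(struct irq_state *s) { (void)s; return -1; }
static int try_msi(struct irq_state *s)  { (void)s; return -1; }
static int try_intx(struct irq_state *s) { (void)s; return 0; }

static int request_irqs(struct irq_state *s)
{
	if (try_msix(s) == 0) {		/* tier 1: per-event vectors */
		s->msix_enabled = true;
		return 0;
	}
	if (try_msi(s) == 0) {		/* tier 2: one MSI vector */
		s->msi_enabled = true;
		return 0;
	}
	return try_intx(s);		/* tier 3: shared INTx line */
}

int main(void)
{
	struct irq_state s = { false, false };
	printf("irq setup %s\n", request_irqs(&s) == 0 ? "ok" : "failed");
	return 0;
}

In the driver, the flags recording which tier succeeded (NV_MSI_X_ENABLED and NV_MSI_ENABLED in np->msi_flags) later steer nv_free_irq() and the nv_enable_irq()/nv_disable_irq() helpers, so teardown and interrupt masking never have to re-probe what setup already decided.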